Dataset schema (column — type, observed range):
path — string, length 13 to 17
screenshot_names — sequence, length 1 to 873
code — string, length 0 to 40.4k
cell_type — string class, 1 distinct value ('code')
105197097/cell_20
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
train_df.values
train_df.values[0]
train_df.values[:, :1]
train_df.values[:, 1:]
X = train_df.values[:, 1:]   # pixel columns
y = train_df.values[:, :1]   # label column
X.shape
X_scale = X / 255.0          # scale pixel values to [0, 1]
dim = int(np.sqrt(X_scale.shape[1]))  # images are dim x dim
dim
N = X_scale.shape[0]         # number of training images
N
code
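With dim computed as 28 here (784 pixel columns), a quick way to sanity-check the flattening is to draw one row back as an image; a minimal sketch, assuming matplotlib is available:

import matplotlib.pyplot as plt
plt.imshow(X_scale[0].reshape(dim, dim), cmap='gray')  # first training digit
plt.title('label: {}'.format(y[0, 0]))
plt.show()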
105197097/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.info()
code
105197097/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
train_df.values
train_df.values[0]
train_df.values[:, :1]
code
105197097/cell_19
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
train_df.values
train_df.values[0]
train_df.values[:, :1]
train_df.values[:, 1:]
X = train_df.values[:, 1:]
y = train_df.values[:, :1]
X.shape
X_scale = X / 255.0
dim = int(np.sqrt(X_scale.shape[1]))
dim
code
105197097/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105197097/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
code
105197097/cell_18
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
train_df.values
train_df.values[0]
train_df.values[:, :1]
train_df.values[:, 1:]
X = train_df.values[:, 1:]
y = train_df.values[:, :1]
X.shape
X_scale = X / 255.0
print(X_scale.shape[1])
print(np.sqrt(X_scale.shape[1]))
print(int(np.sqrt(X_scale.shape[1])))
code
105197097/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
plt.figure(figsize=(8, 7))
sns.countplot(x='label', data=train_df)
plt.title('Label distribution')
plt.show()
code
105197097/cell_15
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
train_df.values
train_df.values[0]
train_df.values[:, :1]
train_df.values[:, 1:]
X = train_df.values[:, 1:]
y = train_df.values[:, :1]
y.shape
code
105197097/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
train_df.values
train_df.values[0]
train_df.values[:, :1]
train_df.values[:, 1:]
X = train_df.values[:, 1:]
y = train_df.values[:, :1]
X.shape
X_scale = X / 255.0
print('Max value: ', X_scale.max())
print('Min value: ', X_scale.min())  # original printed 'Max value: ' for both
code
105197097/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
train_df.values
train_df.values[0]
train_df.values[:, :1]
train_df.values[:, 1:]
X = train_df.values[:, 1:]
y = train_df.values[:, :1]
X.shape
code
105197097/cell_22
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
train_df.values
train_df.values[0]
train_df.values[:, :1]
train_df.values[:, 1:]
X = train_df.values[:, 1:]
y = train_df.values[:, :1]
X.shape
X_scale = X / 255.0
dim = int(np.sqrt(X_scale.shape[1]))
dim
N = X_scale.shape[0]
N
X_scale = X_scale.reshape((N, dim, dim, 1))  # (N, 28, 28, 1): image tensor for a CNN
X_scale
code
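A natural next step before fitting a model on the reshaped (N, 28, 28, 1) tensor is a held-out validation split; a minimal sketch assuming scikit-learn is available (the split ratio and seed are illustrative, not from the original notebook):

from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(
    X_scale, y, test_size=0.2, random_state=42, stratify=y)  # keep class proportions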
105197097/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
train_df.values
train_df.values[0]
code
105197097/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
train_df.isna().sum().sum()
train_df.values
train_df.values[0]
train_df.values[:, :1]
train_df.values[:, 1:]
code
105197097/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_df.shape
code
2015893/cell_21
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import h5py
import numpy as np  # linear algebra

def load_dataset():
    # SIGNS dataset: 64x64 RGB images of hand signs for digits 0-5
    train_dataset = h5py.File('../input/hand-sign/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('../input/hand-sign-test/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)

train_x, train_y, test_x, test_y = load_dataset()
(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_y = train_y.reshape((1080,))
test_y = test_y.reshape((120,))
# One-hot encode the 6 classes by hand
Y_train = np.zeros([1080, 6])
count = 0
for i in train_y:
    Y_train[count, i] = 1
    count = count + 1
Y_test = np.zeros([120, 6])
count = 0
for i in test_y:
    Y_test[count, i] = 1
    count = count + 1
train_y
code
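The counting loop above builds the one-hot targets by hand; assuming the labels are integers 0-5 as load_dataset returns them, NumPy fancy indexing (or keras.utils.to_categorical) gives the same matrices in one line each:

import numpy as np
Y_train = np.eye(6)[train_y]  # same (1080, 6) one-hot matrix as the loop
Y_test = np.eye(6)[test_y]    # same (120, 6) one-hot matrix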
2015893/cell_25
[ "text_plain_output_1.png" ]
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.models import Model
import h5py
import numpy as np  # linear algebra

def load_dataset():
    train_dataset = h5py.File('../input/hand-sign/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('../input/hand-sign-test/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)

train_x, train_y, test_x, test_y = load_dataset()
(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_y = train_y.reshape((1080,))
test_y = test_y.reshape((120,))
Y_train = np.zeros([1080, 6])
count = 0
for i in train_y:
    Y_train[count, i] = 1
    count = count + 1
Y_test = np.zeros([120, 6])
count = 0
for i in test_y:
    Y_test[count, i] = 1
    count = count + 1

def plain_layer(X, n_c):
    # Conv -> BN -> ReLU -> MaxPool, no skip connection
    X_in = X
    X = Conv2D(n_c, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    return X

def identity_block(X, F):
    # Three conv/BN/ReLU stages plus a residual (identity) shortcut
    X_in = X
    F1, F2, F3 = F
    X = Conv2D(F1, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F2, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F3, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Add()([X, X_in])  # skip connection
    X = Activation('relu')(X)
    return X

def Resnet(input_shape=(64, 64, 3), classes=6):
    X_in = Input(input_shape)
    X = plain_layer(X_in, 32)
    F = [16, 32, 32]  # last filter count matches the input so Add() works
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = plain_layer(X, 16)
    X = Flatten()(X)
    X = Dense(512, activation='relu')(X)
    X = Dense(128, activation='relu')(X)
    X = Dense(classes, activation='softmax')(X)
    model = Model(inputs=X_in, outputs=X, name='Resnet')
    return model

train_x = train_x / 255
test_x = test_x / 255
my_model = Resnet()
my_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
my_model.fit(x=train_x, y=Y_train, epochs=10, batch_size=32)
my_model.evaluate(train_x, Y_train)
pred = my_model.predict(train_x)
pred
code
2015893/cell_23
[ "text_plain_output_1.png" ]
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.models import Model
import h5py
import numpy as np  # linear algebra

def load_dataset():
    train_dataset = h5py.File('../input/hand-sign/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('../input/hand-sign-test/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)

train_x, train_y, test_x, test_y = load_dataset()
(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_y = train_y.reshape((1080,))
test_y = test_y.reshape((120,))
Y_train = np.zeros([1080, 6])
count = 0
for i in train_y:
    Y_train[count, i] = 1
    count = count + 1
Y_test = np.zeros([120, 6])
count = 0
for i in test_y:
    Y_test[count, i] = 1
    count = count + 1

def plain_layer(X, n_c):
    X_in = X
    X = Conv2D(n_c, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    return X

def identity_block(X, F):
    X_in = X
    F1, F2, F3 = F
    X = Conv2D(F1, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F2, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F3, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Add()([X, X_in])
    X = Activation('relu')(X)
    return X

def Resnet(input_shape=(64, 64, 3), classes=6):
    X_in = Input(input_shape)
    X = plain_layer(X_in, 32)
    F = [16, 32, 32]
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = plain_layer(X, 16)
    X = Flatten()(X)
    X = Dense(512, activation='relu')(X)
    X = Dense(128, activation='relu')(X)
    X = Dense(classes, activation='softmax')(X)
    model = Model(inputs=X_in, outputs=X, name='Resnet')
    return model

train_x = train_x / 255
test_x = test_x / 255
my_model = Resnet()
my_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
my_model.fit(x=train_x, y=Y_train, epochs=10, batch_size=32)
my_model.evaluate(train_x, Y_train)
pred = my_model.predict(train_x)
my_model.evaluate(test_x, Y_test)
code
2015893/cell_20
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.models import Model
import h5py
import numpy as np  # linear algebra

def load_dataset():
    train_dataset = h5py.File('../input/hand-sign/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('../input/hand-sign-test/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)

train_x, train_y, test_x, test_y = load_dataset()
(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_y = train_y.reshape((1080,))
test_y = test_y.reshape((120,))
Y_train = np.zeros([1080, 6])
count = 0
for i in train_y:
    Y_train[count, i] = 1
    count = count + 1
Y_test = np.zeros([120, 6])
count = 0
for i in test_y:
    Y_test[count, i] = 1
    count = count + 1

def plain_layer(X, n_c):
    X_in = X
    X = Conv2D(n_c, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    return X

def identity_block(X, F):
    X_in = X
    F1, F2, F3 = F
    X = Conv2D(F1, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F2, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F3, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Add()([X, X_in])
    X = Activation('relu')(X)
    return X

def Resnet(input_shape=(64, 64, 3), classes=6):
    X_in = Input(input_shape)
    X = plain_layer(X_in, 32)
    F = [16, 32, 32]
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = plain_layer(X, 16)
    X = Flatten()(X)
    X = Dense(512, activation='relu')(X)
    X = Dense(128, activation='relu')(X)
    X = Dense(classes, activation='softmax')(X)
    model = Model(inputs=X_in, outputs=X, name='Resnet')
    return model

train_x = train_x / 255
test_x = test_x / 255
my_model = Resnet()
my_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
my_model.fit(x=train_x, y=Y_train, epochs=10, batch_size=32)
my_model.evaluate(train_x, Y_train)
pred = my_model.predict(train_x)
predlabel = np.argmax(pred, axis=1)
np.sum(predlabel == train_y) / 1080
code
2015893/cell_11
[ "text_plain_output_1.png" ]
import keras.backend as K
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.initializers import glorot_uniform
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model

K.set_image_data_format('channels_last')
code
2015893/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import h5py
import numpy as np  # linear algebra

def load_dataset():
    train_dataset = h5py.File('../input/hand-sign/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('../input/hand-sign-test/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)

train_x, train_y, test_x, test_y = load_dataset()
(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
code
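Before one-hot encoding, a quick class-balance check is cheap; a minimal sketch (train_y is still shape (1, 1080) at this point, which np.unique flattens automatically):

import numpy as np
labels, counts = np.unique(train_y, return_counts=True)
print(dict(zip(labels.tolist(), counts.tolist())))  # 1080 examples over 6 classes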
2015893/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.models import Model
import h5py
import numpy as np  # linear algebra

def load_dataset():
    train_dataset = h5py.File('../input/hand-sign/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('../input/hand-sign-test/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)

train_x, train_y, test_x, test_y = load_dataset()
(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_y = train_y.reshape((1080,))
test_y = test_y.reshape((120,))
Y_train = np.zeros([1080, 6])
count = 0
for i in train_y:
    Y_train[count, i] = 1
    count = count + 1
Y_test = np.zeros([120, 6])
count = 0
for i in test_y:
    Y_test[count, i] = 1
    count = count + 1

def plain_layer(X, n_c):
    X_in = X
    X = Conv2D(n_c, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    return X

def identity_block(X, F):
    X_in = X
    F1, F2, F3 = F
    X = Conv2D(F1, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F2, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F3, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Add()([X, X_in])
    X = Activation('relu')(X)
    return X

def Resnet(input_shape=(64, 64, 3), classes=6):
    X_in = Input(input_shape)
    X = plain_layer(X_in, 32)
    F = [16, 32, 32]
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = plain_layer(X, 16)
    X = Flatten()(X)
    X = Dense(512, activation='relu')(X)
    X = Dense(128, activation='relu')(X)
    X = Dense(classes, activation='softmax')(X)
    model = Model(inputs=X_in, outputs=X, name='Resnet')
    return model

train_x = train_x / 255
test_x = test_x / 255
my_model = Resnet()
my_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
my_model.fit(x=train_x, y=Y_train, epochs=10, batch_size=32)
my_model.evaluate(train_x, Y_train)
code
2015893/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import h5py
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
2015893/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.models import Model
import h5py
import numpy as np  # linear algebra

def load_dataset():
    train_dataset = h5py.File('../input/hand-sign/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('../input/hand-sign-test/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)

train_x, train_y, test_x, test_y = load_dataset()
(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_y = train_y.reshape((1080,))
test_y = test_y.reshape((120,))
Y_train = np.zeros([1080, 6])
count = 0
for i in train_y:
    Y_train[count, i] = 1
    count = count + 1
Y_test = np.zeros([120, 6])
count = 0
for i in test_y:
    Y_test[count, i] = 1
    count = count + 1

def plain_layer(X, n_c):
    X_in = X
    X = Conv2D(n_c, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    return X

def identity_block(X, F):
    X_in = X
    F1, F2, F3 = F
    X = Conv2D(F1, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F2, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F3, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Add()([X, X_in])
    X = Activation('relu')(X)
    return X

def Resnet(input_shape=(64, 64, 3), classes=6):
    X_in = Input(input_shape)
    X = plain_layer(X_in, 32)
    F = [16, 32, 32]
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = plain_layer(X, 16)
    X = Flatten()(X)
    X = Dense(512, activation='relu')(X)
    X = Dense(128, activation='relu')(X)
    X = Dense(classes, activation='softmax')(X)
    model = Model(inputs=X_in, outputs=X, name='Resnet')
    return model

train_x = train_x / 255
test_x = test_x / 255
my_model = Resnet()
my_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
my_model.fit(x=train_x, y=Y_train, epochs=10, batch_size=32)
code
2015893/cell_22
[ "text_plain_output_1.png" ]
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.models import Model
import h5py
import numpy as np  # linear algebra

def load_dataset():
    train_dataset = h5py.File('../input/hand-sign/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('../input/hand-sign-test/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)

train_x, train_y, test_x, test_y = load_dataset()
(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_y = train_y.reshape((1080,))
test_y = test_y.reshape((120,))
Y_train = np.zeros([1080, 6])
count = 0
for i in train_y:
    Y_train[count, i] = 1
    count = count + 1
Y_test = np.zeros([120, 6])
count = 0
for i in test_y:
    Y_test[count, i] = 1
    count = count + 1

def plain_layer(X, n_c):
    X_in = X
    X = Conv2D(n_c, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    return X

def identity_block(X, F):
    X_in = X
    F1, F2, F3 = F
    X = Conv2D(F1, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F2, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F3, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Add()([X, X_in])
    X = Activation('relu')(X)
    return X

def Resnet(input_shape=(64, 64, 3), classes=6):
    X_in = Input(input_shape)
    X = plain_layer(X_in, 32)
    F = [16, 32, 32]
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = plain_layer(X, 16)
    X = Flatten()(X)
    X = Dense(512, activation='relu')(X)
    X = Dense(128, activation='relu')(X)
    X = Dense(classes, activation='softmax')(X)
    model = Model(inputs=X_in, outputs=X, name='Resnet')
    return model

train_x = train_x / 255
test_x = test_x / 255
my_model = Resnet()
my_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
my_model.fit(x=train_x, y=Y_train, epochs=10, batch_size=32)
my_model.evaluate(train_x, Y_train)
pred = my_model.predict(train_x)
predlabel = np.argmax(pred, axis=1)
np.sum(predlabel == train_y) / 1080
np.sum(predlabel)
code
2015893/cell_10
[ "text_plain_output_1.png" ]
import h5py
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

def load_dataset():
    train_dataset = h5py.File('../input/hand-sign/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('../input/hand-sign-test/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)

train_x, train_y, test_x, test_y = load_dataset()
(train_x.shape, train_y.shape, test_x.shape, test_y.shape)

plt.subplots(2, 2)
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.1, right=0.95, hspace=0.45, wspace=0.45)
plt.subplot(2, 2, 1)
plt.title('train_x[5] label : 4')
plt.imshow(train_x[5])
plt.subplot(2, 2, 2)
plt.title('train_x[10] label : 2')
plt.imshow(train_x[10])
plt.subplot(2, 2, 3)
plt.title('test_x[5] label : 0')
plt.imshow(test_x[5])
plt.subplot(2, 2, 4)
plt.title('test_x[10] label : 5')
plt.imshow(test_x[10])
code
2015893/cell_27
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.models import Model
import h5py
import numpy as np  # linear algebra

def load_dataset():
    train_dataset = h5py.File('../input/hand-sign/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('../input/hand-sign-test/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)

train_x, train_y, test_x, test_y = load_dataset()
(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_y = train_y.reshape((1080,))
test_y = test_y.reshape((120,))
Y_train = np.zeros([1080, 6])
count = 0
for i in train_y:
    Y_train[count, i] = 1
    count = count + 1
Y_test = np.zeros([120, 6])
count = 0
for i in test_y:
    Y_test[count, i] = 1
    count = count + 1

def plain_layer(X, n_c):
    X_in = X
    X = Conv2D(n_c, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    return X

def identity_block(X, F):
    X_in = X
    F1, F2, F3 = F
    X = Conv2D(F1, kernel_size=(3, 3), padding='same')(X_in)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F2, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(F3, kernel_size=(3, 3), padding='same')(X)
    X = BatchNormalization()(X)
    X = Add()([X, X_in])
    X = Activation('relu')(X)
    return X

def Resnet(input_shape=(64, 64, 3), classes=6):
    X_in = Input(input_shape)
    X = plain_layer(X_in, 32)
    F = [16, 32, 32]
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = identity_block(X, F)
    X = MaxPooling2D(pool_size=(2, 2))(X)
    X = plain_layer(X, 16)
    X = Flatten()(X)
    X = Dense(512, activation='relu')(X)
    X = Dense(128, activation='relu')(X)
    X = Dense(classes, activation='softmax')(X)
    model = Model(inputs=X_in, outputs=X, name='Resnet')
    return model

train_x = train_x / 255
test_x = test_x / 255
my_model = Resnet()
my_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
my_model.fit(x=train_x, y=Y_train, epochs=10, batch_size=32)
my_model.evaluate(train_x, Y_train)
pred = my_model.predict(train_x)
predlabel = np.argmax(pred, axis=1)
np.sum(predlabel == train_y) / 1080
np.sum(predlabel)
# The original cell evaluated the test set with `cnn(Variable(torch.Tensor(...)))`,
# an undefined PyTorch model apparently pasted from another notebook; the trained
# Keras model is used here instead so the cell is runnable.
pred = my_model.predict(test_x)
pred_label = np.argmax(pred, axis=1)
pred_label.shape
target = np.squeeze(test_y)  # test_y is already shape (120,); squeeze kept from the original
np.sum(pred_label == target) / 120
code
106196793/cell_9
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

pokemon = pd.read_csv('../input/pokemon/Pokemon.csv')
pokemon['Type'] = np.where(pokemon['Type 2'].notnull(), pokemon['Type 1'] + '/' + pokemon['Type 2'], pokemon['Type 1'])
pokemon_new = pokemon.drop(['Type 1', 'Type 2'], axis=1)
print(pokemon['Type'].unique())
print(pokemon_new.info())
print(pokemon_new.describe())
code
106196793/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

pokemon = pd.read_csv('../input/pokemon/Pokemon.csv')
print(pokemon.info())
print(pokemon.describe())
code
129007439/cell_21
[ "image_output_1.png" ]
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
df2 = df['Gender']
df2
x = df1['Annual Income (k$)']
y = df1['Spending Score (1-100)']
kmeans = KMeans(n_clusters=4, max_iter=1000)
kmeans.fit(df1)
kmeans.cluster_centers_
clusters = kmeans.predict(df1)
clusters[50:60]
x = df1['Annual Income (k$)']
y = df1['Spending Score (1-100)']
plt.figure(figsize=(20, 9))
x = df1['Annual Income (k$)']
y = df1['Spending Score (1-100)']
plt.subplot(321)
plt.title('K-means Predictions')
sns.scatterplot(data=df, x=x, y=y, hue=clusters)
plt.subplot(322)
plt.title('Actual Clusters')
sns.scatterplot(data=df, x=x, y=y, hue=df2)
x = df1['Age']
y = df1['Spending Score (1-100)']
plt.subplot(323)
plt.title('K-means Predictions')
sns.scatterplot(data=df, x=x, y=y, hue=clusters)
plt.subplot(324)
plt.title('Actual Clusters')
sns.scatterplot(data=df, x=x, y=y, hue=df2)
x = df1['Age']
y = df1['Annual Income (k$)']
plt.subplot(325)
plt.title('K-means Predictions')
sns.scatterplot(data=df, x=x, y=y, hue=clusters)
plt.subplot(326)
plt.title('Actual Clusters')
sns.scatterplot(data=df, x=x, y=y, hue=df2)
plt.tight_layout()
code
129007439/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
code
129007439/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
code
129007439/cell_23
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
kmeans = KMeans(n_clusters=4, max_iter=1000)
kmeans.fit(df1)
kmeans.cluster_centers_
clusters = kmeans.predict(df1)
clusters[50:60]
kmeans.inertia_
sse = {}
for k in range(1, 10):
    kmeans = KMeans(n_clusters=k, max_iter=1000).fit(df1)
    sse[k] = kmeans.inertia_
sse
code
129007439/cell_20
[ "text_html_output_2.png" ]
from sklearn.cluster import KMeans
import pandas as pd
import plotly.express as px

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
fig = px.scatter(df, x='Annual Income (k$)', y='Spending Score (1-100)', color='Gender')
fig.show()
kmeans = KMeans(n_clusters=4, max_iter=1000)
kmeans.fit(df1)
kmeans.cluster_centers_
clusters = kmeans.predict(df1)
clusters[50:60]
fig = px.scatter(df1, x='Annual Income (k$)', y='Spending Score (1-100)', color=clusters)
fig.show()
code
129007439/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
plt.figure(figsize=(12, 6))
sns.scatterplot(data=df, x=df['Annual Income (k$)'], y=df['Spending Score (1-100)'], hue=df['Gender'])
code
129007439/cell_29
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
kmeans = KMeans(n_clusters=4, max_iter=1000)
kmeans.fit(df1)
kmeans.cluster_centers_
clusters = kmeans.predict(df1)
clusters[50:60]
kmeans.inertia_
sse = {}
for k in range(1, 10):
    kmeans = KMeans(n_clusters=k, max_iter=1000).fit(df1)
    sse[k] = kmeans.inertia_
sse
for n_cluster in range(2, 11):
    kmeans = KMeans(n_clusters=n_cluster).fit(df1)
    label = kmeans.labels_
    sil_coeff = silhouette_score(df1, label, metric='euclidean')
    print('For n_clusters={}, The Silhouette Coefficient is {}'.format(n_cluster, sil_coeff))
code
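To pick k programmatically rather than reading the printed coefficients, one could keep the scores in a dict and take the argmax; a minimal sketch under the same setup (variable names are illustrative):

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
scores = {}
for k in range(2, 11):
    km = KMeans(n_clusters=k).fit(df1)
    scores[k] = silhouette_score(df1, km.labels_, metric='euclidean')
best_k = max(scores, key=scores.get)  # k with the highest silhouette coefficient
print('best k by silhouette:', best_k)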
129007439/cell_26
[ "image_output_1.png" ]
from sklearn.cluster import KMeans
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
model = KMeans(n_clusters=4, max_iter=1000)
model.fit(df1)
code
129007439/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
code
129007439/cell_19
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
x = df1['Annual Income (k$)']
y = df1['Spending Score (1-100)']
kmeans = KMeans(n_clusters=4, max_iter=1000)
kmeans.fit(df1)
kmeans.cluster_centers_
clusters = kmeans.predict(df1)
clusters[50:60]
x = df1['Annual Income (k$)']
y = df1['Spending Score (1-100)']
plt.scatter(x, y, c=clusters)
code
129007439/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

warnings.filterwarnings('ignore')

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129007439/cell_18
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
kmeans = KMeans(n_clusters=4, max_iter=1000)
kmeans.fit(df1)
kmeans.cluster_centers_
clusters = kmeans.predict(df1)
clusters[50:60]
code
129007439/cell_28
[ "image_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
model = KMeans(n_clusters=4, max_iter=1000)
model.fit(df1)
model.predict([[31, 17, 40]])
score = silhouette_score(df1, model.labels_)
score
code
129007439/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df_copy
code
129007439/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
x = df1['Annual Income (k$)']
y = df1['Spending Score (1-100)']
plt.scatter(x, y)
code
129007439/cell_16
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
kmeans = KMeans(n_clusters=4, max_iter=1000)
kmeans.fit(df1)
code
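One caveat with the fit above: Age, Annual Income and Spending Score sit on different scales, and k-means distances are dominated by the wider-ranged columns. A common variant (not what this notebook does) is to standardize before clustering; a minimal sketch:

from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
X_std = StandardScaler().fit_transform(df1)  # zero mean, unit variance per column
kmeans_std = KMeans(n_clusters=4, max_iter=1000).fit(X_std)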
129007439/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.info()
code
129007439/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.cluster import KMeans
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
kmeans = KMeans(n_clusters=4, max_iter=1000)
kmeans.fit(df1)
kmeans.cluster_centers_
code
129007439/cell_24
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
df2 = df['Gender']
df2
x = df1['Annual Income (k$)']
y = df1['Spending Score (1-100)']
kmeans = KMeans(n_clusters=4, max_iter=1000)
kmeans.fit(df1)
kmeans.cluster_centers_
clusters = kmeans.predict(df1)
clusters[50:60]
x = df1['Annual Income (k$)']
y = df1['Spending Score (1-100)']
x = df1['Annual Income (k$)']
y = df1['Spending Score (1-100)']
x = df1['Age']
y = df1['Spending Score (1-100)']
x = df1['Age']
y = df1['Annual Income (k$)']
plt.tight_layout()
kmeans.inertia_
sse = {}
for k in range(1, 10):
    kmeans = KMeans(n_clusters=k, max_iter=1000).fit(df1)
    sse[k] = kmeans.inertia_
sse
plt.plot(list(sse.keys()), list(sse.values()))
plt.xlabel('Number of cluster')
plt.ylabel('SSE')
code
129007439/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.express as px

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
fig = px.scatter(df, x='Annual Income (k$)', y='Spending Score (1-100)', color='Gender')
fig.show()
code
129007439/cell_22
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
kmeans = KMeans(n_clusters=4, max_iter=1000)
kmeans.fit(df1)
kmeans.cluster_centers_
clusters = kmeans.predict(df1)
clusters[50:60]
kmeans.inertia_
code
129007439/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
df2 = df['Gender']
df2
code
129007439/cell_27
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df_copy = df.copy()
df1 = df.drop(['CustomerID', 'Gender'], axis=1)
df1
model = KMeans(n_clusters=4, max_iter=1000)
model.fit(df1)
model.predict([[31, 17, 40]])
code
129007439/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df
df.isna().sum()
df.describe()
code
33099181/cell_4
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')

# Integer-encode the categorical columns of the training set; columns whose NaN
# means "feature absent" are first filled with a 'Not' level.
train_data['MSZoning'] = LabelEncoder().fit_transform(train_data['MSZoning'])
train_data['Street'] = LabelEncoder().fit_transform(train_data['Street'])
train_data['Alley'] = train_data['Alley'].fillna('Not')
train_data['Alley'] = LabelEncoder().fit_transform(train_data['Alley'])
train_data['LotShape'] = LabelEncoder().fit_transform(train_data['LotShape'])
train_data['LandContour'] = LabelEncoder().fit_transform(train_data['LandContour'])
train_data['Utilities'] = LabelEncoder().fit_transform(train_data['Utilities'])
train_data['LotConfig'] = LabelEncoder().fit_transform(train_data['LotConfig'])
train_data['LandSlope'] = LabelEncoder().fit_transform(train_data['LandSlope'])
train_data['Neighborhood'] = LabelEncoder().fit_transform(train_data['Neighborhood'])
train_data['Condition1'] = LabelEncoder().fit_transform(train_data['Condition1'])
train_data['Condition2'] = LabelEncoder().fit_transform(train_data['Condition2'])
train_data['BldgType'] = LabelEncoder().fit_transform(train_data['BldgType'])
train_data['HouseStyle'] = LabelEncoder().fit_transform(train_data['HouseStyle'])
train_data['RoofStyle'] = LabelEncoder().fit_transform(train_data['RoofStyle'])
train_data['RoofMatl'] = LabelEncoder().fit_transform(train_data['RoofMatl'])
train_data['Exterior1st'] = LabelEncoder().fit_transform(train_data['Exterior1st'])
train_data['Exterior2nd'] = LabelEncoder().fit_transform(train_data['Exterior2nd'])
train_data['MasVnrType'] = train_data['MasVnrType'].fillna('Not')
train_data['MasVnrType'] = LabelEncoder().fit_transform(train_data['MasVnrType'])
train_data['ExterQual'] = LabelEncoder().fit_transform(train_data['ExterQual'])
train_data['ExterCond'] = LabelEncoder().fit_transform(train_data['ExterCond'])
train_data['Foundation'] = LabelEncoder().fit_transform(train_data['Foundation'])
train_data['BsmtQual'] = train_data['BsmtQual'].fillna('Not')
train_data['BsmtQual'] = LabelEncoder().fit_transform(train_data['BsmtQual'])
train_data['BsmtCond'] = train_data['BsmtCond'].fillna('Not')
train_data['BsmtCond'] = LabelEncoder().fit_transform(train_data['BsmtCond'])
train_data['BsmtExposure'] = train_data['BsmtExposure'].fillna('Not')
train_data['BsmtExposure'] = LabelEncoder().fit_transform(train_data['BsmtExposure'])
train_data['BsmtFinType1'] = train_data['BsmtFinType1'].fillna('Not')
train_data['BsmtFinType1'] = LabelEncoder().fit_transform(train_data['BsmtFinType1'])
train_data['BsmtFinType2'] = train_data['BsmtFinType2'].fillna('Not')
train_data['BsmtFinType2'] = LabelEncoder().fit_transform(train_data['BsmtFinType2'])
train_data['Heating'] = LabelEncoder().fit_transform(train_data['Heating'])
train_data['HeatingQC'] = LabelEncoder().fit_transform(train_data['HeatingQC'])
train_data['CentralAir'] = LabelEncoder().fit_transform(train_data['CentralAir'])
train_data['Electrical'] = train_data['Electrical'].fillna('SBrkr')
train_data['Electrical'] = LabelEncoder().fit_transform(train_data['Electrical'])
train_data['KitchenQual'] = LabelEncoder().fit_transform(train_data['KitchenQual'])
train_data['Functional'] = LabelEncoder().fit_transform(train_data['Functional'])
train_data['FireplaceQu'] = train_data['FireplaceQu'].fillna('Not')
train_data['FireplaceQu'] = LabelEncoder().fit_transform(train_data['FireplaceQu'])
train_data['GarageType'] = train_data['GarageType'].fillna('Not')
train_data['GarageType'] = LabelEncoder().fit_transform(train_data['GarageType'])
train_data['GarageFinish'] = train_data['GarageFinish'].fillna('Not')
train_data['GarageFinish'] = LabelEncoder().fit_transform(train_data['GarageFinish'])
train_data['GarageQual'] = train_data['GarageQual'].fillna('Not')
train_data['GarageQual'] = LabelEncoder().fit_transform(train_data['GarageQual'])
train_data['GarageCond'] = train_data['GarageCond'].fillna('Not')
train_data['GarageCond'] = LabelEncoder().fit_transform(train_data['GarageCond'])
train_data['PavedDrive'] = LabelEncoder().fit_transform(train_data['PavedDrive'])
train_data['PoolQC'] = train_data['PoolQC'].fillna('Not')
train_data['PoolQC'] = LabelEncoder().fit_transform(train_data['PoolQC'])
train_data['Fence'] = train_data['Fence'].fillna('Not')
train_data['Fence'] = LabelEncoder().fit_transform(train_data['Fence'])
train_data['MiscFeature'] = train_data['MiscFeature'].fillna('Not')
train_data['MiscFeature'] = LabelEncoder().fit_transform(train_data['MiscFeature'])
train_data['SaleType'] = LabelEncoder().fit_transform(train_data['SaleType'])
train_data['SaleCondition'] = LabelEncoder().fit_transform(train_data['SaleCondition'])
# Numeric columns: impute missing values with the column mean.
train_data['LotFrontage'] = train_data['LotFrontage'].fillna(np.mean(train_data['LotFrontage']))
train_data['MasVnrArea'] = train_data['MasVnrArea'].fillna(np.mean(train_data['MasVnrArea']))
train_data['GarageYrBlt'] = train_data['GarageYrBlt'].fillna(np.mean(train_data['GarageYrBlt']))
# Same treatment for the test set (note: fewer columns are encoded here than for train).
test_data['Street'] = LabelEncoder().fit_transform(test_data['Street'])
test_data['Alley'] = test_data['Alley'].fillna('Not')
test_data['Alley'] = LabelEncoder().fit_transform(test_data['Alley'])
test_data['LotShape'] = LabelEncoder().fit_transform(test_data['LotShape'])
test_data['LandContour'] = LabelEncoder().fit_transform(test_data['LandContour'])
test_data['LotConfig'] = LabelEncoder().fit_transform(test_data['LotConfig'])
test_data['LandSlope'] = LabelEncoder().fit_transform(test_data['LandSlope'])
test_data['Neighborhood'] = LabelEncoder().fit_transform(test_data['Neighborhood'])
test_data['Condition1'] = LabelEncoder().fit_transform(test_data['Condition1'])
test_data['Condition2'] = LabelEncoder().fit_transform(test_data['Condition2'])
test_data['BldgType'] = LabelEncoder().fit_transform(test_data['BldgType'])
test_data['HouseStyle'] = LabelEncoder().fit_transform(test_data['HouseStyle'])
test_data['RoofStyle'] = LabelEncoder().fit_transform(test_data['RoofStyle'])
test_data['RoofMatl'] = LabelEncoder().fit_transform(test_data['RoofMatl'])
test_data['MasVnrType'] = test_data['MasVnrType'].fillna('Not')
test_data['MasVnrType'] = LabelEncoder().fit_transform(test_data['MasVnrType'])
test_data['ExterQual'] = LabelEncoder().fit_transform(test_data['ExterQual'])
test_data['ExterCond'] = LabelEncoder().fit_transform(test_data['ExterCond'])
test_data['Foundation'] = LabelEncoder().fit_transform(test_data['Foundation'])
test_data['BsmtQual'] = test_data['BsmtQual'].fillna('Not')
test_data['BsmtQual'] = LabelEncoder().fit_transform(test_data['BsmtQual'])
test_data['BsmtCond'] = test_data['BsmtCond'].fillna('Not')
test_data['BsmtCond'] = LabelEncoder().fit_transform(test_data['BsmtCond'])
test_data['BsmtExposure'] = test_data['BsmtExposure'].fillna('Not')
test_data['BsmtExposure'] = LabelEncoder().fit_transform(test_data['BsmtExposure'])
test_data['BsmtFinType1'] = test_data['BsmtFinType1'].fillna('Not')
test_data['BsmtFinType1'] = LabelEncoder().fit_transform(test_data['BsmtFinType1'])
test_data['BsmtFinType2'] = test_data['BsmtFinType2'].fillna('Not')
test_data['BsmtFinType2'] = LabelEncoder().fit_transform(test_data['BsmtFinType2'])
test_data['Heating'] = LabelEncoder().fit_transform(test_data['Heating'])
test_data['HeatingQC'] = LabelEncoder().fit_transform(test_data['HeatingQC'])
test_data['CentralAir'] = LabelEncoder().fit_transform(test_data['CentralAir'])
test_data['Electrical'] = test_data['Electrical'].fillna('SBrkr')
test_data['Electrical'] = LabelEncoder().fit_transform(test_data['Electrical'])
test_data['FireplaceQu'] = test_data['FireplaceQu'].fillna('Not')
test_data['FireplaceQu'] = LabelEncoder().fit_transform(test_data['FireplaceQu'])
test_data['GarageType'] = test_data['GarageType'].fillna('Not')
test_data['GarageType'] = LabelEncoder().fit_transform(test_data['GarageType'])
test_data['GarageFinish'] = test_data['GarageFinish'].fillna('Not')
test_data['GarageFinish'] = LabelEncoder().fit_transform(test_data['GarageFinish'])
test_data['GarageQual'] = test_data['GarageQual'].fillna('Not')
test_data['GarageQual'] = LabelEncoder().fit_transform(test_data['GarageQual'])
test_data['GarageCond'] = test_data['GarageCond'].fillna('Not')
test_data['GarageCond'] = LabelEncoder().fit_transform(test_data['GarageCond'])
test_data['PavedDrive'] = LabelEncoder().fit_transform(test_data['PavedDrive'])
test_data['PoolQC'] = test_data['PoolQC'].fillna('Not')
test_data['PoolQC'] = LabelEncoder().fit_transform(test_data['PoolQC'])
test_data['Fence'] = test_data['Fence'].fillna('Not')
test_data['Fence'] = LabelEncoder().fit_transform(test_data['Fence'])
test_data['MiscFeature'] = test_data['MiscFeature'].fillna('Not')
test_data['MiscFeature'] = LabelEncoder().fit_transform(test_data['MiscFeature'])
test_data['SaleCondition'] = LabelEncoder().fit_transform(test_data['SaleCondition'])
test_data['LotFrontage'] = test_data['LotFrontage'].fillna(np.mean(test_data['LotFrontage']))
test_data['MasVnrArea'] = test_data['MasVnrArea'].fillna(np.mean(test_data['MasVnrArea']))
test_data['GarageYrBlt'] = test_data['GarageYrBlt'].fillna(np.mean(test_data['GarageYrBlt']))
print(test_data.head(10))
code
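The long per-column chain above can be collapsed into a loop over the object-dtype columns; a minimal sketch of that refactor (the helper name is hypothetical; it keeps the original's behavior of fitting a separate encoder per column and per split, which can map the same category to different codes in train and test, and it folds special cases such as Electrical's 'SBrkr' fill into a generic 'Not' fill):

from sklearn.preprocessing import LabelEncoder

def encode_categoricals(df):
    # fill missing values with a sentinel level, then integer-encode each object column
    for col in df.select_dtypes(include='object').columns:
        df[col] = LabelEncoder().fit_transform(df[col].fillna('Not').astype(str))
    return df

train_data = encode_categoricals(train_data)
test_data = encode_categoricals(test_data)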
33099181/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
print(train_data.head())
test_data = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
print(test_data.head())
code
33099181/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33099181/cell_3
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test_data = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')

# Categorical columns with no missing values in the training set: label-encode directly.
label_cols = ['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope',
              'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle',
              'RoofMatl', 'Exterior1st', 'Exterior2nd', 'ExterQual', 'ExterCond', 'Foundation',
              'Heating', 'HeatingQC', 'CentralAir', 'KitchenQual', 'Functional', 'PavedDrive',
              'SaleType', 'SaleCondition']
for col in label_cols:
    train_data[col] = LabelEncoder().fit_transform(train_data[col])

# Categorical columns where NaN means the feature is absent: fill with 'Not' before encoding.
absent_cols = ['Alley', 'MasVnrType', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',
               'BsmtFinType2', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual',
               'GarageCond', 'PoolQC', 'Fence', 'MiscFeature']
for col in absent_cols:
    train_data[col] = train_data[col].fillna('Not')
    train_data[col] = LabelEncoder().fit_transform(train_data[col])

# Electrical has a single missing value; impute it with the modal class before encoding.
train_data['Electrical'] = train_data['Electrical'].fillna('SBrkr')
train_data['Electrical'] = LabelEncoder().fit_transform(train_data['Electrical'])

# Numeric columns: impute missing values with the column mean.
for col in ['LotFrontage', 'MasVnrArea', 'GarageYrBlt']:
    train_data[col] = train_data[col].fillna(train_data[col].mean())

print(train_data.head(10))
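# Note: only train_data is label-encoded above, so test_data still holds raw strings and
# cannot be scored by a model trained on the encoded frame. A minimal sketch of one
# consistent approach (an assumption, not something this cell does): fit each encoder on
# the union of train and test categories, then transform both frames the same way.
raw_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
raw_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
for col in ['MSZoning', 'Street', 'LotShape']:  # illustrative subset of the columns handled above
    # astype(str) also folds test-set NaNs into a 'nan' category so transform cannot fail
    le = LabelEncoder().fit(pd.concat([raw_train[col], raw_test[col]]).astype(str))
    raw_train[col] = le.transform(raw_train[col].astype(str))
    raw_test[col] = le.transform(raw_test[col].astype(str))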
code
33099181/cell_5
[ "text_plain_output_1.png" ]
code
89127515/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89127515/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
# The install has to run before pycaret is imported; the original cell had it last.
!pip install pycaret

import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from urllib.request import urlopen
from PIL import Image
from math import sin, cos, pi
import catboost as cb
from sklearn.metrics import mean_squared_error, r2_score
import shap
from pycaret.regression import *
code
333462/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# np.str was removed from NumPy, so plain str is used for the ID columns; the original
# 'otcome' dtype key was a typo for 'outcome' (and act_test has no outcome column at all).
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
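# The per-person tallies above can also be built in one call (a sketch, producing the
# same columns as hstry but indexed by people_id):
outcome_counts = pd.crosstab(act_train['people_id'], act_train['outcome'])
outcome_counts.columns = ['negative_counts', 'positive_counts']  # outcome 0 and 1, in column order
outcome_counts['profit'] = outcome_counts['positive_counts'] - outcome_counts['negative_counts']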
code
333462/cell_25
[ "application_vnd.jupyter.stderr_output_1.png" ]
# sklearn.cross_validation was removed in scikit-learn 0.20; model_selection is its successor.
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier

X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
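# cross_val_score is imported above but never used; a sketch of how it would apply here
# (an assumption: X and Y are the frames built in the earlier cells of this notebook):
scores = cross_val_score(RandomForestClassifier(n_estimators=100), X, Y, cv=5)
print(scores.mean(), scores.std())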
code
333462/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
plt.legend()
plt.show()
code
333462/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
# Four profit bands: 1 = very good (>5), 2 = good (1..5), 3 = bad (-5..0), 4 = very bad (<-5).
hstry['prof_label'] = ((hstry['profit'] < -5).astype(int) * 4
                       + hstry['profit'].isin(range(-5, 1)).astype(int) * 3
                       + hstry['profit'].isin(range(1, 6)).astype(int) * 2
                       + (hstry['profit'] > 5).astype(int) * 1)
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna(0).astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna(0).astype(np.int64)
people2['profit'] = people2['profit'].fillna(0).astype(np.int64)
xfeats = list(people2.columns)
xfeats.remove('people_id')
xfeats.remove('profit')
xfeats.remove('prof_label')
xfeats.remove('positive_counts')
xfeats.remove('negative_counts')
print(xfeats)
X, Y = (people2[xfeats], people2['prof_label'])
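# The same four profit bands can be expressed more compactly with pd.cut (a sketch; the
# bin edges are chosen to reproduce the integer-profit mapping above exactly):
hstry['prof_label_cut'] = pd.cut(hstry['profit'], bins=[-np.inf, -6, 0, 5, np.inf], labels=[4, 3, 2, 1]).astype(int)
assert (hstry['prof_label_cut'] == hstry['prof_label']).all()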
code
333462/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
goods = act_train[act_train['outcome'] == 1]
bads = act_train[act_train['outcome'] == 0]
goods['date'].groupby(goods.date.dt.date).count().plot(figsize=(10, 5), label='Good')
bads['date'].groupby(bads.date.dt.date).count().plot(figsize=(10, 5), c='r', label='Bad')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'] = ((hstry['profit'] < -5).astype(int) * 4
                       + hstry['profit'].isin(range(-5, 1)).astype(int) * 3
                       + hstry['profit'].isin(range(1, 6)).astype(int) * 2
                       + (hstry['profit'] > 5).astype(int) * 1)
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna(0).astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna(0).astype(np.int64)
people2['profit'] = people2['profit'].fillna(0).astype(np.int64)
# Ordinal-looking string columns ('group 17304', 'type 2', ...): keep the numeric suffix.
obs = ['group_1']
for i in range(1, 10):
    obs.append('char_' + str(i))
for x in obs:
    people2[x] = people2[x].fillna('type 0')
    people2[x] = people2[x].str.split(' ').str[1]
bools = []
for i in range(10, 38):
    bools.append('char_' + str(i))
for x in list(set(obs).union(set(bools))):
    people2[x] = pd.to_numeric(people2[x]).astype(int)
people2['date'] = people2['date'].astype('int64')  # nanoseconds since epoch
for x in bools:
    fig, ax = plt.subplots()  # the original also called plt.figure() first, which opened an unused empty figure
    ax.set_xticks([1.5, 2.5, 3.5, 4.5])
    ax.set_xticklabels(('Very\nGood', 'Good', 'Bad', 'Very\nBad'))
    fig.suptitle(x, fontsize=15)
    neg = people2[people2[x] == 0]
    pos = people2[people2[x] == 1]
    # normed= was removed from Matplotlib; density= is the current equivalent.
    plt.hist([pos['prof_label'], neg['prof_label']], 4, range=(1, 5), density=True, stacked=True, label=['Has Trait', 'No Trait'])
    plt.legend()
    plt.show()
code
333462/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
goods = act_train[act_train['outcome'] == 1]
bads = act_train[act_train['outcome'] == 0]
goods['date'].groupby(goods.date.dt.date).count().plot(figsize=(10, 5), label='Good')
bads['date'].groupby(bads.date.dt.date).count().plot(figsize=(10, 5), c='r', label='Bad')
plt.legend()
plt.show()
code
333462/cell_26
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split, cross_val_score  # sklearn.cross_validation was removed; model_selection replaces it
from sklearn.ensemble import RandomForestClassifier

X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# zip() returns a lazy iterator in Python 3, so it must be materialised before printing.
print(list(zip(clf.predict(X_test), y_test)))
code
333462/cell_28
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'] = ((hstry['profit'] < -5).astype(int) * 4
                       + hstry['profit'].isin(range(-5, 1)).astype(int) * 3
                       + hstry['profit'].isin(range(1, 6)).astype(int) * 2
                       + (hstry['profit'] > 5).astype(int) * 1)
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna(0).astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna(0).astype(np.int64)
people2['profit'] = people2['profit'].fillna(0).astype(np.int64)
xfeats = list(people2.columns)
xfeats.remove('people_id')
xfeats.remove('profit')
xfeats.remove('prof_label')
xfeats.remove('positive_counts')
xfeats.remove('negative_counts')
X, Y = (people2[xfeats], people2['prof_label'])
# 'pred' is assumed to have been written back by an earlier modelling cell that is not
# part of this snippet; without it, the line below raises a KeyError.
people2[['prof_label', 'pred']].sample(20)
code
333462/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
goods = act_train[act_train['outcome'] == 1]
bads = act_train[act_train['outcome'] == 0]
goods['date'].groupby(goods.date.dt.date).count().plot(figsize=(10, 5), label='Good')
bads['date'].groupby(bads.date.dt.date).count().plot(figsize=(10, 5), c='r', label='Bad')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
# 'prof_label' is created in a later cell of this notebook; the binning line is restored
# here so the histogram below does not raise a KeyError.
hstry['prof_label'] = ((hstry['profit'] < -5).astype(int) * 4
                       + hstry['profit'].isin(range(-5, 1)).astype(int) * 3
                       + hstry['profit'].isin(range(1, 6)).astype(int) * 2
                       + (hstry['profit'] > 5).astype(int) * 1)
plt.figure()
plt.hist(hstry['prof_label'], 4, range=(1, 5))
plt.show()
code
333462/cell_24
[ "image_output_11.png", "image_output_24.png", "image_output_25.png", "text_plain_output_5.png", "text_plain_output_15.png", "image_output_17.png", "text_plain_output_9.png", "image_output_14.png", "image_output_28.png", "text_plain_output_20.png", "image_output_23.png", "text_plain_output_4.png", "text_plain_output_13.png", "image_output_13.png", "image_output_5.png", "text_plain_output_14.png", "image_output_18.png", "image_output_21.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_25.png", "image_output_20.png", "text_plain_output_18.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_22.png", "text_plain_output_7.png", "image_output_8.png", "text_plain_output_16.png", "image_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "image_output_27.png", "image_output_6.png", "text_plain_output_23.png", "image_output_12.png", "text_plain_output_28.png", "image_output_22.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "text_plain_output_19.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "image_output_15.png", "image_output_9.png", "image_output_19.png", "image_output_26.png" ]
from sklearn.model_selection import train_test_split, cross_val_score  # sklearn.cross_validation was removed; model_selection replaces it
from sklearn.ensemble import RandomForestClassifier

X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
print(clf.feature_importances_)
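# The raw importance array is hard to read; a sketch pairing it with the column names
# (an assumption: X is the people2[xfeats] DataFrame built in an earlier cell):
ranked = sorted(zip(X.columns, clf.feature_importances_), key=lambda t: t[1], reverse=True)
for name, imp in ranked[:10]:
    print('%s: %.4f' % (name, imp))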
code
333462/cell_22
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import auc
from sklearn.model_selection import train_test_split, cross_val_score  # sklearn.cross_validation was removed in scikit-learn 0.20
code
333462/cell_10
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
code
333462/cell_12
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['profit'].describe()
code
128030251/cell_13
[ "text_html_output_1.png" ]
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
X = final_copy.drop('label', axis=1)
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
X.drop(list(vif_data[vif_data['VIF'] > vif_data['VIF'].mean()]['feature']), axis=1, inplace=True)
X
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
X
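# The cell above drops every column whose VIF exceeds the mean in a single pass. The more
# common variant (a sketch, not what this notebook does) removes one worst column at a
# time and recomputes, since dropping a column changes every remaining VIF:
def prune_by_vif(df, threshold=10.0):
    cols = list(df.columns)
    while len(cols) > 1:
        vifs = [variance_inflation_factor(df[cols].values, i) for i in range(len(cols))]
        worst = max(range(len(cols)), key=vifs.__getitem__)
        if vifs[worst] <= threshold:
            break
        cols.pop(worst)
    return df[cols]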
code
128030251/cell_9
[ "text_plain_output_1.png" ]
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
X = final_copy.drop('label', axis=1)
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
X.drop(list(vif_data[vif_data['VIF'] > vif_data['VIF'].mean()]['feature']), axis=1, inplace=True)
X
code
128030251/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
code
128030251/cell_34
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.neural_network import MLPClassifier
import seaborn as sns

clf = tree.DecisionTreeClassifier(max_depth=400, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
sns.set(rc={'figure.figsize': (5, 3)})
clf = MLPClassifier(hidden_layer_sizes=(50, 100, 200, 400, 800, 1600, 3200, 1600, 800, 400, 200, 100, 50), max_iter=3000, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
sns.set(rc={'figure.figsize': (5, 3)})
clf = RandomForestClassifier(max_depth=2000)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Confusion Matrix :')
sns.set(rc={'figure.figsize': (5, 3)})
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True)
code
128030251/cell_23
[ "text_html_output_1.png" ]
from sklearn import tree
from sklearn.metrics import classification_report

clf = tree.DecisionTreeClassifier(max_depth=400, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))
print(clf.score(X_test, y_test))
code
128030251/cell_33
[ "text_plain_output_1.png" ]
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.neural_network import MLPClassifier

clf = tree.DecisionTreeClassifier(max_depth=400, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = MLPClassifier(hidden_layer_sizes=(50, 100, 200, 400, 800, 1600, 3200, 1600, 800, 400, 200, 100, 50), max_iter=3000, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = RandomForestClassifier(max_depth=2000)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))
print(clf.score(X_test, y_test))
code
128030251/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
code
128030251/cell_29
[ "text_plain_output_1.png" ]
from sklearn import tree
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.neural_network import MLPClassifier
import seaborn as sns

clf = tree.DecisionTreeClassifier(max_depth=400, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
sns.set(rc={'figure.figsize': (5, 3)})
clf = MLPClassifier(hidden_layer_sizes=(50, 100, 200, 400, 800, 1600, 3200, 1600, 800, 400, 200, 100, 50), max_iter=3000, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Confusion Matrix :')
sns.set(rc={'figure.figsize': (5, 3)})
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True)
code
128030251/cell_2
[ "text_plain_output_1.png" ]
%pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
code
128030251/cell_11
[ "text_plain_output_1.png" ]
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
X = final_copy.drop('label', axis=1)
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
X.drop(list(vif_data[vif_data['VIF'] > vif_data['VIF'].mean()]['feature']), axis=1, inplace=True)
X
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
code
128030251/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import pickle

import gensim
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns

from imblearn.over_sampling import SMOTE
from sklearn import tree
from sklearn.decomposition import PCA
from sklearn.ensemble import ExtraTreesClassifier, GradientBoostingClassifier, RandomForestClassifier, StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.manifold import TSNE
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import LabelEncoder, StandardScaler
from statsmodels.stats.outliers_influence import variance_inflation_factor
from tpot import TPOTClassifier
from xgboost import XGBClassifier
code
128030251/cell_7
[ "text_plain_output_1.png" ]
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
X = final_copy.drop('label', axis=1)
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
code
128030251/cell_18
[ "text_html_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
X = final_copy.drop('label', axis=1)
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
X.drop(list(vif_data[vif_data['VIF'] > vif_data['VIF'].mean()]['feature']), axis=1, inplace=True)
X
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
sc = StandardScaler()
X = sc.fit_transform(X)
pca = PCA(n_components=182)
X_pca = pca.fit_transform(X)
explained_variance = pca.explained_variance_ratio_
explained_variance
pca = PCA(n_components=170)
X_pca = pca.fit_transform(X)
comp = []
for i in range(1, 171):
    comp.append('comp' + str(i))
comp
data_preprocessed = pd.DataFrame(data=X_pca, columns=comp)
data_preprocessed.shape
code
128030251/cell_32
[ "text_html_output_1.png" ]
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.neural_network import MLPClassifier

clf = tree.DecisionTreeClassifier(max_depth=400, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = MLPClassifier(hidden_layer_sizes=(50, 100, 200, 400, 800, 1600, 3200, 1600, 800, 400, 200, 100, 50), max_iter=3000, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = RandomForestClassifier(max_depth=2000)
clf.fit(X_train, y_train)
code
128030251/cell_28
[ "image_output_1.png" ]
from sklearn import tree
from sklearn.metrics import classification_report
from sklearn.neural_network import MLPClassifier

clf = tree.DecisionTreeClassifier(max_depth=400, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = MLPClassifier(hidden_layer_sizes=(50, 100, 200, 400, 800, 1600, 3200, 1600, 800, 400, 200, 100, 50), max_iter=3000, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))
print(clf.score(X_test, y_test))
code
128030251/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
X = final_copy.drop('label', axis=1)
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
vif_data[vif_data['VIF'] > vif_data['VIF'].mean()]
code
128030251/cell_15
[ "text_html_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from statsmodels.stats.outliers_influence import variance_inflation_factor
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
X = final_copy.drop('label', axis=1)
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
X.drop(list(vif_data[vif_data['VIF'] > vif_data['VIF'].mean()]['feature']), axis=1, inplace=True)
X
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
sc = StandardScaler()
X = sc.fit_transform(X)
pca = PCA(n_components=182)
X_pca = pca.fit_transform(X)
explained_variance = pca.explained_variance_ratio_
explained_variance
plt.rcParams['figure.figsize'] = (12, 6)
fig, ax = plt.subplots()
xi = np.arange(0, 182, step=1)
y = np.cumsum(explained_variance)
plt.ylim(0.0, 1.1)
plt.plot(xi, y, marker='o', linestyle='--', color='b')
plt.xlabel('Number of Components')
plt.xticks(np.arange(0, 182, step=5))
plt.ylabel('Cumulative variance (%)')
plt.title('The number of components needed to explain variance')
plt.axhline(y=0.98, color='r', linestyle='-')
plt.text(0.5, 0.85, '98% cut-off threshold', color='red', fontsize=16)
ax.grid(axis='x')
plt.show()
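# The 98% cut-off read off the plot can also be computed directly (a sketch using the
# explained_variance array defined above):
n_components_98 = int(np.argmax(np.cumsum(explained_variance) >= 0.98)) + 1
print(n_components_98)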
code
128030251/cell_38
[ "text_html_output_1.png" ]
from sklearn import tree
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.neural_network import MLPClassifier

clf = tree.DecisionTreeClassifier(max_depth=400, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = MLPClassifier(hidden_layer_sizes=(50, 100, 200, 400, 800, 1600, 3200, 1600, 800, 400, 200, 100, 50), max_iter=3000, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = RandomForestClassifier(max_depth=2000)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = GradientBoostingClassifier(n_estimators=300, max_depth=300, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))
print(clf.score(X_test, y_test))
code
128030251/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
code
128030251/cell_17
[ "text_html_output_1.png" ]
comp = []
for i in range(1, 171):
    comp.append('comp' + str(i))
comp
code
128030251/cell_24
[ "text_html_output_1.png" ]
from sklearn import tree
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns

clf = tree.DecisionTreeClassifier(max_depth=400, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Confusion Matrix :')
sns.set(rc={'figure.figsize': (5, 3)})
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True)
code
128030251/cell_14
[ "text_html_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
X = final_copy.drop('label', axis=1)
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
X.drop(list(vif_data[vif_data['VIF'] > vif_data['VIF'].mean()]['feature']), axis=1, inplace=True)
X
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
sc = StandardScaler()
X = sc.fit_transform(X)
pca = PCA(n_components=182)
X_pca = pca.fit_transform(X)
explained_variance = pca.explained_variance_ratio_
explained_variance
code
128030251/cell_22
[ "text_html_output_1.png" ]
from sklearn import tree

clf = tree.DecisionTreeClassifier(max_depth=400, random_state=42)
clf.fit(X_train, y_train)
code
128030251/cell_10
[ "text_plain_output_1.png" ]
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
X = final_copy.drop('label', axis=1)
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
list(vif_data[vif_data['VIF'] > vif_data['VIF'].mean()]['feature'])
code
128030251/cell_27
[ "text_plain_output_1.png" ]
from sklearn import tree
from sklearn.metrics import classification_report
from sklearn.neural_network import MLPClassifier

clf = tree.DecisionTreeClassifier(max_depth=400, random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf = MLPClassifier(hidden_layer_sizes=(50, 100, 200, 400, 800, 1600, 3200, 1600, 800, 400, 200, 100, 50), max_iter=3000, random_state=42)
clf.fit(X_train, y_train)
code
128030251/cell_12
[ "text_html_output_1.png" ]
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
final_copy.drop(['lncRNA', 'miRNA'], axis=1, inplace=True)
final_copy
X = final_copy.drop('label', axis=1)
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
X.drop(list(vif_data[vif_data['VIF'] > vif_data['VIF'].mean()]['feature']), axis=1, inplace=True)
X
vif_data = pd.DataFrame()
vif_data['feature'] = X.columns
vif_data['VIF'] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
vif_data
vif_data[vif_data['VIF'] > 20]
code
128030251/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

final_data = pd.read_csv('/kaggle/input/new-01-05-2023-update-1/new_final_updated_dataset.csv')
final_data.columns
final_data.drop('Unnamed: 0', axis=1, inplace=True)
final_data
final_copy = final_data.copy(deep=True)
final_copy
code