path: string (lengths 13–17)
screenshot_names: sequence (lengths 1–873)
code: string (lengths 0–40.4k)
cell_type: string (1 class)
106202407/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
(train.shape, test.shape)
code
106202407/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
(train.shape, test.shape)
train_X = train.copy()
train_Y = train_X.pop('Transported')

def displayAllCateFeatInfo(df):
    pass

def splitCabinForNewFeatures(df):
    my_df = df.copy()
    split_cabin_df = my_df.Cabin.str.split('/', expand=True)
    my_df['CabinDeck'] = split_cabin_df[0]
    my_df['CabinSide'] = split_cabin_df[2]
    my_df.pop('Cabin')
    return my_df

def splitNameToGenerateFamilyName(df):
    my_df = df.copy()
    split_name_df = my_df.Name.str.split(' ', expand=True)
    my_df['FamilyName'] = split_name_df[1]
    my_df.pop('Name')
    return my_df

displayAllCateFeatInfo(splitNameToGenerateFamilyName(train_X))
code
105189461/cell_4
[ "text_plain_output_1.png" ]
score1 = 100
score2 = 145.9
type(score1)
code
105189461/cell_6
[ "text_plain_output_1.png" ]
score1 = 100
score2 = 145.9
total_score = score1 + score2
print(total_score)
code
105189461/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
score1 = 100
score2 = 145.9
total_score = score1 + score2
print('total score of tom is', total_score)
code
105189461/cell_10
[ "text_plain_output_1.png" ]
sale1 = input('sales in store1')
sale2 = input('sales in store2')
code
105189461/cell_5
[ "text_plain_output_1.png" ]
score1 = 100
score2 = 145.9
type(score2)
code
74055897/cell_25
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
missingVals = data.isnull().mean() * 100
missingVals.sort_values(ascending=False).head(20)
data.drop(columns=['Alley', 'Fence', 'PoolQC', 'MiscFeature', 'FireplaceQu', 'Id'], inplace=True)
corr = data.corr()
kot = corr[np.abs(corr) >= 0.7]
data.drop(columns=['GarageArea', 'GrLivArea', 'GarageYrBlt'], inplace=True)
data.columns

def get_num_cat_features(type_features, data):
    return data.select_dtypes(include=type_features)

numerics = ['int64', 'float64']
newdf_num = get_num_cat_features(numerics, data)
newdf_cat = data.select_dtypes(['object'])
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
test.drop(columns=['Id', 'Alley', 'Fence', 'PoolQC', 'MiscFeature', 'FireplaceQu'], inplace=True)
test.drop(columns=['GarageArea', 'GrLivArea', 'GarageYrBlt'], inplace=True)
test.fillna(test.mean())
numericstest = ['int64', 'float64']
newdf_numtest = get_num_cat_features(numerics, test)
test_num = get_num_cat_features(numerics, test)
newdf_cattest = test.select_dtypes(['object'])
trainTest = newdf_cat.append(newdf_cattest)

def encode_labels(data):
    encoded_categoric_train_set = data.copy()
    for c in data.columns:
        data[c] = data[c].astype('category')
        encoded_categoric_train_set[c] = data[c].cat.codes
    return encoded_categoric_train_set

encoded_categoric_train_set = encode_labels(trainTest)
newdf_cat = encoded_categoric_train_set[:1460]
test = encoded_categoric_train_set[1460:]
test.shape
code
74055897/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.info()
code
74055897/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
missingVals = data.isnull().mean() * 100
missingVals.sort_values(ascending=False).head(20)
data.drop(columns=['Alley', 'Fence', 'PoolQC', 'MiscFeature', 'FireplaceQu', 'Id'], inplace=True)
corr = data.corr()
kot = corr[np.abs(corr) >= 0.7]
data.drop(columns=['GarageArea', 'GrLivArea', 'GarageYrBlt'], inplace=True)
data.columns

def get_num_cat_features(type_features, data):
    return data.select_dtypes(include=type_features)

numerics = ['int64', 'float64']
newdf_num = get_num_cat_features(numerics, data)
newdf_cat = data.select_dtypes(['object'])
newdf_num.shape
code
74055897/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
code
74055897/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
missingVals = data.isnull().mean() * 100
missingVals.sort_values(ascending=False).head(20)
data.drop(columns=['Alley', 'Fence', 'PoolQC', 'MiscFeature', 'FireplaceQu', 'Id'], inplace=True)
plt.figure(figsize=(10, 10))
sns.heatmap(data.corr())
code
74055897/cell_7
[ "text_plain_output_1.png" ]
import missingno
import pandas as pd

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
missingno.bar(data)
code
74055897/cell_18
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
missingVals = data.isnull().mean() * 100
missingVals.sort_values(ascending=False).head(20)
data.drop(columns=['Alley', 'Fence', 'PoolQC', 'MiscFeature', 'FireplaceQu', 'Id'], inplace=True)
corr = data.corr()
kot = corr[np.abs(corr) >= 0.7]
data.drop(columns=['GarageArea', 'GrLivArea', 'GarageYrBlt'], inplace=True)
data.columns

def get_num_cat_features(type_features, data):
    return data.select_dtypes(include=type_features)

numerics = ['int64', 'float64']
newdf_num = get_num_cat_features(numerics, data)
newdf_cat = data.select_dtypes(['object'])
newdf_num.shape
code
74055897/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
missingVals = data.isnull().mean() * 100
missingVals.sort_values(ascending=False).head(20)
code
74055897/cell_15
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
missingVals = data.isnull().mean() * 100
missingVals.sort_values(ascending=False).head(20)
data.drop(columns=['Alley', 'Fence', 'PoolQC', 'MiscFeature', 'FireplaceQu', 'Id'], inplace=True)
corr = data.corr()
kot = corr[np.abs(corr) >= 0.7]
data.drop(columns=['GarageArea', 'GrLivArea', 'GarageYrBlt'], inplace=True)
y
code
74055897/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
missingVals = data.isnull().mean() * 100
missingVals.sort_values(ascending=False).head(20)
data.drop(columns=['Alley', 'Fence', 'PoolQC', 'MiscFeature', 'FireplaceQu', 'Id'], inplace=True)
corr = data.corr()
kot = corr[np.abs(corr) >= 0.7]
data.drop(columns=['GarageArea', 'GrLivArea', 'GarageYrBlt'], inplace=True)
data.columns
code
74055897/cell_22
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
missingVals = data.isnull().mean() * 100
missingVals.sort_values(ascending=False).head(20)
data.drop(columns=['Alley', 'Fence', 'PoolQC', 'MiscFeature', 'FireplaceQu', 'Id'], inplace=True)
corr = data.corr()
kot = corr[np.abs(corr) >= 0.7]
data.drop(columns=['GarageArea', 'GrLivArea', 'GarageYrBlt'], inplace=True)
data.columns

def get_num_cat_features(type_features, data):
    return data.select_dtypes(include=type_features)

numerics = ['int64', 'float64']
newdf_num = get_num_cat_features(numerics, data)
newdf_cat = data.select_dtypes(['object'])
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
test.drop(columns=['Id', 'Alley', 'Fence', 'PoolQC', 'MiscFeature', 'FireplaceQu'], inplace=True)
test.drop(columns=['GarageArea', 'GrLivArea', 'GarageYrBlt'], inplace=True)
test.fillna(test.mean())
numericstest = ['int64', 'float64']
newdf_numtest = get_num_cat_features(numerics, test)
test_num = get_num_cat_features(numerics, test)
newdf_cattest = test.select_dtypes(['object'])
trainTest = newdf_cat.append(newdf_cattest)
test.info()
code
74055897/cell_12
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
data.kurt().sort_values(ascending=False)
missingVals = data.isnull().mean() * 100
missingVals.sort_values(ascending=False).head(20)
data.drop(columns=['Alley', 'Fence', 'PoolQC', 'MiscFeature', 'FireplaceQu', 'Id'], inplace=True)
corr = data.corr()
kot = corr[np.abs(corr) >= 0.7]
plt.figure(figsize=(12, 8))
sns.heatmap(kot, cmap='Reds')
code
74055897/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = data['SalePrice']
data.drop(columns=['SalePrice'], inplace=True)
data.skew().sort_values(ascending=False)
code
128000744/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

pd.set_option('display.max_columns', None)
df = pd.read_csv('/kaggle/input/road-accidents-rome-june2022/426c71f0-7181-417a-b149-33ba943382b0.csv', sep=';', encoding='latin-1')
df.columns
code
128000744/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

pd.set_option('display.max_columns', None)
df = pd.read_csv('/kaggle/input/road-accidents-rome-june2022/426c71f0-7181-417a-b149-33ba943382b0.csv', sep=';', encoding='latin-1')
df.columns
df[['NUM_MORTI']].sum()
code
33101088/cell_6
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import StratifiedShuffleSplit
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
X_train = []
Y_train = []
X_test = []
for index, row in train_df.iterrows():
    X_train.append(row.values[1:].reshape((28, 28, 1)))
    Y_train.append(row['label'])
for index, row in test_df.iterrows():
    X_test.append(row.values.reshape((28, 28, 1)))
X_train = np.array(X_train) / 255.0
Y_train = np.array(Y_train)
X_test = np.array(X_test) / 255.0
lb = preprocessing.LabelBinarizer()
lb.fit(Y_train)
Y_train = lb.transform(Y_train)
sss = StratifiedShuffleSplit(10, 0.2, random_state=15)
for train_idx, val_idx in sss.split(X_train, Y_train):
    X_train_tmp, X_val = (X_train[train_idx], X_train[val_idx])
    Y_train_tmp, Y_val = (Y_train[train_idx], Y_train[val_idx])
X_train = X_train_tmp
Y_train = Y_train_tmp
img_size = (28, 28, 1)
n_classes = 10
if os.path.exists('keras_model.h5'):
    model = load_model('keras_model.h5')
else:
    model = Sequential()
    model.add(Conv2D(32, (5, 5), input_shape=img_size, kernel_initializer='normal'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (5, 5), kernel_initializer='normal'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False,
                             featurewise_std_normalization=False, samplewise_std_normalization=False,
                             zca_whitening=False, rotation_range=0, zoom_range=0.1,
                             width_shift_range=0.1, height_shift_range=0.1,
                             horizontal_flip=False, vertical_flip=False)
datagen.fit(X_train)
model.fit_generator(datagen.flow(X_train, Y_train, batch_size=1000), epochs=20,
                    validation_data=(X_val, Y_val), steps_per_epoch=X_train.shape[0] / 1000, verbose=1)
score, acc = model.evaluate(X_val, Y_val, verbose=1)
print('\nLoss:', score, '\nAcc:', acc)
model.save('keras_model.h5')
code
33101088/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn import preprocessing
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
code
33101088/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import StratifiedShuffleSplit
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input, Conv2DTranspose
from keras.initializers import Ones, Zeros
from keras.preprocessing.image import ImageDataGenerator

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
X_train = []
Y_train = []
X_test = []
for index, row in train_df.iterrows():
    X_train.append(row.values[1:].reshape((28, 28, 1)))
    Y_train.append(row['label'])
for index, row in test_df.iterrows():
    X_test.append(row.values.reshape((28, 28, 1)))
X_train = np.array(X_train) / 255.0
Y_train = np.array(Y_train)
X_test = np.array(X_test) / 255.0
lb = preprocessing.LabelBinarizer()
lb.fit(Y_train)
Y_train = lb.transform(Y_train)
sss = StratifiedShuffleSplit(10, 0.2, random_state=15)
for train_idx, val_idx in sss.split(X_train, Y_train):
    X_train_tmp, X_val = (X_train[train_idx], X_train[val_idx])
    Y_train_tmp, Y_val = (Y_train[train_idx], Y_train[val_idx])
X_train = X_train_tmp
Y_train = Y_train_tmp
img_size = (28, 28, 1)
n_classes = 10
if os.path.exists('keras_model.h5'):
    model = load_model('keras_model.h5')
else:
    model = Sequential()
    model.add(Conv2D(32, (5, 5), input_shape=img_size, kernel_initializer='normal'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (5, 5), kernel_initializer='normal'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False,
                             featurewise_std_normalization=False, samplewise_std_normalization=False,
                             zca_whitening=False, rotation_range=0, zoom_range=0.1,
                             width_shift_range=0.1, height_shift_range=0.1,
                             horizontal_flip=False, vertical_flip=False)
datagen.fit(X_train)
model.fit_generator(datagen.flow(X_train, Y_train, batch_size=1000), epochs=20,
                    validation_data=(X_val, Y_val), steps_per_epoch=X_train.shape[0] / 1000, verbose=1)
score, acc = model.evaluate(X_val, Y_val, verbose=1)
model.save('keras_model.h5')
Y_test = model.predict(X_test)
Y_test = lb.inverse_transform(Y_test)
Y_test = [[y] for y in Y_test]
index = [[i] for i in range(1, X_test.shape[0] + 1)]
output_np = np.concatenate((index, Y_test), axis=1)
output_df = pd.DataFrame(data=output_np, columns=['ImageId', 'Label'])
output_df.to_csv('out.csv', index=False)
Y_train_label = lb.inverse_transform(Y_train)
Y_train_label[:30]
class_indices = [3, 5, 0, 22, 1, 9, 2, 28, 4, 7]
from keras import backend as K
K.set_learning_phase(1)
import tensorflow as tf
model = load_model('keras_model.h5')
layer_dict = dict([(layer.name, layer) for layer in model.layers])

def deprocess_image(x):
    x -= x.mean()
    x /= x.std() + 1e-05
    x *= 0.1
    x += 0.5
    x = np.clip(x, 0, 1)
    x *= 255
    x = np.clip(x, 0, 255).astype('uint8')
    return x

class SaliencyMask(object):
    def __init__(self, model, output_index=0):
        pass

    def get_mask(self, input_image):
        pass

    def get_smoothed_mask(self, input_image, stdev_spread=0.2, nsamples=50):
        stdev = stdev_spread * (np.max(input_image) - np.min(input_image))
        total_gradients = np.zeros_like(input_image, dtype=np.float64)
        for i in range(nsamples):
            noise = np.random.normal(0, stdev, input_image.shape)
            x_value_plus_noise = input_image + noise
            total_gradients += self.get_mask(x_value_plus_noise)
        return total_gradients / nsamples

class GradientSaliency(SaliencyMask):
    def __init__(self, model, output_index=0):
        input_tensors = [model.input]
        gradients = model.optimizer.get_gradients(model.output[0][output_index], model.input)
        self.compute_gradients = K.function(inputs=input_tensors, outputs=gradients)

    def get_mask(self, input_image):
        x_value = np.expand_dims(input_image, axis=0)
        gradients = self.compute_gradients([x_value])[0][0]
        return gradients

class VisualBackprop(SaliencyMask):
    def __init__(self, model, output_index=0):
        inps = [model.input]
        outs = [layer.output for layer in model.layers]
        self.forward_pass = K.function(inps, outs)
        self.model = model

    def get_mask(self, input_image):
        x_value = np.expand_dims(input_image, axis=0)
        visual_bpr = None
        layer_outs = self.forward_pass([x_value, 0])
        for i in range(len(self.model.layers) - 1, -1, -1):
            if 'Conv2D' in str(type(self.model.layers[i])):
                layer = np.mean(layer_outs[i], axis=3, keepdims=True)
                layer = layer - np.min(layer)
                layer = layer / (np.max(layer) - np.min(layer) + 1e-06)
                if visual_bpr is not None:
                    if visual_bpr.shape != layer.shape:
                        visual_bpr = self._deconv(visual_bpr)
                    visual_bpr = visual_bpr * layer
                else:
                    visual_bpr = layer
        return visual_bpr[0]

    def _deconv(self, feature_map):
        x = Input(shape=(None, None, 1))
        y = Conv2DTranspose(filters=1, kernel_size=(3, 3), strides=(2, 2), padding='same',
                            kernel_initializer=Ones(), bias_initializer=Zeros())(x)
        deconv_model = Model(inputs=[x], outputs=[y])
        inps = [deconv_model.input]
        outs = [deconv_model.layers[-1].output]
        deconv_func = K.function(inps, outs)
        return deconv_func([feature_map, 0])[0]

Y_train_label = lb.inverse_transform(Y_train)
fig, ax = plt.subplots(10, 2, figsize=(5, 25))
i = -1
for c in class_indices:
    img = np.array(X_train[c])
    i = i + 1
    vanilla = GradientSaliency(model, Y_train_label[c])
    mask = vanilla.get_mask(img)
    filter_mask = (mask > 0.0).reshape((28, 28))
    smooth_mask = vanilla.get_smoothed_mask(img)
    filter_smoothed_mask = (smooth_mask > 0.0).reshape((28, 28))
    fig.subplots_adjust(hspace=0.8)
    ax[i, 0].imshow(img.reshape((28, 28)), cmap='gray')
    cax = ax[i, 1].imshow(mask.reshape((28, 28)), cmap='jet')
    fig.colorbar(cax, ax=ax[i, 1])
code
327813/cell_6
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier

# machine learning
train_data = train_df.values
test_data = test_df.values
X_train = train_data[:, 1:]
y_train = train_data[:, 0]
X_test = test_data[:, 1:]
idx = test_data[:, 0]

# random forest classifier
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
score_rfc = rfc.score(X_train, y_train)
out_rfc = rfc.predict(X_test)
print('random forest classifier score: %f' % score_rfc)

# logistic regression
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
score_logreg = logreg.score(X_train, y_train)
out_logreg = logreg.predict(X_test)
print('logistic regression score: %f' % score_logreg)

# SVM
svc = SVC()
svc.fit(X_train, y_train)
score_svc = svc.score(X_train, y_train)
out_svc = svc.predict(X_test)
print('SVM score: %f' % score_svc)

# knn classifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
score_knn = knn.score(X_train, y_train)
out_knn = knn.predict(X_test)
print('knn score: %f' % score_knn)

# write out predictions
# predictions_file = open("titanic_pred.csv", "wb")
# open_file_object = csv.writer(predictions_file)
# open_file_object.writerow(["PassengerId", "Survived"])
# open_file_object.writerows(zip(idx, out_rfc))
# predictions_file.close()
code
327813/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

if __name__ == '__main__':
    train_df = pd.read_csv('../input/train.csv')
    test_df = pd.read_csv('../input/test.csv')
    train_df.head()
    train_df.info()
    test_df.info()
code
105198337/cell_21
[ "text_html_output_2.png", "text_plain_output_1.png" ]
lr = create_model('lr')
tuned_lr = tune_model(lr)
plot_model(tuned_lr)
code
105198337/cell_13
[ "text_plain_output_1.png" ]
rf = create_model('rf')
tuned_rf = tune_model(rf)
evaluate_model(tuned_rf)
code
105198337/cell_9
[ "text_html_output_2.png" ]
top_model = compare_models(sort='AUC', fold=5, n_select=3)
code
105198337/cell_4
[ "text_html_output_2.png", "text_plain_output_1.png" ]
!pip install --pre pycaret
code
105198337/cell_23
[ "image_png_output_1.png" ]
lr = create_model('lr')
tuned_lr = tune_model(lr)
predict_model(tuned_lr)
code
105198337/cell_20
[ "text_html_output_2.png" ]
lr = create_model('lr')
tuned_lr = tune_model(lr)
code
105198337/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/text-sim-out/Output_0907.csv')
data.dtypes
code
105198337/cell_11
[ "text_plain_output_1.png" ]
rf = create_model('rf')
tuned_rf = tune_model(rf)
code
105198337/cell_19
[ "image_png_output_1.png" ]
lr = create_model('lr')
code
105198337/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105198337/cell_7
[ "image_png_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/text-sim-out/Output_0907.csv')
data.dtypes
data.columns
code
105198337/cell_18
[ "text_html_output_2.png", "text_html_output_1.png" ]
top_model = compare_models(sort='AUC', fold=5, n_select=3)
top_model
code
105198337/cell_8
[ "text_html_output_2.png", "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/text-sim-out/Output_0907.csv')
data.dtypes
data.columns
from pycaret.classification import *
setup(data=data[['label', 'bert_score', 'jarowinkler', 'levenshtein', 'ratcliff']], target='label')
code
105198337/cell_15
[ "text_html_output_2.png" ]
catboost = create_model('catboost')
tuned_catboost = tune_model(catboost)
code
105198337/cell_16
[ "text_html_output_2.png" ]
catboost = create_model('catboost')
tuned_catboost = tune_model(catboost)
interpret_model(tuned_catboost)
code
105198337/cell_3
[ "text_html_output_2.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/text-sim-out/Output_0907.csv')
data
code
105198337/cell_17
[ "text_html_output_2.png", "text_plain_output_1.png" ]
catboost = create_model('catboost')
tuned_catboost = tune_model(catboost)
evaluate_model(tuned_catboost)
code
105198337/cell_24
[ "text_plain_output_1.png" ]
rf = create_model('rf')
tuned_rf = tune_model(rf)
catboost = create_model('catboost')
tuned_catboost = tune_model(catboost)
lr = create_model('lr')
tuned_lr = tune_model(lr)
blend = blend_models(estimator_list=[tuned_lr, tuned_catboost, tuned_rf])
code
105198337/cell_14
[ "text_html_output_2.png", "text_plain_output_1.png" ]
catboost = create_model('catboost')
code
105198337/cell_22
[ "image_output_1.png" ]
lr = create_model('lr')
tuned_lr = tune_model(lr)
evaluate_model(tuned_lr)
code
105198337/cell_10
[ "text_html_output_1.png" ]
rf = create_model('rf')
code
105198337/cell_12
[ "text_plain_output_1.png" ]
rf = create_model('rf')
tuned_rf = tune_model(rf)
predict_model(tuned_rf)
code
128034494/cell_9
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
code
128034494/cell_25
[ "text_plain_output_1.png" ]
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
col_float = []
col_str = []
for i in features.columns:
    if features[i].dtype == 'float64':
        col_float.append(i)
    else:
        col_str.append(i)
si = SimpleImputer(strategy='most_frequent')
features[col_str] = si.fit_transform(features[col_str])
features.isna().sum()
knn = KNNImputer(n_neighbors=7, weights='distance')
features[col_float] = knn.fit_transform(features[col_float])
features.isna().sum()
encoder = OneHotEncoder(sparse=False)
encoded = encoder.fit_transform(features[col_str])
new_features = pd.DataFrame(encoded)
new_features.head(3)
code
128034494/cell_23
[ "text_plain_output_1.png" ]
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
col_float = []
col_str = []
for i in features.columns:
    if features[i].dtype == 'float64':
        col_float.append(i)
    else:
        col_str.append(i)
si = SimpleImputer(strategy='most_frequent')
features[col_str] = si.fit_transform(features[col_str])
features.isna().sum()
knn = KNNImputer(n_neighbors=7, weights='distance')
features[col_float] = knn.fit_transform(features[col_float])
features.isna().sum()
code
128034494/cell_79
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix, classification_report
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
target.value_counts()
col_float = []
col_str = []
for i in features.columns:
    if features[i].dtype == 'float64':
        col_float.append(i)
    else:
        col_str.append(i)
si = SimpleImputer(strategy='most_frequent')
features[col_str] = si.fit_transform(features[col_str])
features.isna().sum()
knn = KNNImputer(n_neighbors=7, weights='distance')
features[col_float] = knn.fit_transform(features[col_float])
features.isna().sum()
encoder = OneHotEncoder(sparse=False)
encoded = encoder.fit_transform(features[col_str])
new_features = pd.DataFrame(encoded)
le = LabelEncoder()
new_target = le.fit_transform(target)
new_target

def get_metrics(classifier, Xvalid, yvalid):
    """Function to Get the metrics of the given Classifier"""
    y_train_pred = classifier.predict_proba(X_train)[:, 1]
    y_valid_pred = classifier.predict_proba(Xvalid)[:, 1]
    y_valid_predict = classifier.predict(Xvalid)
    cm = confusion_matrix(yvalid, y_valid_predict)
    dist = ConfusionMatrixDisplay(cm)

dc = DecisionTreeClassifier()
dc.fit(X_train, y_train)
get_metrics(dc, X_valid, y_valid)
dc = DecisionTreeClassifier(max_depth=4, criterion='gini', min_samples_split=2, min_samples_leaf=4)
dc.fit(X_train, y_train)
get_metrics(dc, X_valid, y_valid)
test[col_str] = si.transform(test[col_str])
test[col_float] = knn.transform(test[col_float])
test_encoded = pd.DataFrame(encoder.fit_transform(test[col_str]))
new_df_test = pd.DataFrame(test_encoded)
new_df_test[col_float] = test[col_float]
prediction = dc.predict(new_df_test)
prediction
prediction_decode = le.inverse_transform(prediction)
prediction_decode
submission = pd.DataFrame(columns=['id', 'Made_Purchase'])
submission['id'] = [i for i in range(len(prediction_decode))]
submission['Made_Purchase'] = prediction_decode
submission.head()
code
128034494/cell_33
[ "text_html_output_1.png" ]
print('training set shape: ', X_train.shape, y_train.shape)
print('Validation set shape: ', X_valid.shape, y_valid.shape)
code
128034494/cell_74
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
col_float = []
col_str = []
for i in features.columns:
    if features[i].dtype == 'float64':
        col_float.append(i)
    else:
        col_str.append(i)
si = SimpleImputer(strategy='most_frequent')
features[col_str] = si.fit_transform(features[col_str])
features.isna().sum()
knn = KNNImputer(n_neighbors=7, weights='distance')
features[col_float] = knn.fit_transform(features[col_float])
features.isna().sum()
encoder = OneHotEncoder(sparse=False)
encoded = encoder.fit_transform(features[col_str])
new_features = pd.DataFrame(encoded)
test[col_str] = si.transform(test[col_str])
test[col_float] = knn.transform(test[col_float])
test_encoded = pd.DataFrame(encoder.fit_transform(test[col_str]))
new_df_test = pd.DataFrame(test_encoded)
new_df_test[col_float] = test[col_float]
new_df_test.head()
code
128034494/cell_76
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix, classification_report
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
col_float = []
col_str = []
for i in features.columns:
    if features[i].dtype == 'float64':
        col_float.append(i)
    else:
        col_str.append(i)
si = SimpleImputer(strategy='most_frequent')
features[col_str] = si.fit_transform(features[col_str])
features.isna().sum()
knn = KNNImputer(n_neighbors=7, weights='distance')
features[col_float] = knn.fit_transform(features[col_float])
features.isna().sum()
encoder = OneHotEncoder(sparse=False)
encoded = encoder.fit_transform(features[col_str])
new_features = pd.DataFrame(encoded)

def get_metrics(classifier, Xvalid, yvalid):
    """Function to Get the metrics of the given Classifier"""
    y_train_pred = classifier.predict_proba(X_train)[:, 1]
    y_valid_pred = classifier.predict_proba(Xvalid)[:, 1]
    y_valid_predict = classifier.predict(Xvalid)
    cm = confusion_matrix(yvalid, y_valid_predict)
    dist = ConfusionMatrixDisplay(cm)

dc = DecisionTreeClassifier()
dc.fit(X_train, y_train)
get_metrics(dc, X_valid, y_valid)
dc = DecisionTreeClassifier(max_depth=4, criterion='gini', min_samples_split=2, min_samples_leaf=4)
dc.fit(X_train, y_train)
get_metrics(dc, X_valid, y_valid)
test[col_str] = si.transform(test[col_str])
test[col_float] = knn.transform(test[col_float])
test_encoded = pd.DataFrame(encoder.fit_transform(test[col_str]))
new_df_test = pd.DataFrame(test_encoded)
new_df_test[col_float] = test[col_float]
prediction = dc.predict(new_df_test)
prediction
code
128034494/cell_26
[ "text_plain_output_1.png" ]
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
col_float = []
col_str = []
for i in features.columns:
    if features[i].dtype == 'float64':
        col_float.append(i)
    else:
        col_str.append(i)
si = SimpleImputer(strategy='most_frequent')
features[col_str] = si.fit_transform(features[col_str])
features.isna().sum()
knn = KNNImputer(n_neighbors=7, weights='distance')
features[col_float] = knn.fit_transform(features[col_float])
features.isna().sum()
encoder = OneHotEncoder(sparse=False)
encoded = encoder.fit_transform(features[col_str])
new_features = pd.DataFrame(encoded)
new_features[col_float] = features[col_float]
new_features.head()
code
128034494/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
train.info()
code
128034494/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.head(3)
code
128034494/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
col_float = []
col_str = []
for i in features.columns:
    if features[i].dtype == 'float64':
        col_float.append(i)
    else:
        col_str.append(i)
print('String columns names : \n', col_str)
print()
print('Float columns names : \n', col_float)
code
128034494/cell_59
[ "text_plain_output_1.png" ]
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix, classification_report
from sklearn.metrics import roc_auc_score
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()

def get_metrics(classifier, Xvalid, yvalid):
    """Function to Get the metrics of the given Classifier"""
    y_train_pred = classifier.predict_proba(X_train)[:, 1]
    y_valid_pred = classifier.predict_proba(Xvalid)[:, 1]
    y_valid_predict = classifier.predict(Xvalid)
    cm = confusion_matrix(yvalid, y_valid_predict)
    dist = ConfusionMatrixDisplay(cm)

dc = DecisionTreeClassifier()
dc.fit(X_train, y_train)
get_metrics(dc, X_valid, y_valid)
dc = DecisionTreeClassifier(max_depth=4, criterion='gini', min_samples_split=2, min_samples_leaf=4)
dc.fit(X_train, y_train)
get_metrics(dc, X_valid, y_valid)
code
128034494/cell_28
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
target.value_counts()
le = LabelEncoder()
new_target = le.fit_transform(target)
new_target
code
128034494/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
code
128034494/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
target.value_counts()
code
128034494/cell_77
[ "text_html_output_1.png" ]
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix, classification_report
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
target.value_counts()
col_float = []
col_str = []
for i in features.columns:
    if features[i].dtype == 'float64':
        col_float.append(i)
    else:
        col_str.append(i)
si = SimpleImputer(strategy='most_frequent')
features[col_str] = si.fit_transform(features[col_str])
features.isna().sum()
knn = KNNImputer(n_neighbors=7, weights='distance')
features[col_float] = knn.fit_transform(features[col_float])
features.isna().sum()
encoder = OneHotEncoder(sparse=False)
encoded = encoder.fit_transform(features[col_str])
new_features = pd.DataFrame(encoded)
le = LabelEncoder()
new_target = le.fit_transform(target)
new_target

def get_metrics(classifier, Xvalid, yvalid):
    """Function to Get the metrics of the given Classifier"""
    y_train_pred = classifier.predict_proba(X_train)[:, 1]
    y_valid_pred = classifier.predict_proba(Xvalid)[:, 1]
    y_valid_predict = classifier.predict(Xvalid)
    cm = confusion_matrix(yvalid, y_valid_predict)
    dist = ConfusionMatrixDisplay(cm)

dc = DecisionTreeClassifier()
dc.fit(X_train, y_train)
get_metrics(dc, X_valid, y_valid)
dc = DecisionTreeClassifier(max_depth=4, criterion='gini', min_samples_split=2, min_samples_leaf=4)
dc.fit(X_train, y_train)
get_metrics(dc, X_valid, y_valid)
test[col_str] = si.transform(test[col_str])
test[col_float] = knn.transform(test[col_float])
test_encoded = pd.DataFrame(encoder.fit_transform(test[col_str]))
new_df_test = pd.DataFrame(test_encoded)
new_df_test[col_float] = test[col_float]
prediction = dc.predict(new_df_test)
prediction
prediction_decode = le.inverse_transform(prediction)
prediction_decode
code
128034494/cell_46
[ "text_html_output_1.png" ]
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix, classification_report
from sklearn.metrics import roc_auc_score
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()

def get_metrics(classifier, Xvalid, yvalid):
    """Function to Get the metrics of the given Classifier"""
    y_train_pred = classifier.predict_proba(X_train)[:, 1]
    y_valid_pred = classifier.predict_proba(Xvalid)[:, 1]
    y_valid_predict = classifier.predict(Xvalid)
    cm = confusion_matrix(yvalid, y_valid_predict)
    dist = ConfusionMatrixDisplay(cm)

dc = DecisionTreeClassifier()
dc.fit(X_train, y_train)
print('Training Score : ', dc.score(X_train, y_train))
get_metrics(dc, X_valid, y_valid)
code
128034494/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
plt.figure(figsize=(20, 10))
sns.heatmap(var, annot=True)
code
128034494/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.impute import SimpleImputer
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
col_float = []
col_str = []
for i in features.columns:
    if features[i].dtype == 'float64':
        col_float.append(i)
    else:
        col_str.append(i)
si = SimpleImputer(strategy='most_frequent')
features[col_str] = si.fit_transform(features[col_str])
features.isna().sum()
code
128034494/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
code
128034494/cell_71
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

train = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
test = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/test_data_v2.csv')
train.shape
train.columns
train.isna().sum()
var = train.corr()
target = train['Made_Purchase']
features = train.drop('Made_Purchase', axis=1)
target.value_counts()
col_float = []
col_str = []
for i in features.columns:
    if features[i].dtype == 'float64':
        col_float.append(i)
    else:
        col_str.append(i)
si = SimpleImputer(strategy='most_frequent')
features[col_str] = si.fit_transform(features[col_str])
features.isna().sum()
knn = KNNImputer(n_neighbors=7, weights='distance')
features[col_float] = knn.fit_transform(features[col_float])
features.isna().sum()
encoder = OneHotEncoder(sparse=False)
encoded = encoder.fit_transform(features[col_str])
new_features = pd.DataFrame(encoded)
new_features[col_float] = features[col_float]
le = LabelEncoder()
new_target = le.fit_transform(target)
new_target
rf = RandomForestClassifier(n_estimators=200, criterion='entropy', max_depth=3, min_samples_leaf=2, min_samples_split=4)
rf.fit(new_features, new_target)
code
33116653/cell_9
[ "image_output_1.png" ]
import json
import os  # To walk through the data files provided
import re  # Regular expressions

testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/'

def readTaskFile(filename):
    f = open(filename, 'r')
    data = json.loads(f.read())
    data['id'] = re.sub('(.*/)|(\\.json)', '', filename)
    f.close()
    return data

filename = testDirectory + '19bb5feb.json'
readTaskFile(filename)

def getGridSizeComparison(filename):
    data = readTaskFile(filename)
    trainSection = data['train']
    ident = data['id']
    numTrain = len(trainSection)
    result = {}
    for i in range(numTrain):
        trainCase = trainSection[i]
        trainCaseInput = trainCase['input']
        trainCaseOutput = trainCase['output']
        sameY = len(trainCaseInput) == len(trainCaseOutput)
        sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0])
        result[ident + '_train_' + str(i)] = sameX and sameY
    return result

filename = testDirectory + '19bb5feb.json'
getGridSizeComparison(filename)

def getResults(directory, f):
    results = {}
    for _, _, filenames in os.walk(directory):
        for filename in filenames:
            results.update(f(directory + filename))
    return results

results = getResults(trainingDirectory, getGridSizeComparison)
print(results)
code
33116653/cell_11
[ "text_plain_output_1.png" ]
from matplotlib import colors
import json
import matplotlib.pyplot as plt
import re  # Regular expressions

testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/'

def readTaskFile(filename):
    f = open(filename, 'r')
    data = json.loads(f.read())
    data['id'] = re.sub('(.*/)|(\\.json)', '', filename)
    f.close()
    return data

filename = testDirectory + '19bb5feb.json'
readTaskFile(filename)

def getGridSizeComparison(filename):
    data = readTaskFile(filename)
    trainSection = data['train']
    ident = data['id']
    numTrain = len(trainSection)
    result = {}
    for i in range(numTrain):
        trainCase = trainSection[i]
        trainCaseInput = trainCase['input']
        trainCaseOutput = trainCase['output']
        sameY = len(trainCaseInput) == len(trainCaseOutput)
        sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0])
        result[ident + '_train_' + str(i)] = sameX and sameY
    return result

filename = testDirectory + '19bb5feb.json'
getGridSizeComparison(filename)

def plotTaskTraining(task):
    """
    Plots the first train and test pairs of a specified task,
    using same color scheme as the ARC app
    """
    cmap = colors.ListedColormap(['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
                                  '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
    norm = colors.Normalize(vmin=0, vmax=9)
    nTrainingCases = len(task['train'])
    fig, axs = plt.subplots(nTrainingCases, 2, figsize=(15, 15))
    for i in range(nTrainingCases):
        axs[i][0].imshow(task['train'][i]['input'], cmap=cmap, norm=norm)
        axs[i][0].axis('off')
        axs[i][0].set_title('Train Input')
        axs[i][1].imshow(task['train'][i]['output'], cmap=cmap, norm=norm)
        axs[i][1].axis('off')
        axs[i][1].set_title('Train Output')
    plt.tight_layout()
    plt.show()

filename = testDirectory + '19bb5feb.json'
task = readTaskFile(filename)
plotTaskTraining(task)
code
33116653/cell_7
[ "text_plain_output_1.png" ]
import json
import re  # Regular expressions

testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/'

def readTaskFile(filename):
    f = open(filename, 'r')
    data = json.loads(f.read())
    data['id'] = re.sub('(.*/)|(\\.json)', '', filename)
    f.close()
    return data

filename = testDirectory + '19bb5feb.json'
readTaskFile(filename)

def getGridSizeComparison(filename):
    data = readTaskFile(filename)
    trainSection = data['train']
    ident = data['id']
    numTrain = len(trainSection)
    result = {}
    for i in range(numTrain):
        trainCase = trainSection[i]
        trainCaseInput = trainCase['input']
        trainCaseOutput = trainCase['output']
        sameY = len(trainCaseInput) == len(trainCaseOutput)
        sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0])
        result[ident + '_train_' + str(i)] = sameX and sameY
    return result

filename = testDirectory + '19bb5feb.json'
getGridSizeComparison(filename)
code
33116653/cell_10
[ "text_plain_output_1.png" ]
import json
import os  # To walk through the data files provided
import re  # Regular expressions

testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/'

def readTaskFile(filename):
    f = open(filename, 'r')
    data = json.loads(f.read())
    data['id'] = re.sub('(.*/)|(\\.json)', '', filename)
    f.close()
    return data

filename = testDirectory + '19bb5feb.json'
readTaskFile(filename)

def getGridSizeComparison(filename):
    data = readTaskFile(filename)
    trainSection = data['train']
    ident = data['id']
    numTrain = len(trainSection)
    result = {}
    for i in range(numTrain):
        trainCase = trainSection[i]
        trainCaseInput = trainCase['input']
        trainCaseOutput = trainCase['output']
        sameY = len(trainCaseInput) == len(trainCaseOutput)
        sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0])
        result[ident + '_train_' + str(i)] = sameX and sameY
    return result

filename = testDirectory + '19bb5feb.json'
getGridSizeComparison(filename)

def getResults(directory, f):
    results = {}
    for _, _, filenames in os.walk(directory):
        for filename in filenames:
            results.update(f(directory + filename))
    return results

results = getResults(trainingDirectory, getGridSizeComparison)
count = 0
for key, value in results.items():
    if value:
        count += 1
print('Proportion of training examples with the same grid size: ' + str(round(count / len(results), 2)))
code
33116653/cell_5
[ "text_plain_output_1.png" ]
import json
import re  # Regular expressions

testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/'

def readTaskFile(filename):
    f = open(filename, 'r')
    data = json.loads(f.read())
    data['id'] = re.sub('(.*/)|(\\.json)', '', filename)
    f.close()
    return data

filename = testDirectory + '19bb5feb.json'
readTaskFile(filename)
code
129006229/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

lang = pd.read_csv('/kaggle/input/language-learning/language.csv', encoding='iso-8859-1')
lang
lang['morph'] = lang['morph'].fillna(0.0501)
lang['new_feat'] = lang['new_feat'].fillna(14.4)
lang['new_sounds'] = lang['new_sounds'].fillna(20.1)
lang.drop_duplicates()
lang.info()
code
129006229/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

lang = pd.read_csv('/kaggle/input/language-learning/language.csv', encoding='iso-8859-1')
lang
lang.info()
code
129006229/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

lang = pd.read_csv('/kaggle/input/language-learning/language.csv', encoding='iso-8859-1')
lang
lang['morph'] = lang['morph'].fillna(0.0501)
lang['new_feat'] = lang['new_feat'].fillna(14.4)
lang['new_sounds'] = lang['new_sounds'].fillna(20.1)
lang.drop_duplicates()
subset_lang = lang[['LoR', 'Edu.day', 'Speaking']]
subset_lang = subset_lang.dropna()
subset_lang = lang[['Sex', 'Speaking']]
subset_lang = lang[['AaA', 'LoR']]
subset_lang = lang[['Enroll', 'Speaking', 'Edu.day']]
sns.scatterplot(data=subset_lang, x='Enroll', y='Speaking', hue='Edu.day')
plt.xlabel('Enrollment Duration (Enroll)')
plt.ylabel('Speaking Proficiency Score')
plt.title('Relationship between Enroll, Edu.day, and Speaking Proficiency Score')
plt.show()
code
129006229/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
lang = pd.read_csv('/kaggle/input/language-learning/language.csv', encoding='iso-8859-1')
lang
lang['morph'] = lang['morph'].fillna(0.0501)
lang['new_feat'] = lang['new_feat'].fillna(14.4)
lang['new_sounds'] = lang['new_sounds'].fillna(20.1)
lang.drop_duplicates()
import matplotlib.pyplot as plt
subset_lang = lang[['LoR', 'Edu.day', 'Speaking']]
subset_lang = subset_lang.dropna()
plt.scatter(subset_lang['LoR'], subset_lang['Speaking'], c=subset_lang['Edu.day'], cmap='viridis')
plt.xlabel('Length of Residence (LoR)')
plt.ylabel('Speaking Proficiency Score')
plt.colorbar(label='Formal Education Days (Edu.day)')
plt.title('Relationship between LoR, Edu.day, and Speaking Proficiency Score')
plt.show()
code
129006229/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129006229/cell_7
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
lang = pd.read_csv('/kaggle/input/language-learning/language.csv', encoding='iso-8859-1')
lang
lang['morph'] = lang['morph'].fillna(0.0501)
lang['new_feat'] = lang['new_feat'].fillna(14.4)
lang['new_sounds'] = lang['new_sounds'].fillna(20.1)
lang.info()
code
129006229/cell_8
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
lang = pd.read_csv('/kaggle/input/language-learning/language.csv', encoding='iso-8859-1')
lang
lang['morph'] = lang['morph'].fillna(0.0501)
lang['new_feat'] = lang['new_feat'].fillna(14.4)
lang['new_sounds'] = lang['new_sounds'].fillna(20.1)
lang.drop_duplicates()
code
129006229/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
lang = pd.read_csv('/kaggle/input/language-learning/language.csv', encoding='iso-8859-1')
lang
code
129006229/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
lang = pd.read_csv('/kaggle/input/language-learning/language.csv', encoding='iso-8859-1')
lang
lang['morph'] = lang['morph'].fillna(0.0501)
lang['new_feat'] = lang['new_feat'].fillna(14.4)
lang['new_sounds'] = lang['new_sounds'].fillna(20.1)
lang.drop_duplicates()
import matplotlib.pyplot as plt
subset_lang = lang[['LoR', 'Edu.day', 'Speaking']]
subset_lang = subset_lang.dropna()
plt.colorbar(label='Formal Education Days (Edu.day)')
import seaborn as sns
subset_lang = lang[['Sex', 'Speaking']]
subset_lang = lang[['AaA', 'LoR']]
plt.scatter(subset_lang['AaA'], subset_lang['LoR'])
plt.xlabel('Age at Arrival (AaA)')
plt.ylabel('Length of Residence (LoR)')
plt.title('Relationship between AaA and LoR')
plt.show()
code
129006229/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
lang = pd.read_csv('/kaggle/input/language-learning/language.csv', encoding='iso-8859-1')
lang
lang['morph'] = lang['morph'].fillna(0.0501)
lang['new_feat'] = lang['new_feat'].fillna(14.4)
lang['new_sounds'] = lang['new_sounds'].fillna(20.1)
lang.drop_duplicates()
import matplotlib.pyplot as plt
subset_lang = lang[['LoR', 'Edu.day', 'Speaking']]
subset_lang = subset_lang.dropna()
plt.colorbar(label='Formal Education Days (Edu.day)')
import seaborn as sns
subset_lang = lang[['Sex', 'Speaking']]
sns.boxplot(x='Sex', y='Speaking', data=subset_lang)
plt.xlabel('Gender')
plt.ylabel('Speaking Proficiency Score')
plt.title('Distribution of Speaking Proficiency Scores by Gender')
plt.show()
code
129006229/cell_5
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
lang = pd.read_csv('/kaggle/input/language-learning/language.csv', encoding='iso-8859-1')
lang
lang.describe()
code
17121510/cell_9
[ "image_output_1.png" ]
from csnl import DenseLadderVAE, DataGenerator, ModelTrainer, VAEPlotter
import matplotlib.pyplot as plt
datagen = DataGenerator(image_shape=(28, 28, 1), batch_size=100, file_path='../input/textures_42000_28px.pkl')
LATENT_DIM1 = 16 * 8
LATENT_DIM2 = 16
vae = DenseLadderVAE(input_shape=(100, 28 * 28), latent_dim1=LATENT_DIM1, latent_dim2=LATENT_DIM2)
trainer = ModelTrainer(vae, datagen, loss_fn='normal', lr=5e-05, decay=0.0001, beta=1)
trainer.fit(100, 2000, warm_up=True)
import matplotlib.pyplot as plt
plotter = VAEPlotter(trainer, datagen)
plotter.grid_plot()
code
17121510/cell_6
[ "image_output_1.png" ]
from csnl import DenseLadderVAE, DataGenerator, ModelTrainer, VAEPlotter
datagen = DataGenerator(image_shape=(28, 28, 1), batch_size=100, file_path='../input/textures_42000_28px.pkl')
LATENT_DIM1 = 16 * 8
LATENT_DIM2 = 16
vae = DenseLadderVAE(input_shape=(100, 28 * 28), latent_dim1=LATENT_DIM1, latent_dim2=LATENT_DIM2)
trainer = ModelTrainer(vae, datagen, loss_fn='normal', lr=5e-05, decay=0.0001, beta=1)
trainer.fit(100, 2000, warm_up=True)
code
17121510/cell_2
[ "text_plain_output_1.png" ]
from csnl import DenseLadderVAE, DataGenerator, ModelTrainer, VAEPlotter
code
17121510/cell_1
[ "text_plain_output_1.png" ]
!pip install csnl-vae-olaralex==1.92dev0
code
17121510/cell_7
[ "image_output_1.png" ]
from csnl import DenseLadderVAE, DataGenerator, ModelTrainer, VAEPlotter
import matplotlib.pyplot as plt
datagen = DataGenerator(image_shape=(28, 28, 1), batch_size=100, file_path='../input/textures_42000_28px.pkl')
LATENT_DIM1 = 16 * 8
LATENT_DIM2 = 16
vae = DenseLadderVAE(input_shape=(100, 28 * 28), latent_dim1=LATENT_DIM1, latent_dim2=LATENT_DIM2)
trainer = ModelTrainer(vae, datagen, loss_fn='normal', lr=5e-05, decay=0.0001, beta=1)
trainer.fit(100, 2000, warm_up=True)
import matplotlib.pyplot as plt
plt.title('Model loss')
plt.plot(trainer.history.history['loss'])
plt.plot(trainer.history.history['val_loss'])
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
code
17121510/cell_3
[ "text_plain_output_1.png" ]
from csnl import DenseLadderVAE, DataGenerator, ModelTrainer, VAEPlotter
datagen = DataGenerator(image_shape=(28, 28, 1), batch_size=100, file_path='../input/textures_42000_28px.pkl')
code
17121510/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from csnl import DenseLadderVAE, DataGenerator, ModelTrainer, VAEPlotter
import matplotlib.pyplot as plt
datagen = DataGenerator(image_shape=(28, 28, 1), batch_size=100, file_path='../input/textures_42000_28px.pkl')
LATENT_DIM1 = 16 * 8
LATENT_DIM2 = 16
vae = DenseLadderVAE(input_shape=(100, 28 * 28), latent_dim1=LATENT_DIM1, latent_dim2=LATENT_DIM2)
trainer = ModelTrainer(vae, datagen, loss_fn='normal', lr=5e-05, decay=0.0001, beta=1)
trainer.fit(100, 2000, warm_up=True)
import matplotlib.pyplot as plt
plotter = VAEPlotter(trainer, datagen)
plotter.generate_samples()
code
17121510/cell_5
[ "text_plain_output_1.png" ]
from csnl import DenseLadderVAE, DataGenerator, ModelTrainer, VAEPlotter
datagen = DataGenerator(image_shape=(28, 28, 1), batch_size=100, file_path='../input/textures_42000_28px.pkl')
LATENT_DIM1 = 16 * 8
LATENT_DIM2 = 16
vae = DenseLadderVAE(input_shape=(100, 28 * 28), latent_dim1=LATENT_DIM1, latent_dim2=LATENT_DIM2)
trainer = ModelTrainer(vae, datagen, loss_fn='normal', lr=5e-05, decay=0.0001, beta=1)
code
105183805/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import torchvision
print(torchvision.__version__)
code
105183805/cell_4
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import pandas as pd
df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv')
df['labels'] = df['labels'].apply(lambda string: string.split(' '))
s = list(df['labels'])
mlb = MultiLabelBinarizer()
trainx = pd.DataFrame(mlb.fit_transform(s), columns=mlb.classes_, index=df.index)
trainx.insert(0, 'image', df['image'], True)
trainx
code
105183805/cell_20
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from albumentations.pytorch.transforms import ToTensorV2
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch import EfficientNet
from sklearn.preprocessing import MultiLabelBinarizer
from torch.utils.data import Dataset, DataLoader
from transformers import get_cosine_schedule_with_warmup
import cv2
import cv2
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv')
df['labels'] = df['labels'].apply(lambda string: string.split(' '))
s = list(df['labels'])
mlb = MultiLabelBinarizer()
trainx = pd.DataFrame(mlb.fit_transform(s), columns=mlb.classes_, index=df.index)
trainx.insert(0, 'image', df['image'], True)
trainx
t_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/sample_submission.csv')
test_df = t_df.drop(['labels'], axis=1)
test_df
from sklearn.model_selection import train_test_split
train_df = trainx
train_df.reset_index(drop=True, inplace=True)
test_df.reset_index(drop=True, inplace=True)
class CustomDataset(Dataset):
    def __init__(self, df, root_dir, transform=None, iftest=False):
        self.df = df
        self.root_dir = root_dir
        self.transform = transform
        self.iftest = iftest
    def __len__(self):
        return len(self.df)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = self.root_dir + self.df.iloc[idx, 0]
        image = cv2.imread(img_name, cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.transform:
            image = self.transform(image=image)['image']
        if self.iftest:
            return image
        labels = torch.tensor(np.argmax(self.df.iloc[idx, 1:].values))
        return (image, labels)
IMSIZE = 545
IMSIZE = EfficientNet.get_image_size('efficientnet-b5')
train_dataset = CustomDataset(df=train_df, root_dir='../input/plant-pathology-2021-fgvc8/train_images/', transform=Compose([augmentations.geometric.resize.Resize(height=IMSIZE, width=IMSIZE, always_apply=True), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), ShiftScaleRotate(rotate_limit=25.0, p=0.7), OneOf([Emboss(p=1), Sharpen(p=1), Blur(p=1)], p=0.5), PiecewiseAffine(p=0.5), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), always_apply=True), ToTensorV2()]))
test_dataset = CustomDataset(df=test_df, root_dir='../input/plant-pathology-2021-fgvc8/test_images/', transform=Compose([augmentations.geometric.resize.Resize(height=IMSIZE, width=IMSIZE, always_apply=True), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), always_apply=True), ToTensorV2()]), iftest=True)
BATCH_SIZE = 1
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
use_cuda = torch.cuda.is_available()
if use_cuda:
    device = 'cuda:0'
use_tpu = False
use_device = True
if use_tpu:
    device = 'idk'
from efficientnet_pytorch import EfficientNet
model_efficient = EfficientNet.from_pretrained('efficientnet-b7')
ad = False
model_efficient._fc = nn.Sequential(nn.Linear(model_efficient._fc.in_features, 1000, bias=True), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(1000, 6, bias=True))
if use_device:
    model_efficient = model_efficient.to(device)
NEPOCHS = 1
criterion_transfer = nn.CrossEntropyLoss()
learning_rate = 0.0008
optimizer_transfer = optim.AdamW(model_efficient.parameters(), learning_rate, weight_decay=0.001)
num_train_steps = int(len(train_dataset) / BATCH_SIZE * NEPOCHS)
from transformers import get_cosine_schedule_with_warmup
scheduler = get_cosine_schedule_with_warmup(optimizer_transfer, num_warmup_steps=len(train_dataset) / BATCH_SIZE * 5, num_training_steps=num_train_steps)
model_efficient.save('first_weights.h5')
print('Saved model to disk')
code
105183805/cell_2
[ "text_plain_output_1.png" ]
!pip install --upgrade efficientnet-pytorch
code
105183805/cell_19
[ "text_plain_output_1.png" ]
from albumentations.pytorch.transforms import ToTensorV2
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch import EfficientNet
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MultiLabelBinarizer
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from tqdm import tqdm
from transformers import get_cosine_schedule_with_warmup
import cv2
import cv2
import numpy as np
import pandas as pd
import scipy
import torch
import torch.nn as nn
import torch.optim as optim
import tqdm
df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv')
df['labels'] = df['labels'].apply(lambda string: string.split(' '))
s = list(df['labels'])
mlb = MultiLabelBinarizer()
trainx = pd.DataFrame(mlb.fit_transform(s), columns=mlb.classes_, index=df.index)
trainx.insert(0, 'image', df['image'], True)
trainx
t_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/sample_submission.csv')
test_df = t_df.drop(['labels'], axis=1)
test_df
from sklearn.model_selection import train_test_split
train_df = trainx
train_df.reset_index(drop=True, inplace=True)
test_df.reset_index(drop=True, inplace=True)
class CustomDataset(Dataset):
    def __init__(self, df, root_dir, transform=None, iftest=False):
        self.df = df
        self.root_dir = root_dir
        self.transform = transform
        self.iftest = iftest
    def __len__(self):
        return len(self.df)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = self.root_dir + self.df.iloc[idx, 0]
        image = cv2.imread(img_name, cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.transform:
            image = self.transform(image=image)['image']
        if self.iftest:
            return image
        labels = torch.tensor(np.argmax(self.df.iloc[idx, 1:].values))
        return (image, labels)
IMSIZE = 545
IMSIZE = EfficientNet.get_image_size('efficientnet-b5')
train_dataset = CustomDataset(df=train_df, root_dir='../input/plant-pathology-2021-fgvc8/train_images/', transform=Compose([augmentations.geometric.resize.Resize(height=IMSIZE, width=IMSIZE, always_apply=True), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), ShiftScaleRotate(rotate_limit=25.0, p=0.7), OneOf([Emboss(p=1), Sharpen(p=1), Blur(p=1)], p=0.5), PiecewiseAffine(p=0.5), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), always_apply=True), ToTensorV2()]))
test_dataset = CustomDataset(df=test_df, root_dir='../input/plant-pathology-2021-fgvc8/test_images/', transform=Compose([augmentations.geometric.resize.Resize(height=IMSIZE, width=IMSIZE, always_apply=True), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), always_apply=True), ToTensorV2()]), iftest=True)
BATCH_SIZE = 1
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
use_cuda = torch.cuda.is_available()
if use_cuda:
    device = 'cuda:0'
use_tpu = False
use_device = True
if use_tpu:
    device = 'idk'
def test(model, test_loader, use_device):
    preds_for_output = np.zeros((1, 6))
    with torch.no_grad():
        model.eval()
        for images in test_loader:
            print(type(images))
            if use_device:
                images = images.to(device)
            preds = model(images)
            preds_for_output = np.concatenate((preds_for_output, preds.cpu().detach().numpy()), 0)
    return preds_for_output
from efficientnet_pytorch import EfficientNet
model_efficient = EfficientNet.from_pretrained('efficientnet-b7')
ad = False
model_efficient._fc = nn.Sequential(nn.Linear(model_efficient._fc.in_features, 1000, bias=True), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(1000, 6, bias=True))
if use_device:
    model_efficient = model_efficient.to(device)
NEPOCHS = 1
criterion_transfer = nn.CrossEntropyLoss()
learning_rate = 0.0008
optimizer_transfer = optim.AdamW(model_efficient.parameters(), learning_rate, weight_decay=0.001)
num_train_steps = int(len(train_dataset) / BATCH_SIZE * NEPOCHS)
from transformers import get_cosine_schedule_with_warmup
scheduler = get_cosine_schedule_with_warmup(optimizer_transfer, num_warmup_steps=len(train_dataset) / BATCH_SIZE * 5, num_training_steps=num_train_steps)
num_runs = 2
import scipy
subs = []
for i in range(num_runs):
    out = test(model_efficient, test_loader, use_device)
    output = pd.DataFrame(scipy.special.softmax(out, 1), columns=['complex', 'frog_eye_leaf_spot', 'healthy', 'powdery_mildew', 'rust', 'scab'])
    output.drop(0, inplace=True)
    output.reset_index(drop=True, inplace=True)
    subs.append(output)
sub_eff = sum(subs) / num_runs
code
105183805/cell_18
[ "text_html_output_1.png" ]
from albumentations.pytorch.transforms import ToTensorV2
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch import EfficientNet
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MultiLabelBinarizer
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from tqdm import tqdm
from transformers import get_cosine_schedule_with_warmup
import cv2
import cv2
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import tqdm
df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv')
df['labels'] = df['labels'].apply(lambda string: string.split(' '))
s = list(df['labels'])
mlb = MultiLabelBinarizer()
trainx = pd.DataFrame(mlb.fit_transform(s), columns=mlb.classes_, index=df.index)
trainx.insert(0, 'image', df['image'], True)
trainx
t_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/sample_submission.csv')
test_df = t_df.drop(['labels'], axis=1)
test_df
from sklearn.model_selection import train_test_split
train_df = trainx
train_df.reset_index(drop=True, inplace=True)
test_df.reset_index(drop=True, inplace=True)
class CustomDataset(Dataset):
    def __init__(self, df, root_dir, transform=None, iftest=False):
        self.df = df
        self.root_dir = root_dir
        self.transform = transform
        self.iftest = iftest
    def __len__(self):
        return len(self.df)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = self.root_dir + self.df.iloc[idx, 0]
        image = cv2.imread(img_name, cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.transform:
            image = self.transform(image=image)['image']
        if self.iftest:
            return image
        labels = torch.tensor(np.argmax(self.df.iloc[idx, 1:].values))
        return (image, labels)
IMSIZE = 545
IMSIZE = EfficientNet.get_image_size('efficientnet-b5')
train_dataset = CustomDataset(df=train_df, root_dir='../input/plant-pathology-2021-fgvc8/train_images/', transform=Compose([augmentations.geometric.resize.Resize(height=IMSIZE, width=IMSIZE, always_apply=True), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), ShiftScaleRotate(rotate_limit=25.0, p=0.7), OneOf([Emboss(p=1), Sharpen(p=1), Blur(p=1)], p=0.5), PiecewiseAffine(p=0.5), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), always_apply=True), ToTensorV2()]))
test_dataset = CustomDataset(df=test_df, root_dir='../input/plant-pathology-2021-fgvc8/test_images/', transform=Compose([augmentations.geometric.resize.Resize(height=IMSIZE, width=IMSIZE, always_apply=True), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), always_apply=True), ToTensorV2()]), iftest=True)
BATCH_SIZE = 1
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
use_cuda = torch.cuda.is_available()
if use_cuda:
    device = 'cuda:0'
use_tpu = False
use_device = True
if use_tpu:
    device = 'idk'
def train(n_epochs, train_loader, valid_loader, model, optimizer, criterion, use_device, save_path, final_train=False, ifsched=False):
    for epoch in range(1, n_epochs + 1):
        train_loss = 0.0
        valid_loss = 0.0
        labels_for_acc = []
        output_for_acc = []
        labels_for_accv = []
        output_for_accv = []
        model.train()
        for batch_idx, (data, target) in enumerate(tqdm(train_loader)):
            if use_device:
                data, target = (data.to(device), target.to(device))
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            train_loss += loss.item() * data.size(0)
            loss.backward()
            optimizer.step()
            if ifsched:
                scheduler.step()
            labels_for_acc = np.concatenate((labels_for_acc, target.cpu().numpy()), 0)
            output_for_acc = np.concatenate((output_for_acc, np.argmax(output.cpu().detach().numpy(), 1)), 0)
        train_loss = train_loss / len(train_loader.dataset)
        train_acc = accuracy_score(labels_for_acc, output_for_acc)
        if not final_train:
            with torch.no_grad():
                model.eval()
                for batch_idx, (data, target) in enumerate(valid_loader):
                    if use_device:
                        data, target = (data.to(device), target.to(device))
                    output = model(data)
                    loss = criterion(output, target)
                    valid_loss += loss.item() * data.size(0)
                    labels_for_accv = np.concatenate((labels_for_accv, target.cpu().numpy()), 0)
                    output_for_accv = np.concatenate((output_for_accv, np.argmax(output.cpu().detach().numpy(), 1)), 0)
            valid_loss = valid_loss / len(valid_loader.dataset)
            valid_acc = accuracy_score(labels_for_accv, output_for_accv)
from efficientnet_pytorch import EfficientNet
model_efficient = EfficientNet.from_pretrained('efficientnet-b7')
ad = False
model_efficient._fc = nn.Sequential(nn.Linear(model_efficient._fc.in_features, 1000, bias=True), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(1000, 6, bias=True))
if use_device:
    model_efficient = model_efficient.to(device)
NEPOCHS = 1
criterion_transfer = nn.CrossEntropyLoss()
learning_rate = 0.0008
optimizer_transfer = optim.AdamW(model_efficient.parameters(), learning_rate, weight_decay=0.001)
num_train_steps = int(len(train_dataset) / BATCH_SIZE * NEPOCHS)
from transformers import get_cosine_schedule_with_warmup
scheduler = get_cosine_schedule_with_warmup(optimizer_transfer, num_warmup_steps=len(train_dataset) / BATCH_SIZE * 5, num_training_steps=num_train_steps)
train(NEPOCHS, train_loader, None, model_efficient, optimizer_transfer, criterion_transfer, use_device, 'model_transfer.pt', ifsched=True, final_train=True)
code