kernel_id: int64 (values 24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
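Each row below is one flattened record: a prompt (the notebook code so far, ending in an intent tag such as <train_model> naming what the next cell should do), the completion (the code of that next cell), the competition name, and the kernel id. A minimal pandas sketch for loading and inspecting such a table follows; the file name kernel_cells.csv and the exact column spellings are assumptions for illustration, not part of the source:

import pandas as pd

# Hypothetical file name; the dump's actual storage format is not given.
df = pd.read_csv("kernel_cells.csv")

# Columns assumed from the schema above: kernel_id, prompt, completion, comp_name.
print(df[["kernel_id", "comp_name"]].head())

# Each prompt ends with an intent tag like <train_model>; pull it out for analysis.
df["intent"] = df["prompt"].str.extract(r"<(\w+)>\s*$", expand=False)
print(df["intent"].value_counts().head(10))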
7,034,662
output = pd.DataFrame({"id":test_data.id, "target":preds}) output.to_csv('submission.csv', index=False )<train_model>
learn.load('stage-1');
Digit Recognizer
7,034,662
print('Finish!' )<set_options>
learn.unfreeze() learn.lr_find()
Digit Recognizer
7,034,662
warnings.filterwarnings('ignore') RANDOM_SEED = 123<load_from_csv>
%%time learn.fit_one_cycle(35, max_lr=slice(1e-5, 0.02/10))
Digit Recognizer
7,034,662
train = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/train.csv") test = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv") sample = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/sample_submission.csv" )<feature_engineering>
learn.save('stage-2' )
Digit Recognizer
7,034,662
train['magic1'] = train['cont10']/train['cont11'] train['magic2'] = train['cont11']/train['cont10'] train['magic3'] = train['cont1']/train['cont7'] train['magic4'] = train['cont7']/train['cont1'] train['magic5'] = train['cont4']/train['cont6'] test['magic1'] = test['cont10']/test['cont11'] test['magic2'] = test['cont11']/test['cont10'] test['magic3'] = test['cont1']/test['cont7'] test['magic4'] = test['cont7']/test['cont1'] test['magic5'] = test['cont4']/test['cont6']<prepare_x_and_y>
interp = ClassificationInterpretation.from_learner(learn )
Digit Recognizer
7,034,662
train = train.drop('id', axis=1) test = test.drop('id', axis=1) X = train.drop('target', axis=1) y = train.target<choose_model_class>
def make_submission_file( learner, filename=f'submission_{datetime.datetime.now().strftime("%Y%m%d_%H%M%S")}.csv', preds=None ): if preds is None: preds, _ = learner.get_preds(ds_type=DatasetType.Test) preds = np.argmax(preds, 1) test_index = [] num = len(learner.data.test_ds) for i in range(num): test_index.append(str(learner.data.test_ds.items[i]).split('/')[-1]) df = (pd.DataFrame(data={"Label": preds, "Filename": test_index}) .sort_values(by='Filename') .drop('Filename', axis=1) .assign(ImageId=range(1, len(preds) + 1)) .reset_index(drop=True) .reindex(columns=['ImageId', 'Label'])) df.to_csv(filename, index=False) print(f'Saved predictions as {filename}.')
Digit Recognizer
7,034,662
cat = CatBoostRegressor(iterations=1000 )<compute_train_metric>
make_submission_file(learn, filename="resnet18-fine-tuned.csv" )
Digit Recognizer
7,034,662
model = [cat] for mod in model: score = cross_val_score(mod, X, y, cv=3, scoring="neg_root_mean_squared_error", n_jobs=-1) print("CAT RMSE Mean Score: ", np.mean(score))<compute_train_metric>
most_unsure = DatasetFormatter.from_most_unsure(learn )
Digit Recognizer
7,034,662
model = [cat] for mod in model: score = cross_val_score(mod, X, y, cv=10, scoring="neg_root_mean_squared_error", n_jobs=-1) print("CAT RMSE Mean Score: ", np.mean(score))<choose_model_class>
err1 = 1 - 0.99442 err2 = 1 - 0.99571 print(f'Human in the loop improvement: {100*(err1-err2)/err1}%' )
Digit Recognizer
3,975,332
lgbm = lightgbm.LGBMRegressor(random_state=RANDOM_SEED, n_jobs=-1, metric= 'rmse' )<compute_train_metric>
train_df = pd.read_csv('../input/train.csv') test_df = pd.read_csv('../input/test.csv') print(train_df.shape, test_df.shape)
Digit Recognizer
3,975,332
model = [lgbm] for mod in model: score = cross_val_score(mod, X, y, cv=3, scoring="neg_root_mean_squared_error", n_jobs=-1) print("LGBM RMSE Mean Score: ", np.mean(score))<compute_train_metric>
train_df['label'].value_counts(sort=False )
Digit Recognizer
3,975,332
model = [lgbm] for mod in model: score = cross_val_score(mod, X, y, cv=10, scoring="neg_root_mean_squared_error", n_jobs=-1) print("LGBM RMSE Mean Score: ", np.mean(score))<choose_model_class>
train_X = train_df.drop(['label'], axis=1 ).values train_Y = train_df['label'].values test_X = test_df.values print(train_X.shape, train_Y.shape, test_X.shape )
Digit Recognizer
3,975,332
xgbr = XGBRegressor(random_state=RANDOM_SEED )<compute_train_metric>
n_x = 28 train_X_digit = train_X.reshape(( -1, n_x, n_x, 1)) test_X_digit = test_X.reshape(( -1, n_x, n_x, 1)) print(train_X_digit.shape, test_X_digit.shape) train_X_digit = train_X_digit / 255. test_X_digit = test_X_digit / 255. onehot_labels = to_categorical(train_Y) print(onehot_labels.shape) print(train_Y[181], onehot_labels[181]) plt.figure(figsize=(1,1)) plt.imshow(train_X[181].reshape(( 28,28)) ,cmap=plt.cm.binary) plt.show()
Digit Recognizer
3,975,332
model = [xgbr] for mod in model: score = cross_val_score(mod, X, y, cv=3, scoring="neg_root_mean_squared_error", n_jobs=-1) print("XGB RMSE Mean Score: ", np.mean(score))<create_dataframe>
data_augment = ImageDataGenerator(rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1 )
Digit Recognizer
3,975,332
dtrain = lightgbm.Dataset(data=X, label=y) def hyp_lgbm(num_leaves, feature_fraction, bagging_fraction, max_depth, min_split_gain, min_child_weight, learning_rate): params = {'application':'regression','num_iterations': 5000, 'early_stopping_round':100, 'metric':'rmse'} params["num_leaves"] = int(round(num_leaves)) params['feature_fraction'] = max(min(feature_fraction, 1), 0) params['bagging_fraction'] = max(min(bagging_fraction, 1), 0) params['max_depth'] = int(round(max_depth)) params['min_split_gain'] = min_split_gain params['min_child_weight'] = min_child_weight params['learning_rate'] = learning_rate cv_result = lightgbm.cv(params, dtrain, nfold=3, seed=RANDOM_SEED, stratified=False, verbose_eval =None, metrics=['rmse']) return -np.min(cv_result['rmse-mean'] )<define_search_space>
model = models.Sequential() model.add(layers.Conv2D(32, kernel_size=5, padding='same', activation='relu', input_shape=(28, 28, 1))) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(rate=0.4)) model.add(layers.Conv2D(64, kernel_size=5, activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(rate=0.4)) model.add(layers.Conv2D(128, kernel_size=3, activation='relu')) model.add(layers.Flatten()) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dropout(rate=0.4)) model.add(layers.Dense(10, activation='softmax')) model.summary()
Digit Recognizer
3,975,332
pds = { 'num_leaves':(5, 50), 'feature_fraction':(0.2, 1), 'bagging_fraction':(0.2, 1), 'max_depth':(2, 20), 'min_split_gain':(0.001, 0.1), 'min_child_weight':(10, 50), 'learning_rate':(0.01, 0.5), }<init_hyperparams>
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
3,975,332
def cat_hyp(depth, bagging_temperature, l2_leaf_reg, learning_rate): params = {"iterations": 100, "loss_function": "RMSE", "verbose": False} params["depth"] = int(round(depth)) params["bagging_temperature"] = bagging_temperature params["learning_rate"] = learning_rate params["l2_leaf_reg"] = l2_leaf_reg cat_feat = [] cv_dataset = cgb.Pool(data=X, label=y, cat_features=cat_feat) scores = cgb.cv(cv_dataset, params, fold_count=3) return -np.min(scores['test-RMSE-mean'] )<define_search_space>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',patience=3,factor=0.5,min_lr=0.00001, verbose=1 )
Digit Recognizer
3,975,332
pds = {'depth':(4, 10), 'bagging_temperature':(0.1,10), 'l2_leaf_reg':(0.1, 10), 'learning_rate':(0.1, 0.2) }<train_on_grid>
X_dev = train_X_digit[:5000] rem_X_train = train_X_digit[5000:] print(X_dev.shape, rem_X_train.shape) Y_dev = onehot_labels[:5000] rem_Y_train = onehot_labels[5000:] print(Y_dev.shape, rem_Y_train.shape )
Digit Recognizer
3,975,332
dtrain = xgb.DMatrix(X, y, feature_names=X.columns.values) def hyp_xgb(max_depth, subsample, colsample_bytree,min_child_weight, gamma, learning_rate): params = { 'objective': 'reg:squarederror', 'eval_metric':'rmse', 'nthread':-1 } params['max_depth'] = int(round(max_depth)) params['subsample'] = max(min(subsample, 1), 0) params['colsample_bytree'] = max(min(colsample_bytree, 1), 0) params['min_child_weight'] = int(min_child_weight) params['gamma'] = max(gamma, 0) params['learning_rate'] = learning_rate scores = xgb.cv(params, dtrain, num_boost_round=500,verbose_eval=False, early_stopping_rounds=10, nfold=3) return -scores['test-rmse-mean'].iloc[-1]<define_search_space>
epochs = 30 batch_size = 128 history = model.fit_generator(data_augment.flow(rem_X_train, rem_Y_train, batch_size=batch_size), epochs=epochs, steps_per_epoch=rem_X_train.shape[0]//batch_size, validation_data=(X_dev, Y_dev), callbacks=[learning_rate_reduction] )
Digit Recognizer
3,975,332
pds ={ 'min_child_weight':(3, 20), 'gamma':(0, 5), 'subsample':(0.7, 1), 'colsample_bytree':(0.1, 1), 'max_depth':(3, 10), 'learning_rate':(0.01, 0.5) }<import_modules>
pred_dev = model.predict(X_dev) pred_dev_labels = np.argmax(pred_dev, axis=1 )
Digit Recognizer
3,975,332
from sklearn.ensemble import StackingRegressor from sklearn.linear_model import LinearRegression<init_hyperparams>
result = pd.DataFrame(train_Y[:5000], columns=['Y_dev']) result['Y_pred'] = pred_dev_labels result['correct'] = result['Y_dev'] - result['Y_pred'] errors = result[result['correct'] != 0] error_list = errors.index print('Number of errors is ', len(errors)) print('The indices are ', error_list )
Digit Recognizer
3,975,332
param_lgbm = { 'bagging_fraction': 0.973905385549851, 'feature_fraction': 0.2945585590881137, 'learning_rate': 0.03750332268701348, 'max_depth': int(7.66), 'min_child_weight': int(41.36), 'min_split_gain': 0.04033836353603582, 'num_leaves': int(46.42), 'application':'regression', 'num_iterations': 5000, 'metric': 'rmse' } param_cat = { 'bagging_temperature': 0.31768713094131684, 'depth': int(8.03), 'l2_leaf_reg': 1.3525686450404295, 'learning_rate': 0.2, 'iterations': 100, 'loss_function': 'RMSE', 'verbose': False } param_xgb = { 'colsample_bytree': 0.8119098377889549, 'gamma': 2.244423418642122, 'learning_rate': 0.015800631696721114, 'max_depth': int(9.846), 'min_child_weight': int(15.664), 'subsample': 0.82345, 'objective': 'reg:squarederror', 'eval_metric':'rmse', 'num_boost_round': 500 }<import_modules>
predictions = model.predict(test_X_digit) print(predictions.shape )
Digit Recognizer
3,975,332
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor from sklearn.neural_network import MLPRegressor from sklearn import svm import lightgbm<define_search_model>
predicted_labels = np.argmax(predictions, axis=1 )
Digit Recognizer
3,975,332
<train_model><EOS>
result = pd.read_csv('../input/sample_submission.csv') result['Label'] = predicted_labels result.to_csv('submission.csv', index=False)
Digit Recognizer
3,048,669
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<save_to_csv>
random.seed(42) init_notebook_mode(connected=True)
Digit Recognizer
3,048,669
sample['target'] = y_pred sample.to_csv("submission.csv", index=False )<import_modules>
df_train = pd.read_csv('../input/train.csv') df_comp = pd.read_csv('../input/test.csv')
Digit Recognizer
3,048,669
import pandas as pd import numpy as np import datetime import gc import os import random import time import warnings import lightgbm as lgb import xgboost import catboost import seaborn as sns from pandas import DataFrame from sklearn.metrics import roc_auc_score, f1_score, precision_recall_curve, auc from sklearn.model_selection import StratifiedKFold, train_test_split, KFold import matplotlib.pyplot as plt from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder from statsmodels.gam.tests.test_gam import sigmoid from tqdm import tqdm <load_from_csv>
df_train.isnull().sum().sum()
Digit Recognizer
3,048,669
train = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/test.csv') train.shape,test.shape<init_hyperparams>
from sklearn.model_selection import train_test_split
Digit Recognizer
3,048,669
label = 'target' seed = 0 local_test = True def seed_everything(seed): random.seed(seed) np.random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) seed_everything(seed) params = { 'objective': 'regression', 'boosting_type': 'gbdt', 'metric': 'rmse', 'n_jobs': -1, 'learning_rate': 0.006, 'num_leaves': 2 ** 8, 'max_depth': 8, 'tree_learner': 'serial', 'colsample_bytree': 0.8, 'subsample_freq': 1, 'subsample': 0.8, 'max_bin': 255, 'verbose': -1, 'seed': seed, } base_features = [x for x in train.columns if 'cont' in x] remove_features = [label,'id']<define_variables>
from sklearn.model_selection import train_test_split
Digit Recognizer
3,048,669
def make_test(new_features): features = base_features + new_features oof_predictions = np.zeros(len(train)) final_predictions = np.zeros(len(test)) cv = KFold(n_splits=10,shuffle=True,random_state=seed) if local_test: n_estimators=1000 else: n_estimators = 10000 lgb = LGBMRegressor(**params,n_estimators=n_estimators,device='GPU') for n,(trn_id,val_id)in enumerate(cv.split(train[features],train[label])) : print(f"===== training fold {n+1} =====") trn_x,trn_y = train.loc[trn_id,features],train.loc[trn_id,label] val_x,val_y = train.loc[val_id,features],train.loc[val_id,label] lgb.fit(trn_x,trn_y,eval_set=[(val_x,val_y)],early_stopping_rounds=100,verbose=-1) oof_predictions[val_id] = lgb.predict(val_x) final_predictions += lgb.predict(test[features])/ cv.n_splits mse_score = np.sqrt(mean_squared_error(y_true=val_y,y_pred=oof_predictions[val_id])) del trn_x,trn_y,val_x,val_y gc.collect() cur_mse_score = np.sqrt(mean_squared_error(y_true=train[label],y_pred=oof_predictions)) cur_mae_score = mean_absolute_error(y_true=train[label],y_pred=oof_predictions) print(f"global mse score {cur_mse_score}") print(f"global mae score {cur_mae_score}") print(f"diff with previous version {score[0] - cur_mse_score}") print(f"diff with previous version {score[1] - cur_mae_score}") if not local_test: test[label] = final_predictions test[['id',label]].to_csv(f'sub_{np.round(cur_mse_score,4)}.csv',index=False) return [cur_mse_score,cur_mae_score]<define_variables>
Y = df_train.label X = df_train.drop('label', axis=1) X = X / 255 X_comp = df_comp / 255 X_train, X_cross, Y_train, Y_cross = train_test_split(X, Y,test_size=0.1, random_state=42) X_valid, X_test, Y_valid, Y_test = train_test_split(X_cross, Y_cross, test_size=0.5, random_state=42 )
Digit Recognizer
3,048,669
local_test=False score = [0.6970820000536615, 0.5829603998473519] make_test([] )<import_modules>
from keras.models import Sequential, load_model from keras.layers import Dense, Conv2D, MaxPool2D, Dropout, Flatten from keras.utils import plot_model, to_categorical from keras.utils.vis_utils import model_to_dot from keras.preprocessing.image import ImageDataGenerator from sklearn.metrics import confusion_matrix, accuracy_score
Digit Recognizer
3,048,669
import os import joblib import numpy as np import pandas as pd from sklearn.linear_model import LinearRegression, Ridge from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score, cross_val_predict,RandomizedSearchCV, KFold from lightgbm import LGBMRegressor from xgboost import XGBRegressor from catboost import CatBoostRegressor import matplotlib import matplotlib.pyplot as plt import seaborn as sns import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.layers.experimental import preprocessing import optuna<load_from_csv>
X_train = X_train.values.reshape(X_train.shape[0],28,28,1) X_valid = X_valid.values.reshape(X_valid.shape[0],28,28,1) X_test = X_test.values.reshape(X_test.shape[0],28,28,1) X_comp = X_comp.values.reshape(X_comp.shape[0],28,28,1) Y_train = to_categorical(Y_train) Y_valid = to_categorical(Y_valid) Y_test = to_categorical(Y_test )
Digit Recognizer
3,048,669
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/train.csv') test_df = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/test.csv') submission = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/sample_submission.csv' )<prepare_x_and_y>
datagen = ImageDataGenerator(height_shift_range=0.1, width_shift_range=0.1, rotation_range=10, zoom_range=0.1, fill_mode='constant', cval=0 ) datagen.fit(X_train )
Digit Recognizer
3,048,669
features = [feature for feature in train_df.columns if feature not in ['id', 'target']] X_train = train_df[features] y_train = train_df['target'] X_test = test_df[features]<count_missing_values>
model = Sequential() droprate = 0.175 model.add(Conv2D(kernel_size=(2,2), filters=128, strides=(1,1), padding='same',activation='relu', input_shape=(28,28,1))) model.add(Conv2D(kernel_size=(2,2), filters=128, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(droprate)) model.add(Dense(10, activation='softmax')) model1 = model model1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
3,048,669
print('Missing value in train dataset:', sum(train_df.isnull().sum())) print('Missing value in test dataset:', sum(test_df.isnull().sum()))<choose_model_class>
epochsN = 25 batch_sizeN = 63 history1 = model1.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_sizeN), validation_data=(X_valid, Y_valid), steps_per_epoch=len(X_train)/batch_sizeN, epochs=epochsN, verbose=2 )
Digit Recognizer
3,048,669
cv = KFold(n_splits=5, shuffle=True, random_state=42 )<compute_train_metric>
model1.evaluate(X_test, Y_test, verbose=0 )
Digit Recognizer
3,048,669
%%time lin_reg = LinearRegression() scores = cross_val_score(lin_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) lin_rmse_scores = np.sqrt(-scores) print('Linear Regression performance:', lin_rmse_scores )<compute_train_metric>
model1.save('model_1.h5' )
Digit Recognizer
3,048,669
%%time tree_reg = DecisionTreeRegressor(random_state=42) scores = cross_val_score(tree_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) tree_rmse_scores = np.sqrt(-scores) print('Decision Tree Regressor performance:', tree_rmse_scores )<compute_train_metric>
del model model = Sequential() droprate = 0.15 model.add(Conv2D(kernel_size=(2,2), filters=128, strides=(1,1), padding='same',activation='relu', input_shape=(28,28,1))) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=32, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(droprate)) model.add(Dense(10, activation='softmax')) model2 = model model2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
3,048,669
%%time forest_reg = RandomForestRegressor(random_state=42, n_jobs=-1) scores = cross_val_score(forest_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) forest_rmse_scores = np.sqrt(-scores) print('Random Forest performance:', forest_rmse_scores )<compute_train_metric>
epochsN = 35 batch_sizeN = 63 history2 = model2.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_sizeN), validation_data=(X_valid, Y_valid), steps_per_epoch=len(X_train)/batch_sizeN, epochs=epochsN, verbose=2 )
Digit Recognizer
3,048,669
%%time lgbm_reg = LGBMRegressor(random_state=42) scores = cross_val_score(lgbm_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) lgbm_rmse_scores = np.sqrt(-scores) print('LGBM performance:', lgbm_rmse_scores )<compute_train_metric>
model2.evaluate(X_test, Y_test, verbose=0 )
Digit Recognizer
3,048,669
%%time xgb_reg = XGBRegressor(random_state=42) scores = cross_val_score(xgb_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) xgb_rmse_scores = np.sqrt(-scores) print('XGBoost performance:', xgb_rmse_scores )<compute_train_metric>
model2.save('model_2.h5' )
Digit Recognizer
3,048,669
%%time cb_reg = CatBoostRegressor(random_state=42, verbose=False) scores = cross_val_score(cb_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) cb_rmse_scores = np.sqrt(-scores) print('CatBoost performance:', cb_rmse_scores )<compute_train_metric>
del model model = Sequential() droprate = 0.2 model.add(Conv2D(kernel_size=(2,2), filters=128, strides=(1,1), padding='same',activation='relu', input_shape=(28,28,1))) model.add(Conv2D(kernel_size=(2,2), filters=128, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=32, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=32, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(droprate)) model.add(Dense(10, activation='softmax')) model3 = model model3.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
3,048,669
%%time ab_reg = AdaBoostRegressor(random_state=42) scores = cross_val_score(ab_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) ab_rmse_scores = np.sqrt(-scores) print('AdaBoost performance:', ab_rmse_scores )<choose_model_class>
epochsN = 40 batch_sizeN = 63 history3 = model3.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_sizeN), validation_data=(X_valid, Y_valid), steps_per_epoch=len(X_train)/batch_sizeN, epochs=epochsN, verbose=2 )
Digit Recognizer
3,048,669
def build_and_compile_model(norm): model = keras.Sequential([ norm, layers.Dense(64, activation='relu'), layers.Dense(64, activation='relu'), layers.Dense(1)]) model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.001)) return model<train_model>
model3.evaluate(X_test, Y_test, verbose=0 )
Digit Recognizer
3,048,669
%%time normalizer = preprocessing.Normalization() normalizer.adapt(np.array(X_train)) dnn_model = build_and_compile_model(normalizer) history = dnn_model.fit(X_train, y_train, validation_split=0.2, verbose=0, epochs=100 )<predict_on_test>
model3.save('model_3.h5' )
Digit Recognizer
3,048,669
%%time lin_reg = LinearRegression() y_predict = cross_val_predict(lin_reg, X_train, y_train, cv=cv, n_jobs=-1 )<predict_on_test>
del model model = Sequential() droprate = 0.20 model.add(Conv2D(kernel_size=(2,2), filters=128, strides=(1,1), padding='same',activation='relu', input_shape=(28,28,1))) model.add(Conv2D(kernel_size=(2,2), filters=128, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=32, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=32, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=16, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=16, strides=(1,1), padding='same',activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(droprate)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(droprate)) model.add(Dense(10, activation='softmax')) model4 = model model4.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
3,048,669
%%time tree_reg = DecisionTreeRegressor(random_state=42) y_predict = cross_val_predict(tree_reg, X_train, y_train, cv=cv, n_jobs=-1 )<predict_on_test>
epochsN = 90 batch_sizeN = 63 history4 = model4.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_sizeN), validation_data=(X_valid, Y_valid), steps_per_epoch=len(X_train)/batch_sizeN, epochs=epochsN, verbose=2 )
Digit Recognizer
3,048,669
%%time forest_reg = RandomForestRegressor(random_state=42, n_jobs=-1) y_predict = cross_val_predict(forest_reg, X_train, y_train, cv=cv, n_jobs=-1 )<predict_on_test>
model4.evaluate(X_test, Y_test, verbose=0 )
Digit Recognizer
3,048,669
%%time lgbm_reg = LGBMRegressor(random_state=42) y_predict = cross_val_predict(lgbm_reg, X_train, y_train, cv=cv, n_jobs=-1 )<predict_on_test>
model4.save('model_4.h5' )
Digit Recognizer
3,048,669
%%time xgb_reg = XGBRegressor(random_state=42) y_predict = cross_val_predict(xgb_reg, X_train, y_train, cv=cv, n_jobs=-1 )<predict_on_test>
del model model = Sequential() droprate = 0.1 model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu', input_shape=(28,28,1))) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(2,2), padding='valid',activation='relu')) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=32, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=32, strides=(2,2), padding='valid',activation='relu')) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=16, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(3,3), filters=16, strides=(2,2), padding='valid',activation='relu')) model.add(Flatten()) model.add(Dropout(droprate)) model.add(Dense(128, activation='relu')) model.add(Dropout(droprate)) model.add(Dense(10, activation='softmax')) model5 = model model5.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
3,048,669
%%time cb_reg = CatBoostRegressor(random_state=42, verbose=False) y_predict = cross_val_predict(cb_reg, X_train, y_train, cv=cv, n_jobs=-1 )<predict_on_test>
epochsN = 90 batch_sizeN = 63 history5 = model5.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_sizeN), validation_data=(X_valid, Y_valid), steps_per_epoch=len(X_train)/batch_sizeN, epochs=epochsN, verbose=2 )
Digit Recognizer
3,048,669
%%time ab_reg = AdaBoostRegressor(random_state=42) y_predict = cross_val_predict(ab_reg, X_train, y_train, cv=cv, n_jobs=-1 )<compute_train_metric>
model5.evaluate(X_test, Y_test, verbose=0 )
Digit Recognizer
3,048,669
def objective(trial): params = { 'random_state': 42, 'max_depth': trial.suggest_int('max_depth', 1, 14), 'learning_rate': trial.suggest_float('learning_rate', 0.01, 1.0) } lgbm_reg = LGBMRegressor() lgbm_reg.set_params(**params) scores = cross_val_score(lgbm_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) rmse = np.sqrt(-scores) return np.mean(rmse )<find_best_params>
model5.save('model_5.h5' )
Digit Recognizer
3,048,669
study = optuna.create_study(direction = 'minimize') study.optimize(objective, n_trials = 1) best_params = study.best_trial.params<compute_train_metric>
del model model = Sequential() droprate = 0.15 model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu', input_shape=(28,28,1))) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(2,2), padding='valid',activation='relu')) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(2,2), filters=32, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=32, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=32, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=16, strides=(2,2), padding='valid',activation='relu')) model.add(Flatten()) model.add(Dropout(droprate)) model.add(Dense(256, activation='relu')) model.add(Dropout(droprate)) model.add(Dense(10, activation='softmax')) model6 = model model6.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
3,048,669
%%time lgbm_reg = LGBMRegressor() lgbm_reg.set_params(**best_params) scores = cross_val_score(lgbm_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) lgbm_rmse_scores = np.sqrt(-scores) print('LGBM performance:', lgbm_rmse_scores )<compute_train_metric>
epochsN = 45 batch_sizeN = 63 history6 = model6.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_sizeN), validation_data=(X_valid, Y_valid), steps_per_epoch=len(X_train)/batch_sizeN, epochs=epochsN, verbose=2 )
Digit Recognizer
3,048,669
def objective(trial): params = { 'random_state': 42, 'max_depth': trial.suggest_int('max_depth', 1, 14), 'eta': trial.suggest_float('eta', 0.01, 1.0), } xgb_reg = XGBRegressor() xgb_reg.set_params(**params) scores = cross_val_score(xgb_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) rmse = np.sqrt(-scores) return np.mean(rmse )<find_best_params>
model6.evaluate(X_test, Y_test, verbose=0 )
Digit Recognizer
3,048,669
study = optuna.create_study(direction = 'minimize') study.optimize(objective, n_trials = 1) best_params = study.best_trial.params<compute_train_metric>
model6.save('model_6.h5' )
Digit Recognizer
3,048,669
%%time xgb_reg = XGBRegressor() xgb_reg.set_params(**best_params) scores = cross_val_score(xgb_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=cv, n_jobs=-1) xgb_rmse_scores = np.sqrt(-scores) print('XGBoost performance:', xgb_rmse_scores )<feature_engineering>
del model model = Sequential() droprate = 0.35 model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu', input_shape=(28,28,1))) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=64, strides=(2,2), padding='valid',activation='relu')) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(3,3), filters=128, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(3,3), filters=128, strides=(1,1), padding='same',activation='relu')) model.add(Conv2D(kernel_size=(2,2), filters=128, strides=(2,2), padding='valid',activation='relu')) model.add(Dropout(droprate)) model.add(Conv2D(kernel_size=(3,3), filters=256, strides=(1,1), padding='valid',activation='relu')) model.add(Conv2D(kernel_size=(3,3), filters=256, strides=(1,1), padding='valid',activation='relu')) model.add(Conv2D(kernel_size=(3,3), filters=256, strides=(2,2), padding='valid',activation='relu')) model.add(Dropout(droprate)) model.add(Flatten()) model.add(Dense(10, activation='softmax')) model7 = model model7.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
3,048,669
X_train['below8'] = np.where(y_train < 8, 1, 0 )<compute_train_metric>
epochsN = 60 batch_sizeN = 63 history7 = model7.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_sizeN), validation_data=(X_valid, Y_valid), steps_per_epoch=len(X_train)/batch_sizeN, epochs=epochsN, verbose=2 )
Digit Recognizer
3,048,669
%%time cb_reg = CatBoostRegressor(random_state=42, verbose=False) scores = cross_val_score(cb_reg, X_train, y_train, scoring='neg_mean_squared_error', cv=5) cb_rmse_scores = np.sqrt(-scores) print('CatBoost performance:', cb_rmse_scores )<import_modules>
model7.evaluate(X_test, Y_test, verbose=0 )
Digit Recognizer
3,048,669
import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from xgboost import XGBRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error<load_from_csv>
model7.save('model_7.h5' )
Digit Recognizer
3,048,669
train = pd.read_csv('../input/tabular-playground-series-jan-2021/train.csv', index_col='id') test = pd.read_csv('../input/tabular-playground-series-jan-2021/test.csv', index_col='id')<set_options>
trained_models = [model1, model2, model3, model4, model5, model6, model7]
Digit Recognizer
3,048,669
plt.style.use('ggplot') plt.rcParams['axes.titlesize'] = 16 plt.rcParams['axes.labelsize'] = 12 plt.rcParams['xtick.labelsize'] = 'large'<count_missing_values>
acc_scores = pd.Series() for num, model in enumerate(trained_models): acc_scores.loc['Model ' + str(num + 1)] = accuracy_score(np.argmax(Y_test, axis=1), np.argmax(model.predict(X_test), axis=1))
Digit Recognizer
3,048,669
print('Missing values on the train data:', train.isnull().sum().sum()) print('Missing values on the test data:', test.isnull().sum().sum() )<count_duplicates>
def summing_classifier(data, model_list): total_pred_prob = model_list[0].predict(data) for model in model_list[1:]: total_pred_prob += model.predict(data) return np.argmax(total_pred_prob, axis=1 )
Digit Recognizer
3,048,669
print('Duplicated rows on the train data:', train.duplicated().sum()) print('Duplicated rows on the test data:', test.duplicated().sum() )<define_variables>
acc_scores.loc['Summing Classifier'] = accuracy_score(np.argmax(Y_test, axis=1), summing_classifier(X_test, trained_models)) acc_scores.loc['Summing Classifier']
Digit Recognizer
3,048,669
q1 = train.quantile(0.25) q3 = train.quantile(0.75) iqr = q3 - q1 mask =(train >=(q1 - 1.5*iqr)) &(train <= q3 + 1.5*iqr) train = train[mask.apply(all, axis=1)] print('Train set without outliers shape:', train.shape )<split>
def voting_classifier(data, model_list): pred_list = np.argmax(model_list[0].predict(data), axis=1 ).reshape(( 1,len(data))) for model in model_list[1:]: pred_list = np.append(pred_list, [np.argmax(model.predict(data), axis=1)], axis=0) return np.array(list(map(lambda x: np.bincount(x ).argmax() , pred_list.T)) )
Digit Recognizer
3,048,669
X_train, X_val, y_train, y_val = train_test_split(train[predictors], train[target], test_size = 0.2, random_state=2021 )<choose_model_class>
acc_scores.loc['Voting Classifier'] = accuracy_score(np.argmax(Y_test, axis=1), voting_classifier(X_test, trained_models)) acc_scores.loc['Voting Classifier']
Digit Recognizer
3,048,669
model = XGBRegressor(objective='reg:squarederror', booster = "gbtree", eval_metric = "rmse", tree_method = "gpu_hist", n_estimators = 1000, learning_rate = 0.04, eta = 0.1, max_depth = 7, subsample=0.85, colsample_bytree = 0.85, colsample_bylevel = 0.8, alpha = 0, random_state = 2021 )<train_model>
best_model_results = pd.DataFrame({'Label' : np.argmax(trained_models[ind_best_model].predict(X_comp), axis=1)}) best_model_results = best_model_results.reset_index().rename(columns={'index' : 'ImageId'}) best_model_results['ImageId'] = best_model_results['ImageId'] + 1 best_model_results.to_csv('best_model_result_kaggle.csv', index=False )
Digit Recognizer
3,048,669
%time model.fit(X_train, y_train )<predict_on_test>
esmbl_sum_results = pd.DataFrame({'Label' : summing_classifier(X_comp, trained_models)}) esmbl_sum_results = esmbl_sum_results.reset_index().rename(columns={'index' : 'ImageId'}) esmbl_sum_results['ImageId'] = esmbl_sum_results['ImageId'] + 1 esmbl_sum_results.to_csv('esmbl_sum_result_kaggle.csv', index=False )
Digit Recognizer
3,048,669
<save_to_csv><EOS>
esmbl_vote_results = pd.DataFrame({'Label' : voting_classifier(X_comp, trained_models)}) esmbl_vote_results = esmbl_vote_results.reset_index().rename(columns={'index' : 'ImageId'}) esmbl_vote_results['ImageId'] = esmbl_vote_results['ImageId'] + 1 esmbl_vote_results.to_csv('esmbl_vote_result_kaggle.csv', index=False )
Digit Recognizer
3,050,804
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
warnings.filterwarnings('ignore') %matplotlib inline seed = 5 np.random.seed(seed )
Digit Recognizer
3,050,804
warnings.filterwarnings("ignore" )<load_from_csv>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv") print(train.shape, test.shape) train.tail()
Digit Recognizer
3,050,804
train_data = pd.read_csv(path+'train.csv') test_data = pd.read_csv(path+'test.csv') samp_subm = pd.read_csv(path+'sample_submission.csv' )<count_values>
X_train =(train.iloc[:,1:].values ).astype('float32') y_train = train.iloc[:,0].values.astype('int32') X_train = X_train.reshape(-1, 28, 28,1) X_train = X_train / 255.0 print(X_train.shape , y_train.shape) test = test.values.reshape(-1, 28, 28, 1) test = test.astype(float) test /= 255.0 print(test.shape )
Digit Recognizer
3,050,804
print('Number train samples:', len(train_data.index)) print('Number test samples:', len(test_data.index)) print('Number features:', len(train_data.columns))<count_missing_values>
y_train= to_categorical(y_train) num_classes = y_train.shape[1] print("Number of classes: ",num_classes )
Digit Recognizer
3,050,804
print('Missing values on the train data:', train_data.isnull().sum().sum()) print('Missing values on the test data:', test_data.isnull().sum().sum() )<train_model>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, y_train, test_size = 0.15, random_state=seed) print("Shapes of train, validation dataset ") print(X_train.shape , Y_train.shape) print(X_val.shape , Y_val.shape )
Digit Recognizer
3,050,804
pca = PCA().fit(train_data[train_data.columns[1:-1]]) plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('No of components') plt.ylabel('Cumulative explained variance') plt.grid() plt.show()<define_variables>
filters_1 = 32 filters_2 = 64 filters_3 = 128 model = models.Sequential() model.add(conv.Convolution2D(filters_1,(3,3), activation="relu", input_shape=(28, 28, 1), border_mode='same')) model.add(conv.Convolution2D(filters_1,(3,3), activation="relu", border_mode='same')) model.add(conv.MaxPooling2D(strides=(2,2))) model.add(conv.Convolution2D(filters_2,(3,3), activation="relu", border_mode='same')) model.add(conv.Convolution2D(filters_2,(3,3), activation="relu", border_mode='same')) model.add(conv.MaxPooling2D(strides=(2,2))) model.add(core.Flatten()) model.add(core.Dropout(0.2)) model.add(core.Dense(128, activation="relu")) model.add(core.Dense(10, activation="softmax")) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.summary()
Digit Recognizer
3,050,804
features = ['cont'+str(i)for i in range(1, 15)] no_features = ['id', 'target']<feature_engineering>
%%time print("apply augumentation or data noisy...") datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train) print("training started...") epochs = 15 batch_size = 128 checkpoint = ModelCheckpoint('model-best-trained.h5', verbose=0, monitor='loss',save_best_only=True, mode='auto') learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=5, verbose=1, factor=0.5, min_lr=0.00001) history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,Y_val), verbose = 1, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction,checkpoint]) print("training finished!" )
Digit Recognizer
3,050,804
train_data['mean'] = train_data[features].mean(axis=1) train_data['std'] = train_data[features].std(axis=1) train_data['max'] = train_data[features].max(axis=1) train_data['min'] = train_data[features].min(axis=1) train_data['sum'] = train_data[features].sum(axis=1) test_data['mean'] = test_data[features].mean(axis=1) test_data['std'] = test_data[features].std(axis=1) test_data['max'] = test_data[features].max(axis=1) test_data['min'] = test_data[features].min(axis=1) test_data['sum'] = test_data[features].sum(axis=1 )<prepare_x_and_y>
print("Running prediction test.... ") predictions = model.predict_classes(test,verbose=1) print("done" )
Digit Recognizer
3,050,804
<predict_on_test><EOS>
np.savetxt('digits-mnist-cnn-3.csv', np.c_[range(1,len(predictions)+1),predictions], delimiter=',', header = 'ImageId,Label', comments = '', fmt='%d') print("saved prediction to file") sub = pd.read_csv("digits-mnist-cnn-3.csv") sub.tail(10 )
Digit Recognizer
1,079,802
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<count_values>
%matplotlib inline
Digit Recognizer
1,079,802
print('Number of outliers:', len(train_data)-mask.sum() )<split>
from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix import keras
Digit Recognizer
1,079,802
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size = 0.2, random_state=2021 )<choose_model_class>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
1,079,802
model = XGBRegressor(objective='reg:squarederror', booster = "gbtree", eval_metric = "rmse", tree_method = "gpu_hist", n_estimators = 600, learning_rate = 0.04, eta = 0.1, max_depth = 7, subsample=0.85, colsample_bytree = 0.85, colsample_bylevel = 0.8, alpha = 0, random_state = 2021) model.fit(X_train, y_train) y_val_pred = model.predict(X_val) print('Score validation data:', np.sqrt(mean_squared_error(y_val, y_val_pred)) )<predict_on_test>
Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1)
Digit Recognizer
1,079,802
y_test = model.predict(X_test )<prepare_output>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
1,079,802
output = samp_subm.copy() output['target'] = y_test<save_to_csv>
Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
1,079,802
output.to_csv('submission.csv', index=False )<import_modules>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1 )
Digit Recognizer
1,079,802
import os import sys import math import pickle import psutil import random import json import numpy as np import torch from torch import nn import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader import pandas as pd import riiideducation<define_variables>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax"))
Digit Recognizer
1,079,802
seed = 0 random.seed(seed) torch.random.manual_seed(seed) n_workers = os.cpu_count() device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') cfg_path = '/kaggle/input/riiid-mydata/cfg.json' train_path = '/kaggle/input/riiid-mydata/train.pkl' tag_path = '/kaggle/input/riiid-mydata/tags.csv' states_path = '/kaggle/input/riiid-mydata/states.pickle' model_path = '/kaggle/input/riiid-mydata/AIKT_08-12_17-21.pt' B = 512 MAX_LEN = 128 N_LEVELS = 21 MAX_LAG = 30 * 7 * 24 * 60 N_LAYERS = 1 D_MODEL = 256 IS_LITE = True<choose_model_class>
model.compile(optimizer = 'adam' , loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
1,079,802
class FFN(nn.Module): def __init__(self, d_model, dropout=0.0): super().__init__() self.lr1 = nn.Linear(d_model, d_model) self.relu = nn.ReLU() self.lr2 = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(dropout) def forward(self, x): x = self.lr1(x) x = self.relu(x) x = self.dropout(x) x = self.lr2(x) return x class AIKTMultiheadAttention(nn.Module): def __init__(self, d_model, n_heads=8, d_qkv=None, use_proj=True, dropout=0.1): super().__init__() if d_qkv is None or not use_proj: assert d_model % n_heads == 0 d_qkv = d_model // n_heads d_inner = d_qkv * n_heads self.scale = d_model **(-0.5) if use_proj: self.Q = nn.Linear(d_model, d_inner) self.K = nn.Linear(d_model, d_inner) self.V = nn.Linear(d_model, d_inner) self.out_proj = nn.Linear(d_inner, d_model) else: self.Q = self.K = self.V = self.out_proj = lambda x: x self.reshape_for_attn = lambda x: x.reshape(*x.shape[:-1], n_heads, d_qkv) self.recover_from_attn = lambda x: x.reshape(*x.shape[:-2], d_inner) self.dropout = nn.Dropout(dropout) def forward(self, query, key, value, attn_mask=None, rel_pos_embd=None): query, key, value =( self.reshape_for_attn(query), self.reshape_for_attn(key), self.reshape_for_attn(value) ) attn_scores = torch.einsum('ibnd,jbnd->ijbn',(query, key)) * self.scale if rel_pos_embd is not None: attn_scores += rel_pos_embd if attn_mask is not None: assert attn_mask.dtype == torch.bool, 'Only bool type is supported for masks.' assert attn_mask.ndim == 2, 'Only 2D attention mask is supported' assert attn_mask.shape == attn_scores.shape[:2], 'Incorrect mask shape: {}.Expect: {}'.format(attn_mask.shape, attn_scores.shape[:2]) mask = torch.zeros_like(attn_mask, dtype=torch.float) mask.masked_fill_(attn_mask, float('-inf')) mask = mask.view(*mask.shape, 1, 1) attn_scores += mask attn_weights = torch.softmax(attn_scores, dim=1) attn_weights = self.dropout(attn_weights) out = torch.einsum('ijbn,jbnd->ibnd',(attn_weights, value)) out = self.recover_from_attn(out) out = self.out_proj(out) attn_weights = attn_weights.mean(-1 ).permute(-1, 0, 1) return out, attn_weights class AIKTModel(nn.Module): def __init__( self, n_exercises, n_levels, max_lag, n_layers, d_model, n_heads=8, is_lite=True, dropout=0.1, lmda=1 ): super().__init__() self.exercise_embd = nn.Embedding(n_exercises, d_model) self.rate_to_level = lambda p: torch.round(p *(n_levels - 1)).long() self.level_embd = nn.Embedding(n_levels, d_model) self.correct_embd = nn.Embedding(2, d_model) self.max_lag = max_lag self.n_lag_buckets = 2 * math.ceil(math.log(max_lag)) self.rel_lag_embd = nn.Embedding(self.n_lag_buckets, n_heads) self.lmda = lmda self.enc = nn.ModuleList([ nn.ModuleList([ AIKTMultiheadAttention(d_model, n_heads, use_proj=not is_lite, dropout=dropout), None if is_lite else nn.LayerNorm(d_model), None if is_lite else FFN(d_model, dropout=dropout), None if is_lite else nn.LayerNorm(d_model) ])for _ in range(n_layers) ]) self.dec = nn.ModuleList([ nn.ModuleList([ AIKTMultiheadAttention(d_model, n_heads, use_proj=not is_lite, dropout=dropout), None if is_lite else nn.LayerNorm(d_model), None if is_lite else FFN(d_model, dropout=dropout), None if is_lite else nn.LayerNorm(d_model) ])for _ in range(n_layers) ]) self.is_lite = is_lite self.predict = nn.Linear(d_model, 1) self.dropout = nn.Dropout(dropout) def lag_to_bucket(self, lag_time): n_exact = self.n_lag_buckets // 2 acc_lag_time = torch.cumsum(lag_time, dim=-1 ).unsqueeze(-1) rel_lag_time = torch.clamp( acc_lag_time - acc_lag_time.transpose(-1, -2), min=0, max=self.max_lag ) rel_lag_time = torch.cat( 
[rel_lag_time[:, :, :1], rel_lag_time[:, :, :-1]], dim=-1 ) buckets_for_long_lag = n_exact - 1 + torch.ceil( torch.log(rel_lag_time / n_exact)/ math.log(self.max_lag / n_exact)*(self.n_lag_buckets - n_exact) ) buckets = torch.where(rel_lag_time < n_exact, rel_lag_time, buckets_for_long_lag.long()) return buckets.permute(1, 2, 0) def forward(self, e, c, r, lt, attn_mask=None): src = self.exercise_embd(e) src = src.transpose(0, 1) enc_attn_weights = [] for self_attn, ln1, ffn, ln2 in self.enc: out, attn_weights = self_attn(src, src, src, attn_mask=attn_mask) if self.is_lite: src = self.dropout(out) else: src = ln1(src + self.dropout(out)) out = ffn(src) src = ln2(src + self.dropout(out)) enc_attn_weights.append(attn_weights) kc = src.transpose(0, 1) src = kc + self.correct_embd(c) src = F.pad(src[:, :-1, :], [0, 0, 1, 0] ).transpose(0, 1) tgt = self.exercise_embd(e)+ self.level_embd(self.rate_to_level(r)) tgt = tgt.transpose(0, 1) rel_pos_embd = self.rel_lag_embd(self.lag_to_bucket(lt)) dec_attn_weights = [] for cross_attn, ln1, ffn, ln2 in self.dec: out, attn_weights = cross_attn(tgt, src, src, attn_mask=attn_mask, rel_pos_embd=rel_pos_embd) if self.is_lite: tgt = self.dropout(out) else: tgt = ln1(tgt + self.dropout(out)) out = ffn(tgt) tgt = ln2(tgt + self.dropout(out)) dec_attn_weights.append(attn_weights) tgt = tgt.transpose(0, 1) logit = self.predict(tgt) return logit.squeeze(-1),(enc_attn_weights, dec_attn_weights )<load_from_csv>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
1,079,802
class KaggleOnlineDataset(Dataset): def __init__(self, train_path, tag_path, states_path, n_exercises, cols, max_len): super().__init__() self.df = pd.read_pickle(train_path) self.test_df = None tag_df = pd.read_csv(tag_path, usecols=['exercise_id', 'bundle_id', 'part', 'correct_rate', 'frequency']) assert np.all(tag_df['exercise_id'].values == np.arange(n_exercises)) self.parts, self.correct_rate, self.frequency = tag_df[['part', 'correct_rate', 'frequency']].values.T self.lag_info = pickle.load(open(states_path, 'rb')) self.n_exercises = n_exercises self.cols = cols self.max_len = max_len def __len__(self): assert self.test_df is not None, 'Please call update() first' return len(self.test_df) def __getitem__(self, idx): new_observation = self.test_df.iloc[idx] user_id = new_observation['user_id'] new_data = {col: np.array([new_observation.get(col, 0)])for col in self.cols} if user_id in self.df.index: old_items = self.df[user_id] old_len = min(len(old_items[0]), self.max_len - 1) data = {key: np.append(old_item[-old_len:], new_data[key])for key, old_item in zip(self.cols, old_items)} else: old_len = 0 data = new_data seq_len = old_len + 1 data['part'] = self.parts[data['exercise_id']] data['correct_rate'] = self.correct_rate[data['exercise_id']] dtype_map = {key: int for key in self.cols + ['part']} dtype_map['correct_rate'] = float data = KaggleOnlineDataset._postpad_and_asdtype(data, self.max_len - seq_len, dtype_map) data['valid_len'] = np.array([seq_len], dtype=int) return data @staticmethod def _postpad_and_asdtype(data, pad, dtype_map): return { key: np.pad(item, [[0, pad]] ).astype(dtype_map[key])for key, item in data.items() } def update(self, test_df): if self.test_df is not None and psutil.virtual_memory().percent < 90: prev_df = self.test_df prev_df['correct'] = np.array(eval(test_df.iloc[0]['prior_group_answers_correct'])) [self.was_exercise] user_df = prev_df.groupby('user_id' ).apply(lambda udf: tuple(udf[col].values for col in self.cols)) for user_id, new_items in user_df.iteritems() : if user_id in self.df.index: self.df[user_id] = tuple(map( lambda old_item, new_item: np.append(old_item, new_item)[-min(self.max_len, len(old_item)+ 1):], self.df[user_id], new_items )) else: self.df[user_id] = tuple(new_item for new_item in new_items) is_exercise =(test_df['content_type_id'] == 0) test_df = test_df[is_exercise] test_df = test_df.rename(columns={'content_id': 'exercise_id', 'prior_question_elapsed_time': 'prior_elapsed'}) test_df['prior_elapsed'] = test_df['prior_elapsed'].fillna(0 ).astype(int) lag = self._compute_new_lag(test_df) test_df['lag'] = np.where( np.logical_and(0 < lag, lag < 60 * 1000), 1, np.round(lag /(1000 * 60)) ).astype(int) prior_elapsed = test_df['prior_elapsed'].values test_df['prior_elapsed'] = np.where( np.logical_and(0 < prior_elapsed, prior_elapsed < 1000), 1, np.round(prior_elapsed / 1000) ).astype(int) test_df.reset_index(drop=True, inplace=True) self.test_df = test_df self.was_exercise = is_exercise.values return test_df def _update_correct_rate(self, prev_df): exercise_df = prev_df.groupby('exercise_id' ).aggregate({'correct': [sum, len]})['correct'] n_correct = np.arange(self.n_exercises) np.put(n_correct, exercise_df.index, exercise_df['sum'].values) n_correct = n_correct + np.round(self.correct_rate * self.frequency) more_frequency = np.arange(self.n_exercises) np.put(more_frequency, exercise_df.index, exercise_df['len'].values) self.frequency += more_frequency correct_rate = n_correct / self.frequency self.correct_rate = 
np.where(np.isfinite(correct_rate), correct_rate, 0.5) def _compute_new_lag(self, df): last_states, exercise_id_to_bundle, bundle_id_to_size = self.lag_info lag = np.zeros(len(df)) for i,(user_id, curr_timestamp, curr_exercise_id, prior_elapsed)in enumerate( df[['user_id', 'timestamp', 'exercise_id', 'prior_elapsed']].values ): curr_bundle_id = exercise_id_to_bundle[curr_exercise_id] last_state = last_states.get(user_id, None) if last_state is None: last_states[user_id] =(curr_timestamp, curr_bundle_id) lag[i] = 0 else: last_timestamp, last_bundle_id = last_state if curr_bundle_id == last_bundle_id: lag[i] = 0 else: last_states[user_id] =(curr_timestamp, curr_bundle_id) elapsed_offset = bundle_id_to_size[last_bundle_id] * prior_elapsed lag[i] = curr_timestamp - last_timestamp - elapsed_offset lag = np.clip(lag, a_min=0, a_max=None) self.lag_info =(last_states, exercise_id_to_bundle, bundle_id_to_size) return lag<categorify>
epochs = 40 batch_size = 80
Digit Recognizer
1,079,802
def truncate_and_prepare_masks(items, valid_len, need_pad_mask=True, need_attn_mask=True): max_len = valid_len.max() device = max_len.device out = [None if item is None else item[:, :max_len] for item in items] pad_mask = torch.arange(max_len, device=device)>= valid_len if need_pad_mask else None attn_mask = torch.triu(torch.ones(max_len, max_len), diagonal=1 ).to(device, torch.bool)if need_attn_mask else None return out, pad_mask, attn_mask<load_pretrained>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
1,079,802
cfg = json.load(open(cfg_path, 'r')) model = AIKTModel(cfg['n_exercises'], N_LEVELS, MAX_LAG, N_LAYERS, D_MODEL, is_lite=IS_LITE ).to(device) model.load_state_dict(torch.load(model_path)) model.eval() testset = KaggleOnlineDataset(train_path, tag_path, states_path, cfg['n_exercises'], cfg['cols'], MAX_LEN) env = riiideducation.make_env() iter_test = env.iter_test() for test_df, _ in iter_test: test_df = testset.update(test_df) testloader = DataLoader(testset, batch_size=B, shuffle=False, num_workers=n_workers, drop_last=False) outs = np.array([], dtype='float32') for data in testloader: valid_len = data['valid_len'].to(device, torch.long) inputs, _, attn_mask = truncate_and_prepare_masks( [ data['exercise_id'].to(device, torch.long), data['correct'].to(device, torch.long), data['correct_rate'].to(device, torch.float), data['lag'].to(device, torch.long) ], valid_len, need_pad_mask=False ) out, _ = model(*inputs, attn_mask=attn_mask) out = torch.gather(out, 1, valid_len - 1 ).squeeze(-1) outs = np.append(outs, torch.sigmoid(out ).detach().cpu().numpy()) test_df['answered_correctly'] = outs env.predict(test_df[['row_id', 'answered_correctly']] )<load_from_csv>
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,Y_val), verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction] )
Digit Recognizer
1,079,802
session=pd.read_csv("/kaggle/input/airbnb-recruiting-new-user-bookings/sessions.csv.zip") print(session.shape) session.head()<load_from_csv>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
1,079,802
train_user=pd.read_csv("/kaggle/input/airbnb-recruiting-new-user-bookings/train_users_2.csv.zip") print(train_user.shape) train_user.head()<load_from_csv>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("cnn_mnist_datagen.csv",index=False )
Digit Recognizer
4,081,536
test_user=pd.read_csv("/kaggle/input/airbnb-recruiting-new-user-bookings/test_users.csv.zip") print(test_user.shape) test_user.head()<categorify>
train=pd.read_csv('../input/train.csv') test=pd.read_csv('../input/test.csv') sub=pd.read_csv('../input/sample_submission.csv')
Digit Recognizer
4,081,536
class Custom_Proccess(BaseEstimator, TransformerMixin): def transform(self,X,y=None): X.gender.replace('-unknown-', 'OTHER', inplace=True) X['age'].fillna(-1,inplace=True) X['timestamp_first_active']=X['timestamp_first_active'].apply(lambda s:datetime(year=int(str(s)[0:4]), month=int(str(s)[4:6]), day=int(str(s)[6:8])).strftime('%Y-%m-%d')) X['timestamp_first_active']=X['timestamp_first_active'].astype('datetime64[ns]') X['age']=X['age'].astype('int64') X['date_account_created']=X['date_account_created'].astype('datetime64[ns]') X['dac_year']=X['date_account_created'].dt.year X['dac_month']=X['date_account_created'].dt.month X['dac_day']=X['date_account_created'].dt.day X['tfa_year']=X['timestamp_first_active'].dt.year X['tfa_month']=X['timestamp_first_active'].dt.month X['tfa_day']=X['timestamp_first_active'].dt.day X.signup_app.replace(['iOS','Android'],'SmartDevice',inplace=True) X.drop(['date_first_booking','date_account_created','timestamp_first_active','first_device_type','first_browser'],axis=1,inplace=True) return X def fit(self, X, y=None, **fit_params): return self<drop_column>
x=train.drop(['label'],axis=1) x_test=test.copy()
Digit Recognizer
4,081,536
session.drop(['action_detail','device_type'],inplace=True,axis=1) session.dropna(subset=['user_id','action'],inplace=True) session.action_type=session.action_type.fillna('Other') session.secs_elapsed=session.secs_elapsed.fillna(0 )<categorify>
x=x/255 x_test=x_test/255
Digit Recognizer
4,081,536
session_group=session.groupby(['user_id','action_type'] ).agg({'action':'count','secs_elapsed':'sum'} ).reset_index() session_df=pd.get_dummies(session_group,columns=['action_type'] ).groupby(['user_id'] ).sum().reset_index() session_df.head()<merge>
y=train['label']
Digit Recognizer
4,081,536
train_user_df=train_user.merge(session_df,left_on=['id'], right_on=['user_id'],how='left' ).drop(['user_id'],axis=1 ).reset_index(drop=True) train_user_df.shape<merge>
y=pd.Categorical(y )
Digit Recognizer
4,081,536
test_user_df=test_user.merge(session_df,left_on=['id'], right_on=['user_id'],how='left' ).drop(['user_id'],axis=1 ).reset_index(drop=True) test_user_df.shape<data_type_conversions>
y=pd.get_dummies(y )
Digit Recognizer