kernel_id     int64          24.2k – 23.3M
prompt        stringlengths  8 – 1.85M
completetion  stringlengths  1 – 182k
comp_name     stringlengths  5 – 57
2,619,265
df=test for c in df.columns: if df[c].dtype=='object': lbl = LabelEncoder() df[c]=df[c].fillna('N') lbl.fit(list(df[c].values)) df[c] = lbl.transform(df[c].values) test=df<drop_column>
model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adamax(lr=0.01, beta_1=0.49, beta_2=0.999), metrics=['accuracy'])
Digit Recognizer
2,619,265
target = train['target'] data = train.drop(['target','id'], axis=1)<split>
batch_size = 32 epochs = 100 time_id = time.strftime("%Y-%m-%d_%H-%M-%S") best_model_filename = 'mnist-inception-best-' + time_id + '.hdf5' pre_train_weights = model.get_layer('conv_1').get_weights()[0] pre_train_weights = pre_train_weights.transpose(3, 2, 0, 1) annealer = CosineAnneal(max_lr=0.014, min_lr=0.003, T=5, T_mul=1, decay_rate=0.99) chkpt = keras.callbacks.ModelCheckpoint(best_model_filename, monitor='val_acc', save_best_only=True, verbose=False) datagen = ImageDataGenerator( width_shift_range=2, height_shift_range=2, preprocessing_function=lambda x: elastic_transform(x, alpha_range=[8, 10], sigma=3)) history = model.fit_generator( datagen.flow(X_train, y_train_smooth, batch_size=batch_size, shuffle=True), epochs=epochs, steps_per_epoch=(len(y_train) - 1) // batch_size + 1, validation_data=(X_val, y_val), callbacks=[annealer, chkpt])
Digit Recognizer
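The cell above calls CosineAnneal and elastic_transform, neither of which appears in the rows shown. The sketch below is a hypothetical reconstruction, not the kernel author's code: the names and constructor/call parameters are taken from the call sites, everything else is an assumption (a cosine-annealed learning-rate schedule with warm restarts, and a Simard-style random elastic deformation).

import math
import numpy as np
from keras import backend
from keras.callbacks import Callback
from scipy.ndimage import gaussian_filter, map_coordinates

class CosineAnneal(Callback):
    # Hypothetical stand-in for the undefined callback used above.
    def __init__(self, max_lr, min_lr, T, T_mul=1, decay_rate=1.0):
        super().__init__()
        self.max_lr, self.min_lr = max_lr, min_lr
        self.T, self.T_mul, self.decay_rate = T, T_mul, decay_rate
        self.epoch_in_cycle, self.cycle = 0, 0

    def on_epoch_begin(self, epoch, logs=None):
        # Cosine curve from the (decayed) peak down to min_lr over the current cycle.
        peak = self.max_lr * (self.decay_rate ** self.cycle)
        frac = self.epoch_in_cycle / self.T
        lr = self.min_lr + 0.5 * (peak - self.min_lr) * (1 + math.cos(math.pi * frac))
        backend.set_value(self.model.optimizer.lr, lr)

    def on_epoch_end(self, epoch, logs=None):
        self.epoch_in_cycle += 1
        if self.epoch_in_cycle >= self.T:  # warm restart; next cycle may be longer
            self.epoch_in_cycle = 0
            self.cycle += 1
            self.T *= self.T_mul

def elastic_transform(image, alpha_range=(8, 10), sigma=3):
    # Displace every pixel by a Gaussian-smoothed random field and resample.
    alpha = np.random.uniform(*alpha_range)
    shape = image.shape[:2]
    dx = gaussian_filter(np.random.uniform(-1, 1, shape), sigma) * alpha
    dy = gaussian_filter(np.random.uniform(-1, 1, shape), sigma) * alpha
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    coords = np.array([y + dy, x + dx])
    out = np.empty_like(image)
    for c in range(image.shape[-1]):
        out[..., c] = map_coordinates(image[..., c], coords, order=1, mode='reflect')
    return out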
2,619,265
def objective(trial,data=data,target=target): train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.2,random_state=42) param = { 'objective': trial.suggest_categorical('objective',['reg:tweedie']), 'tree_method': trial.suggest_categorical('tree_method',['hist']), 'lambda': trial.suggest_loguniform('lambda',1e-3,10.0), 'alpha': trial.suggest_loguniform('alpha',1e-3,10.0), 'colsample_bytree': trial.suggest_categorical('colsample_bytree', [0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]), 'subsample': trial.suggest_categorical('subsample', [0.4,0.5,0.6,0.7,0.8,1.0]), 'learning_rate': trial.suggest_categorical('learning_rate', [0.008,0.01,0.012,0.014,0.016,0.018,0.02]), 'n_estimators': trial.suggest_categorical('n_estimators', [1000,2000,4000,8000]), 'max_depth': trial.suggest_categorical('max_depth', [5,7,9,11,13,15,17,20]), 'random_state': trial.suggest_categorical('random_state', [24,48,2020]), 'min_child_weight': trial.suggest_int('min_child_weight', 1,300), 'use_label_encoder': trial.suggest_categorical('use_label_encoder',[False]) } model = xgb.XGBRegressor(**param) model.fit(train_x,train_y,eval_set=[(test_x,test_y)],early_stopping_rounds=100,verbose=False) preds = model.predict(test_x) rmse = mean_squared_error(test_y, preds,squared=False) return rmse<train_model>
model = keras.models.load_model(best_model_filename) evaluate_model(model, X_val, y_val, log=history.history, pre_train_weights=pre_train_weights)
Digit Recognizer
2,619,265
study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=16) print('Number of finished trials:', len(study.trials)) print('Best trial:', study.best_trial.params)<create_dataframe>
test_data = np.loadtxt('../input/test.csv', dtype=int, delimiter=',', skiprows=1)
Digit Recognizer
2,619,265
study.trials_dataframe()<find_best_params>
test_scores = model.predict(test_images) test_predictions = np.argmax(test_scores, axis=1)
Digit Recognizer
2,619,265
Best_trial = study.best_trial.params print(Best_trial)<load_from_csv>
header = 'ImageId,Label' submission = np.stack((range(1, 28001), test_predictions), axis=1) np.savetxt('submission-' + time_id + '.csv', submission, fmt='%i', delimiter=',', header=header, comments='')
Digit Recognizer
8,497,709
sample = pd.read_csv("../input/tabular-playground-series-feb-2021/sample_submission.csv") print(sample.shape)<find_best_model_class>
class SnapshotEnsemble(Callback): __snapshot_name_fmt = "snapshot_%d.hdf5" def __init__(self, n_models, n_epochs_per_model, lr_max, verbose=1): self.n_epochs_per_model = n_epochs_per_model self.n_models = n_models self.n_epochs_total = self.n_models * self.n_epochs_per_model self.lr_max = lr_max self.verbose = verbose self.lrs = [] def cosine_annealing(self, epoch): cos_inner = (math.pi * (epoch % self.n_epochs_per_model)) / self.n_epochs_per_model return self.lr_max / 2 * (math.cos(cos_inner) + 1) def on_epoch_begin(self, epoch, logs={}): lr = self.cosine_annealing(epoch) backend.set_value(self.model.optimizer.lr, lr) self.lrs.append(lr) def on_epoch_end(self, epoch, logs={}): if (epoch + 1) % self.n_epochs_per_model == 0: filename = self.__snapshot_name_fmt % ((epoch + 1) // self.n_epochs_per_model) self.model.save(filename) if self.verbose: print('Epoch %d: snapshot saved to %s' % (epoch, filename)) def load_ensemble(self): models = [] for i in range(self.n_models): models.append(load_model(self.__snapshot_name_fmt % (i + 1))) return models
Digit Recognizer
8,497,709
preds = np.zeros((sample.shape[0])) kf = KFold(n_splits=5, random_state=48, shuffle=True) for trn_idx, test_idx in kf.split(train[columns], target): X_tr, X_val = train[columns].iloc[trn_idx], train[columns].iloc[test_idx] y_tr, y_val = target.iloc[trn_idx], target.iloc[test_idx] model = xgb.XGBRegressor(**Best_trial) model.fit(X_tr, y_tr, eval_set=[(X_val, y_val)], early_stopping_rounds=100, verbose=False) preds += model.predict(test[columns]) / kf.n_splits rmse = mean_squared_error(y_val, model.predict(X_val), squared=False) print(rmse)<save_to_csv>
path = '/kaggle/input/digit-recognizer/' train = pd.read_csv(path + 'train.csv') test = pd.read_csv(path + 'test.csv') target = train['label'] train.drop(columns=['label'], inplace=True) im_size = 28 train = train.to_numpy().reshape((-1, im_size, im_size, 1)) test = test.to_numpy().reshape((-1, im_size, im_size, 1)) train = train / 255 test = test / 255 train.shape, test.shape
Digit Recognizer
8,497,709
subm = sample subm['target'] = preds subm.to_csv('submission.csv',index=False) subm<load_from_csv>
target = to_categorical(target) x_train, x_test, y_train, y_test = train_test_split(train, target, test_size=0.2, random_state=289) x_train.shape, x_test.shape
Digit Recognizer
8,497,709
warnings.filterwarnings("ignore") data = pd.read_csv("../input/tabular-playground-series-feb-2021/train.csv") test = pd.read_csv("../input/tabular-playground-series-feb-2021/test.csv")<load_from_csv>
model = Sequential() model.add(Conv2D(16, 3, activation='relu', padding='same', input_shape=(im_size, im_size, 1))) model.add(Dropout(0.5)) model.add(Conv2D(16, 3, activation='relu', padding='same')) model.add(Dropout(0.5)) model.add(Conv2D(16, 5, activation='relu', padding='same')) model.add(MaxPooling2D()) model.add(Conv2D(32, 3, activation='relu', padding='same')) model.add(Dropout(0.5)) model.add(Conv2D(32, 3, activation='relu', padding='same')) model.add(Dropout(0.5)) model.add(Conv2D(32, 5, activation='relu', padding='same')) model.add(MaxPooling2D()) model.add(Conv2D(64, 3, activation='relu', padding='same')) model.add(Dropout(0.5)) model.add(Conv2D(64, 3, activation='relu', padding='same')) model.add(Dropout(0.5)) model.add(Conv2D(64, 5, activation='relu', padding='same')) model.add(MaxPooling2D()) model.add(Flatten()) model.add(Dense(64, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
Digit Recognizer
8,497,709
train = pd.read_csv(DATA / "train.csv") test = pd.read_csv(DATA / "test.csv") smpl_sub = pd.read_csv(DATA / "sample_submission.csv") print("train: {}, test: {}, sample sub: {}".format(train.shape, test.shape, smpl_sub.shape))<prepare_output>
imagegen = ImageDataGenerator( rotation_range=15, width_shift_range=0.2, height_shift_range=0.2, zoom_range=0.2)
Digit Recognizer
8,497,709
data.set_index("id", inplace=True) test.set_index("id", inplace=True)<compute_test_metric>
se_callback = SnapshotEnsemble(n_models=7, n_epochs_per_model=50, lr_max=.001) history = model.fit_generator( imagegen.flow(x_train, y_train, batch_size=32), steps_per_epoch=len(x_train) / 32, epochs=se_callback.n_epochs_total, verbose=0, callbacks=[se_callback], validation_data=(x_test, y_test))
Digit Recognizer
8,497,709
def rmse(y_true, y_pred): return np.sqrt(np.mean((y_true - y_pred) ** 2))<categorify>
def predict(models, data, weights=None): if weights is None: weights = [1 / len(models)] * len(models) pred = np.zeros((data.shape[0], 10)) for i, model in enumerate(models): pred += model.predict(data) * weights[i] return pred def evaluate(preds, weights=None): if weights is None: weights = [1 / len(preds)] * len(preds) y_pred = np.zeros((y_test.shape[0], 10)) for i, pred in enumerate(preds): y_pred += pred * weights[i] y_pred = np.argmax(y_pred, axis=1) y_true = np.argmax(y_test, axis=1) return accuracy_score(y_true, y_pred) models = se_callback.load_ensemble() preds = [] for i, model in enumerate(models): pred = predict([model], x_test) preds.append(pred) score = evaluate([pred]) print(f'model {i + 1}: accuracy = {score:.4f}') ensemble_score = evaluate(preds) print(f'ensemble: accuracy = {ensemble_score:.4f}')
Digit Recognizer
8,497,709
def enc_scl_pipe(X_train, y_train, X_test, enc_method, scaler=StandardScaler()): X_train_encoded = X_train.copy() X_test_encoded = X_test.copy() feature_to_encode = X_train.columns[X_train.dtypes == 'O'].tolist() if enc_method == 'label': for feat in feature_to_encode: lbEncoder = LabelEncoder() lbEncoder.fit(X_train[feat]) X_train_encoded[feat] = lbEncoder.transform(X_train[feat]) X_test_encoded[feat] = lbEncoder.transform(X_test[feat]) elif enc_method == 'glmm': GLMMEncoder = ce.glmm.GLMMEncoder(verbose=0, binomial_target=False) GLMMEncoder.fit(X_train[feature_to_encode], y_train) X_train_encoded[feature_to_encode] = GLMMEncoder.transform(X_train[feature_to_encode]) X_test_encoded[feature_to_encode] = GLMMEncoder.transform(X_test[feature_to_encode]) else: raise ValueError('No encoding method stated') scaler.fit(X_train_encoded) X_train_scaled = pd.DataFrame(scaler.transform(X_train_encoded), columns=X_train_encoded.columns, index=X_train_encoded.index) X_test_scaled = pd.DataFrame(scaler.transform(X_test_encoded), columns=X_test_encoded.columns, index=X_test_encoded.index) return X_train_scaled, X_test_scaled, feature_to_encode<compute_train_metric>
best_score = ensemble_score best_weights = None no_improvements = 0 while no_improvements < 5000: new_weights = np.random.uniform(size=(len(models),)) new_weights /= new_weights.sum() new_score = evaluate(preds, new_weights) if new_score > best_score: no_improvements = 0 best_score = new_score best_weights = new_weights print(f'improvement: {best_score:.4f}') else: no_improvements += 1 print(f'best weights are {best_weights}')
Digit Recognizer
8,497,709
<init_hyperparams><EOS>
pred = predict(models, test, best_weights) res = pd.DataFrame() res['ImageId'] = np.arange(test.shape[0]) + 1 res['Label'] = np.argmax(pred, axis=1) res.to_csv('submission.csv', index=False) res.head(15)
Digit Recognizer
3,266,938
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering>
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from keras.models import Sequential from keras.layers import Conv2D, Lambda, MaxPooling2D from keras.layers import Dense, Dropout, Flatten from keras.layers.normalization import BatchNormalization from keras.preprocessing.image import ImageDataGenerator from keras.utils.np_utils import to_categorical
Digit Recognizer
3,266,938
def feature_engineering(data): new_data = data.copy() new_data['cat2p6'] = new_data['cat2'] + new_data['cat6'] new_data['cat6p1'] = new_data['cat6'] + new_data['cat1'] new_data['cat2p1'] = new_data['cat2'] + new_data['cat1'] new_data['cont0p8'] = new_data['cont0'] * new_data['cont8'] new_data['cont0p5'] = new_data['cont0'] * new_data['cont5'] new_data['cont11p8'] = new_data['cont11'] * new_data['cont8'] return new_data new_train = feature_engineering(train) new_test = feature_engineering(test) print(new_train.shape) print(new_test.shape)<compute_train_metric>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sub = pd.read_csv('../input/sample_submission.csv') print("Data are Ready!!")
Digit Recognizer
3,266,938
print("lgb base CV scores label") model = lgb.LGBMRegressor(**lgb_params_base) lgb_train_oof, lgb_test_preds, lgb_all_scores = kfold_CV_pipe(train, target, test, model,enc_method = 'label', columns = train.columns) oof_score = rmse(target, lgb_train_oof) print(f"lgb oof score: {oof_score:.6f}") print("---------------------------------------------------------------") print("lgb base CV scores glmm") model = lgb.LGBMRegressor(**lgb_params_base) lgb_train_oof, lgb_test_preds, lgb_all_scores = kfold_CV_pipe(train, target, test, model,enc_method = 'glmm' ,columns = train.columns) oof_score = rmse(target, lgb_train_oof) print(f"lgb oof score: {oof_score:.6f}") print("---------------------------------------------------------------") print("lgb base new features CV scores") model = lgb.LGBMRegressor(**lgb_params_base) lgb_train_oof, lgb_test_preds, lgb_all_scores = kfold_CV_pipe(new_train, target, new_test, model,enc_method = 'glmm', columns = new_train.columns) oof_score = rmse(target, lgb_train_oof) print(f"lgb oof score: {oof_score:.6f}" )<compute_train_metric>
print(f"Training data size is {train.shape} Testing data size is {test.shape}" )
Digit Recognizer
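kfold_CV_pipe is called throughout the cells above but never defined in the rows shown. The following is a minimal sketch under assumptions, not the kernel author's code: the signature and the returned triple (out-of-fold predictions, fold-averaged test predictions, per-fold scores) are inferred from the call sites; it reuses the enc_scl_pipe and rmse helpers defined earlier, assumes `columns` names feature columns present in both frames, and the fold count and seed are guesses.

from sklearn.base import clone
from sklearn.model_selection import KFold
import numpy as np

def kfold_CV_pipe(X, y, X_test, model, enc_method='label', columns=None, n_splits=5, seed=2021):
    # Out-of-fold CV loop: encode/scale inside each fold, collect OOF and test predictions.
    if columns is not None:
        X, X_test = X[columns], X_test[columns]
    train_oof = np.zeros(len(X))
    test_preds = np.zeros(len(X_test))
    all_scores = []
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    for trn_idx, val_idx in kf.split(X):
        X_tr, y_tr = X.iloc[trn_idx], y.iloc[trn_idx]
        X_val, y_val = X.iloc[val_idx], y.iloc[val_idx]
        # Fit encoders/scaler on the training fold only, then apply to val and test.
        X_tr_s, X_val_s, _ = enc_scl_pipe(X_tr, y_tr, X_val, enc_method)
        _, X_test_s, _ = enc_scl_pipe(X_tr, y_tr, X_test, enc_method)
        fold_model = clone(model)
        fold_model.fit(X_tr_s, y_tr)
        train_oof[val_idx] = fold_model.predict(X_val_s)
        test_preds += fold_model.predict(X_test_s) / n_splits
        all_scores.append(rmse(y_val.values, train_oof[val_idx]))
    return train_oof, test_preds, all_scores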
3,266,938
print("---------------------------------------------------------------") print("===============================================================") print("---------------------------------------------------------------") print("lgb1 CV scores") model = lgb.LGBMRegressor(**lgb_params) lgb1_train_oof, lgb1_test_preds, lgb1_all_scores = kfold_CV_pipe(train, target, test, model,enc_method = 'glmm', columns = train.columns) oof_score = rmse(target, lgb1_train_oof) print(f"lgb1 oof score: {oof_score:.6f}") print("---------------------------------------------------------------") print("===============================================================") print("---------------------------------------------------------------") print("lgb2 CV scores") model = lgb.LGBMRegressor(**lgb_params_2) lgb2_train_oof, lgb2_test_preds, lgb2_all_scores = kfold_CV_pipe(train, target, test, model,enc_method = 'glmm', columns = train.columns) oof_score = rmse(target, lgb2_train_oof) print(f"lgb2 oof score: {oof_score:.6f}") print("---------------------------------------------------------------") print("===============================================================") print("---------------------------------------------------------------") print("xgb CV scores") model = xgb.XGBRegressor(**xgb_params) xgb_train_oof, xgb_test_preds, xgb_all_scores = kfold_CV_pipe(train, target, test, model,enc_method = 'glmm', columns = train.columns) oof_score = rmse(target, xgb_train_oof) print(f"xgb oof score: {oof_score:.6f}" )<save_to_csv>
X = train.drop(['label'], axis=1).values y = train['label'].values test_x = test.values
Digit Recognizer
3,266,938
sub = smpl_sub.copy() sub['target'] = lgb1_test_preds sub.to_csv("lgb1_submission.csv", index=False) <save_to_csv>
X = X / 255.0 test_x = test_x / 255.0
Digit Recognizer
3,266,938
sub = smpl_sub.copy() sub['target'] = lgb2_test_preds sub.to_csv("lgb2_submission.csv", index=False)<save_to_csv>
y = to_categorical(y) print(f"Label size {y.shape}")
Digit Recognizer
3,266,938
sub = smpl_sub.copy() sub['target'] = xgb_test_preds sub.to_csv("xgb_submission.csv", index=False)<save_to_csv>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
Digit Recognizer
3,266,938
weights = [0.2, 0.4, 0.4] oof_pred_wavg = weights[0]*lgb1_train_oof + weights[1]*lgb2_train_oof + weights[2]*xgb_train_oof oof_score_wavg = rmse(target, oof_pred_wavg) print(f"oof score weighted avg: {oof_score_wavg:.6f}") test_pred_wavg = weights[0]*lgb1_test_preds + weights[1]*lgb2_test_preds + weights[2]*xgb_test_preds sub = smpl_sub.copy() sub['target'] = test_pred_wavg sub.to_csv("wavg_submission.csv", index=False)<import_modules>
mean = np.mean(X_train) std = np.std(X_train) def standardize(x): return (x - mean) / std
Digit Recognizer
3,266,938
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import optuna from lightgbm import LGBMRegressor from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split,KFold from sklearn.preprocessing import LabelEncoder<load_from_csv>
epochs = 50 batch_size = 64
Digit Recognizer
3,266,938
train = pd.read_csv('../input/tabular-playground-series-feb-2021/train.csv') test = pd.read_csv('../input/tabular-playground-series-feb-2021/test.csv')<categorify>
model = Sequential() model.add(Conv2D(filters=64, kernel_size=(3,3), activation="relu", input_shape=(28,28,1))) model.add(Conv2D(filters=64, kernel_size=(3,3), activation="relu")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization()) model.add(Conv2D(filters=128, kernel_size=(3,3), activation="relu")) model.add(Conv2D(filters=128, kernel_size=(3,3), activation="relu")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization()) model.add(Conv2D(filters=256, kernel_size=(3,3), activation="relu")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Flatten()) model.add(BatchNormalization()) model.add(Dense(512, activation="relu")) model.add(Dense(10, activation="softmax")) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
Digit Recognizer
3,266,938
cat_var = [f'cat{i}' for i in range(10)] cont_var = [f'cont{i}' for i in range(14)] columns = [col for col in train.columns.tolist() if col not in ['id','target']] for cat in cat_var: le = LabelEncoder() train[cat] = le.fit_transform(train[cat]) test[cat] = le.transform(test[cat])<prepare_x_and_y>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) train_gen = datagen.flow(X_train, y_train, batch_size=batch_size) test_gen = datagen.flow(X_test, y_test, batch_size=batch_size)
Digit Recognizer
3,266,938
X=train[columns] y=train.target<init_hyperparams>
history = model.fit_generator(train_gen, epochs=epochs, steps_per_epoch=X_train.shape[0] // batch_size, validation_data=test_gen, validation_steps=X_test.shape[0] // batch_size)
Digit Recognizer
3,266,938
lgb_params = {'random_state': 2021, 'metric': 'rmse', 'n_estimators': 30000, 'n_jobs': -1, 'cat_feature': [x for x in range(len(cat_var))], 'bagging_seed': 2021, 'feature_fraction_seed': 2021, 'learning_rate': 0.003899156646724397, 'max_depth': 99, 'num_leaves': 63, 'reg_alpha': 9.562925363678952, 'reg_lambda': 9.355810045480153, 'colsample_bytree': 0.2256038826485174, 'min_child_samples': 290, 'subsample_freq': 1, 'subsample': 0.8805303688019942, 'max_bin': 882, 'min_data_per_group': 127, 'cat_smooth': 96, 'cat_l2': 19}<init_hyperparams>
y_pred = model.predict(X_test) X_test__ = X_test.reshape(X_test.shape[0], 28, 28) fig, axis = plt.subplots(4, 4, figsize=(12, 14)) for i, ax in enumerate(axis.flat): ax.imshow(X_test__[i], cmap='binary') ax.set(title=f"Real Number is {y_test[i].argmax()} Predict Number is {y_pred[i].argmax()}");
Digit Recognizer
3,266,938
f1 = 0.6547870667136243 f2 = 2.6711351556035487 f3 = 20 f4 = 49 f5 = 2<train_model>
pred = model.predict_classes(test_x, verbose=1)
Digit Recognizer
3,266,938
%%time kf = KFold(n_splits=5, random_state=48, shuffle=True) preds = np.zeros(test.shape[0]) rmse = [] i = 0 for idx_train, idx_test in kf.split(X, y): X_train, X_test = X.iloc[idx_train], X.iloc[idx_test] y_train, y_test = y.iloc[idx_train], y.iloc[idx_test] model = LGBMRegressor(**lgb_params) model.fit(X_train, y_train, eval_set=(X_test, y_test), early_stopping_rounds=300, verbose=False, eval_metric='rmse') predictions = model.predict(X_test, num_iteration=model.best_iteration_) rmse.append(mean_squared_error(y_test, predictions, squared=False)) print('First Round:') print(f'RMSE {rmse[i]}') rmse_tuned = [] params = lgb_params.copy() for t in range(1, 17): if t > 2: params['reg_lambda'] *= f1 params['reg_alpha'] += f2 params['num_leaves'] += f3 params['min_child_samples'] -= f4 params['cat_smooth'] -= f5 params['learning_rate'] = 0.003 if params['min_child_samples'] < 1: params['min_child_samples'] = 1 if t > 11: params['learning_rate'] = 0.001 model = LGBMRegressor(**params).fit(X_train, y_train, eval_set=(X_test, y_test), eval_metric='rmse', early_stopping_rounds=200, verbose=False, init_model=model) predictions = model.predict(X_test, num_iteration=model.best_iteration_) rmse_tuned.append(mean_squared_error(y_test, predictions, squared=False)) print(f'RMSE tuned {t}: {rmse_tuned[t-1]}') print(f'Improvement of {rmse[i]-rmse_tuned[t-1]}') preds += model.predict(test[columns], num_iteration=model.best_iteration_) / kf.n_splits i += 1<save_to_csv>
sub['Label'] = pred sub.to_csv("CNN_keras_sub.csv", index=False) sub.head()
Digit Recognizer
4,385,369
test['target'] = preds test = test[['id','target']] test.to_csv('submission.csv', index=False)<import_modules>
from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels
Digit Recognizer
4,385,369
import pandas as pd import numpy as np from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import KFold from sklearn.metrics import mean_squared_error from lightgbm import LGBMRegressor<load_from_csv>
from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.optimizers import RMSprop, Adam from keras.preprocessing.image import ImageDataGenerator
Digit Recognizer
4,385,369
train = pd.read_csv(".. /input/tabular-playground-series-feb-2021/train.csv") test = pd.read_csv(".. /input/tabular-playground-series-feb-2021/test.csv" )<define_variables>
df = pd.read_csv('../input/train.csv')
Digit Recognizer
4,385,369
category_features = ["cat0", "cat1", "cat2", "cat3", "cat4", "cat5", "cat6", "cat7", "cat8", "cat9"] continuous_features = ["cont0", "cont1", "cont2", "cont3", "cont4", "cont5", "cont6", "cont7", "cont8", "cont9", "cont10", "cont11", "cont12", "cont13"] all_features = category_features + continuous_features <categorify>
test_df = pd.read_csv('../input/test.csv')
Digit Recognizer
4,385,369
for feature in category_features: encoder = LabelEncoder() encoder.fit(train[feature]) train[feature] = pd.Series(encoder.transform(train[feature]), dtype="category") test[feature] = pd.Series(encoder.transform(test[feature]), dtype="category")<init_hyperparams>
Y = df['label'] X = df.drop(['label'], axis=1)
Digit Recognizer
4,385,369
PARAMS = { 'objective': 'regression', 'metric': 'rmse', 'boosting': 'gbdt', 'num_iterations': 5000, 'learning_rate': 0.02, 'num_leaves': 15, 'min_data_in_leaf': 1000, 'feature_fraction': 0.3, 'lambda_l2': 0.001 } <prepare_x_and_y>
X = X / 255.0 X = X.values.reshape(-1,28,28,1) Y = np.array(Y) Y = to_categorical(Y, num_classes=10)
Digit Recognizer
4,385,369
PARAMS_KFOLD = PARAMS.copy() PARAMS_KFOLD.update({ 'early_stopping_round': 500 }) def rmse_kfold(parameters, n_fold): kfold = KFold(n_splits=n_fold) rmse_kfold = np.zeros(n_fold) for index, (train_index, validation_index) in enumerate(kfold.split(train)): X_train, X_validation = train[all_features].iloc[train_index], train[all_features].iloc[validation_index] y_train, y_validation = train['target'].iloc[train_index], train['target'].iloc[validation_index] model = LGBMRegressor(**parameters) model.fit(X_train, y_train, eval_set=[(X_validation, y_validation)], verbose=-1) pred_validation = model.predict(X_validation) rmse_kfold[index] = mean_squared_error(y_validation, pred_validation, squared=False) rmse_average = np.average(rmse_kfold) return rmse_average<define_search_space>
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, stratify=Y, random_state=31, test_size=0.2)
Digit Recognizer
4,385,369
num_leaves_range = [2**3-1, 2**4-1, 2**5-1, 2**6-1] min_data_in_leaf_range = [2000, 1000, 500, 250] feature_fraction_range = [0.3] N_FOLD = 10 rmse_grid_search = [] for num_leaves in num_leaves_range: for min_data_in_leaf in min_data_in_leaf_range: for feature_fraction in feature_fraction_range: params = PARAMS_KFOLD.copy() params.update({ 'num_leaves': num_leaves, 'min_data_in_leaf': min_data_in_leaf, 'feature_fraction': feature_fraction }) rmse = rmse_kfold(params, N_FOLD) rmse_grid_search.append((rmse, params)) print("parameters " + str(params)) print("rmse: " + str(rmse))<find_best_params>
model = Sequential()
Digit Recognizer
4,385,369
PARAMS_OPTIMIZED = sorted(rmse_grid_search, key=lambda o1: o1[0])[0][1]<train_model>
model.add(Conv2D(32,(5,5), padding='Same', activation='relu', input_shape=(28,28,1))) model.add(Conv2D(64,(3,3), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.2)) model.add(Conv2D(128,(3,3), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.2)) model.add(Conv2D(64,(3,3), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax'))
Digit Recognizer
4,385,369
PARAMS_PRED = PARAMS_OPTIMIZED.copy() PARAMS_PRED.pop('early_stopping_round', None) print('parameters for final model ' + str(PARAMS_PRED)) model = LGBMRegressor(**PARAMS_PRED) model.fit(train[all_features], train['target'])<save_to_csv>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, brightness_range=None, shear_range=0.1, zoom_range=0.15, channel_shift_range=0.0, fill_mode='nearest', cval=0.0, horizontal_flip=False, vertical_flip=False, rescale=None, preprocessing_function=None, data_format=None, validation_split=0.0, dtype=None)
Digit Recognizer
4,385,369
pred = model.predict(test[all_features]) submission = pd.DataFrame({'id': test['id'], 'target': pred}) submission.to_csv('submission.csv', index=False)<load_from_csv>
datagen.fit(X_train)
Digit Recognizer
4,385,369
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2021/train.csv') test_df = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2021/test.csv') <filter>
model.compile(optimizer = "Nadam", loss = "categorical_crossentropy", metrics = ["accuracy"] )
Digit Recognizer
4,385,369
<concatenate>
max_epochs = 30 batch_size = 256
Digit Recognizer
4,385,369
df = pd.concat([train_df.drop(['target','id'],axis=1),test_df.drop(['id'],axis=1)],axis=0) df.head()<count_missing_values>
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=max_epochs, verbose=1, validation_data=(X_test, Y_test), steps_per_epoch=X_train.shape[0] // batch_size)
Digit Recognizer
4,385,369
df.isnull().sum()<string_transform>
test_df = test_df / 255.0 test_arr = test_df.values.reshape(-1,28,28,1)
Digit Recognizer
4,385,369
def divideFeatures(df): numerical_features = df.select_dtypes(include=[np.number]) categorical_features = df.select_dtypes(include=['object']) return numerical_features, categorical_features<feature_engineering>
Y_pred_arr = model.predict(test_arr)
Digit Recognizer
4,385,369
cont_features, cat_features = divideFeatures(df)<import_modules>
Y_pred_arr = np.argmax(Y_pred_arr, axis=1)
Digit Recognizer
4,385,369
<categorify><EOS>
results = pd.Series(Y_pred_arr, name="Label") submission = pd.concat([pd.Series(range(1,28001), name="ImageId"), results], axis=1) submission.to_csv("cnn_mnist_datagen.csv", index=False)
Digit Recognizer
1,349,231
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<categorify>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv") Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1) X_train = X_train / 255.0 X_test = test / 255.0 X_train = X_train.values.reshape(-1,28,28,1) X_test = X_test.values.reshape(-1,28,28,1) Y_train = to_categorical(Y_train, num_classes = 10) annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x, verbose=0) styles=[':','-.','--','-',':','-.','--','-',':','-.','--','-']
Digit Recognizer
1,349,231
def lencode(df): for i in df.columns: for k in cat_features.columns: if i == k: le = LabelEncoder() df[i] = le.fit_transform(df[i]) <prepare_x_and_y>
nets = 3 model = [0] * nets for j in range(3): model[j] = Sequential() model[j].add(Conv2D(24,kernel_size=5,padding='same',activation='relu', input_shape=(28,28,1))) model[j].add(MaxPool2D()) if j>0: model[j].add(Conv2D(48,kernel_size=5,padding='same',activation='relu')) model[j].add(MaxPool2D()) if j>1: model[j].add(Conv2D(64,kernel_size=5,padding='same',activation='relu')) model[j].add(MaxPool2D(padding='same')) model[j].add(Flatten()) model[j].add(Dense(256, activation='relu')) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
1,349,231
X_train = df[:300000] Xf_test = df[300000:] y = train_df['target']<split>
X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size=0.333) history = [0] * nets names = ["(C-P)x1","(C-P)x2","(C-P)x3"] epochs = 20 for j in range(nets): history[j] = model[j].fit(X_train2, Y_train2, batch_size=80, epochs=epochs, validation_data=(X_val2, Y_val2), callbacks=[annealer], verbose=0) print("CNN {0}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(names[j], epochs, max(history[j].history['acc']), max(history[j].history['val_acc'])))
Digit Recognizer
1,349,231
X_train, X_test, y_train, y_test = train_test_split(X_train, y, test_size=0.1, random_state=42)<choose_model_class>
nets = 6 model = [0] * nets for j in range(6): model[j] = Sequential() model[j].add(Conv2D(j*8+8,kernel_size=5,activation='relu',input_shape=(28,28,1))) model[j].add(MaxPool2D()) model[j].add(Conv2D(j*16+16,kernel_size=5,activation='relu')) model[j].add(MaxPool2D()) model[j].add(Flatten()) model[j].add(Dense(256, activation='relu')) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
1,349,231
linear = LinearRegression() random = RandomForestRegressor(n_estimators=100,random_state=42,n_jobs=-1,max_features=4) <choose_model_class>
X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size=0.333) history = [0] * nets names = ["8 maps","16 maps","24 maps","32 maps","48 maps","64 maps"] epochs = 20 for j in range(nets): history[j] = model[j].fit(X_train2, Y_train2, batch_size=80, epochs=epochs, validation_data=(X_val2, Y_val2), callbacks=[annealer], verbose=0) print("CNN {0}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(names[j], epochs, max(history[j].history['acc']), max(history[j].history['val_acc'])))
Digit Recognizer
1,349,231
xgbr = xgb.XGBRegressor(tree_method='gpu_hist') <find_best_params>
nets = 8 model = [0] * nets for j in range(8): model[j] = Sequential() model[j].add(Conv2D(32,kernel_size=5,activation='relu',input_shape=(28,28,1))) model[j].add(MaxPool2D()) model[j].add(Conv2D(64,kernel_size=5,activation='relu')) model[j].add(MaxPool2D()) model[j].add(Flatten()) if j>0: model[j].add(Dense(2**(j+4), activation='relu')) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
1,349,231
def objective(trial): params = { 'random_state': 0, 'n_estimators': trial.suggest_categorical('n_estimators', [10000]), 'max_depth': trial.suggest_int('max_depth', 3, 8), 'learning_rate': trial.suggest_float('learning_rate', 0.001, 1.0), 'reg_lambda': trial.suggest_float('reg_lambda', 0.0, 10), 'reg_alpha': trial.suggest_float('reg_alpha', 0.0, 10), 'gamma': trial.suggest_float('gamma', 0.0, 10), 'subsample': trial.suggest_categorical('subsample', [0.8, 0.9, 1.0]), 'colsample_bytree': trial.suggest_categorical('colsample_bytree', [0.1, 0.2, 0.3, 0.4, 0.5]), 'tree_method':'gpu_hist' } model = xgb.XGBRegressor(**params) model.fit(X_train, y_train, eval_set=[(X_test,y_test)], early_stopping_rounds=1000, verbose=0) y_pred = model.predict(X_test) rmse = mean_squared_error(y_test, y_pred, squared=False) return rmse<find_best_params>
X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size=0.333) history = [0] * nets names = ["0N","32N","64N","128N","256N","512N","1024N","2048N"] epochs = 20 for j in range(nets): history[j] = model[j].fit(X_train2, Y_train2, batch_size=80, epochs=epochs, validation_data=(X_val2, Y_val2), callbacks=[annealer], verbose=0) print("CNN {0}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(names[j], epochs, max(history[j].history['acc']), max(history[j].history['val_acc'])))
Digit Recognizer
1,349,231
%%time study = optuna.create_study(direction='minimize', sampler=optuna.samplers.TPESampler(seed=0)) study.optimize(objective, n_trials=100) print('Number of finished trials:', len(study.trials)) print('Best parameters:', study.best_trial.params) print('Best RMSE:', study.best_trial.value)<train_model>
nets = 8 model = [0] * nets for j in range(8): model[j] = Sequential() model[j].add(Conv2D(32,kernel_size=5,activation='relu',input_shape=(28,28,1))) model[j].add(MaxPool2D()) model[j].add(Dropout(j*0.1)) model[j].add(Conv2D(64,kernel_size=5,activation='relu')) model[j].add(MaxPool2D()) model[j].add(Dropout(j*0.1)) model[j].add(Flatten()) model[j].add(Dense(128, activation='relu')) model[j].add(Dropout(j*0.1)) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
1,349,231
params = study.best_params params['random_state'] = 0 params['n_estimators'] = 10000 params['tree_method'] = 'gpu_hist' model3 = xgb.XGBRegressor(**params) model3.fit(X_train,y_train,eval_set=[(X_test, y_test)],early_stopping_rounds=1000,verbose=2) <train_model>
X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size=0.333) history = [0] * nets names = ["D=0","D=0.1","D=0.2","D=0.3","D=0.4","D=0.5","D=0.6","D=0.7"] epochs = 30 for j in range(nets): history[j] = model[j].fit(X_train2, Y_train2, batch_size=80, epochs=epochs, validation_data=(X_val2, Y_val2), callbacks=[annealer], verbose=0) print("CNN {0}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(names[j], epochs, max(history[j].history['acc']), max(history[j].history['val_acc'])))
Digit Recognizer
1,349,231
model = make_pipeline(linear) model.fit(X_train, y_train) model2 = make_pipeline(xgbr) model2.fit(X_train, y_train)<choose_model_class>
nets = 5 model = [0] * nets j=0 model[j] = Sequential() model[j].add(Conv2D(32,kernel_size=5,activation='relu',input_shape=(28,28,1))) model[j].add(MaxPool2D()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64,kernel_size=5,activation='relu')) model[j].add(MaxPool2D()) model[j].add(Dropout(0.4)) model[j].add(Flatten()) model[j].add(Dense(128, activation='relu')) model[j].add(Dropout(0.4)) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) j=1 model[j] = Sequential() model[j].add(Conv2D(32,kernel_size=3,activation='relu',input_shape=(28,28,1))) model[j].add(Conv2D(32,kernel_size=3,activation='relu')) model[j].add(MaxPool2D()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64,kernel_size=3,activation='relu')) model[j].add(Conv2D(64,kernel_size=3,activation='relu')) model[j].add(MaxPool2D()) model[j].add(Dropout(0.4)) model[j].add(Flatten()) model[j].add(Dense(128, activation='relu')) model[j].add(Dropout(0.4)) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) j=2 model[j] = Sequential() model[j].add(Conv2D(32,kernel_size=5,activation='relu',input_shape=(28,28,1))) model[j].add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu')) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64,kernel_size=5,activation='relu')) model[j].add(Conv2D(64,kernel_size=5,strides=2,padding='same',activation='relu')) model[j].add(Dropout(0.4)) model[j].add(Flatten()) model[j].add(Dense(128, activation='relu')) model[j].add(Dropout(0.4)) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) j=3 model[j] = Sequential() model[j].add(Conv2D(32,kernel_size=3,activation='relu',input_shape=(28,28,1))) model[j].add(BatchNormalization()) model[j].add(Conv2D(32,kernel_size=3,activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64,kernel_size=3,activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64,kernel_size=3,activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64,kernel_size=5,strides=2,padding='same',activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Flatten()) model[j].add(Dense(128, activation='relu')) model[j].add(Dropout(0.4)) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
1,349,231
models = {'linear': model} models['xgbr'] = model2 models['optuna_xbg'] = model3<compute_test_metric>
j=4 model[j] = Sequential() model[j].add(Conv2D(32,kernel_size=3,activation='relu',input_shape=(28,28,1))) model[j].add(BatchNormalization()) model[j].add(Conv2D(32,kernel_size=3,activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64,kernel_size=3,activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64,kernel_size=3,activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64,kernel_size=5,strides=2,padding='same',activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Flatten()) model[j].add(Dense(128, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
1,349,231
def rmse(y_test, y_pred): rmse = np.sqrt(mean_squared_error(y_test, y_pred)) return rmse <find_best_model_class>
X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size=0.2) history = [0] * nets names = ["basic","32C3-32C3","32C5S2","both+BN","both+BN+DA"] epochs = 35 for j in range(nets-1): history[j] = model[j].fit(X_train2, Y_train2, batch_size=64, epochs=epochs, validation_data=(X_val2, Y_val2), callbacks=[annealer], verbose=0) print("CNN {0}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(names[j], epochs, max(history[j].history['acc']), max(history[j].history['val_acc']))) datagen = ImageDataGenerator( rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1) j = nets-1 history[j] = model[j].fit_generator(datagen.flow(X_train2, Y_train2, batch_size=64), epochs=epochs, steps_per_epoch=X_train2.shape[0]//64, validation_data=(X_val2, Y_val2), callbacks=[annealer], verbose=0) print("CNN {0}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(names[j], epochs, max(history[j].history['acc']), max(history[j].history['val_acc'])))
Digit Recognizer
1,349,231
from sklearn.base import clone def cross_val_rmse(model): model = clone(model) five_fold = KFold(n_splits=5) rmse_val = [] for tr_ind, val_ind in five_fold.split(X_train): model.fit(X_train.iloc[tr_ind,:], y.iloc[tr_ind]) rmse_val.append(rmse(y.iloc[val_ind], model.predict(X_train.iloc[val_ind,:]))) return np.mean(rmse_val)<compute_test_metric>
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** (x + epochs)) model[4].fit_generator(datagen.flow(X_train, Y_train, batch_size=64), epochs=25, steps_per_epoch=X_train.shape[0]//64, callbacks=[annealer], verbose=0) results = model[4].predict(X_test) results = np.argmax(results, axis=1) results = pd.Series(results, name="Label") submission = pd.concat([pd.Series(range(1,28001), name="ImageId"), results], axis=1) submission.to_csv("MNIST-CNN.csv", index=False)
Digit Recognizer
10,844,943
y_pred = model3.predict(X_test) y_pred = np.round(y_pred, 6) print('min', y_pred.min()) print('max', y_pred.max()) RMSE = np.sqrt(mean_squared_error(y_test, y_pred)) print('RMSE', RMSE) print(y_pred.tolist())<predict_on_test>
import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.layers import Input, Conv2D, Dense, Activation, Flatten, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D, MaxPooling2D, AveragePooling2D, BatchNormalization, LeakyReLU, Concatenate from tensorflow.keras.models import Sequential, Model, load_model from tensorflow.keras.utils import to_categorical from tensorflow.keras import regularizers, optimizers from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
Digit Recognizer
10,844,943
ys_pred = model3.predict(Xf_test) ys_pred = np.round(ys_pred, 6) ys_pred.tolist()<save_to_csv>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
Digit Recognizer
10,844,943
sub = pd.DataFrame({'id':test_df.id,'target':ys_pred}) sub.to_csv('TPS.csv',index=False) sub.head()<create_dataframe>
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.1, random_state=1337)
Digit Recognizer
10,844,943
%matplotlib inline dfk = pd.DataFrame({ 'Kernel ID': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U'], 'Score': [0.84362, 0.84358, 0.84352, 0.84318, 0.84310, 0.84303, 0.84266, 0.84251, 0.84245, 0.84243, 0.84233, 0.84229, 0.84221, 0.84220, 0.84209, 0.84207, 0.84202, 0.84200, 0.84199, 0.84198, 0.84193], 'File Path': ['../input/feb84362/FEB84362.csv', '../input/feb84358/FEB84358.csv', '../input/feb84352/FEB84352.csv', '../input/feb84318/FEB84318.csv', '../input/feb84310/FEB84310.csv', '../input/feb84303/FEB84303.csv', '../input/feb84266/FEB84266.csv', '../input/feb84251/FEB84251.csv', '../input/feb84245/FEB84245.csv', '../input/feb84243/FEB84243.csv', '../input/feb84233/FEB84233.csv', '../input/feb84229/FEB84229.csv', '../input/feb84221/FEB84221.csv', '../input/feb84220/FEB84220.csv', '../input/feb84209/FEB84209.csv', '../input/feb84207/FEB84207.csv', '../input/feb84202/FEB84202.csv', '../input/feb84200/FEB84200.csv', '../input/feb84199/FEB84199.csv', '../input/feb84198/FEB84198.csv', '../input/feb84193/FEB84193.csv'] }) dfk<categorify>
rlr = ReduceLROnPlateau(monitor='accuracy', mode='max', factor=0.5, min_lr=1e-7, verbose=1, patience=5) es = EarlyStopping(monitor='accuracy', mode='max', verbose=1, patience=50) mc = ModelCheckpoint('cnn_best_model.h5', monitor='accuracy', mode='max', verbose=1, save_best_only=True)
Digit Recognizer
10,844,943
def generate(main, support, coeff): g = main.copy() for i in main.columns[1:]: res = [] lm, ls = [], [] lm = main[i].tolist() ls = support[i].tolist() for j in range(len(main)): res.append((lm[j] * coeff) + (ls[j] * (1. - coeff))) g[i] = res return g <load_from_csv>
def build_model(lr=0, mt=0, dr=0): model = Sequential(name='cnn_mnist') model.add(Conv2D(filters=32, kernel_size=(3,3), padding='Same', activation='relu', input_shape=(28,28,1))) model.add(BatchNormalization()) model.add(Conv2D(filters=32, kernel_size=(3,3), padding='Same', activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.3)) model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(128, activation="relu")) model.add(Dropout(dr)) model.add(Dense(10, activation="softmax")) opt = optimizers.SGD(lr=lr, momentum=mt) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) return model
Digit Recognizer
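A quick worked example of the generate blend defined above (the two frames here are hypothetical, not competition data): with coeff=0.6 every prediction column becomes 0.6*main + 0.4*support, so chaining calls over the ranked files in dfk folds them into one running weighted average.

import pandas as pd
main = pd.DataFrame({'id': [0, 1], 'target': [0.80, 0.20]})
support = pd.DataFrame({'id': [0, 1], 'target': [0.60, 0.40]})
print(generate(main, support, 0.60))  # target column becomes [0.72, 0.28]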
10,844,943
support = pd.read_csv(dfk.iloc[0, 2]) for k in range(1, 6): main = pd.read_csv(dfk.iloc[k, 2]) support = generate(main, support, 0.60) sub1 = support<load_from_csv>
model = build_model(lr=0.01, mt=0.9, dr=0.5)
Digit Recognizer
10,844,943
support = sub1 for k in range(6, 14): main = pd.read_csv(dfk.iloc[k, 2]) support = generate(main, support, 0.50) sub2 = support<compute_test_metric>
datagen = ImageDataGenerator( rotation_range=15, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
10,844,943
sub2 = comparison2(sub2, 10, 0.9970, 1.0000)<load_from_csv>
model.fit_generator(datagen.flow(X_train, y_train, batch_size=64), validation_data=(X_valid, y_valid), steps_per_epoch=X_train.shape[0] // 64, epochs=400, verbose=2, callbacks=[rlr, es, mc])
Digit Recognizer
10,844,943
<compute_test_metric><EOS>
saved_model = load_model('cnn_best_model.h5') y_pred = saved_model.predict_classes(X_test, verbose=0) submissions = pd.DataFrame({"ImageId": list(range(1, len(y_pred)+1)), "Label": y_pred}) submissions.to_csv("submission.csv", index=False)
Digit Recognizer
10,594,850
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_from_csv>
import numpy as np import pandas as pd import os import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_addons as tfa import PIL from sklearn.model_selection import train_test_split
Digit Recognizer
10,594,850
main = pd.read_csv(dfk.iloc[20, 2]) sub4 = generate(main, sub3, 0.60)<compute_test_metric>
ROOT_DIR = '/kaggle/input/digit-recognizer' train_df = pd.read_csv(os.path.join(ROOT_DIR, 'train.csv')) test_df = pd.read_csv(os.path.join(ROOT_DIR, 'test.csv'))
Digit Recognizer
10,594,850
sub4 = comparison(sub4, 17, 0.9955, 0.9985)<load_from_csv>
feature_train = train_df.to_numpy()[:, 1:] label_train = train_df.to_numpy()[:, 0] feature_test = test_df.to_numpy()
Digit Recognizer
10,594,850
original = pd.read_csv("../input/feb84186/FEB84186.csv") sub5 = comparison5(sub4, original, 18, 1.0000, 0.9960) <save_to_csv>
feature_train, feature_val, label_train, label_val = train_test_split(feature_train, label_train, test_size=0.2, stratify=label_train)
Digit Recognizer
10,594,850
sub = sub5 sub.to_csv("submission.csv", index=False) sub1.to_csv("submission1.csv", index=False) sub2.to_csv("submission2.csv", index=False) sub3.to_csv("submission3.csv", index=False) sub4.to_csv("submission4.csv", index=False) sub5.to_csv("submission5.csv", index=False) !ls<set_options>
print('Train size: {}, validation size: {}'.format(len(label_train), len(label_val)))
Digit Recognizer
10,594,850
optuna.logging.set_verbosity(optuna.logging.CRITICAL)<load_from_csv>
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator( rotation_range=25, height_shift_range=3, width_shift_range=3 )
Digit Recognizer
10,594,850
data = pd.read_csv('../input/tabular-playground-series-feb-2021/train.csv', index_col=0) test = pd.read_csv('../input/tabular-playground-series-feb-2021/test.csv', index_col=0) preds = data.columns[:-1] target = data.columns[-1]<data_type_conversions>
BATCH_SIZE = 64 train_ds = train_datagen.flow(feature_train, label_train, batch_size=BATCH_SIZE) val_ds = tf.data.Dataset.from_tensor_slices((feature_val, label_val)).batch(BATCH_SIZE).prefetch(1) test_ds = tf.data.Dataset.from_tensor_slices(feature_test).batch(BATCH_SIZE).prefetch(1)
Digit Recognizer
10,594,850
cat_cols = [col for col in preds if 'cat' in col] data[cat_cols] = data[cat_cols].astype('category') test[cat_cols] = test[cat_cols].astype('category')<train_model>
layers = [ tf.keras.layers.Conv2D(16, kernel_size=3, strides=1, padding='same', input_shape=(28,28,1)), tf.keras.layers.BatchNormalization(), tf.keras.layers.ReLU(), tf.keras.layers.Dropout(0.25), tf.keras.layers.Conv2D(32, kernel_size=3, strides=1, padding='valid'), tf.keras.layers.BatchNormalization(), tf.keras.layers.ReLU(), tf.keras.layers.Dropout(0.25), tf.keras.layers.Conv2D(64, kernel_size=5, strides=1, padding='valid'), tf.keras.layers.BatchNormalization(), tf.keras.layers.ReLU(), tf.keras.layers.Dropout(0.25), tf.keras.layers.MaxPool2D(strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dropout(0.25), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dropout(0.25), tf.keras.layers.Dense(10, activation='softmax') ] model = tf.keras.Sequential(layers=layers) lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(1e-3, decay_steps=3000, decay_rate=0.95) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy']) history = model.fit(train_ds, epochs=40, validation_data=val_ds)
Digit Recognizer
10,594,850
<find_best_params><EOS>
pred_test = np.argmax(model.predict(test_ds), axis=1) out_df = pd.DataFrame({'ImageId': np.arange(1, len(pred_test)+1), 'Label': pred_test}) out_df.to_csv('output.csv', index=False)
Digit Recognizer
10,475,153
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<find_best_score>
import itertools import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras from tensorflow.keras import Sequential from tensorflow.keras.layers import Flatten, Dense, Conv2D, Lambda, MaxPooling2D, Dropout, BatchNormalization from tensorflow.keras.preprocessing.image import ImageDataGenerator from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split
Digit Recognizer
10,475,153
study.best_value<find_best_params>
train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
Digit Recognizer
10,475,153
best_params = study.best_params best_params<prepare_x_and_y>
train_df = train_df.sample(frac=1) cols = list(train_df.columns) cols.remove('label') X = train_df[cols] Y = train_df['label'] X_train, X_dev, Y_train, Y_dev = train_test_split(X, Y, test_size=0.1, random_state=0) X_test = test_df[cols]
Digit Recognizer
10,475,153
def objective(trial): hyper_params = { 'num_leaves': trial.suggest_int('num_leaves', 1, 63), 'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 1, 100) } scores = [] kf = KFold(5) for i, (train_idx, test_idx) in enumerate(kf.split(data)): X_train = data.iloc[train_idx][preds] y_train = data.iloc[train_idx][target] X_test = data.iloc[test_idx][preds] y_test = data.iloc[test_idx][target] hyper_params.update(best_params) estimator = LGBMRegressor(**hyper_params) estimator.fit(X_train, y_train, eval_set=(X_test, y_test), eval_metric='rmse', verbose=0) y_pred = estimator.predict(X_test) rmse = mean_squared_error(y_test, y_pred, squared=False) scores.append(rmse) return np.mean(scores)<find_best_params>
X_train = X_train.values.reshape(-1, 28, 28) X_dev = X_dev.values.reshape(-1, 28, 28) X_test = X_test.values.reshape(-1, 28, 28)
Digit Recognizer
10,475,153
study = optuna.create_study(direction='minimize') study.optimize(objective, timeout=3600*2.5)<find_best_score>
X_train = np.expand_dims(X_train, axis=-1) / 255 X_dev = np.expand_dims(X_dev, axis=-1) / 255 X_test = np.expand_dims(X_test, axis=-1) / 255
Digit Recognizer
10,475,153
study.best_value<find_best_params>
def data_augmentation(x_data, y_data, batch_size): datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False ) datagen.fit(x_data) train_data = datagen.flow(x_data, y_data, batch_size=batch_size, shuffle=True) return train_data
Digit Recognizer
10,475,153
best_params.update(study.best_params) best_params<prepare_x_and_y>
BATCH_SIZE = 64 aug_train_data = data_augmentation(X_train, Y_train, BATCH_SIZE)
Digit Recognizer
10,475,153
def objective(trial): hyper_params = { 'bagging_freq': trial.suggest_int('bagging_freq', 1, 100), 'bagging_fraction': trial.suggest_float('bagging_fraction', 0, 1.0), 'feature_fraction': trial.suggest_float('feature_fraction', 0, 1.0) } scores = [] kf = KFold(5) for i, (train_idx, test_idx) in enumerate(kf.split(data)): X_train = data.iloc[train_idx][preds] y_train = data.iloc[train_idx][target] X_test = data.iloc[test_idx][preds] y_test = data.iloc[test_idx][target] hyper_params.update(best_params) estimator = LGBMRegressor(**hyper_params) estimator.fit(X_train, y_train, eval_set=(X_test, y_test), eval_metric='rmse', verbose=0) y_pred = estimator.predict(X_test) rmse = mean_squared_error(y_test, y_pred, squared=False) scores.append(rmse) return np.mean(scores)<find_best_params>
layers = [ Conv2D(filters=96, kernel_size=(11, 11), strides=2, activation='relu', input_shape=(28, 28, 1)), MaxPooling2D(pool_size=(3, 3), strides=2), Conv2D(filters=256, kernel_size=(5, 5), padding='same', activation='relu'), Flatten(), Dense(9216, activation='relu'), Dense(4096, activation='relu'), Dense(4096, activation='relu'), Dense(10, activation='softmax'), ]
Digit Recognizer
10,475,153
study = optuna.create_study(direction='minimize') study.optimize(objective, timeout=3600*2)<find_best_score>
model = Sequential(layers) optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001) model.compile( optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) callbacks = [ tf.keras.callbacks.ReduceLROnPlateau(monitor="loss", factor=0.1, patience=2, min_lr=0.000001, verbose=1), ] hist = model.fit( aug_train_data, steps_per_epoch=X_train.shape[0] // BATCH_SIZE, batch_size=BATCH_SIZE, validation_data=(X_dev, Y_dev), epochs=50, callbacks=callbacks)
Digit Recognizer
10,475,153
study.best_value<find_best_params>
predictions = Y_pred print(predictions[0]) print('Predicted digit is: ' + str(np.argmax(predictions[0]))) print('Accuracy is: ' + str(np.max(predictions[0] * 100)) + '%') plt.imshow(X_dev[0].reshape((28, 28)), cmap=plt.cm.binary)
Digit Recognizer
10,475,153
best_params.update(study.best_params) best_params<prepare_x_and_y>
predictions = model.predict(X_test)
Digit Recognizer
10,475,153
def objective(trial): hyper_params = { 'lambda_l1': trial.suggest_float('lambda_l1', 1E-12, 20, log=True), 'lambda_l2': trial.suggest_float('lambda_l2', 1E-12, 20, log=True) } scores = [] kf = KFold(5) for i, (train_idx, test_idx) in enumerate(kf.split(data)): X_train = data.iloc[train_idx][preds] y_train = data.iloc[train_idx][target] X_test = data.iloc[test_idx][preds] y_test = data.iloc[test_idx][target] hyper_params.update(best_params) estimator = LGBMRegressor(**hyper_params) estimator.fit(X_train, y_train, eval_set=(X_test, y_test), eval_metric='rmse', verbose=0) y_pred = estimator.predict(X_test) rmse = mean_squared_error(y_test, y_pred, squared=False) scores.append(rmse) return np.mean(scores)<find_best_params>
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv') submission.head()
Digit Recognizer
10,475,153
study = optuna.create_study(direction='minimize') study.optimize(objective, timeout=3600*2)<find_best_score>
for i in submission.index: submission['Label'][i] = np.argmax(predictions[i])
Digit Recognizer
10,475,153
study.best_value<find_best_params>
submission.to_csv("sample_submission.csv", index=False )
Digit Recognizer
10,475,153
best_params.update(study.best_params) best_params<train_model>
model.save('model') model.save('model.h5')
Digit Recognizer
9,877,165
k = 10 test[target] = 0 scores = [] kf = KFold(k) for i, (train_idx, test_idx) in enumerate(kf.split(data)): X_train = data.iloc[train_idx][preds] y_train = data.iloc[train_idx][target] X_test = data.iloc[test_idx][preds] y_test = data.iloc[test_idx][target] best_params['learning_rate'] = 0.005 best_params['n_estimators'] = 100000 estimator = LGBMRegressor(**best_params) estimator.fit(X_train, y_train, eval_set=(X_test, y_test), eval_metric='rmse', early_stopping_rounds=1000, verbose=1000) y_pred = estimator.predict(X_test) rmse = mean_squared_error(y_test, y_pred, squared=False) scores.append(rmse) test[target] += estimator.predict(test[preds]) / k test[target].to_csv('submission.csv')<compute_test_metric>
def l1_reg(weight_matrix): return 0.01 * K.sum(K.abs(weight_matrix)) tf.keras.backend.clear_session()
Digit Recognizer
9,877,165
print(f"Expected score: {np.mean(scores)}" )<load_from_csv>
NB_EPOCH = 40 BATCH_SIZE = 32 VERBOSE = 1 NB_CLASSES = 10 OPTIMIZER = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0) N_HIDDEN = 128 VALIDATION_SPLIT = 0.05 DROPOUT = 0.3 RESHAPED = (28, 28, 1)
Digit Recognizer
9,877,165
train = pd.read_csv(".. /input/titanic/train.csv") test = pd.read_csv(".. /input/titanic/test.csv") submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv' )<concatenate>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
9,877,165
df = pd.concat([train, test], axis=0) df = df.set_index('PassengerId') df.info()<drop_column>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
9,877,165
df = df.drop(['Name', 'Ticket', 'Cabin'], axis=1) <drop_column>
x_train = train.drop(labels=["label"], axis=1) x_train = x_train / 255.0 x_train = x_train.values.reshape(-1, 28, 28, 1) y_train = train["label"] y_train = np.array(to_categorical(y_train, num_classes=10)) x_test = test / 255.0 x_test = x_test.values.reshape(-1, 28, 28, 1) print(x_train.shape, y_train.shape, x_test.shape)
Digit Recognizer
9,877,165
df = df.drop('Fare', axis=1)<categorify>
(x_train_mnist, y_train_mnist), (x_val_mnist, y_val_mnist) = mnist.load_data() x_train_mnist = np.concatenate((x_train_mnist, x_val_mnist)) y_train_mnist = np.concatenate((y_train_mnist, y_val_mnist)) x_train_mnist = x_train_mnist.reshape((x_train_mnist.shape[0], 28, 28, 1)) X_train_mnist = x_train_mnist / 255 y_train_mnist = np.array(to_categorical(y_train_mnist.reshape((y_train_mnist.shape[0], 1)), num_classes=10)) print(x_train_mnist.shape, y_train_mnist.shape)
Digit Recognizer