kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
10,953,593
most_common = all_data.Embarked.mode() print("Most common Embarked value: {0}".format(most_common[0])) for data in [train_data, test_data]: data.fillna(value={'Embarked': most_common[0]}, inplace=True )<drop_column>
subs=pd.DataFrame({"ImageId":ImageId,"Label":class_score} )
Digit Recognizer
10,953,593
<categorify><EOS>
subs.to_csv("submission.csv",index=False) subs.head(3 )
Digit Recognizer
10,770,776
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<prepare_x_and_y>
import tensorflow as tf import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from keras.applications.vgg16 import preprocess_input from keras.applications.vgg16 import VGG16 from keras.models import Model from keras.layers import Dense from keras.layers import Flatten from keras.models import Sequential from keras.optimizers import SGD, Adam import matplotlib.pyplot as plt from keras import Input from keras.utils import to_categorical from keras.preprocessing.image import ImageDataGenerator
Digit Recognizer
10,770,776
X = train_data.drop(['Survived', 'PassengerId'], axis=1) y = train_data['Survived'] test_X = test_data.drop(['PassengerId'], axis=1 )<train_on_grid>
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') target = train['label'] train = train.drop(['label'], axis=1)
Digit Recognizer
10,770,776
best_models = {} train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1) def print_best_parameters(hyperparameters, best_parameters): value = "Best parameters: " for key in hyperparameters: value += str(key)+ ": " + str(best_parameters[key])+ ", " if hyperparameters: print(value[:-2]) def get_best_model(estimator, hyperparameters): cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1) grid_search = GridSearchCV(estimator=estimator, param_grid=hyperparameters, n_jobs=-1, cv=cv, scoring="accuracy") best_model = grid_search.fit(train_X, train_y) best_parameters = best_model.best_estimator_.get_params() print_best_parameters(hyperparameters, best_parameters) return best_model def evaluate_model(model, name): print("Accuracy score:", accuracy_score(train_y, model.predict(train_X))) best_models[name] = model<train_model>
full = pd.concat([train, test]) full=full.to_numpy() full=full.reshape(-1, 28, 28) full.shape
Digit Recognizer
10,770,776
class MyXGBClassifier(XGBClassifier): def fit(self, X, y=None): return super(XGBClassifier, self ).fit(X, y, verbose=False, early_stopping_rounds=40, eval_metric='logloss', eval_set=[(val_X, val_y)] )<choose_model_class>
full = np.pad(full,(( 0,0),(2,2),(2,2)) , mode='constant') full = stacked_img = np.stack([full, full, full], axis=3) full.shape
Digit Recognizer
10,770,776
randomForest = RandomForestClassifier(random_state=1, n_estimators=20, max_features='auto', criterion='gini', max_depth=4, min_samples_split=2, min_samples_leaf=3) xgbClassifier = MyXGBClassifier(seed=1, tree_method='gpu_hist', predictor='gpu_predictor', use_label_encoder=False, learning_rate=0.4, gamma=0.4, max_depth=4, reg_lambda=0, reg_alpha=0.1) lgbmClassifier = LGBMClassifier(random_state=1, device='gpu', boosting_type='dart', num_leaves=8, learning_rate=0.1, n_estimators=100, reg_alpha=1, reg_lambda=1) classifiers = [ ('randomForest', randomForest), ('xgbClassifier', xgbClassifier), ('lgbmClassifier', lgbmClassifier) ]<choose_model_class>
full=full.astype("float32") full = full/255 train=full[:42000, :, :, :] test=full[42000:, :, :, :]
Digit Recognizer
10,770,776
hyperparameters = { 'n_jobs' : [-1], 'voting' : ['hard', 'soft'], 'weights' : [(1, 1, 1), (2, 1, 1),(1, 2, 1),(1, 1, 2), (2, 2, 1),(1, 2, 2),(2, 1, 2), (3, 2, 1),(1, 3, 2),(2, 1, 3),(3, 1, 2)] } estimator = VotingClassifier(estimators=classifiers) best_model_voting = get_best_model(estimator, hyperparameters )<find_best_params>
X_train, X_val, Y_train, Y_val = train_test_split(train, target, test_size = 0.2, random_state = 1 )
Digit Recognizer
10,770,776
evaluate_model(best_model_voting.best_estimator_, 'voting' )<save_to_csv>
Y_val_access = Y_val
Digit Recognizer
10,770,776
for model in best_models: predictions = best_models[model].predict(test_X) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('submission_' + model + '.csv', index=False )<load_from_csv>
Y_val=to_categorical(Y_val, num_classes=10) Y_train=to_categorical(Y_train, num_classes=10 )
Digit Recognizer
10,770,776
train_data = pd.read_csv('/kaggle/input/titanic/train.csv') test_data = pd.read_csv('/kaggle/input/titanic/test.csv' )<prepare_x_and_y>
def create_model() : VGG = VGG16( input_shape=(32, 32, 3), weights='imagenet', include_top=False, ) model =tf.keras.Sequential([VGG, tf.keras.layers.Flatten() , tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ]) return model
Digit Recognizer
10,770,776
features = ['Pclass','Sex','SibSp','Parch','Fare','Age'] x = pd.get_dummies(train_data[features]) x_test = pd.get_dummies(test_data[features]) y = train_data["Survived"] <data_type_conversions>
im = ImageDataGenerator(zoom_range=0.1, rotation_range=15, height_shift_range= 0.05, width_shift_range= 0.05) flow=im.flow(X_train, Y_train, batch_size=32 )
Digit Recognizer
10,770,776
x['Fare'].fillna(x['Fare'].mode() [0], inplace=True) x_test['Fare'].fillna(x_test['Fare'].mode() [0], inplace=True) x['Age'].fillna(x['Age'].mode() [0], inplace=True) x_test['Age'].fillna(x_test['Age'].mode() [0], inplace=True) <train_on_grid>
mymod=create_model() mymod.summary() mymod.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
10,770,776
param_grid = {'alpha': sp_rand() } model = Ridge() rsearch = RandomizedSearchCV(estimator=model, param_distributions=param_grid, n_iter=100) rsearch.fit(x,y) print(rsearch )<save_to_csv>
predictions_val = np.zeros(shape=(5, X_val.shape[0], 10)) predictions_test = np.zeros(shape=(5, test.shape[0], 10)) for i in range(5): print('training model ', i+1, '...') perf = mymod.fit_generator(flow, epochs=10, steps_per_epoch= X_train.shape[0]/32, validation_data=(X_val, Y_val), verbose=0) pred_val = mymod.predict(X_val) pred_test = mymod.predict(test) predictions_val[i, :, :] = pred_val predictions_test[i, :, :] = pred_test pvall = np.zeros(shape=(X_val.shape[0], 10)) ptall = np.zeros(shape=(test.shape[0], 10)) for i in range(10): pv1 = np.mean(predictions_val[:, :, i], axis=0) pt1 = np.mean(predictions_test[:, :, i], axis=0) pvall[:, i] = pv1 ptall[:, i] = pt1 preds_val= np.argmax(pvall,axis = 1) preds_test = np.argmax(ptall, axis=1) val_acc = np.mean(preds_val == Y_val_access) print('Overall validation accuracy is', val_acc)
Digit Recognizer
10,770,776
<load_from_csv><EOS>
preds= pd.Series(preds_test,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),preds],axis = 1) submission.to_csv("MINST.csv",index=False )
Digit Recognizer
11,247,216
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<define_variables>
import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import LearningRateScheduler from tensorflow.keras.applications import EfficientNetB3
Digit Recognizer
11,247,216
temp = np.where(np.isnan(train_data.Age)) len(temp[0] )<feature_engineering>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
Digit Recognizer
11,247,216
for item in temp[0]: train_data.Age.at[item] = np.mean(train_data.Age) temp = np.where(np.isnan(train_data.Age)) len(temp[0] )<feature_engineering>
Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1) X_train = X_train / 255.0 X_test = test / 255.0 X_train = X_train.values.reshape(-1,28,28,1) X_test = X_test.values.reshape(-1,28,28,1) Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
11,247,216
temp = np.where(np.isnan(test_data.Fare)) for item in temp[0]: test_data.Fare.at[item] = np.mean(test_data.Fare) temp = np.where(np.isnan(test_data.Fare)) len(temp[0] )<feature_engineering>
datagen = ImageDataGenerator( rotation_range=10, zoom_range = 0.10, width_shift_range=0.1, height_shift_range=0.1 )
Digit Recognizer
11,247,216
temp = np.where(np.isnan(test_data.Age)) for item in temp[0]: test_data.Age.at[item] = np.mean(train_data.Age) temp = np.where(np.isnan(test_data.Age)) len(temp[0] )<categorify>
enet = EfficientNetB3(input_shape=(32, 32, 3), weights='imagenet',include_top=False )
Digit Recognizer
11,247,216
y = train_data['Survived'] features = ['Pclass', 'Sex', 'SibSp', 'Parch','Fare', 'Age'] x = pd.get_dummies(train_data[features]) x_test = pd.get_dummies(test_data[features]) model = RandomForestClassifier(n_estimators = 100, max_depth = 5, random_state = 1) model.fit(x, y) predictions = model.predict(x_test )<save_to_csv>
X_train = np.pad(X_train,(( 0,0),(2,2),(2,2),(0,0)) , mode='constant') X_train.shape
Digit Recognizer
11,247,216
output = pd.DataFrame({'PassengerId' : test_data.PassengerId, 'Survived' : predictions}) output.to_csv('submission.csv', index = False) print("Your submission was successfully saved!" )<load_from_csv>
X_train = np.squeeze(X_train, axis=-1) X_train = stacked_img = np.stack(( X_train,)*3, axis=-1) X_train.shape
Digit Recognizer
11,247,216
df = pd.read_csv('.. /input/titanic/train.csv') test_df = pd.read_csv('.. /input/titanic/test.csv' )<count_values>
nets = 2 model = [0] *nets for j in range(nets): model[j] = Sequential(enet) model[j].add(Flatten()) model[j].add(Dense(units=1024, use_bias=True, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dense(units=512, use_bias=True, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout (.4)) model[j].add(Dense(units=256, use_bias=True, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout (.4)) model[j].add(Dense(units=10, use_bias=True, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
11,247,216
df['Sex'].value_counts()<concatenate>
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x) history = [0] * nets epochs = 45 for j in range(nets): X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size = 0.1) history[j] = model[j].fit_generator(datagen.flow(X_train2,Y_train2, batch_size=64), epochs = epochs, steps_per_epoch = X_train2.shape[0]//64, validation_data =(X_val2,Y_val2), callbacks=[annealer], verbose=0) print("EffNet {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format( j+1,epochs,max(history[j].history['accuracy']),max(history[j].history['val_accuracy'])) )
Digit Recognizer
11,247,216
complete_df = pd.concat([df, test_df] )<count_missing_values>
X_test = np.pad(X_test,(( 0,0),(2,2),(2,2),(0,0)) , mode='constant') X_test = np.squeeze(X_test, axis=-1) X_test = stacked_img = np.stack(( X_test,)*3, axis=-1) X_test.shape
Digit Recognizer
11,247,216
complete_df.isnull().sum()<filter>
results = np.zeros(( X_test.shape[0],10)) for j in range(nets): results = results + model[j].predict(X_test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("MNIST_EffNet_Ensemble.csv",index=False )
Digit Recognizer
11,247,216
<feature_engineering><EOS>
X_test1a = test / 255.0 X_test1a = X_test1a.values.reshape(-1,28,28,1 )
Digit Recognizer
11,282,263
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<filter>
mnist_test = pd.read_csv(".. /input/mnist-in-csv/mnist_test.csv") mnist_train = pd.read_csv(".. /input/mnist-in-csv/mnist_train.csv" )
Digit Recognizer
11,282,263
complete_df[complete_df['Fare'].isnull() ]<feature_engineering>
sample_submission = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv") train = pd.read_csv(".. /input/digit-recognizer/train.csv" )
Digit Recognizer
11,282,263
complete_df['Fare'] = complete_df.groupby('Pclass')['Fare'].transform(lambda val: val.fillna(val.median()))<feature_engineering>
test['dataset'] = 'test'
Digit Recognizer
11,282,263
complete_df.loc[complete_df['Sex']=='female','Age'] = complete_df[complete_df['Sex']=='female']['Age'].transform(lambda val: val.fillna(val.median())) complete_df.loc[complete_df['Sex']=='male', 'Age'] = complete_df[ complete_df['Sex']=='male' ]['Age'].transform(lambda val: val.fillna(val.median()))<count_missing_values>
train['dataset'] = 'train'
Digit Recognizer
11,282,263
complete_df.isnull().sum()<drop_column>
dataset = pd.concat([train.drop('label', axis=1), test] ).reset_index()
Digit Recognizer
11,282,263
X = complete_df[:891].drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'] ,axis=1) X<categorify>
mnist = pd.concat([mnist_train, mnist_test] ).reset_index(drop=True) labels = mnist['label'].values mnist.drop('label', axis=1, inplace=True) mnist.columns = cols
Digit Recognizer
11,282,263
X = pd.get_dummies(X) X<prepare_x_and_y>
idx_mnist = mnist.sort_values(by=list(mnist.columns)).index dataset_from = dataset.sort_values(by=list(mnist.columns)) ['dataset'].values original_idx = dataset.sort_values(by=list(mnist.columns)) ['index'].values
Digit Recognizer
11,282,263
y = complete_df[:891]['Survived']<import_modules>
for i in range(len(idx_mnist)) : if dataset_from[i] == 'test': sample_submission.loc[original_idx[i], 'Label'] = labels[idx_mnist[i]]
Digit Recognizer
11,282,263
from sklearn.model_selection import train_test_split<import_modules>
sample_submission.to_csv('submission.csv', index=False )
Digit Recognizer
8,738,762
from sklearn.model_selection import train_test_split<split>
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
8,738,762
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42 )<import_modules>
Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1) X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
8,738,762
from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV<define_search_space>
X_train = X_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1 )
Digit Recognizer
8,738,762
param_grid = {'max_depth':[4,5,6,7,8,9,10]}<train_on_grid>
Y_train = keras.utils.to_categorical(Y_train, 10 )
Digit Recognizer
8,738,762
forest = RandomForestClassifier(random_state=42) grid = GridSearchCV(forest, param_grid, cv=10) grid.fit(X_train, y_train )<find_best_params>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1 )
Digit Recognizer
8,738,762
grid.best_params_<import_modules>
batch_size = 86 num_classes = 10 epochs = 10 input_shape =(28, 28, 1 )
Digit Recognizer
8,738,762
from sklearn.metrics import accuracy_score, classification_report<predict_on_test>
batch_size = 86 num_classes = 10 epochs = 10 input_shape =(28, 28, 1) model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',kernel_initializer='he_normal',input_shape=input_shape)) model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',kernel_initializer='he_normal')) model.add(MaxPool2D(( 2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax")) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.RMSprop() , metrics=['accuracy']) learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.0001 )
Digit Recognizer
8,738,762
test_predictions = grid.predict(X_test) print(accuracy_score(y_test, test_predictions))<compute_test_metric>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
8,738,762
print(classification_report(y_test, test_predictions))<categorify>
mnist_test = pd.read_csv(".. /input/mnist-in-csv/mnist_test.csv") mnist_train = pd.read_csv(".. /input/mnist-in-csv-train/mnist_train.csv" )
Digit Recognizer
8,738,762
X_final = complete_df[891:].drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'] ,axis=1) X_final = pd.get_dummies(X_final )<predict_on_test>
sample_submission = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv") train = pd.read_csv(".. /input/digit-recognizer/train.csv" )
Digit Recognizer
8,738,762
forest = RandomForestClassifier(max_depth=6, random_state=42) forest.fit(X,y) final_preds = forest.predict(X_final )<load_from_csv>
test['dataset'] = 'test'
Digit Recognizer
8,738,762
submission = pd.read_csv('.. /input/titanic/gender_submission.csv') submission<prepare_output>
train['dataset'] = 'train'
Digit Recognizer
8,738,762
submission['Survived'] = final_preds submission<data_type_conversions>
dataset = pd.concat([train.drop('label', axis=1), test] ).reset_index()
Digit Recognizer
8,738,762
submission['Survived'] = submission['Survived'].astype(int) submission<save_to_csv>
mnist = pd.concat([mnist_train, mnist_test] ).reset_index(drop=True) labels = mnist['label'].values mnist.drop('label', axis=1, inplace=True) mnist.columns = cols
Digit Recognizer
8,738,762
submission.to_csv('submission.csv', index=False )<save_to_csv>
idx_mnist = mnist.sort_values(by=list(mnist.columns)).index dataset_from = dataset.sort_values(by=list(mnist.columns)) ['dataset'].values original_idx = dataset.sort_values(by=list(mnist.columns)) ['index'].values
Digit Recognizer
8,738,762
submission.to_csv('submission.csv', index=False )<set_options>
for i in range(len(idx_mnist)) : if dataset_from[i] == 'test': sample_submission.loc[original_idx[i], 'Label'] = labels[idx_mnist[i]]
Digit Recognizer
8,738,762
sns.set_style("whitegrid") %matplotlib inline<set_options>
sample_submission.to_csv('submission.csv', index=False )
Digit Recognizer
4,401,419
warnings.filterwarnings('ignore' )<load_from_csv>
train_data = pd.read_csv(".. /input/train.csv" )
Digit Recognizer
4,401,419
train = pd.read_csv('.. /input/telstra-recruiting-network/train.csv.zip') test = pd.read_csv('.. /input/telstra-recruiting-network/test.csv.zip') severity_type = pd.read_csv('.. /input/telstra-recruiting-network/severity_type.csv.zip', error_bad_lines= False, warn_bad_lines= False) resource_type = pd.read_csv('.. /input/telstra-recruiting-network/resource_type.csv.zip', error_bad_lines= False, warn_bad_lines= False) log_failure = pd.read_csv('.. /input/telstra-recruiting-network/log_feature.csv.zip', error_bad_lines= False, warn_bad_lines= False) event_type = pd.read_csv('.. /input/telstra-recruiting-network/event_type.csv.zip', error_bad_lines=False, warn_bad_lines= False )<merge>
X = train_data[label_names[1:]] y = train_data[label_names[0]]
Digit Recognizer
4,401,419
train_1 = train.merge(severity_type, how = 'left', left_on='id', right_on='id') train_2 = train_1.merge(resource_type, how = 'left', left_on='id', right_on='id') train_3 = train_2.merge(log_failure, how = 'left', left_on='id', right_on='id') train_4 = train_3.merge(event_type, how = 'left', left_on='id', right_on='id' )<remove_duplicates>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2 )
Digit Recognizer
4,401,419
train_4.drop_duplicates(subset= 'id', keep= 'first', inplace = True) train_4.head()<import_modules>
img_rows, img_cols = 28, 28 num_classes =10 X_train = X_train.values.reshape([-1,28,28,1])/255 X_test = X_test.values.reshape([-1,28,28,1])/255 y_train = y_train.values y_test = y_test.values y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes )
Digit Recognizer
4,401,419
from catboost import CatBoostClassifier, Pool from sklearn.model_selection import train_test_split<prepare_x_and_y>
model = Sequential()
Digit Recognizer
4,401,419
X = train_4[['id', 'location', 'severity_type', 'resource_type', 'log_feature', 'volume', 'event_type']] y = train_4.fault_severity<split>
model.add(tf.keras.layers.Conv2D(32,(3, 3), padding='same', input_shape=(img_rows,img_cols,1))) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Conv2D(32,(3, 3))) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3, 3), padding='same')) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Conv2D(64,(3, 3))) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512)) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(num_classes)) model.add(tf.keras.layers.Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=tf.train.AdamOptimizer() , metrics=['accuracy'])
Digit Recognizer
4,401,419
X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.20, random_state=101 )<define_variables>
batch_size =128 epochs = 100 history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_test, y_test))
Digit Recognizer
4,401,419
categorical_features_indices = np.where(X_train.dtypes == object)[0]<create_dataframe>
test_data = pd.read_csv(".. /input/test.csv") y_pred = model.predict(test_data.values.reshape([-1,28,28,1])/255,batch_size=batch_size) y_pred = np.argmax(y_pred,axis=1)
Digit Recognizer
4,401,419
train_dataset = Pool(data=X_train, label=y_train, cat_features=categorical_features_indices) eval_dataset = Pool(data=X_validation, label=y_validation, cat_features=categorical_features_indices )<choose_model_class>
save = pd.DataFrame() save["ImageId"] = list(range(1,len(y_pred)+1)) save["Label"] = y_pred save.to_csv("submit.csv", index=False)
Digit Recognizer
10,941,180
model = CatBoostClassifier(iterations=1000, learning_rate=1, depth=2, loss_function='MultiClass', random_seed=1, bagging_temperature=22, od_type='Iter', metric_period=100, od_wait=100 )<train_model>
train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') print(train_df.shape )
Digit Recognizer
10,941,180
model.fit(train_dataset, eval_set= eval_dataset, plot= True )<predict_on_test>
X_train = train_df.iloc[:,1:].values y_train = train_df.iloc[:,0].values
Digit Recognizer
10,941,180
preds_class = model.predict(eval_dataset) preds_proba = model.predict_proba(eval_dataset )<merge>
N = X_train.shape[0] X_train = X_train.reshape(N,28,28,1) X_train = X_train.astype('float32') X_train /= 255. y_train = tf.one_hot(y_train, 10) X_train.shape, y_train.shape
Digit Recognizer
10,941,180
test_1 = test.merge(severity_type, how = 'left', left_on='id', right_on='id') test_2 = test_1.merge(resource_type, how = 'left', left_on='id', right_on='id') test_3 = test_2.merge(log_failure, how = 'left', left_on='id', right_on='id') test_4 = test_3.merge(event_type, how = 'left', left_on='id', right_on='id' )<remove_duplicates>
def make_model(inputs): x = tf.keras.layers.Conv2D(filters=32,kernel_size=(3,3))(inputs) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.ReLU()(x) x = tf.keras.layers.MaxPool2D()(x) x = tf.keras.layers.Conv2D(filters=64,kernel_size=(3,3))(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.ReLU()(x) x = tf.keras.layers.MaxPool2D()(x) x = tf.keras.layers.Conv2D(filters=1024,kernel_size=(5,5))(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.ReLU()(x) x = tf.keras.layers.Conv2D(filters=256,kernel_size=(1,1))(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.ReLU()(x) x = tf.keras.layers.Conv2D(filters=10,kernel_size=(1,1))(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Softmax()(x) x = tf.keras.layers.Reshape(( 10,))(x) return x
Digit Recognizer
10,941,180
test_4.drop_duplicates(subset= 'id', keep= 'first', inplace = True) test_4.head()<count_missing_values>
tf.keras.backend.clear_session() inputs = tf.keras.Input(shape=(28,28,1)) outputs = make_model(inputs) model = tf.keras.Model( inputs=inputs, outputs=outputs, name="simple" ) model.compile(optimizer='adam', loss='categorical_crossentropy' )
Digit Recognizer
10,941,180
test_4.isnull().sum()<save_to_csv>
history = model.fit(X_train,y_train, validation_split=0.2,epochs=10 )
Digit Recognizer
10,941,180
predict_test=model.predict_proba(test_4) pred_df=pd.DataFrame(predict_test,columns=['predict_0', 'predict_1', 'predict_2']) submission_cat=pd.concat([test[['id']],pred_df],axis=1) submission_cat.to_csv('sub_cat_1.csv',index=False,header=True )<load_from_csv>
X_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' ).values N_test = X_test.shape[0] X_test = X_test.reshape(N_test,28,28,1) X_test = X_test.astype('float32') X_test /= 255. X_test.shape
Digit Recognizer
10,941,180
train = pd.read_csv(input_path / 'train.csv', index_col='id') test = pd.read_csv(input_path / 'test.csv', index_col='id') submission = pd.read_csv(input_path / 'sample_submission.csv', index_col='id' )<split>
pred = model.predict(X_test) pred.shape
Digit Recognizer
10,941,180
<init_hyperparams><EOS>
submissions=pd.DataFrame({"ImageId": list(range(1,len(pred)+1)) , "Label": np.argmax(pred, axis=1)}) submissions.to_csv("my_submissions.csv", index=False, header=True )
Digit Recognizer
8,326,415
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<init_hyperparams>
import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import LearningRateScheduler
Digit Recognizer
8,326,415
parameters2 = { 'n_estimators': 350, 'tree_method': 'exact', 'learning_rate': 0.03, 'colsample_bytree': 0.9, 'subsample': 0.9, 'min_child_weight': 9, 'max_depth': 11, 'n_jobs': -1 }<train_on_grid>
batch_size = 86 num_nets = 15
Digit Recognizer
8,326,415
<choose_model_class>
digits_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") digits_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
8,326,415
<train_model>
X = digits_train.drop(columns="label" ).values.reshape(digits_train.shape[0],28,28,1)/ 255.0 Y = to_categorical(digits_train["label"], num_classes=10) X_test = digits_test.values.reshape(digits_test.shape[0],28,28,1)/ 255.0
Digit Recognizer
8,326,415
final_model = XGBRegressor(tree_method='hist', min_child_weight=9, max_depth=11, n_jobs=-1, colsample_bytree=0.5, learning_rate=0.01, n_estimators=1500) final_model.fit(X_train, y_train, early_stopping_rounds=10, eval_set=[(X_test, y_test)], verbose=False) prediction = final_model.predict(X_test) mse = mean_squared_error(y_test, prediction,squared=False) print(mse) <save_to_csv>
datagen_train = ImageDataGenerator( rotation_range = 10, zoom_range = 0.10, width_shift_range=0.1, height_shift_range=0.1 )
Digit Recognizer
8,326,415
submission['target'] = final_model.predict(test) submission.to_csv('xgb_reg.csv' )<import_modules>
model = [0] * num_nets for j in range(num_nets): model[j] = Sequential() model[j].add(Conv2D(32, kernel_size = 3, activation = "relu", input_shape =(28, 28, 1))) model[j].add(BatchNormalization()) model[j].add(Conv2D(32, kernel_size = 3, activation = "relu")) model[j].add(BatchNormalization()) model[j].add(Conv2D(32, kernel_size = 5, strides = 2, padding = "same", activation = "relu")) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64, kernel_size = 3, activation = "relu")) model[j].add(BatchNormalization()) model[j].add(Conv2D(64, kernel_size = 3, activation = "relu")) model[j].add(BatchNormalization()) model[j].add(Conv2D(64, kernel_size = 5, strides = 2, padding = "same", activation = "relu")) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(128, kernel_size = 4, activation = "relu")) model[j].add(BatchNormalization()) model[j].add(Flatten()) model[j].add(Dropout(0.4)) model[j].add(Dense(10, activation = "softmax")) model[j].compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["accuracy"] )
Digit Recognizer
8,326,415
import matplotlib.pyplot as plt import seaborn as sns from matplotlib_venn import venn2 import shap from optuna.integration import _lightgbm_tuner as lgb_tuner import optuna from catboost import CatBoost from catboost import Pool from catboost import cv import category_encoders as ce from tqdm import tqdm import lightgbm as lgb import xgboost as xgb import joblib import os import logging import datetime from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn import preprocessing from sklearn.preprocessing import LabelEncoder from sklearn.decomposition import PCA from scipy import stats from sklearn.preprocessing import StandardScaler from sklearn import metrics from catboost import CatBoostRegressor<load_from_csv>
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x) history = [0] * num_nets epochs = 45 for j in range(num_nets): x_train, x_val, y_train, y_val = train_test_split(X, Y, test_size = 0.1) history[j] = model[j].fit_generator(datagen_train.flow(x_train, y_train, batch_size = 64), epochs = epochs, steps_per_epoch = x_train.shape[0] // 64, validation_data =(x_val, y_val), callbacks = [annealer], verbose = 0) print("CNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format( j+1,epochs,max(history[j].history['accuracy']),max(history[j].history['val_accuracy'])) )
Digit Recognizer
8,326,415
<prepare_x_and_y><EOS>
results = np.zeros(( X_test.shape[0], 10)) for j in range(num_nets): results = results + model[j].predict(X_test) results = np.argmax(results, axis = 1) results = pd.Series(results, name = "Label") submission = pd.concat([pd.Series(range(1, 28001), name = "ImageId"), results], axis = 1) submission.ImageId = submission.ImageId.astype(int) submission.to_csv("prediction.csv", index = False )
Digit Recognizer
7,034,662
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<init_hyperparams>
import datetime import imageio import numpy as np import pandas as pd import pickle from sklearn.model_selection import train_test_split from fastai.vision import * from fastai.metrics import accuracy, error_rate from fastai.widgets import DatasetFormatter, PredictionsCorrector
Digit Recognizer
7,034,662
# --- LightGBM cross-validation configuration ---
fold_num = 10
EARLY_STOPPING_ROUNDS = 10
VERBOSE_EVAL = 10000
LGB_ROUND_NUM = 10000
objective = 'regression'
metric = 'rmse'
params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': objective,
    'metric': metric,
    'verbosity': -1,
    "seed": 42,
}


@contextmanager
def timer(logger=None, format_str='{:.3f}[s]', prefix=None, suffix=None):
    """Measure the wall-clock time of the managed body.

    The elapsed seconds are formatted with ``format_str`` (optionally
    wrapped in ``prefix``/``suffix``) and sent to ``logger.info`` when a
    logger is given, otherwise printed.
    """
    if prefix:
        format_str = str(prefix) + format_str
    if suffix:
        format_str = format_str + str(suffix)
    start = time()
    yield
    d = time() - start
    out_str = format_str.format(d)
    if logger:
        logger.info(out_str)
    else:
        print(out_str)


def fit_lgbm(X, y, cv, params: dict = None, verbose: int = 50):
    """Train one tuned LightGBM booster per CV fold.

    Parameters
    ----------
    X, y : indexable feature matrix / target vector.
    cv : iterable of (train_idx, valid_idx) index pairs.
    params : base LightGBM parameters (defaults to {}).
    verbose : unused; kept for interface compatibility.

    Returns
    -------
    (oof_pred, models) : out-of-fold predictions and the fitted boosters.
    """
    metric_func = mean_squared_error
    if params is None:
        params = {}
    models = []
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float gives the same float64 dtype.
    oof_pred = np.zeros_like(y, dtype=float)
    for i, (idx_train, idx_valid) in enumerate(cv):
        x_train, y_train = X[idx_train], y[idx_train]
        x_valid, y_valid = X[idx_valid], y[idx_valid]
        lgb_train = lgb.Dataset(x_train, y_train)
        lgb_valid = lgb.Dataset(x_valid, y_valid)
        with timer(prefix='fit fold={} '.format(i + 1)):
            lgb_model = lgb_tuner.train(
                params, lgb_train,
                num_boost_round=LGB_ROUND_NUM,
                valid_names=["train", "valid"],
                valid_sets=[lgb_train, lgb_valid],
                early_stopping_rounds=EARLY_STOPPING_ROUNDS,
                verbose_eval=VERBOSE_EVAL)
        pred_i = lgb_model.predict(x_valid)
        oof_pred[idx_valid] = pred_i
        models.append(lgb_model)
        print(f'Fold {i} RMSE: {metric_func(y_valid, pred_i)**.5:.4f}')
    score = metric_func(y, oof_pred) ** .5
    print('FINISHED | Whole RMSE: {:.4f}'.format(score))
    return oof_pred, models
print(fastai.__version__ )
Digit Recognizer
7,034,662
fold = KFold(n_splits=5, shuffle=True, random_state=71) cv = list(fold.split(X, y)) oof, models = fit_lgbm(X.values, y, cv, params=params )<create_dataframe>
np.random.seed(42 )
Digit Recognizer
7,034,662
def visualize_importance(models, feat_train_df):
    """Plot LightGBM feature importances aggregated across CV folds.

    Each fitted booster contributes one importance value per feature; the
    50 features with the highest summed importance are shown in a boxen
    plot, ordered descending. Returns the matplotlib (fig, ax) pair.
    """
    fold_frames = []
    for fold_idx, booster in enumerate(models):
        frame = pd.DataFrame()
        frame['feature_importance'] = booster.feature_importance()
        frame['column'] = feat_train_df.columns
        frame['fold'] = fold_idx + 1
        fold_frames.append(frame)
    feature_importance_df = pd.concat(fold_frames, axis=0, ignore_index=True)

    # Rank columns by their total importance over all folds; keep the top 50.
    totals = feature_importance_df.groupby('column').sum()[['feature_importance']]
    order = totals.sort_values('feature_importance', ascending=False).index[:50]

    fig, ax = plt.subplots(figsize=(max(6, len(order) * .4), 7))
    sns.boxenplot(data=feature_importance_df, x='column', y='feature_importance',
                  order=order, ax=ax, palette='viridis')
    ax.tick_params(axis='x', rotation=90)
    ax.grid()
    fig.tight_layout()
    return fig, ax
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
7,034,662
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)


def opt(trial):
    """Optuna objective: tune an XGBoost regressor, return hold-out MSE.

    Samples the search space from *trial*, fits on the 80% train split and
    scores mean squared error on the 20% hold-out split.
    """
    n_estimators = trial.suggest_int('n_estimators', 0, 1000)
    max_depth = trial.suggest_int('max_depth', 1, 20)
    learning_rate = trial.suggest_discrete_uniform('learning_rate', 0.01, 0.1, 0.01)
    min_child_weight = trial.suggest_int('min_child_weight', 1, 20)
    subsample = trial.suggest_discrete_uniform('subsample', 0.5, 0.9, 0.1)
    colsample_bytree = trial.suggest_discrete_uniform('colsample_bytree', 0.5, 0.9, 0.1)
    xgboost_tuna = xgb.XGBRegressor(
        random_state=42,
        n_estimators=n_estimators,
        max_depth=max_depth,
        # BUG FIX: learning_rate was sampled from the trial but never passed
        # to the model, so Optuna was tuning a dead parameter.
        learning_rate=learning_rate,
        min_child_weight=min_child_weight,
        subsample=subsample,
        colsample_bytree=colsample_bytree,
    )
    xgboost_tuna.fit(X_train, y_train)
    tuna_pred_test = xgboost_tuna.predict(X_test)
    return mean_squared_error(y_test, tuna_pred_test)
y = train["label"].values X = train.iloc[:, 1:].values X.shape, y.shape
Digit Recognizer
7,034,662
def fit_xgb(X, y, cv, params: dict = None, verbose: int = 50):
    """Train one XGBoost regressor per CV fold.

    Parameters
    ----------
    X, y : indexable feature matrix / target vector.
    cv : iterable of (train_idx, valid_idx) index pairs.
    params : keyword arguments forwarded to ``xgb.XGBRegressor``.
    verbose : unused; kept for interface compatibility.

    Returns
    -------
    (oof_pred, models) : out-of-fold predictions and the fitted models.
    """
    metric_func = mean_squared_error
    if params is None:
        params = {}
    models = []
    # BUG FIX: np.float was removed in NumPy 1.24; the builtin float gives
    # the same float64 dtype.
    oof_pred = np.zeros_like(y, dtype=float)
    for i, (idx_train, idx_valid) in enumerate(cv):
        x_train, y_train = X[idx_train], y[idx_train]
        x_valid, y_valid = X[idx_valid], y[idx_valid]
        model_xgb = xgb.XGBRegressor(**params)
        with timer(prefix='fit fold={} '.format(i + 1)):
            model_xgb.fit(x_train, y_train,
                          eval_set=[(x_valid, y_valid)], verbose=-1)
        pred_i = model_xgb.predict(x_valid)
        oof_pred[idx_valid] = pred_i
        models.append(model_xgb)
        print(f'Fold {i} RMSE: {metric_func(y_valid, pred_i)**.5:.4f}')
    score = metric_func(y, oof_pred) ** .5
    print('FINISHED | Whole RMSE: {:.4f}'.format(score))
    return oof_pred, models
X_train, X_valid, y_train, y_valid = \ train_test_split(X, y, test_size=0.05, random_state=42, stratify=y )
Digit Recognizer
7,034,662
params_xgb = {'n_estimators': 208, 'max_depth': 4, 'learning_rate':0.08, 'min_child_weight': 13, 'subsample': 0.8, 'colsample_bytree': 0.8} oof_xgb, models_xgb = fit_xgb(X.values, y, cv, params=params_xgb )<set_options>
def to_img_shape(X, y=()):
    """Reshape flat Nx784 rows into Nx28x28x3 pseudo-RGB images.

    The single grey channel is replicated three times so the images can be
    consumed by ImageNet-pretrained models. Returns ``(X, y)`` as numpy
    arrays; ``y`` defaults to an empty array.

    NOTE: the original default ``y=[]`` was a mutable default argument;
    an immutable tuple is equivalent here (it is only read, never mutated).
    """
    X = np.array(X).reshape(-1, 28, 28)
    X = np.stack((X,) * 3, axis=-1)  # grey -> 3 identical channels
    y = np.array(y)
    return X, y


def save_imgs(path: Path, data, labels=()):
    """Write *data* images as JPEGs under *path*.

    With *labels*, images go into one sub-directory per class (the
    ``from_folder`` layout); otherwise they are saved flat with
    zero-padded ``testimg_*`` names.
    """
    path.mkdir(parents=True, exist_ok=True)
    for label in np.unique(labels):
        (path / str(label)).mkdir(parents=True, exist_ok=True)
    for i in range(len(data)):
        if len(labels) != 0:
            imageio.imsave(str(path / str(labels[i]) / (str(i) + '.jpg')), data[i])
        else:
            imageio.imsave(str(path / f'testimg_{str(i).zfill(5)}.jpg'), data[i])
Digit Recognizer
7,034,662
def opt_cb(trial):
    """Optuna objective: tune a CatBoost regressor, return hold-out MSE.

    Samples hyper-parameters from *trial*, fits on the train pool and
    scores mean squared error of the rounded predictions on the test pool.
    """
    search_space = {
        'iterations': trial.suggest_int('iterations', 50, 300),
        'depth': trial.suggest_int('depth', 4, 10),
        'learning_rate': trial.suggest_loguniform('learning_rate', 0.01, 0.3),
        'random_strength': trial.suggest_int('random_strength', 0, 100),
        'bagging_temperature': trial.suggest_loguniform('bagging_temperature', 0.01, 100.00),
        'od_type': trial.suggest_categorical('od_type', ['IncToDec', 'Iter']),
        'od_wait': trial.suggest_int('od_wait', 10, 50),
    }
    pool_train = Pool(X_train, y_train)
    pool_test = Pool(X_test, y_test)
    regressor = CatBoostRegressor(**search_space)
    regressor.fit(pool_train)
    raw_pred = regressor.predict(pool_test)
    rounded_pred = np.rint(raw_pred)  # round to nearest integer label
    return mean_squared_error(y_test, rounded_pred)
X_train, y_train = to_img_shape(X_train, y_train) X_valid, y_valid = to_img_shape(X_valid, y_valid) X_test, _ = to_img_shape(test )
Digit Recognizer
7,034,662
def fit_cb(X, y, cv, params: dict = None, verbose: int = 50):
    """Train one CatBoost model per CV fold.

    Parameters
    ----------
    X, y : indexable feature matrix / target vector.
    cv : iterable of (train_idx, valid_idx) index pairs.
    params : CatBoost parameter dict (defaults to {}).
    verbose : unused; kept for interface compatibility.

    Returns
    -------
    (oof_pred, models) : out-of-fold predictions and the fitted models.
    """
    metric_func = mean_squared_error
    if params is None:
        params = {}
    models = []
    # BUG FIX: np.float was removed in NumPy 1.24; the builtin float gives
    # the same float64 dtype.
    oof_pred = np.zeros_like(y, dtype=float)
    for i, (idx_train, idx_valid) in enumerate(cv):
        x_train, y_train = X[idx_train], y[idx_train]
        x_valid, y_valid = X[idx_valid], y[idx_valid]
        train_pool = Pool(x_train, label=y_train)
        valid_pool = Pool(x_valid, label=y_valid)
        model_cb = CatBoost(params)
        with timer(prefix='fit fold={} '.format(i + 1)):
            model_cb.fit(train_pool, eval_set=valid_pool, use_best_model=True,
                         silent=True, plot=False)
        print(model_cb.get_best_score())
        pred_i = model_cb.predict(x_valid)
        oof_pred[idx_valid] = pred_i
        models.append(model_cb)
        print(f'Fold {i} RMSE: {metric_func(y_valid, pred_i)**.5:.4f}')
    score = metric_func(y, oof_pred) ** .5
    print('FINISHED | Whole RMSE: {:.4f}'.format(score))
    return oof_pred, models
%%time save_imgs(Path('/kaggle/working/data/train'), X_train, y_train )
Digit Recognizer
7,034,662
params_cb = { 'loss_function': 'RMSE', 'max_depth': 3, 'learning_rate': 0.08, 'subsample': 0.8, 'num_boost_round': 1000, 'early_stopping_rounds': 100, } oof_cb, models_cb = fit_cb(X.values, y, cv, params=params_cb )<drop_column>
%%time save_imgs(Path('/kaggle/working/data/valid'), X_valid, y_valid )
Digit Recognizer
7,034,662
df_test = df_test.drop("id",axis=1 )<predict_on_test>
%%time save_imgs(Path('/kaggle/working/data/test'), X_test )
Digit Recognizer
7,034,662
def _clipped_mean(fold_models, features):
    """Average the fold models' predictions and clip negatives to zero."""
    stacked = np.array([m.predict(features) for m in fold_models])
    averaged = np.mean(stacked, axis=0)
    return np.where(averaged < 0, 0, averaged)


# Blend the three model families by a simple row-wise mean.
pred_lgb = _clipped_mean(models, df_test.values)
pred_xgb = _clipped_mean(models_xgb, df_test.values)
pred_cb = _clipped_mean(models_cb, df_test.values)

tmp_sub = pd.DataFrame({"lgb": pred_lgb, "xgb": pred_xgb, "cb": pred_cb})
tmp_sub["pred"] = tmp_sub.mean(axis="columns")
path = Path('/kaggle/working/data') image_list =(ImageList.from_folder(path) .split_by_folder() .label_from_folder()) data =(image_list.databunch(bs=1) .normalize(imagenet_stats))
Digit Recognizer
7,034,662
submission["target"] = tmp_sub["pred"].copy()<save_to_csv>
tfms = get_transforms() tfms[0]
Digit Recognizer
7,034,662
submission.to_csv("submission.csv", index=False )<save_to_csv>
tfms[1]
Digit Recognizer
7,034,662
submission.to_csv("submission.csv", index=False )<create_dataframe>
tfms = get_transforms(do_flip=False) tfms[0]
Digit Recognizer
7,034,662
oof_df = pd.DataFrame({"lgb":oof, "xgb":oof_xgb, "cb":oof_cb}) oof_df["pred"] = oof_df.mean(axis="columns" )<import_modules>
image_list =(image_list.transform(get_transforms(do_flip=False), size=28) .add_test(ItemList.from_folder(path=path/"test"), label=None)) data =(image_list.databunch(bs=256) .normalize(imagenet_stats))
Digit Recognizer
7,034,662
import numpy as np import pandas as pd<load_from_csv>
learn = cnn_learner(data, models.resnet18, metrics=accuracy )
Digit Recognizer
7,034,662
train_data = pd.read_csv('.. /input/tabular-playground-series-jan-2021/train.csv') test_data = pd.read_csv('.. /input/tabular-playground-series-jan-2021/test.csv') print('Train: ', train_data.shape) print('Test: ', test_data.shape )<prepare_x_and_y>
learn.model
Digit Recognizer
7,034,662
y = train_data['target'] X = train_data.drop(columns=['target', 'id']) X_test = test_data.drop(columns='id' )<split>
learn.lr_find()
Digit Recognizer
7,034,662
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size = 0.05, random_state=22 )<init_hyperparams>
learn.fit_one_cycle(5, 0.02 )
Digit Recognizer
7,034,662
# LightGBM hyper-parameters (tuned offline). The learning rate is lowered
# and an iteration budget added just before the final cross-validated fit.
params = {'objective': 'regression', 'metric': 'rmse', 'verbosity': -1,
          'boosting_type': 'gbdt', 'feature_pre_filter': False,
          'learning_rate': 0.007, 'num_leaves': 102, 'min_child_samples': 20,
          'sub_feature': 0.4, 'sub_row': 1, 'subsample_freq': 0,
          'lambda_l1': 4.6, 'lambda_l2': 1.9}

N_FOLDS = 10
kf = KFold(n_splits=N_FOLDS)

oof = np.zeros(len(y))           # tuned-model out-of-fold predictions
oof_vanilla = np.zeros(len(y))   # default-model out-of-fold predictions
preds = np.zeros(len(X_test))    # test predictions averaged over folds

params['learning_rate'] = 0.005
params['num_iterations'] = 5000

for train_ind, test_ind in tqdm(kf.split(X)):
    fold_X_train, fold_X_val = X.iloc[train_ind], X.iloc[test_ind]
    fold_y_train, fold_y_val = y.iloc[train_ind], y.iloc[test_ind]

    model = LGBMRegressor(**params)
    vanilla_model = LGBMRegressor()

    model.fit(fold_X_train, fold_y_train, eval_set=((fold_X_val, fold_y_val)),
              early_stopping_rounds=50, verbose=0)
    vanilla_model.fit(fold_X_train, fold_y_train)

    oof[test_ind] = model.predict(fold_X_val)
    oof_vanilla[test_ind] = vanilla_model.predict(fold_X_val)
    preds += model.predict(X_test) / N_FOLDS

print(f'mean square error on training data(vanilla model): {np.round(mean_squared_error(y, oof_vanilla, squared=False),5)}')
print(f'mean square error on training data(with tuning): {np.round(mean_squared_error(y, oof, squared=False),5)}')
learn.save('stage-1' )
Digit Recognizer