kernel_id: int64 (24.2k – 23.3M)
prompt: string (lengths 8 – 1.85M)
completetion: string (lengths 1 – 182k)
comp_name: string (lengths 5 – 57)
2,075,583
%matplotlib inline <merge>
def model_cnn(input_shape=input_shape, num_classes=num_classes): model = Sequential() model.add(Conv2D(32, kernel_size =(3,3), activation='relu', input_shape = input_shape)) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size =(3,3), activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size =(5,5), strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(64, kernel_size =(3,3), activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size =(3,3), activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size =(5,5), strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(128, kernel_size = 4, activation='relu')) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dropout(0.4)) model.add(Dense(num_classes, activation = "softmax")) model.compile(optimizer = Adam() , loss = "categorical_crossentropy", metrics=["accuracy"]) return model
Digit Recognizer
2,075,583
train_c = train.join(COLOR) test_c = test.join(COLOR2) <choose_model_class>
def LeNet5(input_shape=input_shape,num_classes=num_classes): model = Sequential() model.add(Conv2D(6, kernel_size=(5, 5), strides=(1, 1), activation='relu', input_shape=input_shape, padding="same")) model.add(AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid')) model.add(Conv2D(16, kernel_size=(5, 5), strides=(1, 1), activation='relu', padding='valid')) model.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')) model.add(Conv2D(120, kernel_size=(5, 5), strides=(1, 1), activation='relu', padding='valid')) model.add(Flatten()) model.add(Dense(84, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(optimizer = Adam() , loss = "categorical_crossentropy", metrics=["accuracy"]) return model
Digit Recognizer
2,075,583
def GOB() : global y_pred_c global train global test hidden_layer_sizes=(100,) activation = 'relu' solver = 'adam' batch_size = 'auto' alpha = 0.0001 random_state = 0 max_iter = 10000 early_stopping = True train_X = train_c train_y1 = target_GOB clf = MLPRegressor( hidden_layer_sizes=hidden_layer_sizes, activation=activation, solver=solver, batch_size=batch_size, alpha=alpha, random_state=random_state, max_iter=max_iter, ) clf.fit(train_X, train_y1) SAVE_TRAINED_DATA_PATH = 'train1.learn' joblib.dump(clf, SAVE_TRAINED_DATA_PATH) clf1 = joblib.load(SAVE_TRAINED_DATA_PATH) test_X1 = test_c predict_Y1 = clf1.predict(train_X) print(predict_Y1) y_pred_c = pd.DataFrame(predict_Y1) train = train.join(y_pred_c) predict_Y1 = clf1.predict(test_X1) print(predict_Y1) y_pred_c = pd.DataFrame(predict_Y1) test = test.join(y_pred_c )<concatenate>
datagen = ImageDataGenerator(rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1) datagen.fit(x_train )
Digit Recognizer
2,075,583
dataset = pd.concat([train, test], ignore_index = True) train.isnull().sum()<choose_model_class>
models = [] for i in range(len(model)) : model[i].fit_generator(datagen.flow(x_train,y_train, batch_size=batch_size), epochs = epochs, steps_per_epoch=x_train.shape[0] // batch_size, validation_data =(x_test,y_test), callbacks=[ReduceLROnPlateau(monitor='loss', patience=3, factor=0.1)], verbose=2) models.append(model[i] )
Digit Recognizer
2,075,583
def lightGBM_k() : global y_pred_AA hidden_layer_sizes=(100,) activation = 'relu' solver = 'adam' batch_size = 'auto' alpha = 0.0001 random_state = 0 max_iter = 10000 early_stopping = True train_X = train_k train_y1 = target_k clf = MLPRegressor( hidden_layer_sizes=hidden_layer_sizes, activation=activation, solver=solver, batch_size=batch_size, alpha=alpha, random_state=random_state, max_iter=max_iter, ) clf.fit(train_X, train_y1) SAVE_TRAINED_DATA_PATH = 'train1.learn' joblib.dump(clf, SAVE_TRAINED_DATA_PATH) clf1 = joblib.load(SAVE_TRAINED_DATA_PATH) test_X1 = test_k predict_Y = clf1.predict(test_X1) print(predict_Y) predict_Y = np.round(predict_Y, decimals=1) predict_Y1 = np.round(predict_Y) print(predict_Y1) y_pred_AA = predict_Y1 print("Training data accuracy: {:.3f}".format(clf.score(train_X, train_y1))) print("Test result accuracy: {:.3f}".format(clf.score(test_X1, predict_Y1)) )<create_dataframe>
labels = [] for m in models: predicts = np.argmax(m.predict(test), axis=1) labels.append(predicts) labels = np.array(labels) labels = np.transpose(labels,(1, 0)) labels = scipy.stats.mode(labels, axis=-1)[0] labels = np.squeeze(labels )
Digit Recognizer
2,075,583
<save_to_csv><EOS>
pd.DataFrame({'ImageId' : np.arange(1, predicts.shape[0] + 1), 'Label' : labels } ).to_csv('submission.csv', index=False )
Digit Recognizer
897,687
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
sns.set(style='white', context='notebook', palette='deep') plt.rcParams['image.cmap']='gray'
Digit Recognizer
897,687
sns.set_style("whitegrid") warnings.filterwarnings('ignore') <load_from_csv>
col_names = ['label']+[str(x) for x in range(784)] df = pd.concat([ pd.read_csv('../input/mnist-in-csv/mnist_train.csv', names=col_names, header=0), pd.read_csv('../input/mnist-in-csv/mnist_test.csv', names=col_names, header=0), pd.read_csv('../input/digit-recognizer/train.csv', names=col_names, header=0), pd.read_csv('../input/digit-recognizer/test.csv', names=col_names[1:], header=0), ], axis=0, sort=False, ignore_index=True)
Digit Recognizer
897,687
train_data=pd.read_csv('/kaggle/input/walmart-recruiting-store-sales-forecasting/train.csv.zip',parse_dates=True) sample_submission=pd.read_csv('/kaggle/input/walmart-recruiting-store-sales-forecasting/sampleSubmission.csv.zip') features_data=pd.read_csv('/kaggle/input/walmart-recruiting-store-sales-forecasting/features.csv.zip',parse_dates=True) stores_data=pd.read_csv('/kaggle/input/walmart-recruiting-store-sales-forecasting/stores.csv') test_data=pd.read_csv('/kaggle/input/walmart-recruiting-store-sales-forecasting/test.csv.zip' )<data_type_conversions>
num_classes = 10 X, y = df[col_names[1:]], df[col_names[0]]
Digit Recognizer
897,687
df = pd.DataFrame() for i in tq.tqdm(range(1,46)) : model=Prophet() filled=features_data[(( features_data['Store']==i)&(features_data['Date']<'2013-05-03')) ][['Date','CPI']] tserie = filled.rename(columns = {'Date': 'ds', 'CPI': 'y'}, inplace = False) tserie =tserie.sort_values(by=['ds']) tserie['ds'] = pd.to_datetime(tserie['ds']) model.fit(tserie) future_dates=model.make_future_dataframe(periods=13,freq = 'W',include_history =True) future_dates['ds'] = future_dates['ds'].apply(lambda x: x + timedelta(days=5)) prediction=model.predict(future_dates) df=df.append(prediction) df.reset_index(drop=True) features_data['CPI1']=np.nan for i,j in enumerate(df['yhat']): features_data['CPI1'].iloc[i]=j features_data=features_data.drop(['CPI'],axis=1) features_data = features_data.rename(columns = {'CPI1': 'CPI'} )<data_type_conversions>
train = y.notna() test = ~train y_matrix = (y.values[:, None] == range(num_classes)).astype(int) Xtrain, ytrain = X[train], y_matrix[train] Xtest, ytest = X[test], y_matrix[test]
Digit Recognizer
897,687
df = pd.DataFrame() for i in tq.tqdm(range(1,46)) : model=Prophet() filled=features_data[(( features_data['Store']==i)&(features_data['Date']<'2013-05-03')) ][['Date','Unemployment']] tserie = filled.rename(columns = {'Date': 'ds', 'Unemployment': 'y'}, inplace = False) tserie =tserie.sort_values(by=['ds']) tserie['ds'] = pd.to_datetime(tserie['ds']) model.fit(tserie) future_dates=model.make_future_dataframe(periods=13,freq = 'W',include_history =True) future_dates['ds'] = future_dates['ds'].apply(lambda x: x + timedelta(days=5)) prediction=model.predict(future_dates) df=df.append(prediction) df.reset_index(drop=True) features_data['Unemployment1']=np.nan for i,j in enumerate(df['yhat']): features_data['Unemployment1'].iloc[i]=j features_data=features_data.drop(['Unemployment'],axis=1) features_data = features_data.rename(columns = {'Unemployment1': 'Unemployment'} )<merge>
def baseline_model() : model = Sequential() model.add(Conv2D(32, kernel_size=(6, 6), strides=(2, 2), activation='relu',input_shape=Xtrain[0].shape)) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Conv2D(64,(5, 5), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(100, activation='relu')) model.add(Dense(25, activation='relu')) model.add(Dense(num_classes, activation='sigmoid')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model
Digit Recognizer
897,687
stores = stores_data.merge(features_data, on ='Store' , how = 'left') final_data_train = train_data.merge(stores, on = ['Store', 'Date', 'IsHoliday'], how = 'left' )<merge>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=20, width_shift_range=0.1, height_shift_range=0.1, brightness_range=None, shear_range=5, zoom_range=-0.4, fill_mode='nearest', horizontal_flip=False, vertical_flip=False, rescale=None, preprocessing_function=None, data_format=None, validation_split=0.2 )
Digit Recognizer
897,687
stores = stores_data.merge(features_data, on ='Store' , how = 'left') final_data_test = test_data.merge(stores, on = ['Store', 'Date', 'IsHoliday'], how = 'left' )<categorify>
datagen.fit(Xtrain )
Digit Recognizer
897,687
def markdown_imputation(final_data): final_data.loc[final_data.MarkDown1.isnull() ,'MarkDown1']= 0 final_data.loc[final_data.MarkDown2.isnull() ,'MarkDown2']= 0 final_data.loc[final_data.MarkDown3.isnull() ,'MarkDown3']= 0 final_data.loc[final_data.MarkDown4.isnull() ,'MarkDown4']= 0 final_data.loc[final_data.MarkDown5.isnull() ,'MarkDown5']= 0 return final_data<split>
augmentation = True if augmentation: history = estimator.fit_generator( datagen.flow(Xtrain, ytrain, batch_size=10), steps_per_epoch=Xtrain.shape[0], epochs=10 ) else: history = estimator.fit( Xtrain, ytrain, batch_size=32, epochs=10, validation_split=len(ytest)/len(y) )
Digit Recognizer
897,687
def train_temp_bins(final_data): temp_100_110_f=final_data[(( final_data.Temperature>100)&(final_data.Temperature< 110)) ].Weekly_Sales.sum() temp_90_100_f=final_data[(( final_data.Temperature>90)&(final_data.Temperature< 100)) ].Weekly_Sales.sum() temp_80_90_f=final_data[(( final_data.Temperature>80)&(final_data.Temperature< 90)) ].Weekly_Sales.sum() temp_70_80_f=final_data[(( final_data.Temperature>70)&(final_data.Temperature< 80)) ].Weekly_Sales.sum() temp_60_70_f=final_data[(( final_data.Temperature>60)&(final_data.Temperature< 70)) ].Weekly_Sales.sum() temp_50_60_f=final_data[(( final_data.Temperature>50)&(final_data.Temperature< 60)) ].Weekly_Sales.sum() temp_40_50_f=final_data[(( final_data.Temperature>40)&(final_data.Temperature< 50)) ].Weekly_Sales.sum() temp_30_40_f=final_data[(( final_data.Temperature>30)&(final_data.Temperature< 40)) ].Weekly_Sales.sum() temp_0_30_f=final_data[(( final_data.Temperature>0)&(final_data.Temperature< 30)) ].Weekly_Sales.sum() temp_less_than_0_f=final_data[(( final_data.Temperature>-10)&(final_data.Temperature< 0)) ].Weekly_Sales.sum() final_data['Temp_bins'] = np.nan final_data.loc[(( final_data.Temperature>-10)&(final_data.Temperature<0)) ,'Temp_bins']= temp_less_than_0_f final_data.loc[(( final_data.Temperature>0)&(final_data.Temperature< 30)) ,'Temp_bins']= temp_0_30_f final_data.loc[(( final_data.Temperature>30)&(final_data.Temperature< 40)) ,'Temp_bins']= temp_30_40_f final_data.loc[(( final_data.Temperature>40)&(final_data.Temperature< 50)) ,'Temp_bins']= temp_40_50_f final_data.loc[(( final_data.Temperature>50)&(final_data.Temperature< 60)) ,'Temp_bins']= temp_50_60_f final_data.loc[(( final_data.Temperature>60)&(final_data.Temperature< 70)) ,'Temp_bins']= temp_60_70_f final_data.loc[(( final_data.Temperature>70)&(final_data.Temperature< 80)) ,'Temp_bins']= temp_70_80_f final_data.loc[(( final_data.Temperature>80)&(final_data.Temperature< 90)) ,'Temp_bins']= temp_80_90_f final_data.loc[(( final_data.Temperature>90)&(final_data.Temperature< 100)) ,'Temp_bins']= temp_90_100_f final_data.loc[(( final_data.Temperature>100)&(final_data.Temperature< 110)) ,'Temp_bins']= temp_100_110_f final_data.loc[final_data.Temp_bins.isnull() ,'Temp_bins']= 0 list1=[temp_less_than_0_f,temp_0_30_f,temp_30_40_f,temp_40_50_f,temp_50_60_f,temp_60_70_f,temp_70_80_f,temp_80_90_f,temp_90_100_f,temp_100_110_f] return final_data,list1<feature_engineering>
ytest = estimator.predict_classes(Xtest )
Digit Recognizer
897,687
def test_temp_bins(final_data,list1): final_data['Temp_bins'] = np.nan final_data.loc[(( final_data.Temperature>-10)&(final_data.Temperature<0)) ,'Temp_bins']= list1[0] final_data.loc[(( final_data.Temperature>0)&(final_data.Temperature< 30)) ,'Temp_bins']= list1[1] final_data.loc[(( final_data.Temperature>30)&(final_data.Temperature< 40)) ,'Temp_bins']= list1[2] final_data.loc[(( final_data.Temperature>40)&(final_data.Temperature< 50)) ,'Temp_bins']= list1[3] final_data.loc[(( final_data.Temperature>50)&(final_data.Temperature< 60)) ,'Temp_bins']= list1[4] final_data.loc[(( final_data.Temperature>60)&(final_data.Temperature< 70)) ,'Temp_bins']= list1[5] final_data.loc[(( final_data.Temperature>70)&(final_data.Temperature< 80)) ,'Temp_bins']= list1[6] final_data.loc[(( final_data.Temperature>80)&(final_data.Temperature< 90)) ,'Temp_bins']= list1[7] final_data.loc[(( final_data.Temperature>90)&(final_data.Temperature< 100)) ,'Temp_bins']= list1[8] final_data.loc[(( final_data.Temperature>100)&(final_data.Temperature< 110)) ,'Temp_bins']= list1[9] final_data.loc[final_data.Temp_bins.isnull() ,'Temp_bins']= 0 return final_data<feature_engineering>
submit = pd.DataFrame(data={'ImageId': range(1, ytest.shape[0]+1), 'Label': ytest} )
Digit Recognizer
897,687
def split(final_data): final_data['Date'] = pd.to_datetime(final_data['Date']) final_data['Year'] = final_data['Date'].dt.year final_data['Month']= final_data['Date'].dt.month final_data['Week'] = final_data['Date'].dt.week final_data['Day'] = final_data['Date'].dt.day return final_data<feature_engineering>
submit.to_csv("submit.csv", index=None )
Digit Recognizer
7,780,225
def days_from_christmas_for_train(x): if x['Year']== 2010 : diff=datetime.datetime(2010, 12, 31)-x['Date'] return diff.days if(( x['Year']== 2011)and(x['Date']< datetime.datetime(2011, 12, 30))): diff=datetime.datetime(2011, 12, 30)-x['Date'] return diff.days else: return 0<feature_engineering>
import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import LearningRateScheduler
Digit Recognizer
7,780,225
def days_from_christmas_for_test(x): if x['Year']== 2010 : diff=datetime.datetime(2010, 12, 31)-x['Date'] return diff.days if(( x['Year']== 2011)and(x['Date']< datetime.datetime(2011, 12, 30))): diff=datetime.datetime(2011, 12, 30)-x['Date'] return diff.days if(( x['Year']== 2012)and(x['Date']< datetime.datetime(2012, 12, 28))): diff=datetime.datetime(2012, 12, 28)-x['Date'] return diff.days if(( x['Year']== 2013)and(x['Date']< datetime.datetime(2013, 12, 27))): diff=datetime.datetime(2013, 12, 27)-x['Date'] return diff.days else: return 0<categorify>
sample_submission = pd.read_csv("../input/digit-recognizer/sample_submission.csv") test = pd.read_csv("../input/digit-recognizer/test.csv") train = pd.read_csv("../input/digit-recognizer/train.csv")
Digit Recognizer
7,780,225
def days_from_thanksgiving_for_train(x): if(( x['Year']== 2010)and(x['Date']< datetime.datetime(2010, 11, 26))): diff=datetime.datetime(2010, 11, 26)-x['Date'] return diff.days if(( x['Year']== 2011)and(x['Date']< datetime.datetime(2011, 11, 25))): diff=datetime.datetime(2011, 11, 25)-x['Date'] return diff.days else: return 0<feature_engineering>
Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1) X_train = X_train / 255.0 X_test = test / 255.0 X_train = X_train.values.reshape(-1,28,28,1) X_test = X_test.values.reshape(-1,28,28,1) Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
7,780,225
def days_from_thanksgiving_for_test(x): if(( x['Year']== 2010)and(x['Date']< datetime.datetime(2010, 11, 26))): diff=datetime.datetime(2010, 11, 26)-x['Date'] return diff.days if(( x['Year']== 2011)and(x['Date']< datetime.datetime(2011, 11, 25))): diff=datetime.datetime(2011, 11, 25)-x['Date'] return diff.days if(( x['Year']== 2012)and(x['Date']< datetime.datetime(2012, 11, 23))): diff=datetime.datetime(2012, 11, 23)-x['Date'] return diff.days if(( x['Year']== 2013)and(x['Date']< datetime.datetime(2013, 11, 29))): diff=datetime.datetime(2013, 11, 29)-x['Date'] return diff.days else: return 0<categorify>
datagen = ImageDataGenerator( rotation_range=10, zoom_range = 0.10, width_shift_range=0.1, height_shift_range=0.1 )
Digit Recognizer
7,780,225
def holiday_type(x): if(x['IsHoliday']== 1)&(x['Week']==6): return 1 elif(x['IsHoliday']== 1)&(x['Week']==36): return 2 elif(x['IsHoliday']== 1)&(x['Week']==47): return 3 elif(x['IsHoliday']== 1)&(x['Week']==52): return 4 else: return 0<feature_engineering>
nets = 15 model = [0] *nets for j in range(nets): model[j] = Sequential() model[j].add(Conv2D(32, kernel_size = 3, activation='relu', input_shape =(28, 28, 1))) model[j].add(BatchNormalization()) model[j].add(Conv2D(32, kernel_size = 3, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64, kernel_size = 3, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64, kernel_size = 3, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(128, kernel_size = 4, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Flatten()) model[j].add(Dropout(0.4)) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
7,780,225
def holiday_label(final_data): final_data.loc[(final_data.IsHoliday==True),'IsHoliday']= 1 final_data.loc[(final_data.IsHoliday==False),'IsHoliday']= 0 return final_data<categorify>
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x) history = [0] * nets epochs = 20 for j in range(nets): X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size = 0.1) history[j] = model[j].fit_generator(datagen.flow(X_train2,Y_train2, batch_size=66), epochs = epochs, steps_per_epoch = X_train2.shape[0]//66, validation_data =(X_val2,Y_val2), callbacks=[annealer], verbose=0) print("CNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(j+1,epochs,max(history[j].history['accuracy']),max(history[j].history['val_accuracy'])) )
Digit Recognizer
7,780,225
def type_label(final_data): final_data.loc[(final_data.Type=='A'),'Type']= 1 final_data.loc[(final_data.Type=='B'),'Type']= 2 final_data.loc[(final_data.Type=='C'),'Type']= 3 return final_data<categorify>
results = np.zeros(( X_test.shape[0],10)) for j in range(nets): results = results + model[j].predict(X_test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("submission_digit.csv",index=False )
Digit Recognizer
8,708,408
def holiday_in_week_train(final_data): dates =[] for ptr in holidays.US(years = 2010 ).items() : dates.append(ptr[0]) for ptr in holidays.US(years = 2011 ).items() : dates.append(ptr[0]) for ptr in holidays.US(years = 2012 ).items() : dates.append(ptr[0]) holiday_count=[] for index, row in final_data.iterrows() : dat = final_data['Date'][index] dt=[] for i in range(0,5): dt.append(dat - datetime.timedelta(days = i)) for i in range(1,3): dt.append(dat + datetime.timedelta(days = i)) count = 0 for date in dates: if date in dt: count +=1 holiday_count.append(count) return holiday_count<define_variables>
file_path = "/kaggle/input/digit-recognizer/train.csv" X_train = pd.read_csv(file_path) y_train = X_train.label X_train = X_train.drop(columns = ["label"]) file_path = "/kaggle/input/digit-recognizer/test.csv" X_test = pd.read_csv(file_path) X_test = np.array(X_test )
Digit Recognizer
8,708,408
def holiday_in_week_test(final_data): dates =[] for ptr in holidays.US(years = 2010 ).items() : dates.append(ptr[0]) for ptr in holidays.US(years = 2011 ).items() : dates.append(ptr[0]) for ptr in holidays.US(years = 2012 ).items() : dates.append(ptr[0]) for ptr in holidays.US(years = 2013 ).items() : dates.append(ptr[0]) holiday_count=[] for index, row in final_data.iterrows() : dat = final_data['Date'][index] dt=[] for i in range(0,5): dt.append(dat - datetime.timedelta(days = i)) for i in range(1,3): dt.append(dat + datetime.timedelta(days = i)) count = 0 for date in dates: if date in dt: count +=1 holiday_count.append(count) return holiday_count<feature_engineering>
X_train = np.array(X_train) X_test = np.array(X_test )
Digit Recognizer
8,708,408
final_data_train=markdown_imputation(final_data_train) final_data_train=weekly_sales_imputation(final_data_train) final_data_train,list1=train_temp_bins(final_data_train) final_data_train=split(final_data_train) final_data_train['diff_from_christmas'] = final_data_train.apply(days_from_christmas_for_train, axis=1) final_data_train['days_from_thanksgiving'] = final_data_train.apply(days_from_thanksgiving_for_train, axis=1) final_data_train['IsHoliday_bins'] = final_data_train.apply(holiday_type, axis=1) final_data_train=holiday_label(final_data_train) final_data_train=type_label(final_data_train) final_data_train['Holidays'] = np.array(holiday_in_week_train(final_data_train))<feature_engineering>
X_train = X_train /255 X_test = X_test / 255
Digit Recognizer
8,708,408
final_data_test=markdown_imputation(final_data_test) final_data_test=test_temp_bins(final_data_test,list1) final_data_test=split(final_data_test) final_data_test['diff_from_christmas'] = final_data_test.apply(days_from_christmas_for_test, axis=1) final_data_test['days_from_thanksgiving'] = final_data_test.apply(days_from_thanksgiving_for_test, axis=1) final_data_test['IsHoliday_bins'] = final_data_test.apply(holiday_type, axis=1) final_data_test=holiday_label(final_data_test) final_data_test=type_label(final_data_test) final_data_test['Holidays'] = np.array(holiday_in_week_test(final_data_test))<drop_column>
y_train = to_categorical(y_train, num_classes = 10 )
Digit Recognizer
8,708,408
final_data_train=final_data_train.reset_index(drop=True )<data_type_conversions>
random_seed = 2 X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1, random_state=random_seed)
Digit Recognizer
8,708,408
final_data_train=final_data_train[['Store','Dept','IsHoliday','Size','Week','Type','Year','Weekly_Sales','Holidays','Day']] final_data_test=final_data_test[['Store','Dept','IsHoliday','Size','Week','Type','Year','Holidays','Day']] final_data_train['IsHoliday']=final_data_train['IsHoliday'].astype('bool') final_data_test['IsHoliday']=final_data_test['IsHoliday'].astype('bool') final_data_train['Type']=final_data_train['Type'].astype('int') final_data_test['Type']=final_data_test['Type'].astype('int') y = final_data_train['Weekly_Sales'] X = final_data_train.drop(['Weekly_Sales'], axis=1) rf_Model = RandomForestRegressor(n_estimators= 140,max_depth=27,n_jobs = -1) rf_Model.fit(X, y) y_hat= rf_Model.predict(final_data_test )<define_variables>
from keras.models import Sequential from keras.layers import Dense, Conv2D, Flatten from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score from keras.models import Model
Digit Recognizer
8,708,408
sample_submission['Weekly_Sales'] = list(y_hat )<save_to_csv>
!pip install -q efficientnet model = Sequential() model.add(Conv2D(64, kernel_size=5, activation="relu", padding = "same", input_shape=(28,28,1))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Conv2D(256, kernel_size=3, activation="relu", padding = "same")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization()) model.add(Conv2D(128, kernel_size=3, activation="relu", padding = "same")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization()) model.add(Conv2D(128, kernel_size=3, activation="relu", padding = "same")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization()) model.add(Conv2D(256, kernel_size=3, activation="relu", padding = "same")) model.add(Flatten()) model.add(Dense(10, activation="softmax"))
Digit Recognizer
8,708,408
sample_submission.to_csv('submission.csv',index = False )<set_options>
optimizer = keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )
Digit Recognizer
8,708,408
%%HTML <style type="text/css"> div.h1 { background-color: color: white; padding: 8px; padding-right: 300px; font-size: 35px; max-width: 1500px; margin: auto; margin-top: 50px; } div.h2 { background-color: color: white; padding: 8px; padding-right: 300px; font-size: 35px; max-width: 1500px; margin: auto; margin-top: 50px; } </style><set_options>
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
8,708,408
warnings.filterwarnings('ignore' )<import_modules>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
8,708,408
from sklearn.preprocessing import QuantileTransformer<load_from_csv>
epochs = 40 batch_size = 50
Digit Recognizer
8,708,408
train_features = pd.read_csv('../input/lish-moa/train_features.csv') train_targets_scored = pd.read_csv('../input/lish-moa/train_targets_scored.csv') train_targets_nonscored = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv') test_features = pd.read_csv('../input/lish-moa/test_features.csv') sample_submission = pd.read_csv('../input/lish-moa/sample_submission.csv')<define_variables>
datagen = keras.preprocessing.image.ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
8,708,408
GENES = [col for col in train_features.columns if col.startswith('g-')] CELLS = [col for col in train_features.columns if col.startswith('c-')]<normalization>
history = model.fit_generator(datagen.flow(X_train,y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,y_val), verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction] )
Digit Recognizer
8,708,408
for col in(GENES + CELLS): transformer = QuantileTransformer(n_quantiles=100, random_state=0, output_distribution="normal") vec_len = len(train_features[col].values) vec_len_test = len(test_features[col].values) raw_vec = train_features[col].values.reshape(vec_len, 1) transformer.fit(raw_vec) train_features[col] = transformer.transform(raw_vec ).reshape(1, vec_len)[0] test_features[col] = transformer.transform(test_features[col].values.reshape(vec_len_test, 1)).reshape(1, vec_len_test)[0]<set_options>
results = model.predict(X_test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label")
Digit Recognizer
8,708,408
def seed_everything(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything(seed=42 )<sort_values>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )
Digit Recognizer
8,708,408
train_targets_scored.sum() [1:].sort_values()<concatenate>
file_path = "/kaggle/input/digit-recognizer/train.csv" X_train = pd.read_csv(file_path) y_train = X_train.label X_train = X_train.drop(columns = ["label"]) file_path = "/kaggle/input/digit-recognizer/test.csv" X_test = pd.read_csv(file_path) X_test = np.array(X_test )
Digit Recognizer
8,708,408
n_comp = 50 data = pd.concat([pd.DataFrame(train_features[GENES]), pd.DataFrame(test_features[GENES])]) data2 =(PCA(n_components=n_comp, random_state=42 ).fit_transform(data[GENES])) train2 = data2[:train_features.shape[0]]; test2 = data2[-test_features.shape[0]:] train2 = pd.DataFrame(train2, columns=[f'pca_G-{i}' for i in range(n_comp)]) test2 = pd.DataFrame(test2, columns=[f'pca_G-{i}' for i in range(n_comp)]) train_features = pd.concat(( train_features, train2), axis=1) test_features = pd.concat(( test_features, test2), axis=1 )<concatenate>
X_train = np.array(X_train) X_test = np.array(X_test )
Digit Recognizer
8,708,408
n_comp = 15 data = pd.concat([pd.DataFrame(train_features[CELLS]), pd.DataFrame(test_features[CELLS])]) data2 =(PCA(n_components=n_comp, random_state=42 ).fit_transform(data[CELLS])) train2 = data2[:train_features.shape[0]]; test2 = data2[-test_features.shape[0]:] train2 = pd.DataFrame(train2, columns=[f'pca_C-{i}' for i in range(n_comp)]) test2 = pd.DataFrame(test2, columns=[f'pca_C-{i}' for i in range(n_comp)]) train_features = pd.concat(( train_features, train2), axis=1) test_features = pd.concat(( test_features, test2), axis=1 )<create_dataframe>
X_train = X_train /255 X_test = X_test / 255
Digit Recognizer
8,708,408
var_thresh = VarianceThreshold(threshold=0.5) data = train_features.append(test_features) data_transformed = var_thresh.fit_transform(data.iloc[:, 4:]) train_features_transformed = data_transformed[ : train_features.shape[0]] test_features_transformed = data_transformed[-test_features.shape[0] : ] train_features = pd.DataFrame(train_features[['sig_id','cp_type','cp_time','cp_dose']].values.reshape(-1, 4),\ columns=['sig_id','cp_type','cp_time','cp_dose']) train_features = pd.concat([train_features, pd.DataFrame(train_features_transformed)], axis=1) test_features = pd.DataFrame(test_features[['sig_id','cp_type','cp_time','cp_dose']].values.reshape(-1, 4),\ columns=['sig_id','cp_type','cp_time','cp_dose']) test_features = pd.concat([test_features, pd.DataFrame(test_features_transformed)], axis=1) train_features <merge>
y_train = to_categorical(y_train, num_classes = 10 )
Digit Recognizer
8,708,408
train = train_features.merge(train_targets_scored, on='sig_id') train = train[train['cp_type']!='ctl_vehicle'].reset_index(drop=True) test = test_features[test_features['cp_type']!='ctl_vehicle'].reset_index(drop=True) target = train[train_targets_scored.columns]<drop_column>
random_seed = 2 X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1, random_state=random_seed)
Digit Recognizer
8,708,408
train = train.drop('cp_type', axis=1) test = test.drop('cp_type', axis=1 )<feature_engineering>
from keras.models import Sequential from keras.layers import Dense, Conv2D, Flatten from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score from keras.models import Model
Digit Recognizer
8,708,408
<feature_engineering>
!pip install -q efficientnet model = Sequential() model.add(Conv2D(64, kernel_size=5, activation="relu", padding = "same", input_shape=(28,28,1))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Conv2D(256, kernel_size=3, activation="relu", padding = "same")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization()) model.add(Conv2D(128, kernel_size=3, activation="relu", padding = "same")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization()) model.add(Conv2D(128, kernel_size=3, activation="relu", padding = "same")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization()) model.add(Conv2D(256, kernel_size=3, activation="relu", padding = "same")) model.add(Flatten()) model.add(Dense(10, activation="softmax"))
Digit Recognizer
8,708,408
<create_dataframe>
optimizer = keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )
Digit Recognizer
8,708,408
<drop_column>
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
8,708,408
target_cols = target.drop('sig_id', axis=1 ).columns.values.tolist()<split>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
8,708,408
folds = train.copy() mskf = MultilabelStratifiedKFold(n_splits=5) for f,(t_idx, v_idx)in enumerate(mskf.split(X=train, y=target)) : folds.loc[v_idx, 'kfold'] = int(f) folds['kfold'] = folds['kfold'].astype(int) folds<categorify>
epochs = 40 batch_size = 50
Digit Recognizer
8,708,408
class MoADataset: def __init__(self, features, targets): self.features = features self.targets = targets def __len__(self): return(self.features.shape[0]) def __getitem__(self, idx): dct = { 'x' : torch.tensor(self.features[idx, :], dtype=torch.float), 'y' : torch.tensor(self.targets[idx, :], dtype=torch.float) } return dct class TestDataset: def __init__(self, features): self.features = features def __len__(self): return(self.features.shape[0]) def __getitem__(self, idx): dct = { 'x' : torch.tensor(self.features[idx, :], dtype=torch.float) } return dct <train_model>
datagen = keras.preprocessing.image.ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
8,708,408
def train_fn(model, optimizer, scheduler, loss_fn, dataloader, device): model.train() final_loss = 0 for data in dataloader: optimizer.zero_grad() inputs, targets = data['x'].to(device), data['y'].to(device) outputs = model(inputs) loss = loss_fn(outputs, targets) loss.backward() optimizer.step() scheduler.step() final_loss += loss.item() final_loss /= len(dataloader) return final_loss def valid_fn(model, loss_fn, dataloader, device): model.eval() final_loss = 0 valid_preds = [] for data in dataloader: inputs, targets = data['x'].to(device), data['y'].to(device) outputs = model(inputs) loss = loss_fn(outputs, targets) final_loss += loss.item() valid_preds.append(outputs.sigmoid().detach().cpu().numpy()) final_loss /= len(dataloader) valid_preds = np.concatenate(valid_preds) return final_loss, valid_preds def inference_fn(model, dataloader, device): model.eval() preds = [] for data in dataloader: inputs = data['x'].to(device) with torch.no_grad() : outputs = model(inputs) preds.append(outputs.sigmoid().detach().cpu().numpy()) preds = np.concatenate(preds) return preds <normalization>
history = model.fit_generator(datagen.flow(X_train,y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,y_val), verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction] )
Digit Recognizer
8,708,408
class Model(nn.Module): def __init__(self, num_features, num_targets, hidden_size): super(Model, self ).__init__() self.batch_norm1 = nn.BatchNorm1d(num_features) self.dropout1 = nn.Dropout(0.2) self.dense1 = nn.utils.weight_norm(nn.Linear(num_features, hidden_size)) self.batch_norm2 = nn.BatchNorm1d(hidden_size) self.dropout2 = nn.Dropout(0.5) self.dense2 = nn.utils.weight_norm(nn.Linear(hidden_size, hidden_size)) self.batch_norm3 = nn.BatchNorm1d(hidden_size) self.dropout3 = nn.Dropout(0.5) self.dense3 = nn.utils.weight_norm(nn.Linear(hidden_size, num_targets)) def forward(self, x): x = self.batch_norm1(x) x = self.dropout1(x) x = F.relu(self.dense1(x)) x = self.batch_norm2(x) x = self.dropout2(x) x = F.relu(self.dense2(x)) x = self.batch_norm3(x) x = self.dropout3(x) x = self.dense3(x) return x<feature_engineering>
results = model.predict(X_test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label")
Digit Recognizer
8,708,408
def process_data(data): data = pd.get_dummies(data, columns=['cp_time','cp_dose']) return data<define_variables>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )
Digit Recognizer
3,233,000
feature_cols = [c for c in process_data(folds ).columns if c not in target_cols] feature_cols = [c for c in feature_cols if c not in ['kfold','sig_id']] len(feature_cols )<define_variables>
train_dir = ".. /input/train.csv" test_dir = ".. /input/test.csv" df = pd.read_csv(train_dir) df.info()
Digit Recognizer
3,233,000
DEVICE =('cuda' if torch.cuda.is_available() else 'cpu') EPOCHS = 25 BATCH_SIZE = 128 LEARNING_RATE = 1e-3 WEIGHT_DECAY = 1e-5 NFOLDS = 5 EARLY_STOPPING_STEPS = 10 EARLY_STOP = False num_features=len(feature_cols) num_targets=len(target_cols) hidden_size=1024 <prepare_x_and_y>
labels = df["label"].values.tolist() labels = np.array(labels) n_classes = len(set(labels)) labels = keras.utils.to_categorical(labels )
Digit Recognizer
3,233,000
def run_training(fold, seed): seed_everything(seed) train = process_data(folds) test_ = process_data(test) trn_idx = train[train['kfold'] != fold].index val_idx = train[train['kfold'] == fold].index train_df = train[train['kfold'] != fold].reset_index(drop=True) valid_df = train[train['kfold'] == fold].reset_index(drop=True) x_train, y_train = train_df[feature_cols].values, train_df[target_cols].values x_valid, y_valid = valid_df[feature_cols].values, valid_df[target_cols].values train_dataset = MoADataset(x_train, y_train) valid_dataset = MoADataset(x_valid, y_valid) trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True) validloader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=False) model = Model( num_features=num_features, num_targets=num_targets, hidden_size=hidden_size, ) model.to(DEVICE) optimizer = torch.optim.Adam(model.parameters() , lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY) scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.1, div_factor=1e3, max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(trainloader)) loss_fn = nn.BCEWithLogitsLoss() early_stopping_steps = EARLY_STOPPING_STEPS early_step = 0 oof = np.zeros(( len(train), target.iloc[:, 1:].shape[1])) best_loss = np.inf for epoch in range(EPOCHS): train_loss = train_fn(model, optimizer,scheduler, loss_fn, trainloader, DEVICE) print(f"FOLD: {fold}, EPOCH: {epoch}, train_loss: {train_loss}") valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE) print(f"FOLD: {fold}, EPOCH: {epoch}, valid_loss: {valid_loss}") if valid_loss < best_loss: best_loss = valid_loss oof[val_idx] = valid_preds torch.save(model.state_dict() , f"FOLD{fold}_.pth") elif(EARLY_STOP == True): early_step += 1 if(early_step >= early_stopping_steps): break x_test = test_[feature_cols].values testdataset = TestDataset(x_test) testloader = torch.utils.data.DataLoader(testdataset, batch_size=BATCH_SIZE, shuffle=False) model = Model( num_features=num_features, num_targets=num_targets, hidden_size=hidden_size, ) model.load_state_dict(torch.load(f"FOLD{fold}_.pth")) model.to(DEVICE) predictions = np.zeros(( len(test_), target.iloc[:, 1:].shape[1])) predictions = inference_fn(model, testloader, DEVICE) return oof, predictions <categorify>
df_train = df.drop(["label"], axis = 1) data = df_train.values.tolist() data = np.array(data) data = data.astype('float32')/255.0
Digit Recognizer
3,233,000
def run_k_fold(NFOLDS, seed): oof = np.zeros(( len(train), len(target_cols))) predictions = np.zeros(( len(test), len(target_cols))) for fold in range(NFOLDS): oof_, pred_ = run_training(fold, seed) predictions += pred_ / NFOLDS oof += oof_ return oof, predictions<categorify>
print("Training data shape = " + str(data.shape)) print("Training labels shape = " + str(labels.shape))
Digit Recognizer
3,233,000
SEED = [0, 1, 2, 3 ,4, 5] oof = np.zeros(( len(train), len(target_cols))) predictions = np.zeros(( len(test), len(target_cols))) for seed in SEED: oof_, predictions_ = run_k_fold(NFOLDS, seed) oof += oof_ / len(SEED) predictions += predictions_ / len(SEED) train[target_cols] = oof test[target_cols] = predictions <merge>
gen_model = Sequential() gen_model.add(Dense(784, activation = 'relu', input_shape =(784,))) gen_model.add(Dense(512, activation = 'relu')) gen_model.add(Dense(264, activation = 'relu')) gen_model.add(Dense(10, activation = 'softmax')) print("STANDARD NEURAL NETWORK MODEL :-") gen_model.summary()
Digit Recognizer
3,233,000
valid_results = train_targets_scored.drop(columns=target_cols ).merge(train[['sig_id']+target_cols], on='sig_id', how='left' ).fillna(0) y_true = train_targets_scored[target_cols].values y_pred = valid_results[target_cols].values score = 0 for i in range(len(target_cols)) : score_ = log_loss(y_true[:, i], y_pred[:, i]) score += score_ / target.shape[1] print("CV log_loss: ", score) <save_to_csv>
gen_model.compile(loss = 'categorical_crossentropy', optimizer = keras.optimizers.Adadelta() , metrics = ['accuracy'] )
Digit Recognizer
3,233,000
sub = sample_submission.drop(columns=target_cols ).merge(test[['sig_id']+target_cols], on='sig_id', how='left' ).fillna(0) sub.to_csv('submission.csv', index=False )<define_variables>
gen_model_hist = gen_model.fit(data, labels, batch_size = 32, epochs = 5, validation_split = 0.1 )
Digit Recognizer
3,233,000
TEST_MODE = False MOCK_MODE = False SKIP_ADD_FEATURE = False LOCAL = False SAINT_PICKLE_PATH = "../input/saint-final" SAINT_MODEL_PATH = "../input/saint-final/saintv113.pth"<import_modules>
del gen_model, gen_model_hist gc.collect()
Digit Recognizer
3,233,000
if MOCK_MODE: <import_modules>
X_train_cnn = data.reshape(len(data), 28, 28, 1 )
Digit Recognizer
3,233,000
if LOCAL is False: <load_pretrained>
cnn_model = Sequential() cnn_model.add(Conv2D(32, kernel_size = [3,3], activation = 'relu', input_shape =(28,28,1))) cnn_model.add(Conv2D(64, kernel_size = [3,3], activation = 'relu')) cnn_model.add(BatchNormalization()) cnn_model.add(MaxPool2D(pool_size = [2,2], strides = 2)) cnn_model.add(Conv2D(128, kernel_size = [3,3], activation = 'relu')) cnn_model.add(MaxPool2D(pool_size = [2,2], strides = 2)) cnn_model.add(Flatten()) cnn_model.add(Dense(512, activation = 'relu')) cnn_model.add(Dense(10, activation = 'softmax')) print("CONVOLUTIONAL NEURAL NETWORK MODEL :-") cnn_model.summary()
Digit Recognizer
3,233,000
def load_group() : group = None for i in range(10): with open(f"{SAINT_PICKLE_PATH}/{i}groupv1.pickle", "rb")as f: if group is None: group = pickle.load(f) else: group = pd.concat([group, pickle.load(f)]) gc.collect() gc.collect() return group<groupby>
cnn_model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy']) cnn_model_hist = cnn_model.fit(X_train_cnn, labels, batch_size = 32, epochs = 6, validation_split = 0.1 )
Digit Recognizer
3,233,000
group = load_group()<define_variables>
del cnn_model, cnn_model_hist gc.collect()
Digit Recognizer
3,233,000
SEED = 123 def seed_everything(seed): random.seed(seed) np.random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) seed_everything(SEED )<categorify>
data_aug = ImageDataGenerator(featurewise_center = False, samplewise_center = False, featurewise_std_normalization = False, samplewise_std_normalization = False, zca_whitening = False, rotation_range = 10, zoom_range = 0.1, width_shift_range = 0.1, height_shift_range = 0.1, horizontal_flip = False, vertical_flip = False )
Digit Recognizer
3,233,000
MAX_SEQ = 100 n_skill = 13523 n_part = 7 n_et = 300 n_lt = 1441 n_lsi = 128 DROPOUT = 0.1 EMBED_SIZE = 256 BATCH_SIZE = 256 def future_mask(seq_length): future_mask = np.triu(np.ones(( seq_length, seq_length)) , k=1 ).astype('bool') return torch.from_numpy(future_mask) class FFN(nn.Module): def __init__(self, state_size = 200, forward_expansion = 1, bn_size=MAX_SEQ - 1): super(FFN, self ).__init__() self.state_size = state_size self.lr1 = nn.Linear(state_size, forward_expansion * state_size) self.relu = nn.ReLU() self.lr2 = nn.Linear(forward_expansion * state_size, state_size) self.dropout = nn.Dropout(0.2) def forward(self, x): x = self.relu(self.lr1(x)) x = self.lr2(x) return self.dropout(x) class Encoder(nn.Module): def __init__(self, n_skill, n_pt=7, n_lsi=n_lsi, max_seq=100, embed_dim=128, dropout = DROPOUT, forward_expansion = 1, num_layers=1, heads = 8): super(Encoder, self ).__init__() self.n_skill, self.embed_dim = n_skill, embed_dim self.embedding = nn.Embedding(n_skill + 1, embed_dim) self.pos_embedding = nn.Embedding(max_seq, embed_dim) self.dropout = nn.Dropout(dropout) self.n_pt = n_pt self.pt_embedding = nn.Embedding(n_pt + 1, embed_dim) self.n_lsi = n_lsi self.lsi_embedding = nn.Embedding(n_lsi + 1, embed_dim) self.layer_normal = nn.LayerNorm(embed_dim) def forward(self, x, question_ids, pt_x, lsi_x): device = x.device x = self.embedding(x) pt_x = self.pt_embedding(pt_x) lsi_x = self.lsi_embedding(lsi_x) pos_id = torch.arange(x.size(1)).unsqueeze(0 ).to(device) pos_x = self.pos_embedding(pos_id) x = self.dropout(self.layer_normal(x + pos_x + pt_x + lsi_x)) return x class Decoder(nn.Module): def __init__(self, n_et=n_et, n_lt=n_lt, max_seq=100, embed_dim=128, dropout = DROPOUT, forward_expansion = 1, num_layers=1, heads = 8): super(Decoder, self ).__init__() self.embed_dim = embed_dim self.pos_embedding = nn.Embedding(max_seq, embed_dim) self.dropout = nn.Dropout(dropout) self.n_response = 2 self.n_et = n_et self.n_lt = n_lt self.response_embedding = nn.Embedding(self.n_response + 2, embed_dim) self.et_embedding = nn.Embedding(self.n_et + 2, embed_dim) self.lt_embedding = nn.Embedding(self.n_lt + 2, embed_dim) self.layer_normal = nn.LayerNorm(embed_dim) def forward(self, c, et, lt): device = c.device c = self.response_embedding(c) pos_id = torch.arange(c.size(1)).unsqueeze(0 ).to(device) pos_x = self.pos_embedding(pos_id) et = self.et_embedding(et) lt = self.lt_embedding(lt) x = self.dropout(self.layer_normal(c + pos_x + et + lt)) return x class SAINTModel(nn.Module): def __init__(self, n_skill, n_pt=7, n_lsi=n_lsi, n_et=n_et, n_lt=n_lt, max_seq=100, embed_dim=128, dropout = DROPOUT, forward_expansion = 1, enc_layers=3, dec_layers=3, heads = 8): super(SAINTModel, self ).__init__() self.encoder = Encoder(n_skill, n_pt, n_lsi, max_seq, embed_dim, dropout, forward_expansion, num_layers=enc_layers) self.decoder = Decoder(n_et, n_lt, max_seq, embed_dim, dropout, forward_expansion, num_layers=dec_layers) self.transformer = torch.nn.Transformer(embed_dim, heads, enc_layers, dec_layers, embed_dim*forward_expansion, dropout) self.ffn = FFN(embed_dim, forward_expansion = forward_expansion) self.pred = nn.Linear(embed_dim, 1) self.layer_normal = nn.LayerNorm(embed_dim) self.dropout = nn.Dropout(dropout) def forward(self, x, question_ids, pt_x, lsi_x, c, et, lt): ex = self.encoder(x, question_ids, pt_x, lsi_x) dx = self.decoder(c, et, lt) ex = ex.permute(1, 0, 2) dx = dx.permute(1, 0, 2) device = ex.device mask = future_mask(ex.size(0)).to(device) att_output = self.transformer(ex, dx, 
src_mask=mask, tgt_mask=mask) att_output = self.layer_normal(att_output) att_output = att_output.permute(1, 0, 2) x = self.ffn(att_output) x = self.dropout(self.layer_normal(x + att_output)) x = self.pred(x) return x.squeeze(-1) class TestDataset(Dataset): def __init__(self, samples, test_df, n_skill=n_skill, n_et=n_et, n_lt=n_lt, n_pt=7, max_seq=MAX_SEQ): super(TestDataset, self ).__init__() self.samples = samples self.user_ids = [x for x in test_df["user_id"].unique() ] self.test_df = test_df self.n_skill = n_skill self.n_et = n_et self.n_lt = n_lt self.n_pt = n_pt self.max_seq = max_seq def __len__(self): return self.test_df.shape[0] def __getitem__(self, index): test_info = self.test_df.iloc[index] user_id = test_info["user_id"] target_id_new = test_info["content_id"] part_new = test_info["part"] lsi_topic_new = test_info["lsi_topic"] content_id_seq = np.zeros(self.max_seq, dtype=int) answered_correctly_seq = np.zeros(self.max_seq, dtype=int) c_seq = np.zeros(self.max_seq, dtype=int) lag_seq = np.zeros(self.max_seq, dtype=int) elapsed_time_seq = np.zeros(self.max_seq, dtype=int) part_seq = np.zeros(self.max_seq, dtype=int) lsi_topic_seq = np.zeros(self.max_seq, dtype=int) if user_id in self.samples.index: content_id, answered_correctly, lag, elapsed_time, part, lsi_topic = self.samples[user_id] seq_len = len(content_id) if seq_len >= self.max_seq: content_id_seq[:] = content_id[-self.max_seq:] answered_correctly_seq[:] = answered_correctly[-self.max_seq:] c_seq[:] = answered_correctly[-self.max_seq:] + 1 lag_seq[:] = lag[-self.max_seq:] + 1 elapsed_time_seq[:] = elapsed_time[-self.max_seq:] + 1 part_seq[:] = part[-self.max_seq:] lsi_topic_seq[:] = lsi_topic[-self.max_seq:] else: content_id_seq[-seq_len:] = content_id c_seq[-seq_len:] = answered_correctly[:] + 1 lag_seq[-seq_len:] = lag[:] + 1 elapsed_time_seq[-seq_len:] = elapsed_time[:] + 1 part_seq[-seq_len:] = part lsi_topic_seq[-seq_len:] = lsi_topic target_id = content_id_seq label = answered_correctly_seq x = np.append(content_id_seq.copy() [1:], target_id_new) pt_x = np.append(part_seq.copy() [1:], part_new) lsi_x = np.append(lsi_topic_seq.copy() [1:], lsi_topic_new) c = c_seq.copy() et = elapsed_time_seq.copy() lt = lag_seq.copy() return x, target_id, pt_x, lsi_x, c, et, lt<choose_model_class>
models_ensemble = [] for i in range(7): model = Sequential() model.add(Conv2D(32, kernel_size = [3,3], activation = 'relu', input_shape =(28,28,1))) model.add(Conv2D(64, kernel_size = [3,3], activation = 'relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size = [2,2], strides = 2)) model.add(Conv2D(128, kernel_size = [3,3], activation = 'relu')) model.add(MaxPool2D(pool_size = [2,2], strides = 2)) model.add(Flatten()) model.add(Dense(512, activation = 'relu')) model.add(Dense(10, activation = 'softmax')) model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy']) models_ensemble.append(model )
Digit Recognizer
3,233,000
def create_model() : return SAINTModel(n_skill, n_pt=7, n_lsi=n_lsi, n_et=n_et, n_lt=n_lt, max_seq=MAX_SEQ, embed_dim=EMBED_SIZE, forward_expansion=1, enc_layers=2, dec_layers=2, heads=8, dropout=0.1 )<load_pretrained>
model_histories = [] i = 1 for model in models_ensemble: xtrain, xtest, ytrain, ytest = train_test_split(X_train_cnn, labels, test_size = 0.07) print("Model " +str(i)+ " : ",end="") model_history = model.fit_generator(data_aug.flow(xtrain, ytrain, batch_size = 64), epochs = 1, verbose = 1, validation_data =(xtest, ytest), steps_per_epoch = xtrain.shape[0]) model_histories.append(model_history) i += 1
Digit Recognizer
3,233,000
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") saint_model = create_model() try: saint_model.load_state_dict(torch.load(SAINT_MODEL_PATH)) except: saint_model.load_state_dict(torch.load(SAINT_MODEL_PATH, map_location='cpu')) saint_model.to(device) saint_model.eval()<categorify>
testdata = pd.read_csv(test_dir) testdata = testdata.values.tolist() testdata = np.array(testdata) testdata_reshaped = testdata.reshape(testdata.shape[0], 28, 28, 1) testdata_reshaped = testdata_reshaped.astype('float')/255.0 def make_predictions_final_model(curr_model): prediction_array = curr_model.predict_on_batch(testdata_reshaped) predictions = [np.argmax(i)for i in prediction_array] return predictions
Digit Recognizer
3,233,000
class PredictEnv: def __init__(self, folds_path, folds): self.conn = sqlite3.connect(':memory:') self.c = self.conn.cursor() self.setup_folds(folds_path, folds) def setup_folds(self, folds_path, folds): self.c.executescript(f ).fetchone() self.group_num = 0 self.records_remaining = self.c.execute('SELECT COUNT(*)FROM b_records' ).fetchone() [0] self.df_users = pd.read_sql('SELECT * FROM b_users', self.conn) def iter_test(self): next_correct = '[]' next_responses = '[]' while self.records_remaining: self.c.execute(f ).fetchone() self.conn.commit() df_b = pd.read_sql(f , self.conn) if len(df_b): df_b['group_num'] = self.group_num df_b['prior_group_answers_correct'] = None df_b.at[0, 'prior_group_answers_correct'] = next_correct df_b['prior_group_responses'] = None df_b.at[0, 'prior_group_responses'] = next_responses next_correct = f'[{(", " ).join(df_b.answered_correctly.astype(str)) }]' next_responses = f'[{(", " ).join(df_b.user_answer.astype(str)) }]' del df_b['answered_correctly'] del df_b['user_answer'] df_b = df_b.set_index('group_num') df_p = df_b[['row_id']].copy() df_p['answered_correctly'] = 0.5 self.records_remaining -= len(df_b) yield df_b, df_p self.group_num += 1 def predict(self, df_pred): if(df_pred.answered_correctly == -1 ).any() : raise else: df_pred.reset_index().to_sql('predictions', self.conn, if_exists='append', index=False) def get_predictions(self): df_preds = pd.read_sql(, self.conn) self.score = roc_auc_score(df_preds.y_true, df_preds.y_pred) print(f'ROC AUC Score: {self.score:0.4f}') return df_preds<predict_on_test>
predictions_ensemble = [] for model in models_ensemble: curr_predictions = make_predictions_final_model(model) predictions_ensemble.append(curr_predictions) prediction_per_image = [] for i in range(len(predictions_ensemble[0])) : temppred = [predictions_ensemble[0][i], predictions_ensemble[1][i], predictions_ensemble[2][i], predictions_ensemble[3][i], predictions_ensemble[4][i], predictions_ensemble[5][i], predictions_ensemble[6][i]] prediction_per_image.append(temppred) prediction_per_image = np.array(prediction_per_image) modes = stats.mode(prediction_per_image, axis = 1) final_predictions = [] for i in modes[0]: final_predictions.append(i[0] )
Digit Recognizer
3,233,000
if MOCK_MODE: FOLDS = Path('.. /input/riiid-folds/riiid.db') env = PredictEnv(FOLDS, [0, 1]) iter_test = env.iter_test() else: env = riiideducation.make_env() iter_test = env.iter_test() set_predict = env.predict if TEST_MODE and type(iter_test)!= list: list_df = [] for itr,(df_test, sample_prediction_df)in enumerate(iter_test): df_test.loc[:, 'answered_correctly'] = 0.5 list_df.append(( df_test, None)) env.predict(df_test.loc[df_test['content_type_id'] == 0, ['row_id', 'answered_correctly']]) iter_test = list_df print("TEST_MODE MODE ENABLED") else: print("TEST_MODE MODE DISABLED" )<define_variables>
final_csv = [] csv_title = ['ImageId', 'Label'] final_csv.append(csv_title) for i in range(len(final_predictions)) : image_id = i + 1 label = final_predictions[i] temp = [image_id, label] final_csv.append(temp) print(len(final_csv)) with open('submission_csv_aug.csv', 'w')as file: writer = csv.writer(file) writer.writerows(final_csv) file.close()
Digit Recognizer
4,950,920
TARGET = "answered_correctly"<categorify>
data_train = pd.read_csv("../input/train.csv") data_train.head()
Digit Recognizer
4,950,920
QUESTION_FEATURES = ["part", "question_id", "lsi_topic"] question_file = "../input/question-features-0102/questions.pickle" questions_df = pd.read_pickle(question_file)[QUESTION_FEATURES] questions_df["lsi_topic"] = questions_df["lsi_topic"].fillna(-1) questions_df["lsi_topic"] = questions_df["lsi_topic"].map(dict(map(lambda x:(x[1],x[0]), enumerate(questions_df["lsi_topic"].fillna(-1).unique())))) questions_df["lsi_topic"] = questions_df["lsi_topic"] + 1<set_options>
y_train = data_train['label'] x_train = data_train.drop(labels = ["label"], axis = 1) print('Shape of whole data for training', data_train.shape) print('x_train:', x_train.shape) print('y_train:', y_train.shape) %matplotlib inline def convert_to_grid(x_input): N, H, W = x_input.shape grid_size = int(ceil(sqrt(N))) grid_height = H * grid_size + 1 *(grid_size - 1) grid_width = W * grid_size + 1 *(grid_size - 1) grid = np.zeros(( grid_height, grid_width)) + 255 next_idx = 0 y0, y1 = 0, H for y in range(grid_size): x0, x1 = 0, W for x in range(grid_size): if next_idx < N: img = x_input[next_idx] low, high = np.min(img), np.max(img) grid[y0:y1, x0:x1] = 255.0 *(img - low)/(high - low) next_idx += 1 x0 += W + 1 x1 += W + 1 y0 += H + 1 y1 += H + 1 return grid examples = np.array(x_train.iloc[:81] ).reshape(81, 28, 28) print(examples.shape) fig = plt.figure() grid = convert_to_grid(examples) plt.imshow(grid.astype('uint8'), cmap='gray') plt.axis('off') plt.gcf().set_size_inches(7, 7) plt.title('Some examples of training data', fontsize=24) plt.show() plt.close() fig.savefig('training_examples.png') plt.close()
Digit Recognizer
4,950,920
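convert_to_grid above tiles N flattened images into a near-square mosaic with a one-pixel gap between cells. A quick check of the size arithmetic it uses (values chosen to match the 81-example preview; purely illustrative):

from math import ceil, sqrt

N, H, W = 81, 28, 28
grid_size = int(ceil(sqrt(N)))                      # 9 images per side
grid_height = H * grid_size + 1 * (grid_size - 1)   # 28 * 9 + 8 = 260 pixels
grid_width = W * grid_size + 1 * (grid_size - 1)    # 260 pixels
print(grid_size, grid_height, grid_width)           # 9 260 260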
warnings.filterwarnings(action="ignore" )<load_pretrained>
x_test = pd.read_csv("../input/test.csv") x_test.head()
Digit Recognizer
4,950,920
last_row_file = ".. /input/saint-final/last_row_states.pickle" with open(last_row_file, "rb")as f: last_row_states = pickle.load(f) def inference(iter_test, TARGET, saint_model, questions_df): previous_test_df = None for(test_df, sample_prediction_df)in tqdm(iter_test): if previous_test_df is not None: previous_test_df[TARGET] = eval(test_df["prior_group_answers_correct"].iloc[0]) previous_test_df = previous_test_df[previous_test_df.content_type_id == False] previous_test_df["user_to_remove"] = False previous_test_df["prior_question_elapsed_time"] =(previous_test_df["prior_question_elapsed_time"] // 100 ).clip(0,300) previous_test_df['current_container_size'] = previous_test_df[['user_id', 'task_container_id']].groupby(['user_id', 'task_container_id'])['task_container_id'].transform('size') if TEST_MODE: previous_test_df.to_csv("./previous_test_df1.csv") common_users = set(previous_test_df["user_id"].unique() ).intersection(set(last_row_states.keys())) last_records = pd.DataFrame([ {**last_row_states[user_id], **{'user_id': user_id}} for user_id in common_users ]) if len(last_records)!= 0: previous_test_df = pd.concat([last_records, previous_test_df] ).reset_index(drop=True) previous_test_df = previous_test_df.sort_values(['user_id','timestamp'], ascending=True ).reset_index(drop = True) previous_test_df['last_timestamp'] = previous_test_df[['user_id', 'timestamp']].groupby(['user_id'])['timestamp'].shift(1, fill_value=0) previous_test_df['last_timestamp'] = previous_test_df[['user_id', 'task_container_id', 'last_timestamp']].groupby(['user_id', 'task_container_id'])['last_timestamp'].transform('first') previous_test_df['last_task_container_size'] = previous_test_df[['user_id', 'current_container_size']].groupby(['user_id'])['current_container_size'].shift(1, fill_value=0) previous_test_df['last_task_container_size'] = previous_test_df[['user_id', 'task_container_id', 'last_task_container_size']].groupby(['user_id', 'task_container_id'])['last_task_container_size'].transform('first') if TEST_MODE: previous_test_df.to_csv("./previous_test_df2.csv") previous_test_df['lag'] = previous_test_df['timestamp'] - previous_test_df['last_timestamp'] -(previous_test_df['prior_question_elapsed_time'] * previous_test_df['last_task_container_size']) previous_test_df["lag"] =(previous_test_df["lag"]//(100*60)).clip(0, 1440) if TEST_MODE: previous_test_df.to_csv("./previous_test_df3.csv") if TEST_MODE: previous_test_df.to_csv("./previous_test_df4.csv") previous_test_df = previous_test_df[previous_test_df["user_to_remove"] != True] if TEST_MODE: previous_test_df.to_csv("./previous_test_df5.csv") prev_group = previous_test_df[[ 'user_id', 'content_id', 'answered_correctly', 'lag', 'prior_question_elapsed_time', 'part', 'lsi_topic' ]].groupby('user_id' ).apply(lambda r:( r['content_id'].values, r['answered_correctly'].values, r['lag'].values, r['prior_question_elapsed_time'].values, r['part'].values, r['lsi_topic'].values)) for prev_user_id in prev_group.index: if prev_user_id in group.index: group[prev_user_id] =( np.append(group[prev_user_id][0], prev_group[prev_user_id][0])[-MAX_SEQ:], np.append(group[prev_user_id][1], prev_group[prev_user_id][1])[-MAX_SEQ:], np.append(group[prev_user_id][2], prev_group[prev_user_id][2])[-MAX_SEQ:], np.append(group[prev_user_id][3], prev_group[prev_user_id][3])[-MAX_SEQ:], np.append(group[prev_user_id][4], prev_group[prev_user_id][4])[-MAX_SEQ:], np.append(group[prev_user_id][5], prev_group[prev_user_id][5])[-MAX_SEQ:], ) else: group[prev_user_id] =( 
prev_group[prev_user_id][0], prev_group[prev_user_id][1], prev_group[prev_user_id][2], prev_group[prev_user_id][3], prev_group[prev_user_id][4], prev_group[prev_user_id][5], ) users_to_cache = previous_test_df.groupby("user_id" ).last() user_ids = users_to_cache.index timestamps = users_to_cache["timestamp"].values content_ids = users_to_cache["content_id"].values content_type_ids = users_to_cache["content_type_id"].values task_container_ids = users_to_cache["task_container_id"].values prior_question_elapsed_times = users_to_cache["prior_question_elapsed_time"].values prior_question_had_explanations = users_to_cache["prior_question_had_explanation"].values current_container_sizes = previous_test_df['current_container_size'] for row in zip( user_ids, timestamps, content_ids, content_type_ids, task_container_ids, prior_question_elapsed_times, prior_question_had_explanations, current_container_sizes, ): user_id = row[0] timestamp = row[1] content_id = row[2] content_type_id = row[3] task_container_id = row[4] prior_question_elapsed_time = row[5] prior_question_had_explanation = row[6] current_container_size = row[7] row = { "user_id": user_id, "timestamp": timestamp, "content_id": content_id, "content_type_id": content_type_id, "task_container_id": task_container_id, "prior_question_elapsed_time": prior_question_elapsed_time, "prior_question_had_explanation": prior_question_had_explanation, "current_container_size": current_container_size, } row["user_to_remove"] = True last_row_states[row["user_id"]] = row test_df = pd.merge(test_df, questions_df[QUESTION_FEATURES], left_on = 'content_id', right_on = 'question_id', how = 'left') test_df['prior_question_had_explanation'] = test_df.prior_question_had_explanation.fillna(False ).astype('int8') test_df['prior_question_elapsed_time'].fillna(0, inplace = True) previous_test_df = test_df.copy() test_df = test_df[test_df['content_type_id'] == 0].reset_index(drop = True) test_df[TARGET] = 0 test_df = test_df[test_df.content_type_id == False] test_dataset = TestDataset(group, test_df) test_dataloader = DataLoader(test_dataset, batch_size=51200, shuffle=False) outs = [] for item in test_dataloader: x = item[0].to(device ).long() target_id = item[1].to(device ).long() pt_x = item[2].to(device ).long() lsi_x = item[3].to(device ).long() c = item[4].to(device ).long() et = item[5].to(device ).long() lt = item[6].to(device ).long() if TEST_MODE: pd.DataFrame({ "x": x[0], "target_id": target_id[0].detach().numpy() , "pt_x": pt_x[0].detach().numpy() , "lsi_x": lsi_x[0].detach().numpy() , "c": c[0].detach().numpy() , "et": et[0].detach().numpy() , "lt": lt[0].detach().numpy() , } ).to_csv("input.csv") with torch.no_grad() : output = saint_model(x, target_id, pt_x, lsi_x, c, et, lt) outs.extend(torch.sigmoid(output)[:, -1].view(-1 ).data.cpu().numpy()) pred = np.array(outs) test_df[TARGET] = pred if not TEST_MODE and not MOCK_MODE: set_predict(test_df[['row_id', TARGET]]) print('Job Done') inference(iter_test, TARGET, saint_model, questions_df )<define_variables>
def pre_process_mnist(x_train, y_train, x_test): x_train = x_train / 255.0 x_test = x_test / 255.0 batch_mask = list(range(41000, 42000)) x_validation = x_train[batch_mask] y_validation = y_train[batch_mask] batch_mask = list(range(41000)) x_train = x_train[batch_mask] y_train = y_train[batch_mask] mean_image = np.mean(x_train, axis=0) std = np.std(x_train, axis=0) for j in range(28): for i in range(28): if std[i, j, 0] == 0: std[i, j, 0] = 1.0 x_train -= mean_image x_validation -= mean_image x_test -= mean_image x_train /= std x_validation /= std x_test /= std y_train = to_categorical(y_train, num_classes=10) y_validation = to_categorical(y_validation, num_classes=10) d_processed = {'x_train': x_train, 'y_train': y_train, 'x_validation': x_validation, 'y_validation': y_validation, 'x_test': x_test} return d_processed data = pre_process_mnist(x_train, y_train, x_test) for i, j in data.items() : print(i + ':', j.shape)
Digit Recognizer
4,950,920
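pre_process_mnist standardizes every pixel position with statistics computed on the training split only and reuses them for validation and test. A small sketch of that per-pixel transform on toy arrays (shapes and values are illustrative):

import numpy as np

rng = np.random.default_rng(0)
toy_train = rng.random((100, 28, 28, 1)).astype('float32')
toy_test = rng.random((10, 28, 28, 1)).astype('float32')

mean_image = toy_train.mean(axis=0)
std_image = toy_train.std(axis=0)
std_image[std_image == 0] = 1.0                   # guard against division by zero for constant pixels

toy_train = (toy_train - mean_image) / std_image
toy_test = (toy_test - mean_image) / std_image    # test data uses the *training* statistics
print(round(float(toy_train.mean()), 4), round(float(toy_train.std()), 4))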
global_test = 2 <import_modules>
model = Sequential() model.add(Conv2D(64, kernel_size=7, padding='same', activation='relu', input_shape=(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size=9, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size=7, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(128, kernel_size=5, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(128, kernel_size=7, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(128, kernel_size=5, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(256, kernel_size=3, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(256, kernel_size=5, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(256, kernel_size=3, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Dense(10, activation='softmax')) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
Digit Recognizer
4,950,920
import numpy as np import pandas as pd import glob import riiideducation import matplotlib.pyplot as plt from tqdm import tqdm from catboost import CatBoostClassifier from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler <set_options>
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 **(x + epochs)) epochs = 50 h = model.fit(data['x_train'], data['y_train'], batch_size=100, epochs = epochs, validation_data =(data['x_validation'], data['y_validation']), callbacks=[annealer], verbose=1)
Digit Recognizer
4,950,920
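The LearningRateScheduler above implements lr(x) = 1e-3 * 0.95 ** (x + epochs); because epochs is set to 50 before fit() runs, the schedule starts near 7.7e-5 rather than 1e-3. A quick check of a few values (assuming epochs = 50, as in the cell above):

epochs = 50
schedule = lambda x: 1e-3 * 0.95 ** (x + epochs)
for epoch in [0, 1, 10, 49]:
    print(epoch, schedule(epoch))
# 0 -> ~7.69e-05, 1 -> ~7.31e-05, 10 -> ~4.61e-05, 49 -> ~6.2e-06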
pd.options.display.max_rows = 100 pd.options.display.max_columns = 100<data_type_conversions>
print("Epochs={0:d}, Train accuracy={1:.5f}, Validation accuracy={2:.5f}".format(epochs, max(h.history['acc']), max(h.history['val_acc'])))
Digit Recognizer
4,950,920
def df_to_np(df, filter_lectures:bool, convert_answers:bool): tmstmp =(df['timestamp']/3600000 ).to_numpy(dtype = np.float32) userid = df['user_id'].to_numpy() ctntid = df['content_id'].to_numpy() ctnttp = df['content_type_id'].to_numpy() contnr = df['task_container_id'].to_numpy() pqtime = np.nan_to_num(df['prior_question_elapsed_time']\ .to_numpy(dtype = np.float32), nan = float32m1) pqexpl = df['prior_question_had_explanation']\ .to_numpy(dtype = np.int8, na_value = 1) if convert_answers: usrans = df['user_answer'].to_numpy() anscor = df['answered_correctly'].to_numpy() if filter_lectures: f = ctnttp == int8_0 if convert_answers: return tmstmp[f], userid[f], ctntid[f], ctnttp[f],\ contnr[f], pqtime[f], pqexpl[f], usrans[f], anscor[f] else: return tmstmp[f], userid[f], ctntid[f], ctnttp[f],\ contnr[f], pqtime[f], pqexpl[f] else: return tmstmp, userid, ctntid, ctnttp, contnr, pqtime, pqexpl, usrans, anscor<load_from_csv>
model.save('my_model.h5' )
Digit Recognizer
4,950,920
<load_from_csv><EOS>
results = model.predict(data['x_test']) results = np.argmax(results, axis=1) submission = pd.read_csv('../input/sample_submission.csv') submission['Label'] = results submission.to_csv('sample_submission.csv', index=False)
Digit Recognizer
3,081,290
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<data_type_conversions>
import pandas as pd import numpy as np import matplotlib.pyplot as plt from keras import layers from keras import models from keras import optimizers from keras.utils import to_categorical from sklearn.utils import shuffle from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split from skimage import img_as_ubyte from skimage.transform import resize from scipy.ndimage.interpolation import rotate
Digit Recognizer
3,081,290
def get_cor_table(): max_neigbrs = 1000 global cor_table map1 = np.load('../input/content-correlation-100to300/ctnt_map.npy') map2 = np.load('../input/content-correlation/ctnt_map.npy') cor1 = np.load('../input/content-correlation-100to300/result.npy') cor2 = np.load('../input/content-correlation/result.npy') cor = (cor2[map2[:max_content],:,:][:,map2[:max_content],:].astype(np.uint32) + cor1[map1[:max_content],:,:][:,map1[:max_content],:].astype(np.uint32)) size = np.log(np.log(cor[:,:,0].sum(axis = 1)+1)) size = size/size.max() /10 corrs = (cor[:,:,0]+5)/(cor[:,:,1]*1.7+cor[:,:,0]+10)+size cor_table = corrs.argsort(axis = 1)[:,-max_neigbrs:].astype(np.int32)
train_data = pd.read_csv('/kaggle/input/train.csv') test_data = pd.read_csv('/kaggle/input/test.csv' )
Digit Recognizer
3,081,290
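get_cor_table keeps, for every content id, the indices of the most strongly correlated other contents by sorting each row and taking the last max_neigbrs columns. A toy illustration of that row-wise argsort selection (matrix values are made up):

import numpy as np

corrs = np.array([[0.1, 0.9, 0.3],
                  [0.8, 0.2, 0.5],
                  [0.4, 0.6, 0.7]], dtype=np.float32)

top_k = 2
# argsort is ascending, so the last top_k columns hold the indices of the largest correlations per row.
neighbours = corrs.argsort(axis=1)[:, -top_k:]
print(neighbours)
# [[2 1]
#  [2 0]
#  [1 2]]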
def get_content_answer_shares() : global ca_shares_all columns = ['user_id', 'content_id', 'content_type_id', 'user_answer', 'answered_correctly'] df = get_train_large(t_part=99, columns=columns) df = df.loc[df.content_type_id == 0, df.columns != 'content_type_id'] ca_shares_all = pd.pivot_table(df, values='answered_correctly', index='content_id', columns='user_answer', aggfunc='count', fill_value=0 ).to_numpy() +1 ca_shares_all =(ca_shares_all.T/ca_shares_all.sum(axis = 1)).T.astype(np.float32 )<groupby>
X_model = train_data.drop('label', axis=1) y_model = train_data['label'].copy() Y_finish = test_data print('size train data:', X_model.shape) print('size train labels:', y_model.shape) print('size finish test data:', Y_finish.shape )
Digit Recognizer
3,081,290
def get_content_first_answer_mean() : global ctnt_fam columns = ['user_id', 'new_order', 'answered_correctly', 'content_id', 'content_type_id'] df = get_train_large(t_part = 99, columns = columns) df = df\ .loc[df.content_type_id==0, df.columns!='content_type_id']\ .sort_values(by = 'new_order') df = df.groupby(['user_id','content_id'] ).first() df = df.groupby('content_id' ).answered_correctly.mean().sort_index() ctnt_fam = df.to_numpy(dtype = np.float32 )<normalization>
def img_rotate(df_x, angle): change_img = np.empty([df_x.shape[0], df_x.shape[1]]) for i, image in enumerate(df_x.values): img = rotate(image.reshape(28, 28), angle, cval=0, reshape=False, order=0) change_img[i] = img.ravel() return pd.DataFrame(data=change_img, columns=df_x.columns) def img_zoom(df_x, scale): if(scale > 0.9)or(scale < -0.9): raise Exception('scale values must be between -0.9 and 0.9') if scale < 0: change_img = np.empty([df_x.shape[0], df_x.shape[1]]) for i, image in enumerate(df_x.values): number_around = round(28*abs(scale)/2) img = image.reshape(28, 28 ).astype('float32' ).copy() img = np.pad(img, number_around, mode='constant') img = resize(img,(28,28), anti_aliasing=False, mode='constant', cval=0, order=0) change_img[i] = img.ravel().astype('int64') return pd.DataFrame(data=change_img, columns=df_x.columns) elif scale >= 0: change_img = np.empty([df_x.shape[0], df_x.shape[1]]) for i, image in enumerate(df_x.values): number_slice = round(28*abs(scale)/2) img = image.reshape(28, 28 ).astype('float32' ).copy() img = img[number_slice:28-number_slice, number_slice:28-number_slice] img = resize(img,(28,28), anti_aliasing=False, mode='constant', cval=0, order=0) change_img[i] = img.ravel().astype('int64') return pd.DataFrame(data=change_img, columns=df_x.columns)
Digit Recognizer
3,081,290
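A minimal usage sketch of the augmentation helpers above on a single flattened image; the column names and random pixel values are illustrative, and the full training frame is augmented a few cells below:

import numpy as np
import pandas as pd

one_image = pd.DataFrame(np.random.randint(0, 256, size=(1, 784)),
                         columns=['pixel' + str(i) for i in range(784)])

rotated = img_rotate(one_image, angle=11)      # rotate 11 degrees about the centre
zoomed_out = img_zoom(one_image, scale=-0.3)   # pad then resize back: content appears smaller
zoomed_in = img_zoom(one_image, scale=0.2)     # crop then resize back: content appears larger
print(rotated.shape, zoomed_out.shape, zoomed_in.shape)   # each stays (1, 784)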
def get_ctnt_enc() : global ctnt_enc qestn_tagsmap_ohe = np.zeros(( len(qestn_tagsmap), 189), dtype = np.bool) for i,j in enumerate(qestn_tagsmap): for k in j: qestn_tagsmap_ohe[i,k] = True tags_comps = StandardScaler().fit_transform( PCA(n_components=3, random_state=0 ).fit_transform(qestn_tagsmap_ohe) ) corr_comps = StandardScaler().fit_transform( PCA(n_components=9, random_state=0 ).fit_transform(cor_table) ) comb_comps = np.concatenate([tags_comps,corr_comps], axis = 1) ctnt_enc = PCA(n_components=1, random_state=0)\ .fit_transform(comb_comps ).astype(np.float32 ).ravel()<load_pretrained>
X_train, X_test, y_train, y_test = train_test_split(X_model, y_model, test_size=0.2 )
Digit Recognizer
3,081,290
def get_train_small(t_part:int): all_files = glob.glob('../input/riiid-parquets-v5/df_*') read_files = [file for file in all_files if file.endswith('_'+str(t_part))] df = pd.read_parquet(read_files[0]) return df
X_train_add = X_train.append(img_zoom(X_train, 0.2)) X_train_add = X_train_add.append(img_zoom(X_train, -0.3)) X_train_add = X_train_add.append(img_rotate(X_train, 11)) X_train_add = X_train_add.append(img_rotate(X_train, -11)) y_train_add = y_train.append(y_train) y_train_add = y_train_add.append(y_train) y_train_add = y_train_add.append(y_train) y_train_add = y_train_add.append(y_train) X_train = X_train_add.copy() y_train = y_train_add.copy() X_train, y_train = shuffle(X_train, y_train )
Digit Recognizer
3,081,290
def get_train_large(t_part:int, columns:list): all_files = glob.glob('../input/riiid-parquets-v5/df_*') read_files = [file for file in all_files if not file.endswith('_'+str(t_part))] df = pd.concat([pd.read_parquet(file, columns = columns) for file in read_files]) return df
X_train = X_train.values.reshape(X_train.shape[0], 28, 28 ,1) X_train = X_train.astype('float32')/ 255 X_test = X_test.values.reshape(X_test.shape[0], 28, 28, 1) X_test = X_test.astype('float32')/ 255 Y_finish = Y_finish.values.reshape(Y_finish.shape[0], 28, 28 ,1) Y_finish = Y_finish.astype('float32')/ 255
Digit Recognizer
3,081,290
def get_train_groups(t_part:int): df = get_train_small(t_part) groups = [] for i in np.arange(0, 10000, dtype = np.int16): group = df.loc[df.new_order == i].reset_index(drop = True) groups.append(group) return groups<data_type_conversions>
def build_model() : model = models.Sequential() model.add(layers.Conv2D(32,(3,3), activation='relu', input_shape=(28,28,1))) model.add(layers.MaxPooling2D(( 2,2))) model.add(layers.Dropout(0.12)) model.add(layers.Conv2D(64,(3,3), activation='relu')) model.add(layers.MaxPooling2D(( 2,2))) model.add(layers.Dropout(0.12)) model.add(layers.Conv2D(64,(3,3), activation='relu')) model.add(layers.Flatten()) model.add(layers.Dropout(0.12)) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(10, activation='softmax')) opt = optimizers.Adam(lr=0.0015, beta_1=0.9, beta_2=0.99, epsilon=None, decay=0.0, amsgrad=False) model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy']) return model
Digit Recognizer
3,081,290
def get_arrays_and_lists() : global next_uplace,\ au_ctntid,\ a_userid,\ lu_seq,\ lu_seq_part,\ au_anshar,\ au_ctshar,\ user_map,\ au_tmstmp_prv au_ctntid = np.zeros(( max_users, max_content, 3), dtype = np.int8) a_userid = np.zeros(( max_users, 2), dtype = np.int16) au_anshar = np.zeros(( max_users, 2), dtype = np.float32) au_ctshar = np.zeros(( max_users, 2), dtype = np.float32) user_map = np.zeros(np.iinfo(np.int32 ).max,dtype = np.int32) next_uplace = np.int32(1) au_tmstmp_prv = np.zeros(( max_users,3), dtype = np.float32) lu_seq = [[] for _ in range(max_users)] lu_seq_part = [[[],[],[],[],[],[],[]] for _ in range(max_users)] <categorify>
y_train = to_categorical(y_train, 10) y_test = to_categorical(y_test, 10) cnn = build_model() cnn.fit(X_train, y_train, epochs=4, batch_size=64 )
Digit Recognizer
3,081,290
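to_categorical converts the integer labels to one-hot rows before training; a tiny check of what it produces (using the keras.utils import already present above):

from keras.utils import to_categorical

print(to_categorical([0, 3, 9], 10))
# [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
#  [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
#  [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]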
def update_user_map(unique_users): global next_uplace for u in unique_users: if user_map[u] == int32_0: user_map[u] = next_uplace next_uplace += int32_1<feature_engineering>
test_loss, test_acc = cnn.evaluate(X_test, y_test) test_acc
Digit Recognizer
3,081,290
def update_arrays(df): tmstmp,userid,ctntid,ctnttp,contnr,pqtime,pqexpl,usrans,anscor = df_to_np(df,False,True) for r in range(len(df)) : user_ = user_map[userid[r]] if tmstmp[r] > au_tmstmp_prv[user_,0]: au_tmstmp_prv[user_,2] = au_tmstmp_prv[user_,1] au_tmstmp_prv[user_,1] = au_tmstmp_prv[user_,0] au_tmstmp_prv[user_,0] = tmstmp[r] if ctnttp[r] == int8_0: lsu = lu_seq[user_] lsup = lu_seq_part[user_][qestn_partmap[ctntid[r]]] bndl_ = qestn_bndlmap[ctntid[r]] ctnt_ = ctntid[r] if len(lsu)>m: lsu.pop(0) if len(lsup)>m: lsup.pop(0) au_ctntid[user_,ctnt_,1] += int8_1 au_ctntid[user_,ctnt_,2] = usrans[r] if anscor[r] == int8_1: a_userid[user_,0] += int16_1 au_ctntid[user_,ctnt_,0] = int8_1 lsu.append(True) lsup.append(True) au_anshar[user_, 0] += ca_shares_all[ctnt_,usrans[r]] au_ctshar[user_, 0] += ctnt_fam[ctnt_] else: a_userid[user_,1] += int16_1 au_ctntid[user_,ctnt_,0] = int8_0 lsu.append(False) lsup.append(False) au_anshar[user_, 1] += ca_shares_all[ctnt_,usrans[r]] au_ctshar[user_, 1] += ctnt_fam[ctnt_] <categorify>
predict_test = cnn.predict_classes(X_test) y_correct = np.argmax(y_test, axis=1) correct_idx = np.nonzero(predict_test==y_correct) incorrect_idx = np.nonzero(predict_test!=y_correct )
Digit Recognizer
3,081,290
def get_features(df, is_test:bool): if is_test: tmstmp,userid,ctntid,ctnttp,contnr,pqtime,pqexpl=\ df_to_np(df,True,False) else: tmstmp,userid,ctntid,ctnttp,contnr,pqtime,pqexpl,usrans,anscor=\ df_to_np(df,True,True) user = user_map[userid] part = qestn_partmap[ctntid] userid_ctntid_ = au_ctntid[user,ctntid] userid_ = a_userid[user,:] userid_avg_ =(userid_[:,0]/(userid_[:,0]+userid_[:,1]+int16_1)).astype(np.float32) cp_ = ac_pqexpl[ctntid,pqexpl,:] ctntid_pqexpl_avg_ =(cp_[:,0]/(cp_[:,0]+cp_[:,1]+int32_1)).astype(np.float32) uanshar_ = au_anshar[user, :] uanshar_fls =(uanshar_[:,1]/(userid_[:,1]+e)).astype(np.float32) uctshar_ = au_ctshar[user, :] uctshar_slf =(uctshar_[:,0]/(uctshar_[:,0]+uctshar_[:,1]+e)).astype(np.float32) uctshar_cor =(uctshar_[:,0]/(userid_[:,0]+e)).astype(np.float32) uctshar_fls =(uctshar_[:,1]/(userid_[:,1]+e)).astype(np.float32) correlation_ids = cor_table[ctntid] neigh = au_ctntid[user.reshape(-1,1),correlation_ids,:] all_ans_cnt = np.count_nonzero(neigh[:,:,1],axis = 1 ).astype(np.int16) cor_ans_cnt = np.count_nonzero(neigh[:,:,0],axis = 1 ).astype(np.int16) fls_ans_cnt = all_ans_cnt - cor_ans_cnt neigh_ca_shrs_all = ca_shares_all[correlation_ids,neigh[:,:,2]]*(neigh[:,:,1]!=int8_0) cor_shrs_all =(( neigh_ca_shrs_all*(neigh[:,:,0]==int8_1)).sum(axis = 1)/ (cor_ans_cnt+e)).astype(np.float32) fls_shrs_all =(( neigh_ca_shrs_all*(neigh[:,:,0]==int8_0)).sum(axis = 1)/ (fls_ans_cnt+e)).astype(np.float32) lu_seq_ = [lu_seq[u] for u in user] lst_m_avg = np.array( [x.count(True)/(len(x)+e)for x in lu_seq_], dtype = np.float32) lst_s_avg = np.array( [x[-s:].count(True)/(len(x[-s:])+e)for x in lu_seq_], dtype = np.float32) lu_seq_part_ = [lu_seq_part[u][part[_]] for _, u in enumerate(user)] lst_part_m_avg = np.array( [x.count(True)/(len(x)+e)for x in lu_seq_part_], dtype = np.float32) lst_part_s_avg = np.array( [x[-s:].count(True)/(len(x[-s:])+e)for x in lu_seq_part_], dtype = np.float32) X = pd.DataFrame({ 'part':part, 'prior_explanation':pqexpl, 'prior_elapsed_time':pqtime, 'content':ctntid, 'ctntent_encoded':ctnt_enc[ctntid], 'task_container':contnr, 'time_to_cont_1':tmstmp - au_tmstmp_prv[user,0], 'time_to_cont_3':tmstmp - au_tmstmp_prv[user,2], 'time_cont1_to_cont2':au_tmstmp_prv[user,0] - au_tmstmp_prv[user,1], 'time_cont2_to_cont3':au_tmstmp_prv[user,1] - au_tmstmp_prv[user,2], 'user_content_attempts':userid_ctntid_[:,1], 'user_content_last_1':userid_ctntid_[:,0], 'user_part_last_m_avg':lst_part_m_avg, 'user_part_last_s_avg':lst_part_s_avg, 'user_last_m_avg':lst_m_avg, 'user_last_s_avg':lst_s_avg, 'content_explanation_avg':ctntid_pqexpl_avg_, 'content_first_answer_avg':ctnt_fam[ctntid], 'content_avg_time':ctnt_mtime[ctntid], 'user_relative_content_avg':uctshar_slf, 'user_true_content_avg':uctshar_cor, 'user_false_content_avg':uctshar_fls, 'user_false_answer_avg':uanshar_fls, 'neighbor_content_true_shares':cor_shrs_all, 'neighbor_content_false_shares':fls_shrs_all, }) if is_test: return X else: return X, anscor <init_hyperparams>
target_names = ["Class {}".format(i)for i in range(10)] print(classification_report(y_correct, predict_test, target_names=target_names))
Digit Recognizer
3,081,290
%%time uint8_0 = np.uint8(0) uint8_1 = np.uint8(1) uint16_0 = np.uint16(0) uint16_1 = np.uint16(1) int8_0 = np.int8(0) int8_1 = np.int8(1) int16_0 = np.int16(0) int16_1 = np.int16(1) int32_0 = np.int32(0) int32_1 = np.int32(1) float32m1 = np.float32(-1) max_users = 450000 max_content = 13523 m = 100 s = 20 e = 0.1 question_maps() lecture_maps() get_cor_table() get_content_answer_shares() get_ac_pqexpl() get_content_first_answer_mean() get_ctnt_enc() ctnt_mtime = np.load('../input/question-duration/question_mean_time.npy')
predict = cnn.predict_classes(Y_finish )
Digit Recognizer
3,081,290
%%time if global_test == 1: for i in tqdm(range(10)) : X = [] y = [] get_arrays_and_lists() groups = get_train_groups(i) for df in groups: update_user_map(df.user_id.unique()) X_, y_ = get_features(df,False) X.append(X_) y.append(y_) update_arrays(df) del(groups) X = pd.concat(X) y = np.concatenate(y) X.to_parquet('X_'+str(i)) np.save('y_'+str(i), y )<load_pretrained>
df_out = pd.DataFrame({'ImageId': range(1, len(predict)+1), 'Label': predict} )
Digit Recognizer
3,081,290
<split><EOS>
df_out.to_csv('mnist_cnn.csv', index=False, header=True )
Digit Recognizer
2,858,410
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<feature_engineering>
%matplotlib inline import os import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (5.0, 4.0) plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' print(os.listdir("../input")) np.random.seed(2)
Digit Recognizer
2,858,410
%%time if global_test == 2: old_df = None for(new_df, sample)in iter_test: if old_df is not None: old_df['user_answer'] = np.array( [int(x)for x in new_df.iloc[0,9][1:-1].split(', ')], dtype = np.int8) old_df['answered_correctly'] = np.array( [int(x)for x in new_df.iloc[0,8][1:-1].split(', ')], dtype = np.int8) update_arrays(old_df) old_df = new_df.iloc[:,:8].copy() update_user_map(new_df.user_id.unique()) X = get_features(new_df, True) sample['answered_correctly'] =( model1.predict_proba(X)[:,1]/2 + model2.predict_proba(X)[:,1]/2 ) env.predict(sample )<import_modules>
def convert_to_one_hot(Y, C): Y = np.eye(C)[Y.reshape(-1)] return Y
Digit Recognizer
2,858,410
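convert_to_one_hot relies on fancy-indexing an identity matrix; a short check of the trick on a toy label vector:

import numpy as np

Y = np.array([2, 0, 1])
one_hot = np.eye(3)[Y.reshape(-1)]
print(one_hot)
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]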
import pandas as pd import numpy as np import gc import pickle import psutil import joblib import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader import sqlite3<define_search_space>
def read_csv(filename): X, Y = [], [] test = pd.read_csv(filename) if "train.csv" in filename: Y = test.iloc[:, 0].values Y = convert_to_one_hot(Y, 10) X = test.iloc[:, 1:785].values else: X = test.iloc[:, 0:784].values return X, Y
Digit Recognizer
2,858,410
c1, c1_2, c2, c3 , c4 = 0.175, 0.075, 0.25, 0.25, 0.25<init_hyperparams>
def write_csv(filename, predictions): my_submission = pd.DataFrame({'ImageId': range(1, predictions.shape[0]+1), 'Label': predictions}) my_submission.to_csv(filename, index=False)
Digit Recognizer