Dataset columns:
kernel_id: int64, values 24.2k to 23.3M
prompt: string, lengths 8 to 1.85M
completion: string, lengths 1 to 182k
comp_name: string, lengths 5 to 57
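Each record below pairs a notebook cell (prompt, suffixed with a tag naming the operation it performs) with the cell that follows it (completion), together with the source kernel_id and the competition name (comp_name). A minimal sketch of loading and sanity-checking a local copy of such a table is given here; the Parquet file name and the use of pandas are assumptions made for illustration, not part of the dataset itself.

import pandas as pd

# Hypothetical local copy of the rows shown below; the file name is an assumption.
df = pd.read_parquet("kernel_cells.parquet")

# Sanity-check the advertised schema: kernel_id int64, three string columns.
print(df.dtypes)
print(df["prompt"].str.len().agg(["min", "max"]))       # expected roughly 8 to 1.85M
print(df["completion"].str.len().agg(["min", "max"]))   # expected roughly 1 to 182k
print(df["comp_name"].value_counts().head())            # e.g. "Digit Recognizer"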
10,728,166
train = train[train['age']>0] train.loc[train['NumberOfTime30-59DaysPastDueNotWorse']>20, 'NumberOfTime30-59DaysPastDueNotWorse'] = 6 train.loc[train['NumberOfTimes90DaysLate']>20, 'NumberOfTimes90DaysLate'] = 2 train.loc[train['NumberOfTime60-89DaysPastDueNotWorse']>20, 'NumberOfTime60-89DaysPastDueNotWorse'] = 3<train_model>
y_train = df_train['label'].astype('float32') X_train = df_train.drop(['label'], axis=1 ).astype('int32') X_test = df_test.astype('float32') X_train.shape, y_train.shape, X_test.shape
Digit Recognizer
10,728,166
train["NumberOfDependents"].fillna(train["NumberOfDependents"].mode() [0], inplace=True) mData = train.iloc[:,[6,2,3,4,5,7,8,9,10,11]] train_known = mData[mData.MonthlyIncome.notnull() ].values train_unknown = mData[mData.MonthlyIncome.isnull() ].values train_X = train_known[:,1:] train_y = train_known[:,0] rfr = RandomForestRegressor(random_state=0,n_estimators=200,max_depth=3,n_jobs=-1) rfr.fit(train_X,train_y) predicted_y = rfr.predict(train_unknown[:,1:] ).round(0) train.loc[train.MonthlyIncome.isnull() ,'MonthlyIncome'] = predicted_y <split>
X_train = X_train/255 X_test = X_test/255
Digit Recognizer
10,728,166
train_X = train_2[train_2.columns[2:]] train_y = train_2[train_2.columns[1]] train_X, test_X, train_y, test_y = train_test_split(train_X, train_y, test_size=0.1, random_state=20, stratify=train_y )<predict_on_test>
y_train = to_categorical(y_train, num_classes = 10) y_train.shape
Digit Recognizer
10,728,166
lgbm = LGBMClassifier(max_depth=20,num_leaves=30,learning_rate=0.02,n_estimators=270,feature_fraction=0.7) lgbm.fit(train_X,train_y) pre_y = lgbm.predict_proba(test_X)[:,1]<feature_engineering>
x = tf.Variable(5.0) with tf.GradientTape() as tape: y = x**3
Digit Recognizer
10,728,166
test=pd.read_csv('/kaggle/input/GiveMeSomeCredit/cs-test.csv') test.info() test.loc[test['NumberOfTime30-59DaysPastDueNotWorse']>20, 'NumberOfTime30-59DaysPastDueNotWorse'] = 6 test.loc[test['NumberOfTimes90DaysLate']>20, 'NumberOfTimes90DaysLate'] = 2 test.loc[test['NumberOfTime60-89DaysPastDueNotWorse']>20, 'NumberOfTime60-89DaysPastDueNotWorse'] = 3 test.describe()<find_best_model_class>
dy_dx = tape.gradient(y, x) dy_dx.numpy()
Digit Recognizer
10,728,166
test["NumberOfDependents"].fillna(train["NumberOfDependents"].mode() [0], inplace=True) mData2 = test.iloc[:,[6,2,3,4,5,7,8,9,10,11]] test_known = mData2[mData2.MonthlyIncome.notnull() ].values test_unknown = mData2[mData2.MonthlyIncome.isnull() ].values test_X2 = test_known[:,1:] test_y2 = test_known[:,0] rfr2 = RandomForestRegressor(random_state=0,n_estimators=200,max_depth=3,n_jobs=-1) rfr2.fit(test_X2,test_y2) predicted_y = rfr2.predict(test_unknown[:,1:] ).round(0) test.loc[test.MonthlyIncome.isnull() ,'MonthlyIncome'] = predicted_y<predict_on_test>
w = tf.Variable(tf.random.normal(( 3, 2)) , name='w') b = tf.Variable(tf.zeros(2, dtype=tf.float32), name='b') x = [[1., 2., 3.]] with tf.GradientTape(persistent=True)as tape: y = x @ w + b loss = tf.reduce_mean(y**2)
Digit Recognizer
10,728,166
test2 = test[test.columns[2:]] pre_y2 = lgbm.predict_proba(test2)[:,1]<save_to_csv>
[dl_dw, dl_db] = tape.gradient(loss, [w, b] )
Digit Recognizer
10,728,166
result=pd.read_csv('/kaggle/input/GiveMeSomeCredit/sampleEntry.csv') result['Probability'] = pre_y2 result.to_csv('./submit.csv',index=False) reload = pd.read_csv('./submit.csv') reload<import_modules>
x = tf.constant(3.0) with tf.GradientTape() as g: g.watch(x) with tf.GradientTape() as gg: gg.watch(x) y = x * x dy_dx = gg.gradient(y, x) print(dy_dx.numpy()) d2y_dx2 = g.gradient(dy_dx, x) print(d2y_dx2.numpy() )
Digit Recognizer
10,728,166
import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score,roc_curve, auc from sklearn.ensemble import RandomForestRegressor from lightgbm import LGBMClassifier<load_from_csv>
def build_model(width, height, depth, classes): inputShape =(height, width, depth) chanDim = -1 model = Sequential([ Conv2D(16,(3, 3), padding="same", input_shape=inputShape), Activation("relu"), BatchNormalization(axis=chanDim), MaxPooling2D(pool_size=(2, 2)) , Conv2D(32,(3, 3), padding="same"), Activation("relu"), BatchNormalization(axis=chanDim), Conv2D(32,(3, 3), padding="same"), Activation("relu"), BatchNormalization(axis=chanDim), MaxPooling2D(pool_size=(2, 2)) , Conv2D(64,(3, 3), padding="same"), Activation("relu"), BatchNormalization(axis=chanDim), Conv2D(64,(3, 3), padding="same"), Activation("relu"), BatchNormalization(axis=chanDim), Conv2D(64,(3, 3), padding="same"), Activation("relu"), BatchNormalization(axis=chanDim), MaxPooling2D(pool_size=(2, 2)) , Flatten() , Dense(256), Activation("relu"), BatchNormalization() , Dropout(0.5), Dense(classes), Activation("softmax") ]) return model
Digit Recognizer
10,728,166
train=pd.read_csv('/kaggle/input/GiveMeSomeCredit/cs-training.csv') train.info() train = train[train['age']>0] train.loc[train['NumberOfTime30-59DaysPastDueNotWorse']>20, 'NumberOfTime30-59DaysPastDueNotWorse'] = 6 train.loc[train['NumberOfTimes90DaysLate']>20, 'NumberOfTimes90DaysLate'] = 2 train.loc[train['NumberOfTime60-89DaysPastDueNotWorse']>20, 'NumberOfTime60-89DaysPastDueNotWorse'] = 3 train.describe(percentiles=[.10,.20,.30,.40,.60,.70,.80,.90,.95,.99,.999] )<train_model>
def step(X, y): with tf.GradientTape() as tape: pred = model(X) loss = categorical_crossentropy(y, pred) grads = tape.gradient(loss, model.trainable_variables) opt.apply_gradients(zip(grads, model.trainable_variables))
Digit Recognizer
10,728,166
train["NumberOfDependents"].fillna(train["NumberOfDependents"].mode() [0], inplace=True) mData = train.iloc[:,[6,2,3,4,5,7,8,9,10,11]] train_known = mData[mData.MonthlyIncome.notnull() ].values train_unknown = mData[mData.MonthlyIncome.isnull() ].values train_X = train_known[:,1:] train_y = train_known[:,0] rfr = RandomForestRegressor(random_state=0,n_estimators=200,max_depth=3,n_jobs=-1) rfr.fit(train_X,train_y) predicted_y = rfr.predict(train_unknown[:,1:] ).round(0) train.loc[train.MonthlyIncome.isnull() ,'MonthlyIncome'] = predicted_y <split>
EPOCHS = 50 BS = 32 INIT_LR = 1e-3 model = build_model(28, 28, 1, 10) opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS) numUpdates = int(X_train.shape[0] / BS) for epoch in range(0, EPOCHS): print("[INFO] starting epoch {}/{}...".format(epoch + 1, EPOCHS), end="") sys.stdout.flush() epochStart = time.time() for i in range(0, numUpdates): start = i * BS end = start + BS step(X_train[start:end], y_train[start:end]) epochEnd = time.time() elapsed =(epochEnd - epochStart)/ 60.0 print("took {:.4} minutes".format(elapsed))
Digit Recognizer
10,728,166
train_X = train_2[train_2.columns[2:]] train_y = train_2[train_2.columns[1]] train_X, test_X, train_y, test_y = train_test_split(train_X, train_y, test_size=0.1, random_state=20, stratify=train_y )<predict_on_test>
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True) Image("model.png" )
Digit Recognizer
10,728,166
lgbm = LGBMClassifier(max_depth=20,num_leaves=30,learning_rate=0.02,n_estimators=250,feature_fraction=0.7) lgbm.fit(train_X,train_y) pre_y = lgbm.predict_proba(test_X)[:,1]<feature_engineering>
model.compile(optimizer=opt, loss=categorical_crossentropy,metrics=["acc"])
Digit Recognizer
10,728,166
test=pd.read_csv('/kaggle/input/GiveMeSomeCredit/cs-test.csv') test.info() test.loc[test['NumberOfTime30-59DaysPastDueNotWorse']>20, 'NumberOfTime30-59DaysPastDueNotWorse'] = 6 test.loc[test['NumberOfTimes90DaysLate']>20, 'NumberOfTimes90DaysLate'] = 2 test.loc[test['NumberOfTime60-89DaysPastDueNotWorse']>20, 'NumberOfTime60-89DaysPastDueNotWorse'] = 3 test.describe()<find_best_model_class>
y_pred = model.predict(X_test) y_pred = np.argmax(y_pred,axis=1) my_submission = pd.DataFrame({'ImageId': list(range(1, len(y_pred)+1)) , 'Label': y_pred}) my_submission.to_csv('submission.csv', index=False )
Digit Recognizer
10,720,766
test["NumberOfDependents"].fillna(train["NumberOfDependents"].mode() [0], inplace=True) mData2 = test.iloc[:,[6,2,3,4,5,7,8,9,10,11]] test_known = mData2[mData2.MonthlyIncome.notnull() ].values test_unknown = mData2[mData2.MonthlyIncome.isnull() ].values test_X2 = test_known[:,1:] test_y2 = test_known[:,0] rfr2 = RandomForestRegressor(random_state=0,n_estimators=200,max_depth=3,n_jobs=-1) rfr2.fit(test_X2,test_y2) predicted_y = rfr2.predict(test_unknown[:,1:] ).round(0) test.loc[test.MonthlyIncome.isnull() ,'MonthlyIncome'] = predicted_y<predict_on_test>
hist = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=32), steps_per_epoch=1000, epochs=1000, verbose=2, validation_data=(X_train[:400,:], Y_train[:400,:]), callbacks=[annealer]) final_loss, final_acc = model.evaluate(X_train, Y_train, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc))
Digit Recognizer
10,720,766
test2 = test[test.columns[2:]] pre_y2 = lgbm.predict_proba(test2)[:,1]<save_to_csv>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("MNIST_ver21.csv",index=False )
Digit Recognizer
10,123,364
result=pd.read_csv('/kaggle/input/GiveMeSomeCredit/sampleEntry.csv') result['Probability'] = pre_y2 result.to_csv('./submit.csv',index=False) reload = pd.read_csv('./submit.csv') reload<load_from_csv>
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
10,123,364
train = pd.read_csv('/kaggle/input/GiveMeSomeCredit/cs-training.csv',index_col=0) test = pd.read_csv('/kaggle/input/GiveMeSomeCredit/cs-test.csv',index_col=0) sample = pd.read_csv('/kaggle/input/GiveMeSomeCredit/sampleEntry.csv') train.shape, test.shape, sample.shape<count_missing_values>
X = train.drop("label", axis = 1) y = train["label"]
Digit Recognizer
10,123,364
train.MonthlyIncome.fillna(train_default_dict['MonthlyIncome'][0], inplace=True) test.MonthlyIncome.fillna(train_default_dict['MonthlyIncome'][0], inplace=True) train.NumberOfDependents.fillna(test_default_dict['NumberOfDependents'][1], inplace=True) test.NumberOfDependents.fillna(test_default_dict['NumberOfDependents'][1], inplace=True) train.isnull().sum() , '*'*50, test.isnull().sum() <prepare_x_and_y>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25 )
Digit Recognizer
10,123,364
X_train = train.iloc[:,1:].values y_train = train.iloc[:,0].values X_test = test.iloc[:,1:].values X_train.shape, y_train.shape, X_test.shape<normalization>
X_train = X_train / 255 X_test = X_test / 255 test = test / 255
Digit Recognizer
10,123,364
train_scaler = preprocessing.StandardScaler().fit(X_train) print(train_scaler.mean_ , ' '+'-'*50+' ', train_scaler.scale_) print('='*50) test_scaler = preprocessing.StandardScaler().fit(X_test) print(test_scaler.mean_ , ' '+'-'*50+' ', test_scaler.scale_) <normalization>
y_train = to_categorical(y_train) y_test = to_categorical(y_test )
Digit Recognizer
10,123,364
X_train_scaled = train_scaler.transform(X_train) X_test_scaled = test_scaler.transform(X_test) X_train_scaled.mean(axis=0), X_train_scaled.std(axis=0), X_test_scaled.mean(axis=0), X_test_scaled.std(axis=0) <split>
X_train = X_train.values.reshape(-1, 28, 28, 1) X_test = X_test.values.reshape(-1, 28, 28, 1) test = test.values.reshape(-1, 28, 28, 1 )
Digit Recognizer
10,123,364
X_learn, X_valid, y_learn, y_valid = train_test_split(X_train_scaled, y_train, random_state=0) X_learn.shape, X_valid.shape, y_learn.shape, y_valid.shape<import_modules>
model = Sequential() model.add(Conv2D(64, kernel_size =(3, 3), activation = "relu", padding = "same", input_shape =(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size =(3, 3), activation = "relu", padding = "same")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size =(2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(128, kernel_size =(3, 3), activation = "relu", padding = "same")) model.add(BatchNormalization()) model.add(Conv2D(128, kernel_size =(3, 3), activation = "relu", padding = "same")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size =(2, 2))) model.add(Dropout(0.3)) model.add(Conv2D(256, kernel_size =(3, 3), activation = "relu", padding = "same")) model.add(BatchNormalization()) model.add(Conv2D(256, kernel_size =(3, 3), activation = "relu", padding = "same")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size =(2, 2))) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(512, activation = "relu")) model.add(Dense(10, activation = "softmax")) model.compile(optimizer = tf.keras.optimizers.RMSprop(lr = 0.0001, centered = True, momentum = 0), loss = "categorical_crossentropy", metrics = ["accuracy"]) history = model.fit(X_train, y_train, validation_split = 0.2, epochs = 80, batch_size = 64, shuffle = True, verbose = 0) print("Test score :" + str(model.evaluate(X_test, y_test))) print("") print("Train score :" + str(model.evaluate(X_train, y_train))) print(model.summary() )
Digit Recognizer
10,123,364
from sklearn.metrics import roc_auc_score<choose_model_class>
y_sub = pd.Series(np.argmax(model.predict(test), axis = 1), name = "Label") submission = pd.concat([pd.Series(range(1, 28001), name = "ImageId"), y_sub], axis = 1) submission.to_csv("submission.csv", index = False) submission.head()
Digit Recognizer
9,484,758
estimators = [ ('lgb', lgb.LGBMClassifier(n_estimators=54)) , ('rfc', RandomForestClassifier(n_estimators=200)) , ('mlp', MLPClassifier(hidden_layer_sizes=2)) , ('knn', KNeighborsClassifier(n_neighbors=320, weights='distance', algorithm='auto')) ]<train_model>
%matplotlib inline
Digit Recognizer
9,484,758
best_l, best_pt, maxauc = 0, 'none', 0 for hid_lay_siz in [1,2,3,4,5]: for pass_throu in [True, False]: reg = StackingClassifier( estimators=estimators, final_estimator=MLPClassifier( hidden_layer_sizes=hid_lay_siz, random_state=0 ), passthrough=pass_throu, verbose=3 ) reg.fit(X_learn, y_learn) y_pred = reg.predict_proba(X_valid)[:,1] score = roc_auc_score(y_valid, y_pred) print(score) if score > maxauc: best_l, best_pt, maxauc = hid_lay_siz, pass_throu, score print() print(best_l, best_pt, maxauc) <train_model>
img_rows, img_cols = 28, 28
Digit Recognizer
9,484,758
reg = StackingClassifier( estimators=estimators, final_estimator=MLPClassifier( hidden_layer_sizes=best_l, random_state=0 ), passthrough=best_pt, verbose=3 ) reg.fit(X_train, y_train) y_pred = reg.predict_proba(X_test)[:,1] <load_from_csv>
df_train = pd.read_csv(".. /input/digit-recognizer/train.csv") df_test = pd.read_csv(".. /input/digit-recognizer/test.csv") df_train.head()
Digit Recognizer
9,484,758
sample = pd.read_csv('../input/GiveMeSomeCredit/sampleEntry.csv') sample<save_to_csv>
y_train = df_train["label"] X_train = df_train.drop(labels = ["label"],axis = 1) X_test = df_test X_train /= 255 X_test /= 255 X_train = X_train.values.reshape(X_train.shape[0], img_rows, img_cols, 1) X_test = X_test.values.reshape(X_test.shape[0], img_rows, img_cols, 1) X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=1) y_train = to_categorical(y_train, num_classes=10) y_val = to_categorical(y_val, num_classes=10 )
Digit Recognizer
9,484,758
sample['Probability'] = y_pred sample.to_csv('./submit.csv',index=False) reload = pd.read_csv('./submit.csv') reload <set_options>
batch_size = 128 epochs = 200 lr = 1e-4 rho = 0.7 learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.000001 )
Digit Recognizer
9,484,758
np.set_printoptions(suppress=True) print(tf.__version__ )<load_from_csv>
input_shape =(img_rows, img_cols, 1) num_classes = 10 model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax")) model.compile(loss=categorical_crossentropy, optimizer=RMSprop(learning_rate=lr, rho=rho), metrics=['accuracy'] )
Digit Recognizer
9,484,758
PATH = '../input/google-quest-challenge/' BERT_PATH = '../input/bert-base-uncased-huggingface-transformer/' tokenizer = BertTokenizer.from_pretrained(BERT_PATH+'bert-base-uncased-vocab.txt') MAX_SEQUENCE_LENGTH = 384 df_train = pd.read_csv(PATH+'train.csv') df_test = pd.read_csv(PATH+'test.csv') df_sub = pd.read_csv(PATH+'sample_submission.csv') print('train shape =', df_train.shape) print('test shape =', df_test.shape) output_categories = list(df_train.columns[11:]) input_categories = list(df_train.columns[[1,2,5]]) print(' output categories: \t', output_categories) print(' input categories: \t', input_categories )<categorify>
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_val, y_val)) score = model.evaluate(X_val, y_val, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1] )
Digit Recognizer
9,484,758
def _convert_to_transformer_inputs(title, question, answer, tokenizer, max_sequence_length): def return_id(str1, str2, truncation_strategy, length): inputs = tokenizer.encode_plus(str1, str2, add_special_tokens=True, max_length=length, truncation_strategy=truncation_strategy) input_ids = inputs["input_ids"] input_masks = [1] * len(input_ids) input_segments = inputs["token_type_ids"] padding_length = length - len(input_ids) padding_id = tokenizer.pad_token_id input_ids = input_ids +([padding_id] * padding_length) input_masks = input_masks +([0] * padding_length) input_segments = input_segments +([0] * padding_length) return [input_ids, input_masks, input_segments] input_ids_q, input_masks_q, input_segments_q = return_id( title + ' ' + question, None, 'longest_first', max_sequence_length) input_ids_a, input_masks_a, input_segments_a = return_id( answer, None, 'longest_first', max_sequence_length) return [input_ids_q, input_masks_q, input_segments_q, input_ids_a, input_masks_a, input_segments_a] def compute_input_arrays(df, columns, tokenizer, max_sequence_length): input_ids_q, input_masks_q, input_segments_q = [], [], [] input_ids_a, input_masks_a, input_segments_a = [], [], [] for _, instance in tqdm(df[columns].iterrows()): t, q, a = instance.question_title, instance.question_body, instance.answer ids_q, masks_q, segments_q, ids_a, masks_a, segments_a = \ _convert_to_transformer_inputs(t, q, a, tokenizer, max_sequence_length) input_ids_q.append(ids_q) input_masks_q.append(masks_q) input_segments_q.append(segments_q) input_ids_a.append(ids_a) input_masks_a.append(masks_a) input_segments_a.append(segments_a) return [np.asarray(input_ids_q, dtype=np.int32), np.asarray(input_masks_q, dtype=np.int32), np.asarray(input_segments_q, dtype=np.int32), np.asarray(input_ids_a, dtype=np.int32), np.asarray(input_masks_a, dtype=np.int32), np.asarray(input_segments_a, dtype=np.int32)] def compute_output_arrays(df, columns): return np.asarray(df[columns] )<feature_engineering>
results = model.predict(X_test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
9,484,758
def compute_spearmanr_ignore_nan(trues, preds): rhos = [] for tcol, pcol in zip(np.transpose(trues), np.transpose(preds)) : rhos.append(spearmanr(tcol, pcol ).correlation) return np.nanmean(rhos) def create_model() : q_id = tf.keras.layers.Input(( MAX_SEQUENCE_LENGTH,), dtype=tf.int32) a_id = tf.keras.layers.Input(( MAX_SEQUENCE_LENGTH,), dtype=tf.int32) q_mask = tf.keras.layers.Input(( MAX_SEQUENCE_LENGTH,), dtype=tf.int32) a_mask = tf.keras.layers.Input(( MAX_SEQUENCE_LENGTH,), dtype=tf.int32) q_atn = tf.keras.layers.Input(( MAX_SEQUENCE_LENGTH,), dtype=tf.int32) a_atn = tf.keras.layers.Input(( MAX_SEQUENCE_LENGTH,), dtype=tf.int32) config = BertConfig() config.output_hidden_states = False bert_model = TFBertModel.from_pretrained( BERT_PATH+'bert-base-uncased-tf_model.h5', config=config) q_embedding = bert_model(q_id, attention_mask=q_mask, token_type_ids=q_atn)[0] a_embedding = bert_model(a_id, attention_mask=a_mask, token_type_ids=a_atn)[0] q = tf.keras.layers.GlobalAveragePooling1D()(q_embedding) a = tf.keras.layers.GlobalAveragePooling1D()(a_embedding) x = tf.keras.layers.Concatenate()([q, a]) x = tf.keras.layers.Dropout(0.2 )(x) x = tf.keras.layers.Dense(30, activation='sigmoid' )(x) model = tf.keras.models.Model(inputs=[q_id, q_mask, q_atn, a_id, a_mask, a_atn,], outputs=x) return model<categorify>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("cnn_mnist_datagen.csv",index=False )
Digit Recognizer
9,056,716
outputs = compute_output_arrays(df_train, output_categories) inputs = compute_input_arrays(df_train, input_categories, tokenizer, MAX_SEQUENCE_LENGTH) test_inputs = compute_input_arrays(df_test, input_categories, tokenizer, MAX_SEQUENCE_LENGTH) <split>
from keras.utils.np_utils import to_categorical
Digit Recognizer
9,056,716
gkf = GroupKFold(n_splits=5 ).split(X=df_train.question_body, groups=df_train.question_body) valid_preds = [] test_preds = [] for fold,(train_idx, valid_idx)in enumerate(gkf): if fold in [0, 2]: train_inputs = [inputs[i][train_idx] for i in range(len(inputs)) ] train_outputs = outputs[train_idx] valid_inputs = [inputs[i][valid_idx] for i in range(len(inputs)) ] valid_outputs = outputs[valid_idx] K.clear_session() model = create_model() optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5) model.compile(loss='binary_crossentropy', optimizer=optimizer) model.fit(train_inputs, train_outputs, epochs=3, batch_size=6) valid_preds.append(model.predict(valid_inputs)) test_preds.append(model.predict(test_inputs)) rho_val = compute_spearmanr_ignore_nan(valid_outputs, valid_preds[-1]) print('validation score = ', rho_val )<save_to_csv>
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') X_train = train.iloc[:, 1:].values.astype('float32') y_train = train.iloc[:, 0].values.astype('int32') X_test = test.values.astype('float32') X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) X_test = X_test.reshape(X_test.shape[0], 28, 28, 1) print(X_train.shape, X_test.shape )
Digit Recognizer
9,056,716
df_sub.iloc[:, 1:] = np.average(test_preds, axis=0) df_sub.to_csv('submission.csv', index=False )<set_options>
X_train = X_train / 255.0 X_test = X_test / 255.0
Digit Recognizer
9,056,716
py.init_notebook_mode(connected=True) pd.set_option('max_columns', None) warnings.filterwarnings("ignore" )<load_from_csv>
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42) X_train.shape, X_val.shape, y_train.shape, y_val.shape
Digit Recognizer
9,056,716
train = pd.read_csv('../input/tmdb-box-office-prediction/train.csv') test = pd.read_csv('../input/tmdb-box-office-prediction/test.csv') dict_columns = ['belongs_to_collection','genres','spoken_languages','production_companies', 'production_countries','Keywords','cast','crew'] def text_to_dict(df): for columns in dict_columns: df[columns] = df[columns].apply(lambda x: {} if pd.isna(x)else ast.literal_eval(x)) return df train = text_to_dict(train) test = text_to_dict(test) test['revenue'] = np.nan train = pd.merge(train, pd.read_csv('../input/tmdb-competition-additional-features/TrainAdditionalFeatures.csv'), how='left', on=['imdb_id']) test = pd.merge(test, pd.read_csv('../input/tmdb-competition-additional-features/TestAdditionalFeatures.csv'), how='left', on=['imdb_id']) train.head(2 )<feature_engineering>
datagen = ImageDataGenerator( rotation_range=8, width_shift_range=0.08, shear_range=0.3, height_shift_range=0.08, zoom_range=0.08 )
Digit Recognizer
9,056,716
train.loc[train['id'] == 16,'revenue'] = 192864 train.loc[train['id'] == 90,'budget'] = 30000000 train.loc[train['id'] == 118,'budget'] = 60000000 train.loc[train['id'] == 149,'budget'] = 18000000 train.loc[train['id'] == 313,'revenue'] = 12000000 train.loc[train['id'] == 451,'revenue'] = 12000000 train.loc[train['id'] == 464,'budget'] = 20000000 train.loc[train['id'] == 470,'budget'] = 13000000 train.loc[train['id'] == 513,'budget'] = 930000 train.loc[train['id'] == 797,'budget'] = 8000000 train.loc[train['id'] == 819,'budget'] = 90000000 train.loc[train['id'] == 850,'budget'] = 90000000 train.loc[train['id'] == 1007,'budget'] = 2 train.loc[train['id'] == 1112,'budget'] = 7500000 train.loc[train['id'] == 1131,'budget'] = 4300000 train.loc[train['id'] == 1359,'budget'] = 10000000 train.loc[train['id'] == 1542,'budget'] = 1 train.loc[train['id'] == 1570,'budget'] = 15800000 train.loc[train['id'] == 1571,'budget'] = 4000000 train.loc[train['id'] == 1714,'budget'] = 46000000 train.loc[train['id'] == 1721,'budget'] = 17500000 train.loc[train['id'] == 1865,'revenue'] = 25000000 train.loc[train['id'] == 1885,'budget'] = 12 train.loc[train['id'] == 2091,'budget'] = 10 train.loc[train['id'] == 2268,'budget'] = 17500000 train.loc[train['id'] == 2491,'budget'] = 6 train.loc[train['id'] == 2602,'budget'] = 31000000 train.loc[train['id'] == 2612,'budget'] = 15000000 train.loc[train['id'] == 2696,'budget'] = 10000000 train.loc[train['id'] == 2801,'budget'] = 10000000 train.loc[train['id'] == 335,'budget'] = 2 train.loc[train['id'] == 348,'budget'] = 12 train.loc[train['id'] == 470,'budget'] = 13000000 train.loc[train['id'] == 513,'budget'] = 1100000 train.loc[train['id'] == 640,'budget'] = 6 train.loc[train['id'] == 696,'budget'] = 1 train.loc[train['id'] == 797,'budget'] = 8000000 train.loc[train['id'] == 850,'budget'] = 1500000 train.loc[train['id'] == 1199,'budget'] = 5 train.loc[train['id'] == 1282,'budget'] = 9 train.loc[train['id'] == 1347,'budget'] = 1 train.loc[train['id'] == 1755,'budget'] = 2 train.loc[train['id'] == 1801,'budget'] = 5 train.loc[train['id'] == 1918,'budget'] = 592 train.loc[train['id'] == 2033,'budget'] = 4 train.loc[train['id'] == 2118,'budget'] = 344 train.loc[train['id'] == 2252,'budget'] = 130 train.loc[train['id'] == 2256,'budget'] = 1 train.loc[train['id'] == 2696,'budget'] = 10000000 test.loc[test['id'] == 6733,'budget'] = 5000000 test.loc[test['id'] == 3889,'budget'] = 15000000 test.loc[test['id'] == 6683,'budget'] = 50000000 test.loc[test['id'] == 5704,'budget'] = 4300000 test.loc[test['id'] == 6109,'budget'] = 281756 test.loc[test['id'] == 7242,'budget'] = 10000000 test.loc[test['id'] == 7021,'budget'] = 17540562 test.loc[test['id'] == 5591,'budget'] = 4000000 test.loc[test['id'] == 4282,'budget'] = 20000000 test.loc[test['id'] == 3033,'budget'] = 250 test.loc[test['id'] == 3051,'budget'] = 50 test.loc[test['id'] == 3084,'budget'] = 337 test.loc[test['id'] == 3224,'budget'] = 4 test.loc[test['id'] == 3594,'budget'] = 25 test.loc[test['id'] == 3619,'budget'] = 500 test.loc[test['id'] == 3831,'budget'] = 3 test.loc[test['id'] == 3935,'budget'] = 500 test.loc[test['id'] == 4049,'budget'] = 995946 test.loc[test['id'] == 4424,'budget'] = 3 test.loc[test['id'] == 4460,'budget'] = 8 test.loc[test['id'] == 4555,'budget'] = 1200000 test.loc[test['id'] == 4624,'budget'] = 30 test.loc[test['id'] == 4645,'budget'] = 500 test.loc[test['id'] == 4709,'budget'] = 450 test.loc[test['id'] == 4839,'budget'] = 7 test.loc[test['id'] == 3125,'budget'] = 25 test.loc[test['id'] == 
3142,'budget'] = 1 test.loc[test['id'] == 3201,'budget'] = 450 test.loc[test['id'] == 3222,'budget'] = 6 test.loc[test['id'] == 3545,'budget'] = 38 test.loc[test['id'] == 3670,'budget'] = 18 test.loc[test['id'] == 3792,'budget'] = 19 test.loc[test['id'] == 3881,'budget'] = 7 test.loc[test['id'] == 3969,'budget'] = 400 test.loc[test['id'] == 4196,'budget'] = 6 test.loc[test['id'] == 4221,'budget'] = 11 test.loc[test['id'] == 4222,'budget'] = 500 test.loc[test['id'] == 4285,'budget'] = 11 test.loc[test['id'] == 4319,'budget'] = 1 test.loc[test['id'] == 4639,'budget'] = 10 test.loc[test['id'] == 4719,'budget'] = 45 test.loc[test['id'] == 4822,'budget'] = 22 test.loc[test['id'] == 4829,'budget'] = 20 test.loc[test['id'] == 4969,'budget'] = 20 test.loc[test['id'] == 5021,'budget'] = 40 test.loc[test['id'] == 5035,'budget'] = 1 test.loc[test['id'] == 5063,'budget'] = 14 test.loc[test['id'] == 5119,'budget'] = 2 test.loc[test['id'] == 5214,'budget'] = 30 test.loc[test['id'] == 5221,'budget'] = 50 test.loc[test['id'] == 4903,'budget'] = 15 test.loc[test['id'] == 4983,'budget'] = 3 test.loc[test['id'] == 5102,'budget'] = 28 test.loc[test['id'] == 5217,'budget'] = 75 test.loc[test['id'] == 5224,'budget'] = 3 test.loc[test['id'] == 5469,'budget'] = 20 test.loc[test['id'] == 5840,'budget'] = 1 test.loc[test['id'] == 5960,'budget'] = 30 test.loc[test['id'] == 6506,'budget'] = 11 test.loc[test['id'] == 6553,'budget'] = 280 test.loc[test['id'] == 6561,'budget'] = 7 test.loc[test['id'] == 6582,'budget'] = 218 test.loc[test['id'] == 6638,'budget'] = 5 test.loc[test['id'] == 6749,'budget'] = 8 test.loc[test['id'] == 6759,'budget'] = 50 test.loc[test['id'] == 6856,'budget'] = 10 test.loc[test['id'] == 6858,'budget'] = 100 test.loc[test['id'] == 6876,'budget'] = 250 test.loc[test['id'] == 6972,'budget'] = 1 test.loc[test['id'] == 7079,'budget'] = 8000000 test.loc[test['id'] == 7150,'budget'] = 118 test.loc[test['id'] == 6506,'budget'] = 118 test.loc[test['id'] == 7225,'budget'] = 6 test.loc[test['id'] == 7231,'budget'] = 85 test.loc[test['id'] == 5222,'budget'] = 5 test.loc[test['id'] == 5322,'budget'] = 90 test.loc[test['id'] == 5350,'budget'] = 70 test.loc[test['id'] == 5378,'budget'] = 10 test.loc[test['id'] == 5545,'budget'] = 80 test.loc[test['id'] == 5810,'budget'] = 8 test.loc[test['id'] == 5926,'budget'] = 300 test.loc[test['id'] == 5927,'budget'] = 4 test.loc[test['id'] == 5986,'budget'] = 1 test.loc[test['id'] == 6053,'budget'] = 20 test.loc[test['id'] == 6104,'budget'] = 1 test.loc[test['id'] == 6130,'budget'] = 30 test.loc[test['id'] == 6301,'budget'] = 150 test.loc[test['id'] == 6276,'budget'] = 100 test.loc[test['id'] == 6473,'budget'] = 100 test.loc[test['id'] == 6842,'budget'] = 30 <feature_engineering>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
9,056,716
train['popularity'] = np.log1p(train['popularity']) train['revenue'] = np.log1p(train['revenue']) train['totalVotes'] = np.log1p(train['totalVotes']) train['budget'] = np.log1p(train['budget']) train['runtime'] = np.log1p(train['runtime']) train['popularity2'] = np.log1p(train['popularity2']) test['popularity'] = np.log1p(test['popularity']) test['totalVotes'] = np.log1p(test['totalVotes']) test['budget'] = np.log1p(test['budget']) test['runtime'] = np.log1p(test['runtime']) test['popularity2'] = np.log1p(test['popularity2'] )<define_variables>
from keras.models import Sequential from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D, BatchNormalization
Digit Recognizer
9,056,716
for i,e in enumerate(train['belongs_to_collection'][:2]): print(i,e )<count_values>
model = Sequential() model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size=3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size=5, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(64, kernel_size=3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size=3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size=5, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Dense(10, activation='softmax')) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
9,056,716
train['belongs_to_collection'].apply(lambda x: 1 if x!= {} else 0 ).value_counts()<feature_engineering>
epochs = 30 batch_size = 64 history = model.fit_generator(generator=datagen.flow(X_train, y_train), steps_per_epoch=X_train.shape[0] // batch_size, epochs=epochs, validation_data=datagen.flow(X_val, y_val), validation_steps=X_val.shape[0] // batch_size )
Digit Recognizer
9,056,716
<define_variables><EOS>
predictions = model.predict_classes(X_test, verbose=0) submissions = pd.DataFrame({'ImageID': list(range(1, len(predictions)+ 1)) , 'Label': predictions}) submissions.to_csv('cnn_part3.csv', index=False, header=True )
Digit Recognizer
8,755,892
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<count_values>
!pip install torchsummary
Digit Recognizer
8,755,892
print('Number of genres in films:') train['genres'].apply(lambda x: len(x)if x!={} else 0 ).value_counts()<define_variables>
df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test_df = pd.read_csv("/kaggle/input/digit-recognizer/test.csv") print(df.shape, test_df.shape)
Digit Recognizer
8,755,892
list_of_genres = list(train['genres'].apply(lambda x: [i['name'] for i in x] if x!={} else [] ).values )<feature_engineering>
class MNIST_dataset(Dataset): def __init__(self, df): self.df = df self.aug = A.Compose([ A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=10, p=.75) ]) def __len__(self): return(len(self.df)) def __getitem__(self, idx): img_data = self.df.iloc[idx,1:].values.reshape(( 1,28,28)).astype(np.float32)/ 255. img_data = self.aug(image=img_data)['image'] label = self.df.iloc[idx, 0] return img_data, label
Digit Recognizer
8,755,892
train['num_of_genres'] = train['genres'].apply(lambda x: len(x)if x!={} else 0) train['all_genres'] = train['genres'].apply(lambda x: ' '.join(sorted([i['name'] for i in x ])) if x!= {} else '') test['num_of_genres'] = test['genres'].apply(lambda x: len(x)if x!={} else 0) test['all_genres'] = test['genres'].apply(lambda x: ' '.join(sorted([i['name'] for i in x ])) if x!= {} else '' )<feature_engineering>
train_df , valid_df = train_test_split(df, test_size=0.2, random_state=1 )
Digit Recognizer
8,755,892
for g in top_genres: train['genre_' + g] = train['all_genres'].apply(lambda x: 1 if g in x else 0) test['genre_' + g] = test['all_genres'].apply(lambda x: 1 if g in x else 0 )<define_variables>
train_dl = DataLoader(MNIST_dataset(train_df), batch_size=128) valid_dl = DataLoader(MNIST_dataset(valid_df), batch_size=128 )
Digit Recognizer
8,755,892
for i,e in enumerate(train['production_companies'][:2]): print(i,e )<count_values>
num_groups = 4 class Model(nn.Module): def __init__(self): super().__init__() self.features = nn.Sequential( nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1), nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=True), nn.BatchNorm2d(32), nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True), nn.BatchNorm2d(32), nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1), nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=True), nn.BatchNorm2d(64), nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True), nn.BatchNorm2d(64), nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=True), nn.BatchNorm2d(128), nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True), nn.BatchNorm2d(128), nn.Dropout2d(0.25), ) self.classifier = nn.Sequential( nn.Linear(4*4*128, 256), nn.ReLU(inplace=True), nn.BatchNorm1d(256), nn.Dropout2d(0.25), nn.Linear(256, 10), ) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.classifier(x) return(x)
Digit Recognizer
8,755,892
print('Number of Production Companies for a movie:') train['production_companies'].apply(lambda x: len(x)if x!= {} else 0 ).value_counts()<filter>
summary(model, input_size=(1, 28, 28))
Digit Recognizer
8,755,892
train[train['production_companies'].apply(lambda x: len(x)if x!= {} else 0)> 10]<count_unique_values>
from torch import optim from tqdm.auto import tqdm
Digit Recognizer
8,755,892
list_of_companies = list(train['production_companies'].apply(lambda x : [i['name'] for i in x] if x!= {} else [] ).values) Counter(i for j in list_of_companies for i in j ).most_common(20 )<feature_engineering>
criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters() , lr=1e-3 )
Digit Recognizer
8,755,892
train['num_prod_companies'] = train['production_companies'].apply(lambda x: len(x)if x!={} else 0) test['num_prod_companies'] = test['production_companies'].apply(lambda x: len(x)if x!={} else 0) train['all_prod_companies'] = train['production_companies'].apply(lambda x: ' '.join(sorted([i['name'] for i in x])) if x!={} else '') test['all_prod_companies'] = test['production_companies'].apply(lambda x: ' '.join(sorted([i['name'] for i in x])) if x!={} else '' )<feature_engineering>
def train_one_epoch(dl, epoch_num): total_loss = 0.0 accumulation_steps = 1024 // 128 optimizer.zero_grad() for i,(X, y)in enumerate(tqdm(dl)) : y1 = model(X.cuda()) loss = criterion(y1, y.cuda()) loss /= accumulation_steps loss.backward() if(( i+1)% accumulation_steps == 0): optimizer.step() optimizer.zero_grad() total_loss += loss.detach().cpu().item() print(f'epoch : {epoch_num}, Loss : {total_loss/len(dl.dataset):.6f}')
Digit Recognizer
8,755,892
top_prod_companies = [m[0] for m in Counter(i for j in list_of_companies for i in j ).most_common(10)] for pc in top_prod_companies: train['production_' + pc] = train['all_prod_companies'].apply(lambda x: 1 if pc in x else 0) test['production_'+ pc] = test['all_prod_companies'].apply(lambda x: 1 if pc in x else 0 )<define_variables>
def evaluate(dl): total_loss = 0.0 total_correct = 0.0 with torch.no_grad() : for X,y in dl : y1 = model(X.cuda()) loss = criterion(y1,y.cuda()) pred = torch.argmax(y1, dim=1 ).cpu() total_loss+=loss.item() total_correct += torch.sum(y==pred ).float().item() accuracy = total_correct/len(dl.dataset) print(f'Loss : {total_loss/len(dl.dataset):.6f}, Accuracy : {accuracy*100:.3f}%')
Digit Recognizer
8,755,892
for i, e in enumerate(train['production_countries'][:2]): print(i,e )<count_values>
epoch_num = 20 for epoch in range(epoch_num): train_one_epoch(train_dl, epoch) evaluate(valid_dl )
Digit Recognizer
8,755,892
print('Number of Production Countries in Movies:') train['production_countries'].apply(lambda x: len(x)if x!={} else 0 ).value_counts()<filter>
class MNIST_dataset(Dataset): def __init__(self, df): self.df = df def __len__(self): return(len(self.df)) def __getitem__(self, idx): img_data = self.df.iloc[idx].values.reshape(( 1,28,28)).astype(np.float32)/ 255. return img_data
Digit Recognizer
8,755,892
train[train['production_countries'].apply(lambda x: len(x)if x!= {} else 0)> 5]<count_values>
test_dl = DataLoader(MNIST_dataset(test_df), batch_size=128 )
Digit Recognizer
8,755,892
List_of_countries = list(train['production_countries'].apply(lambda x: [i['name'] for i in x] if x!= {} else [])) Counter(i for j in List_of_countries for i in j ).most_common(10 )<feature_engineering>
def evaluate_Submssions(dl): total_loss = 0.0 total_correct = 0.0 pred_list =[] with torch.no_grad() : for X in dl : y1 = model(X.cuda()) pred = torch.argmax(y1, dim=1 ).detach().cpu().numpy().tolist() pred_list.extend(pred) return pred_list
Digit Recognizer
8,755,892
train['num_prod_countries'] = train['production_countries'].apply(lambda x: len(x)if x!= {} else 0) test['num_prod_countries'] = test['production_countries'].apply(lambda x: len(x)if x!={} else 0) train['all_prod_countries'] = train['production_countries'].apply(lambda x: ' '.join(sorted(i['name'] for i in x)) if x!= {} else '') test['all_prod_countries'] = test['production_countries'].apply(lambda x: ' '.join(sorted(i['name'] for i in x)) if x!= {} else '') <feature_engineering>
pred_list = evaluate_Submssions(test_dl)
Digit Recognizer
8,755,892
top_prod_countries = [m[0] for m in Counter(i for j in List_of_countries for i in j ).most_common(6)] for t in top_prod_countries: train['prod_country_' + t] = train['all_prod_countries'].apply(lambda x: 1 if t in x else 0) test['prod_country_'+ t] = test['all_prod_countries'].apply(lambda x: 1 if t in x else 0 )<define_variables>
subs = pd.DataFrame({ 'ImageId': range(1, len(pred_list)+1), 'Label' : pred_list }) subs.head()
Digit Recognizer
8,755,892
for i, e in enumerate(train['spoken_languages'][:2]): print(i,e )<count_values>
subs.to_csv("submission.csv", index= False )
Digit Recognizer
4,907,015
print('Number of languages for a movie:') train['spoken_languages'].apply(lambda x: len(x)if x!={} else 0 ).value_counts()<count_unique_values>
%matplotlib inline
Digit Recognizer
4,907,015
list_of_langs = list(train['spoken_languages'].apply(lambda x: [i['name'] for i in x] if x!= {} else [])) top_langs = [m[0] for m in Counter(i for j in list_of_langs for i in j ).most_common(5)] Counter(i for j in list_of_langs for i in j ).most_common(5 )<feature_engineering>
from keras.models import Sequential from keras.layers import Dense , Dropout , Lambda, Flatten from keras.optimizers import Adam ,RMSprop from sklearn.model_selection import train_test_split from keras import backend as K from keras.preprocessing.image import ImageDataGenerator
Digit Recognizer
4,907,015
train['num_of_langs'] = train['spoken_languages'].apply(lambda x: len(x)if x!= {} else 0) test['num_of_langs'] = test['spoken_languages'].apply(lambda x: len(x)if x!= {} else 0) train['all_langs'] = train['spoken_languages'].apply(lambda x: ' '.join(sorted([i['name']for i in x])) if x!= {} else '') test['all_langs'] = test['spoken_languages'].apply(lambda x: ' '.join(sorted([i['name'] for i in x])) if x!= {} else '') for l in top_langs: train['lang_' + l] = train['all_langs'].apply(lambda x: 1 if l in x else 0) test['lang_'+ l] = test['all_langs'].apply(lambda x: 1 if l in x else 0 )<define_variables>
train = pd.read_csv(".. /input/train.csv") print(train.shape )
Digit Recognizer
4,907,015
for i, e in enumerate(train['Keywords'][:2]): print(i,e )<count_values>
test= pd.read_csv(".. /input/test.csv") print(test.shape )
Digit Recognizer
4,907,015
list_of_keys = list(train['Keywords'].apply(lambda x: [i['name'] for i in x] if x!= {} else [])) Counter(i for j in list_of_keys for i in j ).most_common(10 )<feature_engineering>
X_train =(train.iloc[:,1:].values ).astype('float32') y_train = train.iloc[:,0].values.astype('int32') X_test = test.values.astype('float32' )
Digit Recognizer
4,907,015
top_keywords = [m[0] for m in Counter(i for j in list_of_keys for i in j ).most_common(10)] train['num_of_keywords'] = train['Keywords'].apply(lambda x: len(x)if x!={} else 0) test['num_of_keywords'] = test['Keywords'].apply(lambda x: len(x)if x!={} else 0) train['all_keywords'] = train['Keywords'].apply(lambda x: ' '.join(sorted([i['name']for i in x])) if x!= {} else '') test['all_keywords'] = test['Keywords'].apply(lambda x: ' '.join(sorted([i['name'] for i in x])) if x!={} else '') for k in top_keywords: train['keyword_'+ k] = train['all_keywords'].apply(lambda x: 1 if k in x else 0) test['keyword_'+ k] = test['all_keywords'].apply(lambda x: 1 if k in x else 0) <define_variables>
mean_px = X_train.mean().astype(np.float32) std_px = X_train.std().astype(np.float32) def standardize(x): return(x-mean_px)/std_px
Digit Recognizer
4,907,015
for i, e in enumerate(train['cast'][:1]): print(i,e )<count_values>
y_train= to_categorical(y_train) num_classes = y_train.shape[1] num_classes
Digit Recognizer
4,907,015
print('Number of casts used per movie:') train['cast'].apply(lambda x: len(x)if x!={} else 0 ).value_counts().head(10 )<define_variables>
seed = 10 np.random.seed(seed )
Digit Recognizer
4,907,015
list_cast_name = list(train['cast'].apply(lambda x: [i['name'] for i in x]if x!= {} else [])) top_cast_name = [m[0] for m in Counter(i for j in list_cast_name for i in j ).most_common(20)] Counter(i for j in list_cast_name for i in j ).most_common(20 )<feature_engineering>
from keras.models import Sequential from keras.layers.core import Lambda , Dense, Flatten, Dropout from keras.callbacks import EarlyStopping from keras.layers import BatchNormalization, Convolution2D , MaxPooling2D
Digit Recognizer
4,907,015
train['num_of_cast']= train['cast'].apply(lambda x: len(x)if x!={} else 0) test['num_of_cast'] = test['cast'].apply(lambda x: len(x)if x!={} else 0) train['all_cast_name'] = train['cast'].apply(lambda x: ' '.join(sorted([i['name']for i in x])) if x!={} else '') test['all_cast_name'] = test['cast'].apply(lambda x: ' '.join(sorted([i['name']for i in x])) if x!= {} else '') for c in top_cast_name: train['cast_name_'+ c]= train['all_cast_name'].apply(lambda x: 1 if c in x else 0) test['cast_name_'+ c]= test['all_cast_name'].apply(lambda x: 1 if c in x else 0 )<count_values>
model= Sequential() model.add(Lambda(standardize,input_shape=(28,28,1))) model.add(Flatten()) model.add(Dense(10, activation='softmax')) print("input shape ",model.input_shape) print("output shape ",model.output_shape )
Digit Recognizer
4,907,015
print('Number of crew members per movie:') train['crew'].apply(lambda x: len(x)if x!= {} else 0 ).value_counts().head(10 )<count_values>
model.compile(optimizer=RMSprop(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
4,907,015
list_crew_names = list(train['crew'].apply(lambda x: [i['name'] for i in x] if x!= {} else [] ).values) Counter(i for j in list_crew_names for i in j ).most_common(15 )<feature_engineering>
X = X_train y = y_train X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.20, random_state=10) batches = gen.flow(X_train, y_train, batch_size=64) val_batches=gen.flow(X_val, y_val, batch_size=64 )
Digit Recognizer
4,907,015
top_crew_names = [m[0] for m in Counter(i for j in list_crew_names for i in j ).most_common(20)] train['num_of_crew'] = train['crew'].apply(lambda x: len(x)if x!= {} else 0) test['num_of_crew']= test['crew'].apply(lambda x: len(x)if x!= {} else 0) for cn in top_crew_names: train['crew_name_'+ cn]= train['crew'].apply(lambda x: 1 if cn in str(x)else 0) test['crew_name_'+ cn] = test['crew'].apply(lambda x: 1 if cn in str(x)else 0 )<count_missing_values>
history=model.fit_generator(generator=batches, steps_per_epoch=batches.n, epochs=3, validation_data=val_batches, validation_steps=val_batches.n )
Digit Recognizer
4,907,015
train['homepage'].isna().sum()<feature_engineering>
def get_cnn_model() : model = Sequential([ Lambda(standardize, input_shape=(28,28,1)) , Convolution2D(32,(3,3), activation='relu'), Convolution2D(32,(3,3), activation='relu'), MaxPooling2D() , Convolution2D(64,(3,3), activation='relu'), Convolution2D(64,(3,3), activation='relu'), MaxPooling2D() , Flatten() , Dense(512, activation='relu'), Dense(10, activation='softmax') ]) model.compile(Adam() , loss='categorical_crossentropy', metrics=['accuracy']) return model
Digit Recognizer
4,907,015
train['has_homepage'] = 1 train.loc[pd.isnull(train['homepage']),"has_homepage"] = 0 test['has_homepage'] = 1 test.loc[pd.isnull(test['homepage']),"has_homepage"] = 0<count_missing_values>
model= get_cnn_model() model.optimizer.lr=0.01
Digit Recognizer
4,907,015
train['runtime'].isna().sum()<data_type_conversions>
history=model.fit_generator(generator=batches, steps_per_epoch=batches.n, epochs=1, validation_data=val_batches, validation_steps=val_batches.n )
Digit Recognizer
4,907,015
train['runtime'].fillna(train['runtime'].mean() ,inplace= True )<correct_missing_values>
def get_bn_model() : model = Sequential([ Lambda(standardize, input_shape=(28,28,1)) , Convolution2D(32,(3,3), activation='relu'), BatchNormalization(axis=1), Convolution2D(32,(3,3), activation='relu'), MaxPooling2D() , BatchNormalization(axis=1), Convolution2D(64,(3,3), activation='relu'), BatchNormalization(axis=1), Convolution2D(64,(3,3), activation='relu'), MaxPooling2D() , Flatten() , BatchNormalization() , Dense(512, activation='relu'), BatchNormalization() , Dense(10, activation='softmax') ]) model.compile(Adam() , loss='categorical_crossentropy', metrics=['accuracy']) return model
Digit Recognizer
4,907,015
test['runtime'].fillna(test['runtime'].mean() ,inplace= True )<feature_engineering>
model= get_bn_model() model.optimizer.lr=0.01 history=model.fit_generator(generator=batches, steps_per_epoch=batches.n, epochs=1, validation_data=val_batches, validation_steps=val_batches.n )
Digit Recognizer
4,907,015
train.loc[train['release_date'].isnull() == True, 'release_date'] = '01/01/98' test.loc[test['release_date'].isnull() == True, 'release_date'] = '01/01/98' <categorify>
model.optimizer.lr=0.01 gen = image.ImageDataGenerator() batches = gen.flow(X, y, batch_size=64) history=model.fit_generator(generator=batches, steps_per_epoch=batches.n, epochs=3 )
Digit Recognizer
4,907,015
<data_type_conversions><EOS>
predictions = model.predict_classes(X_test, verbose=0) submissions=pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) , "Label": predictions}) submissions.to_csv("Digit_Recog.csv", index=False, header=True )
Digit Recognizer
10,296,992
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<data_type_conversions>
import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten, Convolution2D, MaxPooling2D from keras.utils import np_utils from keras.preprocessing.image import ImageDataGenerator from keras.optimizers import Adam,RMSprop from keras.layers.normalization import BatchNormalization from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix
Digit Recognizer
10,296,992
def process_date(df): date_parts = ["year", "weekday", "month", 'weekofyear', 'day', 'quarter'] for part in date_parts: part_col = 'release' + "_" + part df[part_col] = getattr(df['release_date'].dt, part ).astype(int) return df train = process_date(train) test = process_date(test )<groupby>
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
Digit Recognizer
10,296,992
d = train['release_date'].dt.year.value_counts().sort_index() g = train.groupby('release_date')['revenue'].sum()<count_values>
y_train = train['label'].astype('int32') X_train =(train.drop(['label'], axis = 1)).values.astype('float32') X_test = test.values.astype('float32') batch_size, img_rows, img_cols = 64, 28, 28 X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1) X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1) X_train.shape, X_test.shape
Digit Recognizer
10,296,992
train['status'].value_counts()<merge>
X_train /= 255 X_test /= 255
Digit Recognizer
10,296,992
rating_na = train.groupby(["release_year","original_language"])['rating'].mean().reset_index() train[train.rating.isna() ]['rating'] = train.merge(rating_na, how = 'left' ,on = ["release_year","original_language"]) vote_count_na = train.groupby(["release_year","original_language"])['totalVotes'].mean().reset_index() train[train.totalVotes.isna() ]['totalVotes'] = train.merge(vote_count_na, how = 'left' ,on = ["release_year","original_language"]) train['weightedRating'] =(train['rating']*train['totalVotes'] + 6.367 * 1000)/(train['totalVotes'] + 1000) train['inflationBudget'] = np.log1p(np.expm1(train['budget'])+ np.expm1(train['budget'])*1.8/100*(2018-train['release_year'])) train['_popularity_mean_year'] = train['popularity'] / train.groupby("release_year")["popularity"].transform('mean') train['_budget_runtime_ratio'] = train['budget']/train['runtime'] train['_budget_popularity_ratio'] = train['budget']/train['popularity'] train['_budget_year_ratio'] = train['budget']/(train['release_year']*train['release_year']) train['_releaseYear_popularity_ratio'] = train['release_year']/train['popularity'] train['_popularity_totalVotes_ratio'] = train['totalVotes']/train['popularity'] train['_rating_popularity_ratio'] = train['rating']/train['popularity'] train['_rating_totalVotes_ratio'] = train['totalVotes']/train['rating'] train['_totalVotes_releaseYear_ratio'] = train['totalVotes']/train['release_year'] train['_budget_rating_ratio'] = train['budget']/train['rating'] train['_runtime_rating_ratio'] = train['runtime']/train['rating'] train['_budget_totalVotes_ratio'] = train['budget']/train['totalVotes'] train['meanruntimeByYear'] = train.groupby("release_year")["runtime"].aggregate('mean') train['meanPopularityByYear'] = train.groupby("release_year")["popularity"].aggregate('mean') train['meanBudgetByYear'] = train.groupby("release_year")["budget"].aggregate('mean') train['meantotalVotesByYear'] = train.groupby("release_year")["totalVotes"].aggregate('mean') train['meanTotalVotesByRating'] = train.groupby("rating")["totalVotes"].aggregate('mean') train['isTaglineNA'] = 0 train.loc[train['tagline'] == 0 ,"isTaglineNA"] = 1 train['isTitleDifferent'] = 1 train.loc[ train['original_title'] == train['title'] ,"isTitleDifferent"] = 0 <merge>
y_train = np_utils.to_categorical(y_train, 10 )
Digit Recognizer
10,296,992
rating_na = test.groupby(["release_year","original_language"])['rating'].mean().reset_index() test[test.rating.isna() ]['rating'] = test.merge(rating_na, how = 'left' ,on = ["release_year","original_language"]) vote_count_na = test.groupby(["release_year","original_language"])['totalVotes'].mean().reset_index() test[test.totalVotes.isna() ]['totalVotes'] = test.merge(vote_count_na, how = 'left' ,on = ["release_year","original_language"]) test['weightedRating'] =(test['rating']*test['totalVotes'] + 6.367 * 1000)/(test['totalVotes'] + 1000) test['inflationBudget'] = np.log1p(np.expm1(test['budget'])+ np.expm1(test['budget'])*1.8/100*(2018-test['release_year'])) test['_popularity_mean_year'] = test['popularity'] / test.groupby("release_year")["popularity"].transform('mean') test['_budget_runtime_ratio'] = test['budget']/test['runtime'] test['_budget_popularity_ratio'] = test['budget']/test['popularity'] test['_budget_year_ratio'] = test['budget']/(test['release_year']*test['release_year']) test['_releaseYear_popularity_ratio'] = test['release_year']/train['popularity'] test['_popularity_totalVotes_ratio'] = test['totalVotes']/test['popularity'] test['_rating_popularity_ratio'] = test['rating']/test['popularity'] test['_rating_totalVotes_ratio'] = test['totalVotes']/test['rating'] test['_totalVotes_releaseYear_ratio'] = test['totalVotes']/test['release_year'] test['_budget_rating_ratio'] = test['budget']/test['rating'] test['_runtime_rating_ratio'] = test['runtime']/test['rating'] test['_budget_totalVotes_ratio'] = test['budget']/test['totalVotes'] test['meanruntimeByYear'] = test.groupby("release_year")["runtime"].aggregate('mean') test['meanPopularityByYear'] = test.groupby("release_year")["popularity"].aggregate('mean') test['meanBudgetByYear'] = test.groupby("release_year")["budget"].aggregate('mean') test['meantotalVotesByYear'] = test.groupby("release_year")["totalVotes"].aggregate('mean') test['meanTotalVotesByRating'] = test.groupby("rating")["totalVotes"].aggregate('mean') test['isTaglineNA'] = 0 test.loc[test['tagline'] == 0 ,"isTaglineNA"] = 1 test['isTitleDifferent'] = 1 test.loc[ test['original_title'] == test['title'] ,"isTitleDifferent"] = 0 <drop_column>
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size = 0.1, random_state = 12345 )
Digit Recognizer
10,296,992
train = train.drop(['id','belongs_to_collection','genres','homepage','imdb_id','overview','runtime' ,'poster_path','production_companies','production_countries','release_date','spoken_languages' ,'status','title','Keywords','cast','crew','original_language','original_title','tagline','all_genres', 'all_prod_companies','all_prod_countries','all_langs','all_keywords','all_cast_name'],axis=1) test = test.drop(['id','belongs_to_collection','genres','homepage','imdb_id','overview','runtime' ,'poster_path','production_companies','production_countries','release_date','spoken_languages' ,'status','title','Keywords','cast','crew','original_language','original_title','tagline','all_genres', 'all_prod_companies','all_prod_countries','all_langs','all_keywords','all_cast_name'],axis=1 )<train_model>
input_shape = (img_rows, img_cols, 1)
callback_es = EarlyStopping(monitor = 'val_accuracy', patience = 3)

def first_cnn_model_keras(optimizer):
    model = Sequential()
    # 5x5 convolutions; the kernel size is passed as a tuple
    model.add(Convolution2D(64, (5, 5), padding = 'same', kernel_initializer = 'he_uniform', input_shape = input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same'))
    model.add(Convolution2D(128, (5, 5), padding = 'same', kernel_initializer = 'he_uniform'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same'))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(optimizer, loss = 'categorical_crossentropy', metrics = ['accuracy'])
    return model
Digit Recognizer
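A short usage sketch for the builder defined above; it assumes the same Keras imports as the surrounding cells, and the optimizer here is only an example:

# Build once and inspect the layer stack / parameter counts before any training
model = first_cnn_model_keras(Adam(learning_rate=0.001))
model.summary()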
10,296,992
train.fillna(value=0.0, inplace = True) test.fillna(value=0.0, inplace = True )<data_type_conversions>
model1 = first_cnn_model_keras(Adam(learning_rate = 0.001, amsgrad = True)) h1 = model1.fit(X_train, y_train, batch_size = batch_size, epochs = 20, verbose = 1, validation_data =(X_valid, y_valid), callbacks = [callback_es]) final_loss_first_adam, final_acc_first_adam = model1.evaluate(X_valid, y_valid, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss_first_adam, final_acc_first_adam))
Digit Recognizer
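A sketch of plotting the training curves captured in h1 above (matplotlib is assumed to be available; the history keys follow the 'accuracy' metric name used at compile time, while older Keras versions use 'acc'/'val_acc'):

import matplotlib.pyplot as plt

# Accuracy per epoch for the training and validation splits
plt.plot(h1.history['accuracy'], label='train accuracy')
plt.plot(h1.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()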
10,296,992
def clean_dataset(df):
    assert isinstance(df, pd.DataFrame), "df needs to be a pd.DataFrame"
    df.dropna(inplace=True)
    # Keep only rows with no NaN or infinite values in any column
    indices_to_keep = ~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)
    return df[indices_to_keep].astype(np.float64)

train = clean_dataset(train)<split>
model2 = first_cnn_model_keras(RMSprop(lr=0.001)) h2 = model2.fit(X_train, y_train, batch_size = batch_size, epochs = 20, verbose = 1, validation_data =(X_valid, y_valid),callbacks = [callback_es]) final_loss_first_rmsprop, final_acc_first_rmsprop = model2.evaluate(X_valid, y_valid, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss_first_rmsprop, final_acc_first_rmsprop))
Digit Recognizer
10,296,992
X = train.drop(['revenue'],axis=1) y = train.revenue X_train, X_valid, y_train, y_valid = train_test_split(X,y,test_size=0.2,random_state=25 )<compute_train_metric>
datagen = ImageDataGenerator(rotation_range = 10, zoom_range = 0.1, width_shift_range = 0.1, height_shift_range = 0.1) datagen.fit(X_train) train_batches = datagen.flow(X_train, y_train, batch_size = batch_size )
Digit Recognizer
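A sketch for previewing one augmented batch from the generator above, to eyeball the effect of the rotation/zoom/shift settings; it assumes matplotlib is available and uses the same img_rows/img_cols as the model input:

import matplotlib.pyplot as plt

# Draw one batch of augmented images and show the first eight
aug_images, aug_labels = next(train_batches)
fig, axes = plt.subplots(1, 8, figsize=(12, 2))
for ax, img in zip(axes, aug_images[:8]):
    ax.imshow(img.reshape(img_rows, img_cols), cmap='gray')
    ax.axis('off')
plt.show()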
10,296,992
lr = LinearRegression() lr.fit(X_train, y_train) pred = lr.predict(X_valid) accuracy = r2_score(y_valid,pred) print('Linear Regression R2 Score: ', accuracy) mse = mean_squared_error(y_valid,pred) print('Mean Squared Error: ', mse) print('Root Mean Square Error',np.sqrt(mse)) cv_pred = cross_val_predict(lr,X,y,n_jobs=-1, cv=10) cv_accuracy = r2_score(y,cv_pred) print('Cross-Predicted(KFold)R2 Score: ', cv_accuracy) <compute_train_metric>
model3 = first_cnn_model_keras(Adam(learning_rate = 0.001, amsgrad = True)) h3 = model3.fit_generator(train_batches, epochs = 40, verbose = 1, validation_data =(X_valid, y_valid), callbacks = [callback_es]) final_loss_first_adam_aug, final_acc_first_adam_aug = model3.evaluate(X_valid, y_valid, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss_first_adam_aug, final_acc_first_adam_aug))
Digit Recognizer
10,296,992
ls = Lasso() ls.fit(X_train, y_train) pred = ls.predict(X_valid) accuracy = r2_score(y_valid,pred) print('Lasso Regression R2 Score: ', accuracy) mse = mean_squared_error(y_valid,pred) print('Mean Squared Error: ', mse) print('Root Mean Squared Error', np.sqrt(mse)) cv_pred = cross_val_predict(ls,X,y,n_jobs=-1, cv=10) cv_accuracy = r2_score(y,cv_pred) print('Cross-Predicted(KFold)Lasso Regression Accuracy: ', cv_accuracy )<compute_train_metric>
model4 = first_cnn_model_keras(RMSprop(lr=0.001)) h4 = model4.fit_generator(train_batches, epochs = 40, verbose = 1, validation_data =(X_valid, y_valid), callbacks = [callback_es]) final_loss_first_rmsprop_aug, final_acc_first_rmsprop_aug = model4.evaluate(X_valid, y_valid, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss_first_rmsprop_aug, final_acc_first_rmsprop_aug))
Digit Recognizer
10,296,992
dt = DecisionTreeRegressor() dt.fit(X_train, y_train) pred = dt.predict(X_valid) accuracy = r2_score(y_valid,pred) print('Decision Tree R2 Score: ', accuracy) mse = mean_squared_error(y_valid,pred) print('Mean Squared Error: ', mse) print('Root Mean Square Error',np.sqrt(mse)) cv_pred = cross_val_predict(dt,X,y,n_jobs=-1, cv=10) cv_accuracy = r2_score(y,cv_pred) print('Cross-Predicted(KFold)Decision Tree Accuracy: ', cv_accuracy )<compute_train_metric>
def second_cnn_model_keras(optimizer): model = Sequential() model.add(Convolution2D(64, kernel_size =(5, 5), input_shape = input_shape, kernel_initializer = 'he_uniform')) model.add(Activation('relu')) model.add(Convolution2D(64, kernel_size =(5, 5), kernel_initializer = 'he_uniform')) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size =(2, 2), strides =(2, 2), padding = 'same')) model.add(Dropout(0.25)) model.add(Convolution2D(128, kernel_size =(3, 3), kernel_initializer = 'he_uniform')) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Convolution2D(128, kernel_size =(3, 3), kernel_initializer = 'he_uniform')) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size =(2, 2), strides =(2, 2), padding = 'same')) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256)) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Dense(10)) model.add(Activation('softmax')) model.compile(loss = 'categorical_crossentropy', optimizer = optimizer, metrics=['accuracy']) return model
Digit Recognizer
10,296,992
rf = RandomForestRegressor() rf.fit(X_train, y_train) pred = rf.predict(X_valid) accuracy = r2_score(y_valid,pred) print('Random Forest Regressor R2: ', accuracy) mse = mean_squared_error(y_valid,pred) print('Mean Squared Error: ', mse) print('Root Mean Square Error',np.sqrt(mse)) cv_pred = cross_val_predict(rf,X,y,n_jobs=-1, cv=10) cv_accuracy = r2_score(y,cv_pred) print('Cross-Predicted(KFold)Random Forest R2: ', cv_accuracy )<define_search_space>
model5 = second_cnn_model_keras(Adam(learning_rate = 0.001, amsgrad = True)) h5 = model5.fit(X_train, y_train, batch_size = batch_size, epochs = 20, verbose = 1, validation_data =(X_valid, y_valid), callbacks = [callback_es]) final_loss_second_adam, final_acc_second_adam = model5.evaluate(X_valid, y_valid, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss_second_adam, final_acc_second_adam))
Digit Recognizer
10,296,992
rfr = RandomForestRegressor() n_estimators = [int(x)for x in np.linspace(start = 50 , stop = 300, num = 5)] max_features = [10,20,40,60,80,100,120] max_depth = [int(x)for x in np.linspace(5, 10, num = 2)] max_depth.append(None) bootstrap = [True, False] r_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'bootstrap': bootstrap} cv_random = RandomizedSearchCV(estimator=rfr, param_distributions=r_grid, n_iter = 20, scoring='neg_mean_squared_error', cv = 3, verbose=2, random_state=42, n_jobs=-1, return_train_score=True) cv_random.fit(X_train, y_train); print(cv_random.best_params_) pred = cv_random.predict(X_valid) mse = mean_squared_error(y_valid,pred) print('Mean Squared Error: ', mse) print('Root Mean Square Error',np.sqrt(mse)) cv_accuracy = r2_score(y_valid,pred) print('Random Forest Predict R2: ', cv_accuracy )<import_modules>
model6 = second_cnn_model_keras(RMSprop(lr=0.001)) h6 = model6.fit(X_train, y_train, batch_size = batch_size, epochs = 20, verbose = 1, validation_data =(X_valid, y_valid), callbacks = [callback_es]) final_loss_second_rmsprop, final_acc_second_rmsprop = model6.evaluate(X_valid, y_valid, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss_second_rmsprop, final_acc_second_rmsprop))
Digit Recognizer
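A short follow-up sketch for the randomized search above: pulling out the refitted best forest and ranking the TMDB features it was fit on (the head(15) cutoff is arbitrary):

# best_estimator_ is the forest refit on the full training split with the best parameters
best_rf = cv_random.best_estimator_
importances = pd.Series(best_rf.feature_importances_, index=X_train.columns).sort_values(ascending=False)
print(importances.head(15))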
10,296,992
import h2o from h2o.estimators.gbm import H2OGradientBoostingEstimator from h2o.automl import H2OAutoML<set_options>
model7 = second_cnn_model_keras(Adam(learning_rate = 0.001, amsgrad = True)) h7 = model7.fit_generator(train_batches, epochs = 20, verbose = 1, validation_data =(X_valid, y_valid),callbacks = [callback_es]) final_loss_second_adam_aug, final_acc_second_adam_aug = model7.evaluate(X_valid, y_valid, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss_second_adam_aug, final_acc_second_adam_aug))
Digit Recognizer
10,296,992
h2o.init()<prepare_output>
model8 = second_cnn_model_keras(RMSprop(lr=0.001)) h8 = model8.fit_generator(train_batches, epochs = 20, verbose = 1, validation_data =(X_valid, y_valid), callbacks = [callback_es]) final_loss_second_rmsprop_aug, final_acc_second_rmsprop_aug = model8.evaluate(X_valid, y_valid, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss_second_rmsprop_aug, final_acc_second_rmsprop_aug))
Digit Recognizer
10,296,992
h2o_df=h2o.H2OFrame(train) h2o_df.head()<split>
models = ['first_cnn_adam', 'first_cnn_rmsprop', 'first_cnn_adam_aug', 'first_cnn_rmsprop_aug', 'second_cnn_adam', 'second_cnn_rmsprop', 'second_cnn_adam_aug', 'second_cnn_rmsprop_aug'] dict_values = {'loss': [final_loss_first_adam, final_loss_first_rmsprop, final_loss_first_adam_aug, final_loss_first_rmsprop_aug, final_loss_second_adam, final_loss_second_rmsprop, final_loss_second_adam_aug, final_loss_second_rmsprop_aug], 'accuracy': [final_acc_first_adam, final_acc_first_rmsprop, final_acc_first_adam_aug, final_acc_first_rmsprop_aug, final_acc_second_adam, final_acc_second_rmsprop, final_acc_second_adam_aug, final_acc_second_rmsprop_aug]} df = pd.DataFrame(dict_values, index = models, columns = ['loss', 'accuracy']) df
Digit Recognizer
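The excerpt does not show the test-set prediction step; a hedged sketch of how one of the trained models could produce a submission, assuming a reshaped X_test array was prepared in the earlier preprocessing cells and the usual ImageId/Label layout:

# Illustrative choice of model; pick whichever row of df scored best
best_model = model7
preds = np.argmax(best_model.predict(X_test), axis=1)
submission = pd.DataFrame({'ImageId': np.arange(1, len(preds) + 1), 'Label': preds})
submission.to_csv('submission.csv', index=False)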
10,296,992
splits = h2o_df.split_frame(ratios=[0.8],seed=1) h2o_train = splits[0] h2o_valid = splits[1]<prepare_x_and_y>
model = [0]*10 for i in range(10): model[i] = Sequential() model[i].add(Convolution2D(64, kernel_size =(3, 3), input_shape = input_shape, kernel_initializer = 'he_uniform')) model[i].add(Activation('relu')) model[i].add(BatchNormalization()) model[i].add(Convolution2D(64, kernel_size =(3, 3), kernel_initializer = 'he_uniform')) model[i].add(Activation('relu')) model[i].add(BatchNormalization()) model[i].add(Convolution2D(64, kernel_size =(5, 5), kernel_initializer = 'he_uniform')) model[i].add(Activation('relu')) model[i].add(BatchNormalization()) model[i].add(MaxPooling2D(pool_size =(2, 2), strides =(2, 2))) model[i].add(Dropout(0.45)) model[i].add(Convolution2D(128, kernel_size =(3, 3), kernel_initializer = 'he_uniform')) model[i].add(Activation('relu')) model[i].add(BatchNormalization()) model[i].add(Convolution2D(128, kernel_size =(3, 3), kernel_initializer = 'he_uniform')) model[i].add(Activation('relu')) model[i].add(BatchNormalization()) model[i].add(Convolution2D(128, kernel_size =(5, 5), kernel_initializer = 'he_uniform')) model[i].add(Activation('relu')) model[i].add(BatchNormalization()) model[i].add(MaxPooling2D(pool_size =(2, 2), strides =(2, 2))) model[i].add(Dropout(0.45)) model[i].add(Flatten()) model[i].add(Dense(512)) model[i].add(Activation('relu')) model[i].add(Dropout(0.45)) model[i].add(Dense(1024)) model[i].add(Activation('relu')) model[i].add(Dropout(0.45)) model[i].add(Dense(10)) model[i].add(Activation('softmax')) model[i].compile(loss = 'categorical_crossentropy', optimizer = Adam(lr = 0.0005, amsgrad = True), metrics=['accuracy'] )
Digit Recognizer
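The excerpt ends before the ten networks above are trained. A hedged sketch of one common way to finish such an ensemble, training each network on augmented data and averaging the softmax outputs; the epoch count and the use of X_test are assumptions, not taken from the original cells:

# Accumulate averaged class probabilities over the ten networks
ensemble_probs = np.zeros((X_test.shape[0], 10))
for i in range(10):
    model[i].fit(datagen.flow(X_train, y_train, batch_size=batch_size),
                 epochs=30, validation_data=(X_valid, y_valid), verbose=0)
    ensemble_probs += model[i].predict(X_test)
ensemble_labels = np.argmax(ensemble_probs / 10, axis=1)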