kernel_id (int64, 24.2k to 23.3M) | prompt (string, length 8 to 1.85M) | completion (string, length 1 to 182k) | comp_name (string, length 5 to 57) |
---|---|---|---|
14,584,011 | for num in range(0,6):
questions_df["tags"+str(num)] = questions_df["tags"].apply(lambda row: gettags(row,num))
le = LabelEncoder()
le.fit(np.unique(questions_df['tags'+str(num)].values))
questions_df['tags'+str(num)]=questions_df[['tags'+str(num)]].apply(le.transform)
questions_df_dict = {
'tags0': 'int8',
'tags1': 'int8',
'tags2': 'int8',
'tags3': 'int8',
'tags4': 'int8',
'tags5': 'int8',
}
questions_df = questions_df.astype(questions_df_dict)
questions_df.drop(columns=['tags'], inplace=True)
questions_df['part_bundle_id']=questions_df['part']*100000+questions_df['bundle_id']
questions_df.part_bundle_id=questions_df.part_bundle_id.astype('int32')
questions_df.rename(columns={'question_id':'content_id'}, inplace=True)
questions_df = pd.merge(questions_df, content_explation_agg, on='content_id', how='left',right_index=True)
del content_explation_agg
questions_df['content_correctness'] = questions_df['content_id'].map(content_agg['sum'] / content_agg['count'])
questions_df.content_correctness=questions_df.content_correctness.astype('float16')
questions_df['content_correctness_std'] = questions_df['content_id'].map(content_agg['var'])
questions_df.content_correctness_std=questions_df.content_correctness_std.astype('float16')
questions_df['content_uncorrect_count'] = questions_df['content_id'].map(content_agg['count']-content_agg['sum'] ).astype('int32')
questions_df['content_correct_count'] = questions_df['content_id'].map(content_agg['sum'] ).astype('int32')
questions_df['content_elapsed_time_mean'] = questions_df['content_id'].map(content_elapsed_time_agg['mean'])
questions_df.content_elapsed_time_mean=questions_df.content_elapsed_time_mean.astype('float16')
questions_df['content_had_explanation_mean'] = questions_df['content_id'].map(content_had_explanation_agg['mean'])
questions_df.content_had_explanation_mean=questions_df.content_had_explanation_mean.astype('float16')
del content_elapsed_time_agg
del content_had_explanation_agg
gc.collect()
<categorify> | Digit Recognizer |
|
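# Note: content_agg, content_elapsed_time_agg and content_had_explanation_agg are used in the
# cell above but defined outside this excerpt. A minimal sketch of how such per-question
# aggregates could be built, assuming a Riiid-style train_df with 'content_id',
# 'answered_correctly', 'prior_question_elapsed_time' and 'prior_question_had_explanation':
# content_agg = train_df.groupby('content_id')['answered_correctly'].agg(['sum', 'count', 'var'])
# content_elapsed_time_agg = train_df.groupby('content_id')['prior_question_elapsed_time'].agg(['mean'])
# content_had_explanation_agg = train_df.groupby('content_id')['prior_question_had_explanation'].agg(['mean'])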
14,584,011 | part_agg = questions_df.groupby('part')['content_correctness'].agg(['mean', 'var'])
questions_df['part_correctness_mean'] = questions_df['part'].map(part_agg['mean'])
questions_df['part_correctness_std'] = questions_df['part'].map(part_agg['var'])
questions_df.part_correctness_mean=questions_df.part_correctness_mean.astype('float16')
questions_df.part_correctness_std=questions_df.part_correctness_std.astype('float16')
part_agg = questions_df.groupby('part')['content_uncorrect_count'].agg(['sum'])
questions_df['part_uncor_count'] = questions_df['part'].map(part_agg['sum'] ).astype('int32')
part_agg = questions_df.groupby('part')['content_correct_count'].agg(['sum'])
questions_df['part_cor_count'] = questions_df['part'].map(part_agg['sum'] ).astype('int32')
bundle_agg = questions_df.groupby('bundle_id')['content_correctness'].agg(['mean'])
questions_df['bundle_correctness_mean'] = questions_df['bundle_id'].map(bundle_agg['mean'])
questions_df.bundle_correctness_mean=questions_df.bundle_correctness_mean.astype('float16')
<drop_column> | count_network = 5
size_for_network = X_train.shape[0] // count_network
X_train_list = []
X_valid_list = []
Y_train_list = []
Y_valid_list = []
for i in range(count_network):
X_train_list.append(X_train[i * size_for_network :(i + 1)* size_for_network])
Y_train_list.append(Y_train[i * size_for_network :(i + 1)* size_for_network])
X_valid_list.append(
np.concatenate(( X_train[0 * size_for_network : i * size_for_network],
X_train[(i + 1)* size_for_network : count_network * size_for_network]))
)
Y_valid_list.append(
np.concatenate(( Y_train[0 * size_for_network : i * size_for_network],
Y_train[(i + 1)* size_for_network : count_network * size_for_network]))
) | Digit Recognizer |
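# Descriptive note (not from the original): each of the count_network models trains on one
# contiguous 1/count_network slice of X_train/Y_train, while its validation set is the
# concatenation of all remaining slices, so the "validation" split is larger than the training split.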
14,584,011 | del content_agg
del bundle_agg
del part_agg
gc.collect()
<define_variables> | def build_model(lr):
model = models.Sequential()
model.add(Conv2D(96, 3, activation='relu', padding='same', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.4))
model.add(MaxPooling2D(( 2, 2)))
model.add(Conv2D(160, 3, activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.4))
model.add(MaxPooling2D(( 2, 2)))
model.add(Conv2D(256, 3, activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.4))
model.add(MaxPooling2D(( 2, 2)))
model.add(Conv2D(128, 3, activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.4))
model.add(MaxPooling2D(( 2, 2)))
model.add(Conv2D(64, 3, activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.4))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(96, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=optimizers.Adam(lr=lr),
loss='categorical_crossentropy', metrics=['categorical_accuracy'])
return model | Digit Recognizer |
14,584,011 | features_dict = {
'timestamp':'float16',
'user_interaction_count':'int16',
'user_interaction_timestamp_mean':'float32',
'lagtime':'float32',
'lagtime2':'float32',
'lagtime3':'float32',
'content_id':'int16',
'task_container_id':'int16',
'user_lecture_sum':'int16',
'user_lecture_lv':'float16',
'prior_question_elapsed_time':'float32',
'delta_prior_question_elapsed_time':'int32',
'user_correctness':'float16',
'user_uncorrect_count':'int16',
'user_correct_count':'int16',
'content_correctness_std':'float16',
'content_correct_count':'int32',
'content_uncorrect_count':'int32',
'content_elapsed_time_mean':'float16',
'content_had_explanation_mean':'float16',
'content_explation_false_mean':'float16',
'content_explation_true_mean':'float16',
'task_container_correctness':'float16',
'task_container_std':'float16',
'task_container_cor_count':'int32',
'task_container_uncor_count':'int32',
'attempt_no':'int8',
'part':'int8',
'part_correctness_mean':'float16',
'part_correctness_std':'float16',
'part_uncor_count':'int32',
'part_cor_count':'int32',
'tags0': 'int8',
'tags1': 'int8',
'tags2': 'int8',
'tags3': 'int8',
'tags4': 'int8',
'tags5': 'int8',
'part_bundle_id':'int32',
'content_sub_bundle':'int8',
'prior_question_had_explanation':'int8',
'explanation_mean':'float16',
'explanation_false_count':'int16',
'explanation_true_count':'int16',
}
categorical_columns= [
'content_id',
'task_container_id',
'part',
'tags0',
'tags1',
'tags2',
'tags3',
'tags4',
'tags5',
'part_bundle_id',
'content_sub_bundle',
'prior_question_had_explanation',
]
features=list(features_dict.keys())
<init_hyperparams> | list_models = [build_model(lr=1e-2)for _ in range(count_network)]
list_history = []
for i in range(count_network):
checkpoint_path = f'bestmodel{i + 1}.hdf5'
checkpoint = ModelCheckpoint(checkpoint_path, monitor='val_categorical_accuracy',
verbose=0, save_best_only=True, mode='max')
scheduler = LearningRateScheduler(lambda epoch, lr: lr * 0.99, verbose=0)
early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=50, mode='min', verbose=0)
tqdm_callback = tfa.callbacks.TQDMProgressBar(leave_epoch_progress=False,
leave_overall_progress=True,
show_epoch_progress=False,
show_overall_progress=True)
callbacks_list = [
checkpoint,
scheduler,
tqdm_callback,
early_stop
]
print(f'Training {i + 1} network' + '\n' + '-' * 80)
history = list_models[i].fit_generator(datagener.flow(X_train_list[i], Y_train_list[i], batch_size=50),
epochs=350, steps_per_epoch=X_train_list[i].shape[0] // 50,
callbacks=callbacks_list,
verbose=0, validation_data=(X_valid_list[i], Y_valid_list[i]))
list_history.append(history ) | Digit Recognizer |
14,584,011 | flag_lgbm=True
clfs = list()
params = {
'num_leaves': 200,
'max_bin':450,
'feature_fraction': 0.52,
'bagging_fraction': 0.52,
'objective': 'binary',
'learning_rate': 0.05,
"boosting_type": "gbdt",
"metric": 'auc',
}
trains=list()
valids=list()
num=1
for i in range(0,num):
train_df_clf=train_df[1200*10000:2*1200*10000]
print('sample end')
del train_df
users=train_df_clf['user_id'].drop_duplicates()
users=users.sample(frac=0.08)
users_df=pd.DataFrame()
users_df['user_id']=users.values
valid_df_newuser = pd.merge(train_df_clf, users_df, on=['user_id'], how='inner',right_index=True)
del users_df
del users
gc.collect()
train_df_clf.drop(valid_df_newuser.index, inplace=True)
print('pd.merge(train_df_clf, questions_df)')
train_df_clf = pd.merge(train_df_clf, questions_df, on='content_id', how='left',right_index=True)
valid_df_newuser = pd.merge(valid_df_newuser, questions_df, on='content_id', how='left',right_index=True)
print('valid_df')
valid_df=train_df_clf.sample(frac=0.1)
train_df_clf.drop(valid_df.index, inplace=True)
valid_df = valid_df.append(valid_df_newuser)
del valid_df_newuser
gc.collect()
trains.append(train_df_clf)
valids.append(valid_df)
print('train_df_clf length:',len(train_df_clf))
print('valid_df length:',len(valid_df))<drop_column> | for i in range(count_network):
list_models[i].load_weights(f'bestmodel{i + 1}.hdf5')
print(f'Model №{i + 1}')
_, acc = list_models[i].evaluate(X_train_list[i], Y_train_list[i])
_, acc2 = list_models[i].evaluate(X_valid_list[i], Y_valid_list[i])
print() | Digit Recognizer |
14,584,011 | del train_df_clf
del valid_df
gc.collect()<prepare_x_and_y> | def get_predict(models, data, method_voting='soft', count_classes=10):
if method_voting == 'soft':
for_test = np.zeros(( data.shape[0], count_classes))
for i in range(len(models)) :
for_test += models[i].predict(data)
return np.argmax(for_test, axis=1)
elif method_voting == 'hard':
for_test = np.zeros(( data.shape[0], len(models)))
for i in range(len(models)) :
for_test[:, i] += models[i].predict_classes(data)
return np.int32(mode(for_test, axis=1)[0].T.flatten() ) | Digit Recognizer |
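# A self-contained toy illustration of the two voting schemes used by get_predict above
# (the probability arrays here are made up for demonstration, not taken from the notebook):
import numpy as np
from scipy.stats import mode
probs_a = np.array([[0.7, 0.2, 0.1], [0.1, 0.5, 0.4]])   # model A class probabilities
probs_b = np.array([[0.4, 0.5, 0.1], [0.2, 0.3, 0.5]])   # model B class probabilities
soft_vote = np.argmax(probs_a + probs_b, axis=1)          # 'soft': sum probabilities, then argmax
hard_vote = mode(np.stack([probs_a.argmax(1), probs_b.argmax(1)], axis=1), axis=1)[0].ravel()  # 'hard': per-row mode of class votes
print(soft_vote, hard_vote)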
14,584,011 | for i in range(0,num):
X_train_np = trains[i][features].values.astype(np.float32)
X_valid_np = valids[i][features].values.astype(np.float32)
tr_data = lgb.Dataset(X_train_np, label=trains[i][target], feature_name=list(features))
va_data = lgb.Dataset(X_valid_np, label=valids[i][target], feature_name=list(features))
del trains
del valids
del X_train_np
del X_valid_np
gc.collect()
model = lgb.train(
params,
tr_data,
num_boost_round=5000,
valid_sets=[tr_data, va_data],
early_stopping_rounds=50,
feature_name=features,
categorical_feature=categorical_columns,
verbose_eval=50
)
clfs.append(model)
fig,ax = plt.subplots(figsize=(15,15))
lgb.plot_importance(model, ax=ax,importance_type='gain',max_num_features=50)
plt.show()
del tr_data
del va_data
gc.collect()
<import_modules> | submit = pd.DataFrame(get_predict(list_models, X_test), columns=['Label'], index=pd.read_csv('../input/digit-recognizer/sample_submission.csv')['ImageId'])
submit2 = pd.DataFrame(get_predict(list_models, X_test, method_voting='hard'), columns=['Label'],
index=pd.read_csv('../input/digit-recognizer/sample_submission.csv')['ImageId'])
submit.index.name = 'ImageId'
submit.to_csv('submittion.csv')
submit2.index.name = 'ImageId'
submit2.to_csv('submittion2.csv' ) | Digit Recognizer |
14,584,011 | <choose_model_class><EOS> | comparison = submit.join(submit2, lsuffix='_1', rsuffix='_2')
comparison.loc[~(comparison['Label_1'] == comparison['Label_2'])] | Digit Recognizer |
13,709,795 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<data_type_conversions> | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.optimizers import Adam
from keras.callbacks import LearningRateScheduler
from keras.callbacks import EarlyStopping | Digit Recognizer |
13,709,795 | del user_agg
gc.collect()
task_container_sum_dict = task_container_agg['sum'].astype('int32' ).to_dict(defaultdict(int))
task_container_count_dict = task_container_agg['count'].astype('int32' ).to_dict(defaultdict(int))
task_container_std_dict = task_container_agg['var'].astype('float16' ).to_dict(defaultdict(int))
explanation_sum_dict = explanation_agg['sum'].astype('int16' ).to_dict(defaultdict(int))
explanation_count_dict = explanation_agg['count'].astype('int16' ).to_dict(defaultdict(int))
del task_container_agg
del explanation_agg
gc.collect()<data_type_conversions> | train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')
df = train.copy()
df_test = test.copy() | Digit Recognizer |
13,709,795 | user_lecture_sum_dict = user_lecture_agg['sum'].astype('int16' ).to_dict(defaultdict(int))
user_lecture_count_dict = user_lecture_agg['count'].astype('int16' ).to_dict(defaultdict(int))
del user_lecture_agg
gc.collect()<categorify> | df.isnull().any().sum() | Digit Recognizer |
13,709,795 | max_timestamp_u_dict=max_timestamp_u.set_index('user_id' ).to_dict()
max_timestamp_u_dict2=max_timestamp_u2.set_index('user_id' ).to_dict()
max_timestamp_u_dict3=max_timestamp_u3.set_index('user_id' ).to_dict()
user_prior_question_elapsed_time_dict=user_prior_question_elapsed_time.set_index('user_id' ).to_dict()
del max_timestamp_u
del max_timestamp_u2
del max_timestamp_u3
del user_prior_question_elapsed_time
gc.collect()<categorify> | df_test.isnull().any().sum() | Digit Recognizer |
13,709,795 | attempt_no_sum_dict = attempt_no_agg['sum'].to_dict(defaultdict(int))
del attempt_no_agg
gc.collect()
<feature_engineering> | seed = 3141
np.random.seed(seed ) | Digit Recognizer |
13,709,795 | def get_max_attempt(user_id,content_id):
k =(user_id,content_id)
if k in attempt_no_sum_dict.keys() :
attempt_no_sum_dict[k]+=1
return attempt_no_sum_dict[k]
attempt_no_sum_dict[k] = 1
return attempt_no_sum_dict[k]
<split> | X = train.iloc[:,1:]
Y = train.iloc[:,0]
x_train , x_test , y_train , y_test = train_test_split(X, Y , test_size=0.1, random_state=seed ) | Digit Recognizer |
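# The attempt counter get_max_attempt defined above can be written more compactly with a
# defaultdict; this is a sketch of equivalent logic, not code from the original kernel
# (attempt_counter is a hypothetical name, keyed by (user_id, content_id)):
from collections import defaultdict
attempt_counter = defaultdict(int)
def get_max_attempt_v2(user_id, content_id):
    attempt_counter[(user_id, content_id)] += 1   # first call yields 1, later calls increment
    return attempt_counter[(user_id, content_id)]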
13,709,795 | env = riiideducation.make_env()
iter_test = env.iter_test()
prior_test_df = None
prev_test_df1 = None
N=[0.4,0.6]<groupby> | x_train = x_train.values.reshape(-1, 28, 28, 1)
x_test = x_test.values.reshape(-1, 28, 28, 1)
df_test=df_test.values.reshape(-1,28,28,1 ) | Digit Recognizer |
13,709,795 | %%time
for(test_df, sample_prediction_df)in iter_test:
test_df1=test_df.copy()
if(prev_test_df1 is not None):
prev_test_df1['answered_correctly'] = eval(test_df1['prior_group_answers_correct'].iloc[0])
prev_test_df1 = prev_test_df1[prev_test_df1.content_type_id == False]
prev_group = prev_test_df1[['user_id', 'content_id', 'answered_correctly']].groupby('user_id' ).apply(lambda r:(
r['content_id'].values,
r['answered_correctly'].values))
for prev_user_id in prev_group.index:
if prev_user_id in group.index:
group[prev_user_id] =(
np.append(group[prev_user_id][0], prev_group[prev_user_id][0])[-MAX_SEQ:],
np.append(group[prev_user_id][1], prev_group[prev_user_id][1])[-MAX_SEQ:]
)
else:
group[prev_user_id] =(
prev_group[prev_user_id][0],
prev_group[prev_user_id][1]
)
prev_test_df1 = test_df1.copy()
test_df1 = test_df1[test_df1.content_type_id == False]
test_dataset = TestDataset(group, test_df1, skills)
test_dataloader = DataLoader(test_dataset, batch_size=51200, shuffle=False)
outs = []
for item in test_dataloader:
x = item[0].to(device ).long()
target_id = item[1].to(device ).long()
with torch.no_grad() :
output, att_weight = nn_model(x, target_id)
outs.extend(torch.sigmoid(output)[:, -1].view(-1 ).data.cpu().numpy())
if prior_test_df is not None:
prior_test_df[target] = eval(test_df['prior_group_answers_correct'].iloc[0])
prior_test_df = prior_test_df[prior_test_df[target] != -1].reset_index(drop=True)
prior_test_df['prior_question_had_explanation'].fillna(False, inplace=True)
prior_test_df.prior_question_had_explanation=prior_test_df.prior_question_had_explanation.astype('int8')
user_ids = prior_test_df['user_id'].values
targets = prior_test_df[target].values
for user_id, answered_correctly in zip(user_ids,targets):
user_sum_dict[user_id] += answered_correctly
user_count_dict[user_id] += 1
prior_test_df = test_df.copy()
question_len=len(test_df[test_df['content_type_id'] == 0])
test_df['prior_question_had_explanation'].fillna(False, inplace=True)
test_df.prior_question_had_explanation=test_df.prior_question_had_explanation.astype('int8')
test_df['prior_question_elapsed_time'].fillna(prior_question_elapsed_time_mean, inplace=True)
user_lecture_sum = np.zeros(question_len, dtype=np.int16)
user_lecture_count = np.zeros(question_len, dtype=np.int16)
user_sum = np.zeros(question_len, dtype=np.int16)
user_count = np.zeros(question_len, dtype=np.int16)
task_container_sum = np.zeros(question_len, dtype=np.int32)
task_container_count = np.zeros(question_len, dtype=np.int32)
task_container_std = np.zeros(question_len, dtype=np.float16)
explanation_sum = np.zeros(question_len, dtype=np.int32)
explanation_count = np.zeros(question_len, dtype=np.int32)
delta_prior_question_elapsed_time = np.zeros(question_len, dtype=np.int32)
attempt_no_count = np.zeros(question_len, dtype=np.int16)
lagtime = np.zeros(question_len, dtype=np.float32)
lagtime2 = np.zeros(question_len, dtype=np.float32)
lagtime3 = np.zeros(question_len, dtype=np.float32)
i=0
for j,(user_id,prior_question_had_explanation,content_type_id,prior_question_elapsed_time,timestamp, content_id,task_container_id)in enumerate(zip(test_df['user_id'].values,test_df['prior_question_had_explanation'].values,test_df['content_type_id'].values,test_df['prior_question_elapsed_time'].values,test_df['timestamp'].values, test_df['content_id'].values, test_df['task_container_id'].values)) :
user_lecture_sum_dict[user_id] += content_type_id
user_lecture_count_dict[user_id] += 1
if(content_type_id==1):
x=1
if(content_type_id==0):
user_lecture_sum[i] = user_lecture_sum_dict[user_id]
user_lecture_count[i] = user_lecture_count_dict[user_id]
user_sum[i] = user_sum_dict[user_id]
user_count[i] = user_count_dict[user_id]
task_container_sum[i] = task_container_sum_dict[task_container_id]
task_container_count[i] = task_container_count_dict[task_container_id]
task_container_std[i]=task_container_std_dict[task_container_id]
explanation_sum_dict[user_id] += prior_question_had_explanation
explanation_count_dict[user_id] += 1
explanation_sum[i] = explanation_sum_dict[user_id]
explanation_count[i] = explanation_count_dict[user_id]
if user_id in max_timestamp_u_dict['max_time_stamp'].keys() :
lagtime[i]=timestamp-max_timestamp_u_dict['max_time_stamp'][user_id]
if(max_timestamp_u_dict2['max_time_stamp2'][user_id]==lagtime_mean2):
lagtime2[i]=lagtime_mean2
lagtime3[i]=lagtime_mean3
else:
lagtime2[i]=timestamp-max_timestamp_u_dict2['max_time_stamp2'][user_id]
if(max_timestamp_u_dict3['max_time_stamp3'][user_id]==lagtime_mean3):
lagtime3[i]=lagtime_mean3
else:
lagtime3[i]=timestamp-max_timestamp_u_dict3['max_time_stamp3'][user_id]
max_timestamp_u_dict3['max_time_stamp3'][user_id]=max_timestamp_u_dict2['max_time_stamp2'][user_id]
max_timestamp_u_dict2['max_time_stamp2'][user_id]=max_timestamp_u_dict['max_time_stamp'][user_id]
max_timestamp_u_dict['max_time_stamp'][user_id]=timestamp
else:
lagtime[i]=lagtime_mean
max_timestamp_u_dict['max_time_stamp'].update({user_id:timestamp})
lagtime2[i]=lagtime_mean2
max_timestamp_u_dict2['max_time_stamp2'].update({user_id:lagtime_mean2})
lagtime3[i]=lagtime_mean3
max_timestamp_u_dict3['max_time_stamp3'].update({user_id:lagtime_mean3})
if user_id in user_prior_question_elapsed_time_dict['prior_question_elapsed_time'].keys() :
delta_prior_question_elapsed_time[i]=prior_question_elapsed_time-user_prior_question_elapsed_time_dict['prior_question_elapsed_time'][user_id]
user_prior_question_elapsed_time_dict['prior_question_elapsed_time'][user_id]=prior_question_elapsed_time
else:
delta_prior_question_elapsed_time[i]=delta_prior_question_elapsed_time_mean
user_prior_question_elapsed_time_dict['prior_question_elapsed_time'].update({user_id:prior_question_elapsed_time})
i=i+1
test_df = test_df[test_df['content_type_id'] == 0].reset_index(drop=True)
test_df=test_df.merge(questions_df.loc[questions_df.index.isin(test_df['content_id'])],
how='left', on='content_id', right_index=True)
test_df['user_lecture_lv'] = user_lecture_sum / user_lecture_count
test_df['user_lecture_sum'] = user_lecture_sum
test_df['user_interaction_count'] = user_lecture_count
test_df['user_interaction_timestamp_mean'] = test_df['timestamp']/user_lecture_count
test_df['user_correctness'] = user_sum / user_count
test_df['user_uncorrect_count'] =user_count-user_sum
test_df['user_correct_count'] =user_sum
test_df['task_container_correctness'] = task_container_sum / task_container_count
test_df['task_container_cor_count'] = task_container_sum
test_df['task_container_uncor_count'] =task_container_count-task_container_sum
test_df['task_container_std'] = task_container_std
test_df['explanation_mean'] = explanation_sum / explanation_count
test_df['explanation_true_count'] = explanation_sum
test_df['explanation_false_count'] = explanation_count-explanation_sum
test_df['delta_prior_question_elapsed_time'] = delta_prior_question_elapsed_time
test_df["attempt_no"] = test_df[["user_id", "content_id"]].apply(lambda row: get_max_attempt(row["user_id"], row["content_id"]), axis=1)
test_df["lagtime"]=lagtime
test_df["lagtime2"]=lagtime2
test_df["lagtime3"]=lagtime3
test_df['timestamp']=test_df['timestamp']/(1000*3600)
test_df.timestamp=test_df.timestamp.astype('float16')
test_df['lagtime']=test_df['lagtime']/(1000*3600)
test_df.lagtime=test_df.lagtime.astype('float32')
test_df['lagtime2']=test_df['lagtime2']/(1000*3600)
test_df.lagtime2=test_df.lagtime2.astype('float32')
test_df['lagtime3']=test_df['lagtime3']/(1000*3600)
test_df.lagtime3=test_df.lagtime3.astype('float32')
test_df['user_interaction_timestamp_mean']=test_df['user_interaction_timestamp_mean']/(1000*3600)
test_df.user_interaction_timestamp_mean=test_df.user_interaction_timestamp_mean.astype('float32')
test_df['user_correctness'].fillna(0.67, inplace=True)
sub_preds = np.zeros(test_df.shape[0])
for i, model in enumerate(clfs, 1):
test_preds = model.predict(test_df[features])
sub_preds += test_preds
o2=sub_preds / len(clfs)
test_df[target]=0.5*np.array(outs)+0.5*np.array(o2)
env.predict(test_df[['row_id', target]])
<install_modules> | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False ) | Digit Recognizer |
13,709,795 | !pip install --quiet /kaggle/input/kerasapplications
!pip install --quiet /kaggle/input/efficientnet-git<set_options> | x_train = x_train.astype("float32")/255
x_test = x_test.astype("float32")/255
df_test = df_test.astype("float32")/255 | Digit Recognizer |
13,709,795 | def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed = 0
seed_everything(seed)
warnings.filterwarnings('ignore' )<define_variables> | datagen.fit(x_train ) | Digit Recognizer |
13,709,795 | BATCH_SIZE = 16 * REPLICAS
HEIGHT = 512
WIDTH = 512
CHANNELS = 3
N_CLASSES = 5
TTA_STEPS = 5<normalization> | y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
print(y_train[0] ) | Digit Recognizer |
13,709,795 | def data_augment(image, label):
p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_pixel_1 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_pixel_2 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if p_spatial >.75:
image = tf.image.transpose(image)
if p_rotate >.75:
image = tf.image.rot90(image, k=3)
elif p_rotate >.5:
image = tf.image.rot90(image, k=2)
elif p_rotate >.25:
image = tf.image.rot90(image, k=1)
if p_crop >.6:
if p_crop >.9:
image = tf.image.central_crop(image, central_fraction=.5)
elif p_crop >.8:
image = tf.image.central_crop(image, central_fraction=.6)
elif p_crop >.7:
image = tf.image.central_crop(image, central_fraction=.7)
else:
image = tf.image.central_crop(image, central_fraction=.8)
elif p_crop >.3:
crop_size = tf.random.uniform([], int(HEIGHT*.6), HEIGHT, dtype=tf.int32)
image = tf.image.random_crop(image, size=[crop_size, crop_size, CHANNELS])
return image, label<normalization> | model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=1, padding='same', data_format='channels_last',
input_shape=(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=1, padding='same', data_format='channels_last'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=2, padding='valid'))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', strides=1, padding='same', data_format='channels_last'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', activation='relu', data_format='channels_last'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), padding='valid', strides=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax')) | Digit Recognizer |
13,709,795 | def transform_rotation(image, height, rotation):
DIM = height
XDIM = DIM%2
rotation = rotation * tf.random.uniform([1],dtype='float32')
rotation = math.pi * rotation / 180.
c1 = tf.math.cos(rotation)
s1 = tf.math.sin(rotation)
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
rotation_matrix = tf.reshape(tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3])
x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM)
y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM])
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack([x,y,z])
idx2 = K.dot(rotation_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]])
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
def transform_shear(image, height, shear):
DIM = height
XDIM = DIM%2
shear = shear * tf.random.uniform([1],dtype='float32')
shear = math.pi * shear / 180.
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
c2 = tf.math.cos(shear)
s2 = tf.math.sin(shear)
shear_matrix = tf.reshape(tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3])
x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM)
y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM])
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack([x,y,z])
idx2 = K.dot(shear_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]])
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3] )<choose_model_class> | optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999 ) | Digit Recognizer |
13,709,795 | def model_fn(input_shape, N_CLASSES):
inputs = L.Input(shape=input_shape, name='input_image')
base_model = efn.EfficientNetB4(input_tensor=inputs,
include_top=False,
weights=None,
pooling='avg')
x = L.Dropout (.5 )(base_model.output)
output = L.Dense(N_CLASSES, activation='softmax', name='output' )(x)
model = Model(inputs=inputs, outputs=output)
return model
with strategy.scope() :
model = model_fn(( None, None, CHANNELS), N_CLASSES)
model.summary()<predict_on_test> | model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"] ) | Digit Recognizer |
13,709,795 | files_path = f'{database_base_path}test_images/'
test_size = len(os.listdir(files_path))
test_preds = np.zeros(( test_size, N_CLASSES))
for model_path in model_path_list:
print(model_path)
K.clear_session()
model.load_weights(model_path)
if TTA_STEPS > 0:
test_ds = get_dataset(files_path, tta=True ).repeat()
ct_steps = TTA_STEPS *(( test_size/BATCH_SIZE)+ 1)
preds = model.predict(test_ds, steps=ct_steps, verbose=1)[:(test_size * TTA_STEPS)]
preds = np.mean(preds.reshape(test_size, TTA_STEPS, N_CLASSES, order='F'), axis=1)
test_preds += preds / len(model_path_list)
else:
test_ds = get_dataset(files_path, tta=False)
x_test = test_ds.map(lambda image, image_name: image)
test_preds += model.predict(x_test)/ len(model_path_list)
test_preds = np.argmax(test_preds, axis=-1)
test_names_ds = get_dataset(files_path)
image_names = [img_name.numpy().decode('utf-8')for img, img_name in iter(test_names_ds.unbatch())]<save_to_csv> | reduce_lr = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x ) | Digit Recognizer |
13,709,795 | submission = pd.DataFrame({'image_id': image_names, 'label': test_preds})
submission.to_csv('submission.csv', index=False)
display(submission.head() )<define_variables> | decays = [(lambda x: 1e-3 * 0.9 ** x )(x)for x in range(10)]
i=1
for lr in decays:
print("Epoch " + str(i)+" Learning Rate: " + str(lr))
i+=1 | Digit Recognizer |
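# The reduce_lr callback and the decays list above implement the same exponential schedule,
# lr(epoch) = 1e-3 * 0.9**epoch; an equivalent, more direct way to print it (illustrative only):
for epoch, lr in enumerate(1e-3 * 0.9 ** x for x in range(10)):
    print(f"Epoch {epoch + 1} Learning Rate: {lr}")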
13,709,795 | tez_path = '../input/tez-lib/'
effnet_path = '../input/efficientnet-pytorch/'
sys.path.append(tez_path)
sys.path.append(effnet_path)
<feature_engineering> | early_stopping = EarlyStopping(
min_delta=0.001,
patience=20,
restore_best_weights=True,
) | Digit Recognizer |
13,709,795 | class LeafModel(tez.Model):
def __init__(self, num_classes):
super().__init__()
self.effnet = EfficientNet.from_name("efficientnet-b4")
self.dropout = nn.Dropout(0.1)
self.out = nn.Linear(1792, num_classes)
self.step_scheduler_after = "epoch"
def forward(self, image, targets=None):
batch_size, _, _, _ = image.shape
x = self.effnet.extract_features(image)
x = F.adaptive_avg_pool2d(x, 1 ).reshape(batch_size, -1)
outputs = self.out(self.dropout(x))
return outputs, None, None<set_options> | batch_size = 64
epochs = 50 | Digit Recognizer |
13,709,795 | test_aug = albumentations.Compose([
albumentations.RandomResizedCrop(256, 256),
albumentations.Transpose(p=0.5),
albumentations.HorizontalFlip(p=0.5),
albumentations.VerticalFlip(p=0.5),
albumentations.HueSaturationValue(
hue_shift_limit=0.2,
sat_shift_limit=0.2,
val_shift_limit=0.2,
p=0.5
),
albumentations.RandomBrightnessContrast(
brightness_limit=(-0.1,0.1),
contrast_limit=(-0.1, 0.1),
p=0.5
),
albumentations.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
max_pixel_value=255.0,
p=1.0
)
], p=1.)
<load_from_csv> | history = model.fit_generator(datagen.flow(x_train, y_train, batch_size = batch_size), epochs = epochs,
validation_data =(x_test, y_test), verbose=1,
steps_per_epoch=x_train.shape[0] // batch_size,
callbacks = [reduce_lr] ) | Digit Recognizer |
13,709,795 | dfx = pd.read_csv("../input/cassava-leaf-disease-classification/sample_submission.csv")
image_path = "../input/cassava-leaf-disease-classification/test_images/"
test_image_paths = [os.path.join(image_path, x)for x in dfx.image_id.values]
test_targets = dfx.label.values
test_dataset = ImageDataset(
image_paths=test_image_paths,
targets=test_targets,
augmentations=test_aug,
)
<load_from_csv> | import matplotlib.pyplot as plt | Digit Recognizer |
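# matplotlib is imported above but the training curves are not plotted in this excerpt; a hedged
# sketch of the usual follow-up, assuming the `history` object returned by model.fit_generator
# (history keys may be 'acc'/'val_acc' on older Keras versions):
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.legend()
plt.show()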
13,709,795 | <predict_on_test><EOS> | pred_digits_test = np.argmax(model.predict(df_test),axis=1)
image_id_test = []
for i in range(len(pred_digits_test)) :
image_id_test.append(i+1)
d = {'ImageId':image_id_test,'Label':pred_digits_test}
answer = pd.DataFrame(d)
answer.to_csv('answer.csv',index=False ) | Digit Recognizer |
13,706,409 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<save_to_csv> | %matplotlib inline
%load_ext autoreload
%autoreload 2
| Digit Recognizer |
13,706,409 | final_preds = final_preds.argmax(axis=1)
dfx.label = final_preds
dfx.to_csv("submission.csv", index=False )<save_to_csv> | train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" ) | Digit Recognizer |
13,706,409 | df = pd.read_csv('/kaggle/input/finalsub3/finalsub2.csv')
df.to_csv('submission.csv', index=False )<set_options> | print("Training Data : ")
train_data.head(3 ).iloc[:,:17] | Digit Recognizer |
13,706,409 | warnings.filterwarnings("ignore" )<load_from_csv> | train_data_norm = train_data.iloc[:, 1:] / 255.0
test_data_norm = test_data / 255.0 | Digit Recognizer |
13,706,409 | train_data = pd.read_csv('/kaggle/input/pubg-finish-placement-prediction/train_V2.csv')
test_data = pd.read_csv('/kaggle/input/pubg-finish-placement-prediction/test_V2.csv')
train_data.describe().drop('count' ).T<filter> | num_examples_train = train_data.shape[0]
num_examples_test = test_data.shape[0]
n_h = 32
n_w = 32
n_c = 3 | Digit Recognizer |
13,706,409 | train_data[train_data['winPlacePerc'].isnull() ]<feature_engineering> | Train_input_images = np.zeros(( num_examples_train, n_h, n_w, n_c))
Test_input_images = np.zeros(( num_examples_test, n_h, n_w, n_c)) | Digit Recognizer |
13,706,409 |
mapper = lambda x: 'solo' if('solo'in x)else 'duo' if('duo' in x)or('crash'in x)else 'squad'
train_data['matchType'] = train_data['matchType'].apply(mapper)
match_type_counts=train_data.groupby('matchId')['matchType'].first().value_counts().sort_values(ascending=False )<concatenate> | for example in range(num_examples_train):
Train_input_images[example,:28,:28,0] = train_data.iloc[example, 1:].values.reshape(28,28)
Train_input_images[example,:28,:28,1] = train_data.iloc[example, 1:].values.reshape(28,28)
Train_input_images[example,:28,:28,2] = train_data.iloc[example, 1:].values.reshape(28,28)
for example in range(num_examples_test):
Test_input_images[example,:28,:28,0] = test_data.iloc[example, :].values.reshape(28,28)
Test_input_images[example,:28,:28,1] = test_data.iloc[example, :].values.reshape(28,28)
Test_input_images[example,:28,:28,2] = test_data.iloc[example, :].values.reshape(28,28 ) | Digit Recognizer |
13,706,409 | all_data = train_data.append(test_data, sort=False ).reset_index(drop=True)
del train_data, test_data
gc.collect()<feature_engineering> | for example in range(num_examples_train):
Train_input_images[example] = cv2.resize(Train_input_images[example],(n_h, n_w))
for example in range(num_examples_test):
Test_input_images[example] = cv2.resize(Test_input_images[example],(n_h, n_w)) | Digit Recognizer |
13,706,409 | match = all_data.groupby('matchId')
all_data['killsPerc'] = match['kills'].rank(pct=True ).values
all_data['killPlacePerc'] = match['killPlace'].rank(pct=True ).values
all_data['walkDistancePerc'] = match['walkDistance'].rank(pct=True ).values
all_data['walkPerc_killsPerc'] = all_data['walkDistancePerc'] / all_data['killsPerc']
all_data['_totalDistance'] = all_data['rideDistance'] + all_data['walkDistance'] + all_data['swimDistance']<data_type_conversions> | Train_labels = np.array(train_data.iloc[:, 0] ) | Digit Recognizer |
13,706,409 | def fillInf(df, val):
numcols = df.select_dtypes(include='number' ).columns
cols = numcols[numcols != 'winPlacePerc']
df[df == np.Inf] = np.NaN
df[df == np.NINF] = np.NaN
for c in cols: df[c].fillna(val, inplace=True )<feature_engineering> | image_generator = ImageDataGenerator(
rotation_range=27,
width_shift_range=0.3,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=False,
samplewise_center=True,
samplewise_std_normalization=True
)
validation_datagen = ImageDataGenerator() | Digit Recognizer |
13,706,409 | all_data['_healthItems'] = all_data['heals'] + all_data['boosts']
all_data['_headshotKillRate'] = all_data['headshotKills'] / all_data['kills']
all_data['_killPlaceOverMaxPlace'] = all_data['killPlace'] / all_data['maxPlace']
all_data['_killsOverWalkDistance'] = all_data['kills'] / all_data['walkDistance']<drop_column> | pretrained_model = keras.applications.resnet50.ResNet50(input_shape=(n_h, n_w, n_c),
include_top=False, weights='imagenet')
model = keras.Sequential([
pretrained_model,
keras.layers.Flatten() ,
keras.layers.Dense(units=60, activation='relu'),
keras.layers.Dense(units=10, activation='softmax')
] ) | Digit Recognizer |
13,706,409 | all_data.drop(['boosts','heals','killStreaks','DBNOs'], axis=1, inplace=True)
all_data.drop(['headshotKills','roadKills','vehicleDestroys'], axis=1, inplace=True)
all_data.drop(['rideDistance','swimDistance','matchDuration'], axis=1, inplace=True)
all_data.drop(['rankPoints','killPoints','winPoints'], axis=1, inplace=True )<groupby> | Optimizer = 'RMSprop'
model.compile(optimizer=Optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'] ) | Digit Recognizer |
13,706,409 | match = all_data.groupby(['matchId'])
group = all_data.groupby(['matchId','groupId','matchType'])
agg_col = list(all_data.columns)
exclude_agg_col = ['Id','matchId','groupId','matchType','maxPlace','numGroups','winPlacePerc']
for c in exclude_agg_col:
agg_col.remove(c)
sum_col = ['kills','killPlace','damageDealt','walkDistance','_healthItems']<merge> | train_images, dev_images, train_labels, dev_labels = train_test_split(Train_input_images,
Train_labels,
test_size=0.1, train_size=0.9,
shuffle=True,
random_state=44)
test_images = Test_input_images | Digit Recognizer |
13,706,409 | minKills = all_data.sort_values(['matchId','groupId','kills','killPlace'] ).groupby(
['matchId','groupId','kills'] ).first().reset_index().copy()
for n in np.arange(4):
c = 'kills_' + str(n)+ '_Place'
nKills =(minKills['kills'] == n)
minKills.loc[nKills, c] = minKills[nKills].groupby(['matchId'])['killPlace'].rank().values
match_data = pd.merge(match_data, minKills[nKills][['matchId','groupId',c]], how='left')
del minKills, nKills
match_data = reduce_mem_usage(match_data )<merge> | train_datagen = ImageDataGenerator(
rotation_range=27,
width_shift_range=0.3,
height_shift_range=0.2,
shear_range=0.3,
zoom_range=0.2,
horizontal_flip=False)
validation_datagen = ImageDataGenerator() | Digit Recognizer |
13,706,409 |
all_data = pd.merge(all_data, match_data)
del match_data
gc.collect()
all_data['enemy.players'] = all_data['m.players'] - all_data['players']
for c in sum_col:
all_data['p.max_msum.' + c] = all_data['max.' + c] / all_data['m.sum.' + c]
all_data['p.max_mmax.' + c] = all_data['max.' + c] / all_data['m.max.' + c]
all_data.drop(['m.sum.' + c, 'm.max.' + c], axis=1, inplace=True)
fillInf(all_data, 0 )<merge> | class myCallback(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('accuracy')> 0.999999):
print("Stop training!")
self.model.stop_training = True | Digit Recognizer |
13,706,409 | match = all_data.groupby('matchId')
matchRank = match[numcols].rank(pct=True ).rename(columns=lambda s: 'rank.' + s)
all_data = reduce_mem_usage(pd.concat([all_data, matchRank], axis=1))
rank_col = matchRank.columns
del matchRank
gc.collect()
match = all_data.groupby('matchId')
matchRank = match[rank_col].max().rename(columns=lambda s: 'max.' + s ).reset_index()
all_data = pd.merge(all_data, matchRank)
for c in numcols:
all_data['rank.' + c] = all_data['rank.' + c] / all_data['max.rank.' + c]
all_data.drop(['max.rank.' + c], axis=1, inplace=True)
del matchRank
gc.collect()<merge> | EPOCHS = 5
batch_size = 212
history = model.fit_generator(train_datagen.flow(train_images,train_labels, batch_size=batch_size),
steps_per_epoch=train_images.shape[0] / batch_size,
epochs=EPOCHS,
validation_data=validation_datagen.flow(dev_images,dev_labels,
batch_size=batch_size),
validation_steps=dev_images.shape[0] / batch_size,
callbacks=[callbacks] ) | Digit Recognizer |
13,706,409 | killMinorRank = all_data[['matchId','min.kills','max.killPlace']].copy()
group = killMinorRank.groupby(['matchId','min.kills'])
killMinorRank['rank.minor.maxKillPlace'] = group.rank(pct=True ).values
all_data = pd.merge(all_data, killMinorRank)
killMinorRank = all_data[['matchId','max.kills','min.killPlace']].copy()
group = killMinorRank.groupby(['matchId','max.kills'])
killMinorRank['rank.minor.minKillPlace'] = group.rank(pct=True ).values
all_data = pd.merge(all_data, killMinorRank)
del killMinorRank
gc.collect()<count_unique_values> | submission = pd.read_csv('../input/digit-recognizer-submission/submission.csv')
submission.to_csv('digit_submission.csv', index=False ) | Digit Recognizer |
13,706,409 | constant_column = [col for col in all_data.columns if all_data[col].nunique() == 1]
all_data.drop(constant_column, axis=1, inplace=True )<feature_engineering> | mnist_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
mnist_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" ) | Digit Recognizer |
13,706,409 |
all_data['matchType'] = all_data['matchType'].apply(mapper)
all_data = pd.concat([all_data, pd.get_dummies(all_data['matchType'])], axis=1)
all_data.drop(['matchType'], axis=1, inplace=True)
all_data['matchId'] = all_data['matchId'].apply(lambda x: int(x,16))
all_data['groupId'] = all_data['groupId'].apply(lambda x: int(x,16))<count_missing_values> | mnist_train_data = mnist_train.loc[:, "pixel0":]
mnist_train_label = mnist_train.loc[:, "label"]
mnist_train_data = mnist_train_data/255.0
mnist_test = mnist_test/255.0 | Digit Recognizer |
13,706,409 | null_cnt = all_data.isnull().sum().sort_values()<categorify> | standardized_scalar = StandardScaler()
standardized_data = standardized_scalar.fit_transform(mnist_train_data)
standardized_data.shape | Digit Recognizer |
13,706,409 | cols = [col for col in all_data.columns if col not in ['Id','matchId','groupId']]
for i, t in all_data.loc[:, cols].dtypes.iteritems() :
if t == object:
all_data[i] = pd.factorize(all_data[i])[0]
all_data = reduce_mem_usage(all_data )<prepare_x_and_y> | cov_matrix = np.matmul(standardized_data.T, standardized_data)
cov_matrix.shape | Digit Recognizer |
13,706,409 | X_train = all_data[all_data['winPlacePerc'].notnull() ].reset_index(drop=True)
X_test = all_data[all_data['winPlacePerc'].isnull() ].drop(['winPlacePerc'], axis=1 ).reset_index(drop=True)
del all_data
gc.collect()
Y_train = X_train.pop('winPlacePerc')
X_test_grp = X_test[['matchId','groupId']].copy()
train_matchId = X_train['matchId']
X_train.drop(['matchId','groupId'], axis=1, inplace=True)
X_test.drop(['matchId','groupId'], axis=1, inplace=True )<train_model> | lambdas, vectors = eigh(cov_matrix, eigvals=(782, 783))
vectors.shape | Digit Recognizer |
13,706,409 | params={'learning_rate': 0.05,
'objective':'mae',
'metric':'mae',
'num_leaves': 128,
'verbose': 1,
'random_state':42,
'bagging_fraction': 0.7,
'feature_fraction': 0.7
}
reg = lgb.LGBMRegressor(**params, n_estimators=10000)
reg.fit(X_train, Y_train)
pred = reg.predict(X_test, num_iteration=reg.best_iteration_ )<concatenate> | new_coordinates = np.matmul(vectors, standardized_data.T)
print(new_coordinates.shape)
new_coordinates = np.vstack(( new_coordinates, mnist_train_label)).T | Digit Recognizer |
13,706,409 | X_test_grp['_nofit.winPlacePerc'] = pred
group = X_test_grp.groupby(['matchId'])
X_test_grp['winPlacePerc'] = pred
X_test_grp['_rank.winPlacePerc'] = group['winPlacePerc'].rank(method='min')
X_test = pd.concat([X_test, X_test_grp], axis=1 )<feature_engineering> | df_new = pd.DataFrame(new_coordinates, columns=["f1", "f2", "labels"])
df_new.head() | Digit Recognizer |
13,706,409 | fullgroup =(X_test['numGroups'] == X_test['maxPlace'])
subset = X_test.loc[fullgroup]
X_test.loc[fullgroup, 'winPlacePerc'] =(subset['_rank.winPlacePerc'].values - 1)/(subset['maxPlace'].values - 1)
subset = X_test.loc[~fullgroup]
gap = 1.0 /(subset['maxPlace'].values - 1)
new_perc = np.around(subset['winPlacePerc'].values / gap)* gap
X_test.loc[~fullgroup, 'winPlacePerc'] = new_perc
X_test['winPlacePerc'] = X_test['winPlacePerc'].clip(lower=0,upper=1 )<feature_engineering> | pca = decomposition.PCA()
pca.n_components = 2
pca_data = pca.fit_transform(standardized_data)
pca_data.shape | Digit Recognizer |
13,706,409 | X_test.loc[X_test['maxPlace'] == 0, 'winPlacePerc'] = 0
X_test.loc[X_test['maxPlace'] == 1, 'winPlacePerc'] = 1
X_test.loc[(X_test['maxPlace'] > 1)&(X_test['numGroups'] == 1), 'winPlacePerc'] = 0<save_to_csv> | pca_data = np.vstack(( pca_data.T, mnist_train_label)).T | Digit Recognizer |
13,706,409 | test = pd.read_csv('/kaggle/input/pubg-finish-placement-prediction/test_V2.csv')
test['matchId'] = test['matchId'].apply(lambda x: int(x,16))
test['groupId'] = test['groupId'].apply(lambda x: int(x,16))
submission = pd.merge(test, X_test[['matchId','groupId','winPlacePerc']])
submission = submission[['Id','winPlacePerc']]
submission.to_csv("submission.csv", index=False )<set_options> | df_PCA = pd.DataFrame(new_coordinates, columns=["f1", "f2", "labels"])
df_PCA.head() | Digit Recognizer |
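# A hedged sketch of the usual next step: visualising the two PCA components held in df_PCA,
# coloured by digit label (seaborn/matplotlib assumed available, as elsewhere in these kernels):
import matplotlib.pyplot as plt
import seaborn as sns
sns.scatterplot(data=df_PCA, x='f1', y='f2', hue='labels', palette='tab10', s=5)
plt.show()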
13,706,409 | warnings.filterwarnings('ignore')
%matplotlib inline
<load_from_csv> | mnist_train_data = np.array(mnist_train_data)
mnist_train_label = np.array(mnist_train_label ) | Digit Recognizer |
13,706,409 | train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')
submission = pd.read_csv('../input/digit-recognizer/sample_submission.csv' )<prepare_x_and_y> | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Lambda, Flatten, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPool2D, AvgPool2D
from tensorflow.keras.optimizers import Adadelta
from keras.utils.np_utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import LearningRateScheduler | Digit Recognizer |
13,706,409 | X_train = train.drop(['label'], axis=1)
y_train = train['label']<feature_engineering> | nclasses = mnist_train_label.max() - mnist_train_label.min() + 1
mnist_train_label = to_categorical(mnist_train_label, num_classes = nclasses)
print("Shape of ytrain after encoding: ", mnist_train_label.shape ) | Digit Recognizer |
13,706,409 | X_train /= 255.0
test /= 255.0<train_model> | def build_model(input_shape=(28, 28, 1)) :
model = Sequential()
model.add(Conv2D(32, kernel_size = 3, activation='relu', input_shape = input_shape))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, kernel_size = 4, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
return model
def compile_model(model, optimizer='adam', loss='categorical_crossentropy'):
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
def train_model(model, train, test, epochs, split):
history = model.fit(train, test, shuffle=True, epochs=epochs, validation_split=split)
return history | Digit Recognizer |
13,706,409 | X_train1 = X_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1 )<categorify> | cnn_model = build_model(( 28, 28, 1))
compile_model(cnn_model, 'adam', 'categorical_crossentropy')
model_history = train_model(cnn_model, mnist_train_data, mnist_train_label, 80, 0.2 ) | Digit Recognizer |
13,706,409 | y_train = to_categorical(y_train, num_classes=10 )<split> | predictions = cnn_model.predict(mnist_test_arr ) | Digit Recognizer |
13,706,409 | X_train, X_val , y_train, y_val = train_test_split(X_train1, y_train, test_size=0.2 )<choose_model_class> | predictions_test = []
for i in predictions:
predictions_test.append(np.argmax(i)) | Digit Recognizer |
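# Equivalent, vectorised form of the loop above (illustrative only):
predictions_test = np.argmax(predictions, axis=1).tolist()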
13,706,409 | model = keras.models.Sequential()
model.add(keras.layers.Conv2D(filters = 64, kernel_size=(5,5), padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(filters = 64, kernel_size=(5,5), padding='same', activation='relu'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D(pool_size=(2,2)))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Conv2D(filters = 64, kernel_size=(3,3), padding='same', activation='relu'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(filters = 64, kernel_size=(3,3), padding='same', activation='relu'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D(pool_size=(2,2)))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(10, activation='softmax'))<choose_model_class> | submission = pd.DataFrame({
"ImageId": mnist_test.index+1,
"Label": predictions_test
})
submission.to_csv('my_first_submission.csv', index=False ) | Digit Recognizer |
13,706,409 | model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=["accuracy"] )<train_model> | import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import models, layers, utils
from tensorflow.keras import Sequential
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPooling2D, MaxPool2D
| Digit Recognizer |
13,706,409 | history = model.fit(X_train, y_train, epochs=25, validation_data=(X_val, y_val))<predict_on_test> | x_train, x_val, y_train, y_val = train_test_split(mnist_train_data, mnist_train_label, test_size = 0.2, random_state = 2 ) | Digit Recognizer |
13,706,409 | y_pred = model.predict(test )<save_to_csv> | def define_model() :
model = Sequential()
model.add(Conv2D(64,(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(64,(3, 3), activation='relu'))
model.add(MaxPooling2D(( 2, 2)))
model.add(layers.BatchNormalization())
model.add(Conv2D(filters=128, kernel_size =(3,3), activation="relu"))
model.add(Conv2D(filters=128, kernel_size =(3,3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(layers.BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(3,3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(layers.BatchNormalization())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
return model | Digit Recognizer |
13,706,409 | submission['Label'] = results
submission.to_csv('submission.csv', index=False )<import_modules> | model = define_model()
model.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'] ) | Digit Recognizer |
13,706,409 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from skimage import color
from skimage import measure
from skimage.filters import try_all_threshold
from skimage.filters import threshold_otsu
from skimage.filters import threshold_local
import keras
from keras import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint<load_from_csv> | model.fit(x_train, y_train , epochs=30 ) | Digit Recognizer |
13,706,409 | df_train = pd.read_csv('../input/digit-recognizer/train.csv')
df_test = pd.read_csv('.. /input/digit-recognizer/test.csv' )<prepare_x_and_y> | predictions = model.predict(mnist_test_arr ) | Digit Recognizer |
13,706,409 | y_train = df_train['label']
X_train = df_train.drop('label', axis = 1)
X_test = np.array(df_test )<categorify> | predictions_test = []
for i in predictions:
predictions_test.append(np.argmax(i)) | Digit Recognizer |
13,706,409 | y_train = to_categorical(y_train, num_classes = 10)
y_train.shape<split> | submission = pd.DataFrame({
"ImageId": mnist_test.index+1,
"Label": predictions_test
})
submission.to_csv('my_second_submission.csv', index=False ) | Digit Recognizer |
13,663,724 | X_train, X_val, y_train, y_val = train_test_split(X_train,
y_train,
test_size=0.25,
random_state=1 )<define_search_space> | def rotate_image(image, angle = 90, scale = 1.0):
h, w = image.shape
M = cv2.getRotationMatrix2D(( w/2, h/2), angle, scale)
return cv2.warpAffine(image, M,(w, h)) | Digit Recognizer |
13,663,724 | kernel_ =(5,5 )<choose_model_class> | data = np.loadtxt('/kaggle/input/digit-recognizer/train.csv', delimiter = ',', skiprows = 1 ) | Digit Recognizer |
13,663,724 | model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28, 28, 1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dense(256, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
model.summary()<define_variables> | y = data[:, 0]
x = data[:, 1:].reshape(-1, 28, 28)
x_train, x_cv, y_train, y_cv = train_test_split(x, y, test_size = 0.1)
x_train_temp = x_train.copy()
for angle in np.arange(-10, 15, 5):
x_train = np.concatenate(( x_train, np.array([rotate_image(image, angle, scale = 1)for image in x_train_temp])))
x_train = x_train.reshape(-1, 28, 28, 1)
x_cv = x_cv.reshape(-1, 28, 28, 1)
y_train_temp = y_train.copy()
for i in range(x_train.shape[0]//y_train_temp.shape[0] - 1):
y_train = np.concatenate(( y_train, y_train_temp)) | Digit Recognizer |
13,663,724 | aug = ImageDataGenerator(
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1)
gen_train = aug.flow(X_train, y_train, batch_size=64)
gen_val = aug.flow(X_val, y_val, batch_size=64 )<choose_model_class> | print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')) ) | Digit Recognizer |
13,663,724 | model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] )<choose_model_class> | YaNet = Sequential(name = 'YaNet')
YaNet.add(Conv2D(filters = 32,
kernel_size =(5, 5),
kernel_initializer = 'he_uniform',
padding = 'same',
activation = 'relu',
input_shape =(28, 28, 1)))
YaNet.add(BatchNormalization())
YaNet.add(Conv2D(filters = 32,
kernel_size =(5, 5),
kernel_initializer = 'he_uniform',
padding = 'same',
activation = 'relu'))
YaNet.add(MaxPooling2D())
YaNet.add(BatchNormalization())
YaNet.add(Conv2D(filters = 64,
kernel_size =(3, 3),
kernel_initializer = 'he_uniform',
padding = 'same',
activation = 'relu'))
YaNet.add(MaxPooling2D())
YaNet.add(BatchNormalization())
YaNet.add(Conv2D(filters = 128,
kernel_size =(3, 3),
kernel_initializer = 'he_uniform',
padding = 'same',
activation = 'relu'))
YaNet.add(BatchNormalization())
YaNet.add(Conv2D(filters = 128,
kernel_size =(3, 3),
kernel_initializer = 'he_uniform',
padding = 'same',
activation = 'relu'))
YaNet.add(MaxPooling2D())
YaNet.add(BatchNormalization())
YaNet.add(Flatten())
YaNet.add(Dense(units = 512,
activation = 'relu'))
YaNet.add(Dropout(0.5))
YaNet.add(BatchNormalization())
YaNet.add(Dense(units = 256,
activation = 'relu'))
YaNet.add(Dropout(0.5))
YaNet.add(BatchNormalization())
YaNet.add(Dense(units = 128,
activation = 'relu'))
YaNet.add(Dropout(0.5))
YaNet.add(BatchNormalization())
YaNet.add(Dense(units = 64,
activation = 'relu'))
YaNet.add(Dropout(0.5))
YaNet.add(BatchNormalization())
YaNet.add(Dense(units = 10,
activation = 'softmax'))
YaNet.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
YaNet.fit(x_train, y_train,
epochs = 100,
validation_data =(x_cv, y_cv),
use_multiprocessing = True ) | Digit Recognizer |
13,663,724 | checkpoint = tf.keras.callbacks.ModelCheckpoint("weights.hdf5",
monitor='val_accuracy',
verbose=1,
save_best_only=True)
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=4,
min_lr=0.00005,
verbose=1)
early_stop = tf.keras.callbacks.EarlyStopping(patience=5, restore_best_weights=True )<train_model> | x_test = np.loadtxt('/kaggle/input/digit-recognizer/test.csv', skiprows = 1, delimiter = ',')
x_test = x_test.reshape(-1, 28, 28, 1 ) | Digit Recognizer |
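
The checkpoint, reduce_lr and early_stop callbacks defined above are never attached to a training call within this excerpt. A hedged sketch of how they would typically be used, assuming the model and the gen_train/gen_val generators from the earlier cells of this kernel (the epoch count is an illustrative assumption):

history = model.fit(
    gen_train,                                      # augmented training batches
    validation_data=gen_val,
    epochs=30,                                      # assumed value, not from the source
    callbacks=[checkpoint, reduce_lr, early_stop],  # defined in the cell above
)
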
13,663,724 | <load_pretrained><EOS> | final_prediction = np.argmax(YaNet.predict(x_test), axis = 1)
output = pd.DataFrame({'ImageId': np.arange(1, x_test.shape[0]+1), 'Label': final_prediction})
output.to_csv('my_submission.csv', index = False ) | Digit Recognizer |
13,128,051 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<predict_on_test> | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import seaborn as sns
import matplotlib.image as mpimg | Digit Recognizer |
13,128,051 | y_test = model.predict(X_test)
y_pred = np.argmax(y_test, axis=1 )<save_to_csv> | train_input = "../input/digit-recognizer/train.csv"
test_input = "../input/digit-recognizer/test.csv"
train_dataset = pd.read_csv(train_input)
test_dataset = pd.read_csv(test_input ) | Digit Recognizer |
13,128,051 | output_csv = {"ImageId":[*range(1,1+len(y_pred)) ], "Label":y_pred}
Y_pre = pd.DataFrame(output_csv)
Y_pre.set_index("ImageId", drop=True, append=False, inplace=True)
Y_pre.to_csv("/kaggle/working/submission.csv" )<import_modules> | train_labels = tf.keras.utils.to_categorical(train_dataset.pop("label")) | Digit Recognizer |
13,128,051 | import numpy as np
import pandas as pd
import os
import torch
import torch.nn as nn
import torch.optim as optim
from PIL import Image
from matplotlib import pyplot as plt
from torch.utils.data import Dataset,DataLoader
from torchvision import transforms as T
from torchvision import models
import tqdm
from sklearn.metrics import f1_score,roc_auc_score,accuracy_score,confusion_matrix<load_from_csv> | train_dataset = np.array(train_dataset.values.reshape(-1, 28, 28, 1))
test_dataset = np.array(test_dataset.values.reshape(-1, 28, 28, 1)) | Digit Recognizer |
train_df=pd.read_csv("../input/digit-recognizer/train.csv")
test_df=pd.read_csv("../input/digit-recognizer/test.csv" )<categorify> | train_dataset = train_dataset/255.0
test_dataset = test_dataset/255.0 | Digit Recognizer |
13,128,051 | def get_image(data_df,idx):
return Image.fromarray(np.uint8(np.reshape(data_df.iloc[idx][data_df.columns[-784:]].to_numpy() ,(28,28)))).convert('RGB')
<categorify> | checkpoint_path = "logs/checkpoints/" | Digit Recognizer |
13,128,051 | class TrainDataSet(Dataset):
def __init__(self,data_df,transforms=T.ToTensor()):
self.data_df=data_df
self.transform=transforms
def __len__(self):
return self.data_df.shape[0]
def __getitem__(self,idx):
image=self.transform(get_image(self.data_df,idx))
label=torch.tensor(self.data_df.label.iloc[idx],dtype=torch.long)
return image,label<categorify> | model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64,(3, 3), input_shape=(28, 28, 1), activation=tf.nn.relu, padding="SAME"),
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.Conv2D(64,(3, 3), activation=tf.nn.relu, padding="SAME"),
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Conv2D(128,(3, 3), activation=tf.nn.relu, padding="SAME"),
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Conv2D(128,(3, 3), activation=tf.nn.relu, padding="SAME"),
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation="softmax")
])
epochs=50
callbacks = [
tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
monitor="accuracy",
save_best_only=True,
save_weights_only=True),
tf.keras.callbacks.EarlyStopping(monitor="accuracy", patience=15)
]
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(train_dataset, train_labels, epochs=epochs, callbacks=callbacks, batch_size=64 ) | Digit Recognizer |
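
Because the fit call above keeps its return value in history, the recorded curves can be plotted directly; an optional sketch using the matplotlib import already present in this kernel:

plt.figure(figsize=(8, 4))
plt.plot(history.history["loss"], label="training loss")          # loss per epoch
plt.plot(history.history["accuracy"], label="training accuracy")  # accuracy per epoch
plt.xlabel("epoch")
plt.legend()
plt.show()
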
13,128,051 | class TestDataSet(TrainDataSet):
def __getitem__(self,idx):
image=self.transform(get_image(self.data_df,idx))
return image<choose_model_class> | model.load_weights(checkpoint_path ) | Digit Recognizer |
13,128,051 | def create_model() :
model = models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 10)
return model<categorify> | labels = [np.argmax(predict)for predict in model.predict(test_dataset)]
df = pd.DataFrame({
"ImageId": list(range(1, len(test_dataset)+1)) ,
"Label": labels,
} ) | Digit Recognizer |
13,128,051 | transform=T.Compose([
T.Resize(( 256,256)) ,
T.ToTensor() ,
T.Normalize(( 0.485, 0.456, 0.406),(0.229, 0.224, 0.225))
] )<choose_model_class> | df.to_csv("submission.csv", index=False ) | Digit Recognizer |
13,128,051 | def train_once(model,dataloader,criterion,optimizer,device):
total_loss=0
n_total=0
criterion.reduction="sum"
model.train()
for i,(images,labels)in enumerate(tqdm.tqdm(dataloader)) :
optimizer.zero_grad()
images=images.to(device)
labels=labels.to(device)
outputs=model(images)
loss=criterion(outputs,labels)
total_loss+=loss.item()
n_total+=labels.shape[0]
loss.backward()
optimizer.step()
return total_loss/n_total<choose_model_class> | model.save("model.h5" ) | Digit Recognizer |
13,441,242 | class Validation_Metrics(object):
def __init__(self,activation_func=nn.Softmax(dim=1)) :
self.predictions=[]
self.labels=[]
self.activation_func=activation_func
self.collapsed=False
def update(self,model_outputs,labels):
if not self.collapsed:
self.predictions.append(self.activation_func(model_outputs ).detach())
self.labels.append(labels.detach())
else:
raise ValueError('Error, one cannot add further values to a logger once it has been collapsed')
def collapse(self):
if self.collapsed:
pass
else:
self.predictions=torch.cat(self.predictions ).cpu().numpy()
self.labels=torch.cat(self.labels ).cpu().numpy()
self.collapsed=True
def Confusion_matrix(self):
self.collapse()
Confusion_matrix=np.zeros((10,10))
pred=np.argmax(self.predictions,axis=1)
labels=self.labels
return confusion_matrix(labels,pred)
def AUC(self):
self.collapse()
pred=self.predictions
labels=np.zeros(pred.shape)
labels[np.arange(labels.shape[0]),self.labels]=1.0
aucs = []
for i in range(labels.shape[1]):
aucs.append(roc_auc_score(labels[:, i], pred[:, i]))
return aucs
def F1_score(self):
self.collapse()
pred=np.argmax(self.predictions,axis=1)
labels=self.labels
return f1_score(labels, pred, average=None)
def Accuracy(self):
self.collapse()
pred=np.argmax(self.predictions,axis=1)
labels=self.labels
return accuracy_score(labels, pred)
<compute_train_metric> | train = pd.read_csv("../input/digit-recognizer/train.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv")
print(train.shape)
train.head() | Digit Recognizer |
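
A quick way to sanity-check the Validation_Metrics logger defined above is to feed it one batch of random logits; a small smoke test (the 16-sample batch and 10-class shapes are illustrative assumptions):

fake_logits = torch.randn(16, 10)            # stand-in for model outputs
fake_labels = torch.randint(0, 10, (16,))    # stand-in for ground-truth digits
metrics = Validation_Metrics()
metrics.update(fake_logits, fake_labels)
print("accuracy:", metrics.Accuracy())
print("per-class F1:", metrics.F1_score())
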
13,441,242 | def val(model,dataloader,criterion,device):
total_loss=0
n_total=0
criterion.reduction="sum"
Metrics=Validation_Metrics()
model.eval()
with torch.no_grad() :
for images,labels in tqdm.tqdm(dataloader):
images=images.to(device)
labels=labels.to(device)
outputs=model(images)
loss=criterion(outputs,labels)
Metrics.update(outputs,labels)
total_loss+=loss.item()
n_total+=labels.shape[0]
return total_loss/n_total,Metrics
<define_variables> | x_train =(train.iloc[:,1:].values ).astype('float32')
y_train = train.iloc[:,0].values.astype('int32')
x_test = test.values.astype('float32' ) | Digit Recognizer |
13,441,242 | n_folds=5<prepare_output> | x_train = x_train/255.0
x_test = x_test/255.0 | Digit Recognizer |
13,441,242 | train_df.insert(1,"fold",np.random.randint(1,n_folds+1,size=train_df.shape[0]))<define_variables> | y_train = keras.utils.to_categorical(y_train, 10)
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size = 0.1, random_state = 42 ) | Digit Recognizer |
13,441,242 | def Get_Train_Val_Set(fold_i,transform=transform):
train_set=TrainDataSet(train_df[train_df.fold!=fold_i],transforms=transform)
test_set=TrainDataSet(train_df[train_df.fold==fold_i],transforms=transform)
return train_set, test_set<set_options> | batch_size = 64
epochs = 20
input_shape =(28, 28, 1 ) | Digit Recognizer |
13,441,242 | USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu" )<choose_model_class> | model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',kernel_initializer='he_normal',input_shape=input_shape))
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',kernel_initializer='he_normal'))
model.add(MaxPool2D(( 2, 2)))
model.add(Dropout(0.20))
model.add(Conv2D(64,(3, 3), activation='relu',padding='same',kernel_initializer='he_normal'))
model.add(Conv2D(64,(3, 3), activation='relu',padding='same',kernel_initializer='he_normal'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128,(3, 3), activation='relu',padding='same',kernel_initializer='he_normal'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax')) | Digit Recognizer |
13,441,242 | criterion=nn.CrossEntropyLoss()
optimizer_name="Adam"
optimizer_parameters={"lr":0.0001}
epochs=1<choose_model_class> | model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics = ['accuracy'] ) | Digit Recognizer |
13,441,242 | def create_optimizer(model,optimizer_name,optimizer_parameters):
if optimizer_name=="SGD":
return optim.SGD(model.parameters() ,**optimizer_parameters)
elif optimizer_name=="Adam":
return optim.Adam(model.parameters() ,**optimizer_parameters )<load_pretrained> | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=15,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False ) | Digit Recognizer |
13,441,242 | Best_val_accuracy=0
for fold in range(1,n_folds+1):
print(f"Training fold {fold}")
model=create_model()
model.to(device)
optimizer=create_optimizer(model,optimizer_name,optimizer_parameters)
TrainSet,ValSet=Get_Train_Val_Set(fold)
TrainLoader=DataLoader(TrainSet, batch_size=256)
ValLoader=DataLoader(ValSet, batch_size=1024)
for epoch in range(epochs):
train_loss=train_once(model,TrainLoader,criterion,optimizer,device)
print(f"For epoch {epoch+1}, the Train Loss was: {train_loss}")
val_loss,Metrics=val(model,ValLoader,criterion,device)
print(f"The Val Loss was {val_loss}, and the val accuracy was {Metrics.Accuracy() }")
if Metrics.Accuracy() >Best_val_accuracy:
print("New Best, saving")
Best_val_accuracy=Metrics.Accuracy()
torch.save(model.state_dict() ,f"fold{fold}Best.pt")
<choose_model_class> | datagen.fit(X_train)
model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val),
verbose = 1,
steps_per_epoch = X_train.shape[0] // batch_size
) | Digit Recognizer |
13,441,242 | optimizer=create_optimizer(model,optimizer_name,optimizer_parameters)
optimizer<define_variables> | predictions = model.predict(X_test)
results = np.argmax(predictions, axis = 1 ) | Digit Recognizer |
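
The excerpt ends right after computing results; the remaining step would be writing them out in the usual ImageId/Label submission layout, sketched here under that assumption:

submission = pd.DataFrame({
    "ImageId": np.arange(1, len(results) + 1),
    "Label": results,
})
submission.to_csv("submission.csv", index=False)
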