kernel_id    int64           24.2k – 23.3M
prompt       stringlengths   8 – 1.85M
completion   stringlengths   1 – 182k
comp_name    stringlengths   5 – 57
2,858,410
MAX_SEQ = 100 n_part = 7 D_MODEL = 256 N_LAYER = 2 DROPOUT = 0.1<feature_engineering>
X_train,Y_train=read_csv("../input/train.csv") X_test,_=read_csv("../input/test.csv") m,pixels=X_train.shape classes=10 height,width,channels=28,28,1 X_train, X_test=X_train/255, X_test/255 X_train=X_train.reshape(-1,height,width,channels) X_test=X_test.reshape(-1,height,width,channels) print(Y_train.shape,X_train.shape,X_test.shape) random_seed = 2 X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed) print(Y_train.shape,X_train.shape,X_test.shape,X_val.shape,Y_val.shape)
Digit Recognizer
2,858,410
def feature_time_lag(df, time_dict): tt = np.zeros(len(df), dtype=np.int64) for ind, row in enumerate(df[['user_id','timestamp','task_container_id']].values): if row[0] in time_dict.keys() : if row[2]-time_dict[row[0]][1] == 0: tt[ind] = time_dict[row[0]][2] else: t_last = time_dict[row[0]][0] task_ind_last = time_dict[row[0]][1] tt[ind] = row[1]-t_last time_dict[row[0]] =(row[1], row[2], tt[ind]) else: time_dict[row[0]] =(row[1], row[2], -1) tt[ind] = 0 df["time_lag"] = tt return df<define_search_model>
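feature_time_lag above expects a DataFrame with user_id, timestamp and task_container_id columns plus a dict keyed by user_id, and adds a time_lag column. A minimal sketch of calling it, assuming the definition above is in scope; the rows are illustrative only, not from the kernel:
import pandas as pd

# Hypothetical toy call of feature_time_lag (illustrative data).
# time_dict maps user_id -> (last timestamp, last task_container_id, last lag).
time_dict = {}
toy = pd.DataFrame({"user_id": [1, 1, 1],
                    "timestamp": [0, 500, 900],
                    "task_container_id": [0, 1, 2]})
toy = feature_time_lag(toy, time_dict)
print(toy["time_lag"].tolist())  # [0, 500, 400]: 0 for a new user, then deltas between timestamps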
def DigitalRecognizerModel(input_shape): X_input = Input(input_shape) X=Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))(X_input) X=Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu' )(X) X=MaxPooling2D(pool_size=(2,2))(X) X=Dropout(0.25 )(X) X=Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu' )(X) X=Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu' )(X) X=MaxPooling2D(pool_size=(2,2), strides=(2,2))(X) X=Dropout(0.25 )(X) X=Flatten()(X) X=Dense(256, activation = "relu" )(X) X=Dropout(0.5 )(X) X=Dense(10, activation = "softmax" )(X) model = Model(inputs = X_input, outputs = X, name='DigitalRecognizer') return model
Digit Recognizer
2,858,410
class FFN(nn.Module): def __init__(self, state_size=200): super(FFN, self ).__init__() self.state_size = state_size self.lr1 = nn.Linear(state_size, state_size) self.relu = nn.ReLU() self.lr2 = nn.Linear(state_size, state_size) self.dropout = nn.Dropout(DROPOUT) def forward(self, x): x = self.lr1(x) x = self.relu(x) x = self.lr2(x) return self.dropout(x) def future_mask(seq_length): future_mask = np.triu(np.ones(( seq_length, seq_length)) , k=1 ).astype('bool') return torch.from_numpy(future_mask) class SAINTModel(nn.Module): def __init__(self, n_skill, n_part, max_seq=MAX_SEQ, embed_dim= 128, elapsed_time_cat_flag = True): super(SAINTModel, self ).__init__() self.n_skill = n_skill self.embed_dim = embed_dim self.n_cat = n_part self.elapsed_time_cat_flag = elapsed_time_cat_flag self.e_embedding = nn.Embedding(self.n_skill+1, embed_dim) self.c_embedding = nn.Embedding(self.n_cat+1, embed_dim) self.pos_embedding = nn.Embedding(max_seq-1, embed_dim) self.res_embedding = nn.Embedding(2+1, embed_dim) if self.elapsed_time_cat_flag == True: self.elapsed_time_embedding = nn.Embedding(300+1, embed_dim) self.lag_embedding1 = nn.Embedding(300+1, embed_dim) self.lag_embedding2 = nn.Embedding(1440+1, embed_dim) self.lag_embedding3 = nn.Embedding(365+1, embed_dim) else: self.elapsed_time_embedding = nn.Linear(1, embed_dim, bias=False) self.lag_embedding = nn.Linear(1, embed_dim, bias=False) self.exp_embedding = nn.Embedding(2+1, embed_dim) self.transformer = nn.Transformer(nhead=8, d_model = embed_dim, num_encoder_layers= N_LAYER, num_decoder_layers= N_LAYER, dropout = DROPOUT) self.dropout = nn.Dropout(DROPOUT) self.layer_normal = nn.LayerNorm(embed_dim) self.ffn = FFN(embed_dim) self.pred = nn.Linear(embed_dim, 1) def forward(self, question, part, response, elapsed_time, lag_time, exp): device = question.device question = self.e_embedding(question) part = self.c_embedding(part) pos_id = torch.arange(question.size(1)).unsqueeze(0 ).to(device) pos_id = self.pos_embedding(pos_id) res = self.res_embedding(response) exp = self.exp_embedding(exp) if self.elapsed_time_cat_flag == True: elapsed_time = torch.true_divide(elapsed_time, 1000) elapsed_time = torch.round(elapsed_time) elapsed_time = torch.where(elapsed_time.float() <= 300, elapsed_time, torch.tensor(300.0 ).to(device)).long() elapsed_time = self.elapsed_time_embedding(elapsed_time) lag_time = torch.true_divide(lag_time, 1000) lag_time = torch.round(lag_time) lag_time1 = torch.where(lag_time.float() <= 300, lag_time, torch.tensor(300.0 ).to(device)).long() lag_time = torch.true_divide(lag_time, 60) lag_time = torch.round(lag_time) lag_time2 = torch.where(lag_time.float() <= 1440, lag_time, torch.tensor(1440.0 ).to(device)).long() lag_time = torch.true_divide(lag_time, 1440) lag_time = torch.round(lag_time) lag_time3 = torch.where(lag_time.float() <= 365, lag_time, torch.tensor(365.0 ).to(device)).long() lag_time1 = self.lag_embedding1(lag_time1) lag_time2 = self.lag_embedding2(lag_time2) lag_time3 = self.lag_embedding3(lag_time3) else: elapsed_time = elapsed_time.view(-1,1) elapsed_time = self.elapsed_time_embedding(elapsed_time) elapsed_time = elapsed_time.view(-1, MAX_SEQ-1, self.embed_dim) lag_time = lag_time.view(-1,1) lag_time = self.lag_embedding(lag_time) lag_time = lag_time.view(-1, MAX_SEQ-1, self.embed_dim) enc = question + part + pos_id + exp dec = pos_id + res + elapsed_time + lag_time1 + lag_time2 + lag_time3 enc = enc.permute(1, 0, 2) dec = dec.permute(1, 0, 2) mask = future_mask(enc.size(0)).to(device) att_output = 
self.transformer(enc, dec, src_mask=mask, tgt_mask=mask, memory_mask = mask) att_output = self.layer_normal(att_output) att_output = att_output.permute(1, 0, 2) x = self.ffn(att_output) x = self.layer_normal(x + att_output) x = self.pred(x) return x.squeeze(-1 )<load_from_csv>
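The SAINTModel above takes six integer sequences (question ids, parts, responses, elapsed times, lag times, prior-explanation flags), each of length at most MAX_SEQ - 1, and returns one logit per position. A minimal smoke-test sketch, assuming the class definitions and MAX_SEQ above are in scope; the batch size and random inputs are illustrative only:
import torch

# Hypothetical smoke test for SAINTModel (illustrative values, not from the kernel).
model = SAINTModel(n_skill=13523, n_part=7, embed_dim=256)  # 13523 questions, as set later in this dump
B, L = 4, MAX_SEQ - 1                                       # pos_embedding only covers MAX_SEQ - 1 positions
question     = torch.randint(0, 13523 + 1, (B, L))
part         = torch.randint(1, 7 + 1, (B, L))
response     = torch.randint(0, 3, (B, L))                  # 0/1 answers plus a padding id
elapsed_time = torch.randint(0, 300_000, (B, L))            # milliseconds; bucketed inside the model
lag_time     = torch.randint(0, 1_000_000, (B, L))          # milliseconds; split into three buckets inside
exp          = torch.randint(0, 3, (B, L))                  # prior_question_had_explanation plus a padding id
with torch.no_grad():
    logits = model(question, part, response, elapsed_time, lag_time, exp)
print(logits.shape)  # torch.Size([4, 99]); torch.sigmoid(logits) gives P(answered correctly) per position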
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
2,858,410
n_skill = 13523 group = joblib.load("../input/saint-plus-data-new/group_20210102.pkl.zip") questions_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/questions.csv') time_dict = joblib.load("../input/saint-plus-data-new/time_dict.pkl.zip")<load_pretrained>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
2,858,410
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model1 = SAINTModel(n_skill, n_part, embed_dim= D_MODEL) try: model1.load_state_dict(torch.load("../input/saint-plus-model/saint_plus_model_20210102_padding_v2.pt")) except: model1.load_state_dict(torch.load("../input/saint-plus-model/saint_plus_model_20210102_padding_v2.pt", map_location='cpu')) model1.to(device) model1.eval()<load_pretrained>
digitalRecognizerModel = DigitalRecognizerModel(X_train[0].shape) digitalRecognizerModel.compile(optimizer = "Adam", loss = "categorical_crossentropy", metrics = ["accuracy"]) history = digitalRecognizerModel.fit_generator(datagen.flow(X_train,Y_train, batch_size=62), epochs = 30, validation_data =(X_val,Y_val), verbose = 2, steps_per_epoch=610,callbacks=[learning_rate_reduction])
Digit Recognizer
2,858,410
<define_variables><EOS>
print("Time Start:" ,time.time()) val_predictions=digitalRecognizerModel.predict(X_val) correct_val_predictions=np.mean(np.equal(np.argmax(val_predictions,axis=1), np.argmax(Y_val,axis=1))) print("Validation Accuracy",correct_val_predictions) test_predictions=digitalRecognizerModel.predict(X_test) correct_test_predictions=np.argmax(test_predictions,axis=1) write_csv('submission.csv',correct_test_predictions) print("Time End:" ,int(round(time.time())) )
Digit Recognizer
594,887
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_pretrained>
df_train = pd.read_csv('../input/train.csv') df_test = pd.read_csv('../input/test.csv')
Digit Recognizer
594,887
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model2 = SAINTModel(n_skill, n_part, embed_dim= D_MODEL) try: model2.load_state_dict(torch.load("../input/saint-plus-model/saint_plus_model_20210103.pt_v2")) except: model2.load_state_dict(torch.load("../input/saint-plus-model/saint_plus_model_20210103.pt_v2", map_location='cpu')) model2.to(device) model2.eval()<load_pretrained>
from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers.convolutional import Conv2D, MaxPooling2D from keras.utils import np_utils from keras.optimizers import RMSprop from keras.callbacks import ReduceLROnPlateau from keras.preprocessing.image import ImageDataGenerator import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split
Digit Recognizer
594,887
lt_correct_dict = pickle.load(open('../input/arvis-feature/last_timestamp_correct.pkl', 'rb')) np_uq_td = pickle.load(open("../input/uq-data/np_uq_td_0518.pkl.data","rb")) curr_u_dict = pickle.load(open("../input/uq-data/curr_u_dict_0614_only_user_three_time_diff.pkl.data","rb")) max_timestamp_u_dict = pickle.load(open("../input/arvis-feature/max_timestamp_u_dict_2015.pkl","rb")) max_timestamp_u_dict2 = pickle.load(open("../input/arvis-feature/max_timestamp_u_dict2_2015.pkl","rb")) max_timestamp_u_dict3 = pickle.load(open("../input/arvis-feature/max_timestamp_u_dict3_2015.pkl","rb"))<feature_engineering>
df_train_x = df_train.iloc[:,1:] df_train_y = df_train.iloc[:,:1]
Digit Recognizer
594,887
def add_uq_feats_and_update(df): conn = sqlite3.connect('user_ques_db.db') cursor = conn.cursor() global idx uq_timediff = np.zeros(len(df), dtype=np.uint64) for cnt,row in enumerate(df[['user_id','content_id','timestamp']].itertuples(index=False)) : cursor.execute(f'select idx from user where user_id = {row[0]} and content_id = {row[1]}') tmp_idx = cursor.fetchall() if tmp_idx == []: uq_timediff[cnt] = 0 np_uq_td[idx] = row[2] cursor.execute(f'insert into user(user_id, content_id, idx)values({row[0]}, {row[1]}, {idx})') idx += 1 else: tmp_idx = tmp_idx[0][0] uq_timediff[cnt] = row[2] - np_uq_td[tmp_idx] np_uq_td[tmp_idx] = row[2] cursor.close() conn.commit() conn.close() uq_feats_df = pd.DataFrame({'curr_uq_time_diff':uq_timediff}) df = pd.concat([df, uq_feats_df], axis=1) return df<feature_engineering>
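add_uq_feats_and_update assumes a SQLite database user_ques_db.db with a user table mapping (user_id, content_id) pairs to an index into np_uq_td; that table is presumably built in an earlier cell not shown here. A hypothetical setup consistent with the SELECT/INSERT statements above:
import sqlite3

# Hypothetical schema, inferred from the queries above (not from the original kernel).
conn = sqlite3.connect('user_ques_db.db')
conn.execute('CREATE TABLE IF NOT EXISTS user (user_id INTEGER, content_id INTEGER, idx INTEGER)')
# The per-row lookup filters on (user_id, content_id), so an index keeps it from scanning the table.
conn.execute('CREATE INDEX IF NOT EXISTS ix_user_content ON user (user_id, content_id)')
conn.commit()
conn.close()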
def cnn_model(result_class_size): model = Sequential() model.add(Conv2D(32,(5, 5), input_shape=(28,28,1), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(16,(3, 3), activation='relu')) model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(130, activation='relu')) model.add(Dense(50, activation='relu')) model.add(Dense(result_class_size, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=RMSprop() , metrics=['accuracy']) return model
Digit Recognizer
594,887
def add_user_feats_without_update(df): utdiff = np.zeros(len(df), dtype=np.uint64) utdiff_mean = np.zeros(len(df), dtype=np.uint64) uelapdiff = np.zeros(len(df), dtype=np.float32) for cnt,row in enumerate(df[['user_id','timestamp','prior_question_elapsed_time']].itertuples(index=False)) : if row[0] in curr_u_dict: utdiff[cnt] = row[1] - curr_u_dict[row[0]]["uts"] utdiff_mean[cnt] = curr_u_dict[row[0]]["utsdiff"][1] / curr_u_dict[row[0]]["utsdiff"][0] uelapdiff[cnt] = row[2] - curr_u_dict[row[0]]["uelapdiff"] else: utdiff[cnt] = 0; utdiff_mean[cnt] = 0; uelapdiff[cnt] = 0; user_feats_df = pd.DataFrame({'curr_user_time_diff':utdiff, 'curr_user_time_diff_mean':utdiff_mean, 'curr_user_elapsed_time_diff':uelapdiff }) user_feats_df['curr_user_elapsed_time_diff'].fillna(0, inplace=True) df = pd.concat([df, user_feats_df], axis=1) return df def update_user_feats(df): for cnt,row in enumerate(df[['user_id','content_id','answered_correctly','timestamp','prior_question_elapsed_time']].itertuples(index=False)) : if row[0] in curr_u_dict: curr_u_dict[row[0]]["uts"] = row[3] curr_u_dict[row[0]]["utsdiff"][0] += 1 curr_u_dict[row[0]]["utsdiff"][1] += row[3] curr_u_dict[row[0]]["uelapdiff"] = row[4] else: curr_u_dict[row[0]] = {} curr_u_dict[row[0]]["uts"] = row[3] curr_u_dict[row[0]]["utsdiff"] = [1, row[3]] curr_u_dict[row[0]]["uelapdiff"] = row[4]<categorify>
df_test = df_test / 255 df_train_x = df_train_x / 255
Digit Recognizer
594,887
def add_user_feats(df): utdiff = np.zeros(len(df), dtype=np.uint64) utdiff_mean = np.zeros(len(df), dtype=np.uint64) uelapdiff = np.zeros(len(df), dtype=np.float32) for cnt,row in enumerate(tqdm(df[['user_id','content_id','answered_correctly', 'timestamp','prior_question_elapsed_time', ]].itertuples(index=False),total=df.shape[0])) : if row[0] in curr_u_dict: utdiff[cnt] = row[3] - curr_u_dict[row[0]]["uts"] utdiff_mean[cnt] = curr_u_dict[row[0]]["utsdiff"][1] / curr_u_dict[row[0]]["utsdiff"][0] uelapdiff[cnt] = row[4] - curr_u_dict[row[0]]["uelapdiff"] curr_u_dict[row[0]]["uts"] = row[3] curr_u_dict[row[0]]["utsdiff"][0] += 1 curr_u_dict[row[0]]["utsdiff"][1] += row[3] curr_u_dict[row[0]]["uelapdiff"] = row[4] else: utdiff[cnt] = 0; utdiff_mean[cnt] = 0; uelapdiff[cnt] = 0; curr_u_dict[row[0]] = {} curr_u_dict[row[0]]["uts"] = row[3] curr_u_dict[row[0]]["utsdiff"] = [1, row[3]] curr_u_dict[row[0]]["uelapdiff"] = row[4] user_feats_df = pd.DataFrame({ 'curr_user_time_diff':utdiff, 'curr_user_time_diff_mean':utdiff_mean, 'curr_user_elapsed_time_diff':uelapdiff }) user_feats_df['curr_user_elapsed_time_diff'].fillna(0, inplace=True) df = pd.concat([df, user_feats_df], axis=1) return df <categorify>
arr_train_x_28x28 = np.reshape(df_train_x.values,(df_train_x.values.shape[0], 28, 28, 1)) arr_test_x_28x28 = np.reshape(df_test.values,(df_test.values.shape[0], 28, 28, 1))
Digit Recognizer
594,887
def lagtime_for_test(df): lagtime_mean = 0 lagtime_mean2 = 0 lagtime_mean3 = 0 lagtime = np.zeros(len(df), dtype=np.float32) lagtime2 = np.zeros(len(df), dtype=np.float32) lagtime3 = np.zeros(len(df), dtype=np.float32) for i,(user_id, content_type_id, timestamp, content_id,)in enumerate(zip(df['user_id'].values, df['content_type_id'].values, df['timestamp'].values, df['content_id'].values)) : if content_type_id==0: if user_id in max_timestamp_u_dict['max_time_stamp'].keys() : lagtime[i]=timestamp-max_timestamp_u_dict['max_time_stamp'][user_id] if(max_timestamp_u_dict2['max_time_stamp2'][user_id]==lagtime_mean2): lagtime2[i]=lagtime_mean2 lagtime3[i]=lagtime_mean3 else: lagtime2[i]=timestamp-max_timestamp_u_dict2['max_time_stamp2'][user_id] if(max_timestamp_u_dict3['max_time_stamp3'][user_id]==lagtime_mean3): lagtime3[i]=lagtime_mean3 else: lagtime3[i]=timestamp-max_timestamp_u_dict3['max_time_stamp3'][user_id] max_timestamp_u_dict3['max_time_stamp3'][user_id]=max_timestamp_u_dict2['max_time_stamp2'][user_id] max_timestamp_u_dict2['max_time_stamp2'][user_id]=max_timestamp_u_dict['max_time_stamp'][user_id] max_timestamp_u_dict['max_time_stamp'][user_id]=timestamp else: lagtime[i]=lagtime_mean max_timestamp_u_dict['max_time_stamp'].update({user_id:timestamp}) lagtime2[i]=lagtime_mean2 max_timestamp_u_dict2['max_time_stamp2'].update({user_id:lagtime_mean2}) lagtime3[i]=lagtime_mean3 max_timestamp_u_dict3['max_time_stamp3'].update({user_id:lagtime_mean3}) df["lag_time"]= lagtime df["lag_time2"]= lagtime2 df["lag_time3"]= lagtime3 df["lag_time"].fillna(-1, inplace=True) df["lag_time2"].fillna(-1, inplace=True) df["lag_time3"].fillna(-1, inplace=True) df['lag_time'] = df['lag_time'].replace(0, method="ffill") df['lag_time2'] = df['lag_time2'].replace(0, method="ffill") df['lag_time3'] = df['lag_time3'].replace(0, method="ffill") df["lag_time"] = df["lag_time"].astype("uint64") df["lag_time2"] = df["lag_time2"].astype("uint64") df["lag_time3"] = df["lag_time3"].astype("uint64") return df<categorify>
random_seed = 3 split_train_x, split_val_x, split_train_y, split_val_y, = train_test_split(arr_train_x_28x28, arr_train_y, test_size = 0.08, random_state=random_seed )
Digit Recognizer
594,887
def add_feats(df_np, feat_dict, col_idx, col_feat): current_feat_value = np.zeros(len(df_np)) for cnt, row in enumerate(df_np[:,[col_idx, col_feat]]): current_feat_value[cnt] = feat_dict[row[0]] feat_dict[row[0]] += row[1] df_np[:, col_feat] = current_feat_value return df_np def add_feats_from_dict(df_np, feat_dict, col_idx, col_dict=-1): current_feat_value = np.zeros(len(df_np)) for cnt, idx in enumerate(df_np[:,col_idx]): if col_dict == -1: current_feat_value[cnt] = feat_dict[idx] else: current_feat_value[cnt] = feat_dict[idx][col_dict] return(np.c_[ df_np, current_feat_value ]) def add_feats_from_dict_got_new_user(df_np, feat_dict, col_idx, col_dict=-1): current_feat_value = np.zeros(len(df_np)) for cnt, idx in enumerate(df_np[:,col_idx]): if idx in feat_dict.keys() : if col_dict == -1: current_feat_value[cnt] = feat_dict[idx] else: current_feat_value[cnt] = feat_dict[idx][col_dict] else: if col_dict == -1: current_feat_value[cnt] = feat_dict[-100] else: current_feat_value[cnt] = feat_dict[-100][col_dict] feat_dict[idx] = feat_dict[-100] return(np.c_[ df_np, current_feat_value ]) def update_dict(df_pd, feat_dict, col_idx, col_feat, col_dict=-1): for row in df_pd[['content_type_id', col_idx, col_feat]].values: if row[0] == 0: if col_dict == -1: feat_dict[row[1]] += row[2] else: feat_dict[row[1]][col_dict] += row[2] def add_ohe(df, col_feat, oh_value): return(np.c_[ df, np.array([int(i == oh_value)for i in df[:,col_feat]])]) def user_slice_accuracy_n_get(df_pd, feat_dict): global_first_question_accuracy = 0.6453965159034877 current_list = np.zeros(len(df_pd)) for cnt,(user, content_type_id)in enumerate(df_pd[['user_id', 'content_type_id']].values): if content_type_id == 0: if user in feat_dict: current_list[cnt] = np.mean(feat_dict[user]) else: current_list[cnt] = 0.6454 else: current_list[cnt] = 0 return current_list def user_slice_accuracy_n_update(df_pd, feat_dict, border=5): for cnt,(user, answer)in enumerate(df_pd[['user_id', 'answered_correctly']].values): if user in feat_dict: feat_dict[user].append(answer) feat_dict[user] = feat_dict[user][-border:] else: feat_dict[user] = [answer] return feat_dict def user_slice_accuracy_session_get(df_pd, feat_dict, session_max_time=12): current_list = np.zeros(len(df_pd)) for cnt,(user, timestamp, content_type_id)in enumerate(df_pd[['user_id', 'timestamp', 'content_type_id']].values): if content_type_id == 0: if user in feat_dict: time_delta_h =(timestamp - feat_dict[user][1])/ 1000 / 60 / 60 if time_delta_h < session_max_time: current_list[cnt] = np.mean(feat_dict[user][0]) else: current_list[cnt] = 0.67 else: current_list[cnt] = 0.67 else: current_list[cnt] = 0 return current_list def user_slice_accuracy_session_update(df_pd, feat_dict, session_max_time=12): for cnt,(user, answer, timestamp)in enumerate(df_pd[['user_id', 'answered_correctly', 'timestamp']].values): if user in feat_dict: time_delta_h =(timestamp - feat_dict[user][1])/ 1000 / 60 / 60 if time_delta_h < session_max_time: feat_dict[user][0].append(answer) feat_dict[user][1] = timestamp else: feat_dict[user][0] = [answer] feat_dict[user][1] = timestamp else: feat_dict[user] = [[answer], timestamp] return feat_dict def user_question_attempt_cnt_get_update(df_pd, feat_dict): current_feat_value = np.zeros(len(df_pd)) for idx,(user_id, content_id, content_type_id)in enumerate(df_pd[['user_id', 'content_id', 'content_type_id']].values): if content_type_id == 0: current_feat_value[idx] = list(feat_dict[user_id] ).count(content_id) feat_dict[user_id] = np.append(feat_dict[user_id], 
content_id) else: current_feat_value[idx] = 0 return current_feat_value def user_lectures_part(df_pd, feat_dict): current_list = np.zeros(len(df_pd)) for cnt,(user, content_type_id, part_q, part_l)in enumerate(df_pd[['user_id', 'content_type_id', 'part_q', 'part_l']].values): part_q = max(0, part_q) if content_type_id == 0: if user in feat_dict: current_list[cnt] = feat_dict[user][part_q] else: feat_dict[user] = [0] * 8 current_list[cnt] = 0 else: if user in feat_dict: feat_dict[user][part_l] += 1 else: feat_dict[user] = [0] * 8 feat_dict[user][part_l] += 1 current_list[cnt] = 0 return current_list def user_lecture_cnt(df_pd, feat_dict): current_list = np.zeros(len(df_pd)) for cnt, user in enumerate(df_pd['user_id'].values): if user in feat_dict: current_list[cnt] = sum(feat_dict[user]) else: current_list[cnt] = 0 return current_list def user_l_q_tag_equal(df_pd, feat_dict): current_list = np.zeros(len(df_pd)) for idx,(user, content_type_id, tag, tags_list)in enumerate(df_pd[['user_id', 'content_type_id', 'tag_l', 'tags_list']].values): if content_type_id == 0: current_list[idx] = len(set(feat_dict[user])& set(tags_list)) else: feat_dict[user].append(int(tag)) current_list[idx] = 0 return current_list def get_q_l(df_pd, feat_dict, key): current_list = [] for idx, content_id in enumerate(df_pd['content_id'].values): if content_id in feat_dict: current_list.append(feat_dict[content_id][key]) else: current_list.append(-100) return current_list def dict_user_timestampsdelta_get_update_3(df, feat_dict): q_list = np.zeros(( len(df), 3), dtype = np.float32) l_list = np.zeros(( len(df), 3), dtype = np.float32) for cnt,(user_id, timestamp, content_type_id)in enumerate(df[['user_id', 'timestamp', 'content_type_id']].values): timestamp = timestamp / 1000 / 60 if user_id in feat_dict: q_list[cnt] = np.array([timestamp - t for t in feat_dict[user_id][0]]) l_list[cnt] = np.array([timestamp - t for t in feat_dict[user_id][1]]) feat_dict[user_id][int(content_type_id)].pop(0) feat_dict[user_id][int(content_type_id)].append(timestamp) else: if content_type_id == 1: feat_dict[user_id] = [[np.nan, np.nan, np.nan], [np.nan, np.nan, 0]] else: feat_dict[user_id] = [[np.nan, np.nan, 0], [np.nan, np.nan, np.nan]] q_list[cnt] = np.array([np.nan, np.nan, np.nan]) l_list[cnt] = np.array([np.nan, np.nan, np.nan]) for i in [0, 1, 2]: df['prior_question_' + str(i+1)+ '_timedelta_min'] = q_list[:, i] df['prior_lecture_' + str(i+1)+ '_timedelta_min'] = l_list[:, i] df['prior_question_' + str(i+1)+ '_timedelta_min'] = df['prior_question_' + str(i+1)+ '_timedelta_min'].fillna(-100 ).replace(0, method='ffill') df['prior_lecture_' + str(i+1)+ '_timedelta_min'] = df['prior_lecture_' + str(i+1)+ '_timedelta_min'].fillna(-100 ).replace(0, method='ffill') del [q_list, l_list] return df def user_question_tag_accuracy_get(df, feat_calc): tags_w = [0.43, 0.27, 0.18, 0.08, 0.03, 0.01] values = np.zeros(len(df)) feat_list = ['user_id', 'content_type_id', 'tags_list'] for cnt,(user, content_type_id, tags_list)in enumerate(df[feat_list].values): if tags_list == -100: tags_list = [-100] if content_type_id == 0: if user in feat_calc: user_tags_accuracy = 0 for tag_i in tags_list: tags_accuracy_list = [] if tag_i in feat_calc[user][0]: tag_i_idx = feat_calc[user][0].index(tag_i) tags_accuracy_list.append(feat_calc[user][2][tag_i_idx] / feat_calc[user][1][tag_i_idx]) else: tags_accuracy_list.append(dict_global_question_tag_accuracy[tag_i][2]) l = len(tags_accuracy_list) tags_w_l = tags_w[:l] tags_w_l_sum = sum(tags_w_l) tags_w_current = 
[x/tags_w_l_sum for x in tags_w_l] user_tags_accuracy = sum([tag * w for tag, w in zip(tags_accuracy_list, tags_w_current)]) else: for tag_i in tags_list: tags_accuracy_list = [] tags_accuracy_list.append(dict_global_question_tag_accuracy[tag_i][2]) l = len(tags_accuracy_list) tags_w_l = tags_w[:l] tags_w_l_sum = sum(tags_w_l) tags_w_current = [x/tags_w_l_sum for x in tags_w_l] user_tags_accuracy = sum([tag * w for tag, w in zip(tags_accuracy_list, tags_w_current)]) values[cnt] = user_tags_accuracy else: values[cnt] = 0 return values def user_question_tag_accuracy_update(df, feat_calc): feat_list = ['user_id', 'answered_correctly', 'content_type_id', 'tags_list'] for cnt,(user, answer, content_type_id, tags_list)in enumerate(df[feat_list].values): if tags_list == -100: tags_list = [-100] if content_type_id == 0: if user in feat_calc: user_tags_accuracy = 0 for tag_i in tags_list: tags_accuracy_list = [] if tag_i in feat_calc[user][0]: tag_i_idx = feat_calc[user][0].index(tag_i) feat_calc[user][1][tag_i_idx] += 1 feat_calc[user][2][tag_i_idx] += answer else: feat_calc[user][0].append(tag_i) feat_calc[user][1].append(1) feat_calc[user][2].append(answer) else: feat_calc[user] = [[], [], []] for tag_i in tags_list: feat_calc[user][0].append(tag_i) feat_calc[user][1].append(1) feat_calc[user][2].append(answer) return feat_calc def user_correct_incorrect_timestamp_get(df, feat_dict): incorrect_list = [] correct_list = [] for(user_id, timestamp, content_type_id)in df[['user_id', 'timestamp', 'content_type_id']].values: timestamp = timestamp / 1000 / 60 correct_value, incorrect_value = 0, 0 if content_type_id: incorrect_list.append(-100) correct_list.append(-100) else: if user_id in feat_dict: incorrect_value, correct_value = feat_dict[user_id][0], feat_dict[user_id][1] else: incorrect_value, correct_value = np.nan, np.nan incorrect_value =(timestamp - incorrect_value) correct_value =(timestamp - correct_value) incorrect_list.append(incorrect_value) correct_list.append(correct_value) df['prior_question_incorrect_timedelta_min'] = incorrect_list df['prior_question_correct_timedelta_min'] = correct_list df['prior_question_incorrect_timedelta_min'] = df['prior_question_incorrect_timedelta_min'].fillna(-100 ).replace(0, method='ffill') df['prior_question_correct_timedelta_min'] = df['prior_question_correct_timedelta_min'].fillna(-100 ).replace(0, method='ffill') return df def user_correct_incorrect_timestamp_update(df, feat_dict): incorrect_list = [] correct_list = [] for(user_id, timestamp, content_type_id, answer)in df[['user_id', 'timestamp', 'content_type_id', 'answered_correctly']].values: timestamp = timestamp / 1000 / 60 if content_type_id == 0: if user_id in feat_dict: if answer: feat_dict[user_id][1] = timestamp else: feat_dict[user_id][0] = timestamp else: if answer: feat_dict[user_id] = [np.nan, timestamp] else: feat_dict[user_id] = [timestamp, np.nan] return feat_dict time_session_map = {'q_count_all' : 0, 'q_count_n' : 1, 'time_all_n' : 2, 'time_n' : 3, 'time_dict_all' : 4, 'time_dict_n' : 5, 'prior_timestamp' : 6, 'prior_container' : 7, 'prior_container_shape' : 8, 'prior_content_id' : 9, } def user_slice_question_time_mean_session(df_pd, feat_dict, session_max_time_min = 180): prior_question_elapsed_time_mean = 25452.541 out_mean_n = np.zeros(len(df_pd)) out_mean_all = np.zeros(len(df_pd)) out_delta_n = np.zeros(len(df_pd)) out_delta_all = np.zeros(len(df_pd)) calc_list = ['user_id', 'timestamp', 'task_container_id', 'content_id', 'content_type_id', 'prior_question_elapsed_time', 
'task_container_freq'] for cnt,(user, timestamp, task_container_id, content_id, content_type_id, prior_question_elapsed_time, task_container_freq)in enumerate(df_pd[calc_list].values): timestamp = timestamp / 1000 / 60 if content_id not in dict_content_elapsed_time_mean: content_id = -100 if content_type_id == 0: if user in feat_dict: time_delta =(timestamp - feat_dict[user][time_session_map['prior_timestamp']]) if time_delta < session_max_time_min: if task_container_id != feat_dict[user][time_session_map['prior_container']]: s = feat_dict[user][time_session_map['prior_container_shape']] c = feat_dict[user][time_session_map['prior_content_id']] feat_dict[user][time_session_map['time_all_n']] += prior_question_elapsed_time * s feat_dict[user][time_session_map['time_n']] += prior_question_elapsed_time * s feat_dict[user][time_session_map['q_count_all']] += s feat_dict[user][time_session_map['q_count_n']] += s feat_dict[user][time_session_map['time_dict_all']] += dict_content_elapsed_time_mean[c] * s feat_dict[user][time_session_map['time_dict_n']] += dict_content_elapsed_time_mean[c] * s feat_dict[user][time_session_map['prior_timestamp']] = timestamp feat_dict[user][time_session_map['prior_container']] = task_container_id feat_dict[user][time_session_map['prior_content_id']] = content_id feat_dict[user][time_session_map['prior_container_shape']] = task_container_freq out_mean_n[cnt] = feat_dict[user][time_session_map['time_n']] / feat_dict[user][time_session_map['q_count_n']] out_mean_all[cnt] = feat_dict[user][time_session_map['time_all_n']] / feat_dict[user][time_session_map['q_count_all']] out_delta_n[cnt] = feat_dict[user][time_session_map['time_n']] / feat_dict[user][time_session_map['time_dict_n']] out_delta_all[cnt] = feat_dict[user][time_session_map['time_all_n']] / feat_dict[user][time_session_map['time_dict_all']] else: out_mean_n[cnt] = out_mean_n[cnt-1] out_mean_all[cnt] = out_mean_all[cnt-1] out_delta_n[cnt] = out_delta_n[cnt-1] out_delta_all[cnt] = out_delta_all[cnt-1] else: if task_container_id != feat_dict[user][time_session_map['prior_container']]: s = feat_dict[user][time_session_map['prior_container_shape']] c = feat_dict[user][time_session_map['prior_content_id']] feat_dict[user][time_session_map['time_all_n']] += prior_question_elapsed_time * s feat_dict[user][time_session_map['time_n']] = 0 feat_dict[user][time_session_map['q_count_all']] += s feat_dict[user][time_session_map['q_count_n']] = 0 feat_dict[user][time_session_map['time_dict_all']] += dict_content_elapsed_time_mean[c] * s feat_dict[user][time_session_map['time_dict_n']] = 0 feat_dict[user][time_session_map['prior_timestamp']] = timestamp feat_dict[user][time_session_map['prior_container']] = task_container_id feat_dict[user][time_session_map['prior_content_id']] = content_id feat_dict[user][time_session_map['prior_container_shape']] = task_container_freq out_mean_n[cnt] = feat_dict[user][time_session_map['time_all_n']] / feat_dict[user][time_session_map['q_count_all']] out_mean_all[cnt] = feat_dict[user][time_session_map['time_all_n']] / feat_dict[user][time_session_map['q_count_all']] out_delta_n[cnt] = feat_dict[user][time_session_map['time_all_n']] / feat_dict[user][time_session_map['time_dict_all']] out_delta_all[cnt] = feat_dict[user][time_session_map['time_all_n']] / feat_dict[user][time_session_map['time_dict_all']] else: out_mean_n[cnt] = out_mean_n[cnt-1] out_mean_all[cnt] = out_mean_all[cnt-1] out_delta_n[cnt] = out_delta_n[cnt-1] out_delta_all[cnt] = out_delta_all[cnt-1] else: out_mean_n[cnt] = 
prior_question_elapsed_time_mean out_mean_all[cnt] = prior_question_elapsed_time_mean out_delta_n[cnt] = prior_question_elapsed_time_mean / dict_content_elapsed_time_mean[content_id] out_delta_all[cnt] = prior_question_elapsed_time_mean / dict_content_elapsed_time_mean[content_id] feat_dict[user] = [1, 1, 0, 0, dict_content_elapsed_time_mean[content_id], dict_content_elapsed_time_mean[content_id], timestamp, task_container_id, task_container_freq, content_id] else: out_mean_n[cnt], out_mean_all[cnt], out_delta_n[cnt], out_delta_all[cnt] = 0, 0, 0, 0 df_pd['user_question_time_mean_n_session'] = out_mean_n df_pd['user_question_time_mean_all_session'] = out_mean_all df_pd['user_question_time_delta_n_session'] = out_delta_n df_pd['user_question_time_delta_all_session'] = out_delta_all return df_pd user_priorq_expl_types_map = {'q_count' : 0, 'c_t_cnt' : 1, 'c_f_cnt' : 2, 'w_t_cnt' : 3, 'w_f_cnt' : 4, 'prior_container' : 5, 'prior_container_shape' : 6, 'prior_answer_expl_type' : 7, } def user_priorq_expl_types_get(df_pd, feat_dict): c_t_cnt = np.zeros(len(df_pd)) c_f_cnt = np.zeros(len(df_pd)) w_t_cnt = np.zeros(len(df_pd)) w_f_cnt = np.zeros(len(df_pd)) cw_tf_type = np.zeros(len(df_pd)) calc_list = ['user_id', 'task_container_id', 'content_type_id', 'prior_question_had_explanation', 'task_container_freq'] for cnt,(user, task_container_id, content_type_id, priorq_had_expl, task_container_freq)in enumerate(df_pd[calc_list].values): if content_type_id == 0: if user in feat_dict: if task_container_id != feat_dict[user][user_priorq_expl_types_map['prior_container']]: prior_c_shape = feat_dict[user][user_priorq_expl_types_map['prior_container_shape']] q_count = feat_dict[user][user_priorq_expl_types_map['q_count']] + 0.0001 c_t_cnt[cnt] = feat_dict[user][user_priorq_expl_types_map['c_t_cnt']] / q_count c_f_cnt[cnt] = feat_dict[user][user_priorq_expl_types_map['c_f_cnt']] / q_count w_t_cnt[cnt] = feat_dict[user][user_priorq_expl_types_map['w_t_cnt']] / q_count w_f_cnt[cnt] = feat_dict[user][user_priorq_expl_types_map['w_f_cnt']] / q_count cw_tf_type[cnt] = feat_dict[user][user_priorq_expl_types_map['prior_answer_expl_type']] else: c_t_cnt[cnt] = c_t_cnt[cnt - 1] c_f_cnt[cnt] = c_f_cnt[cnt - 1] w_t_cnt[cnt] = w_t_cnt[cnt - 1] w_f_cnt[cnt] = w_f_cnt[cnt - 1] cw_tf_type[cnt] = cw_tf_type[cnt - 1] else: c_t_cnt[cnt], c_f_cnt[cnt], w_t_cnt[cnt], w_f_cnt[cnt], w_f_cnt[cnt] = 0, 0, 0, 0, 0 else: c_t_cnt[cnt], c_f_cnt[cnt], w_t_cnt[cnt], w_f_cnt[cnt], cw_tf_type[cnt] = 0, 0, 0, 0, 0 df_pd['user_prior_correct_expl_prc'] = c_t_cnt df_pd['user_prior_correct_noexpl_prc'] = c_f_cnt df_pd['user_prior_wrong_expl_prc'] = w_t_cnt df_pd['user_prior_wrong_noexpl_prc'] = w_f_cnt df_pd['user_prior_answer_expl_type'] = cw_tf_type return df_pd def user_priorq_expl_types_update(df_pd, feat_dict): calc_list = ['user_id', 'task_container_id', 'content_type_id', 'prior_question_had_explanation', 'answered_correctly', 'task_container_freq'] for cnt,(user, task_container_id, content_type_id, priorq_had_expl, answer, task_container_freq)in enumerate(df_pd[calc_list].values): if answer: t1 = 'c_' else: t1 = 'w_' if priorq_had_expl: t2 = 't_cnt' else: t2 = 'f_cnt' col = t1 + t2 if content_type_id == 0: if user in feat_dict: if task_container_id != feat_dict[user][user_priorq_expl_types_map['prior_container']]: prior_c_shape = feat_dict[user][user_priorq_expl_types_map['prior_container_shape']] feat_dict[user][user_priorq_expl_types_map['q_count']] += prior_c_shape feat_dict[user][user_priorq_expl_types_map[col]] += prior_c_shape 
feat_dict[user][user_priorq_expl_types_map['prior_container']] = task_container_id feat_dict[user][user_priorq_expl_types_map['prior_container_shape']] = task_container_freq feat_dict[user][user_priorq_expl_types_map['prior_answer_expl_type']] = user_priorq_expl_types_map[col] else: feat_dict[user] = [0, 0, 0, 0, 0, task_container_id, task_container_freq, 0] return feat_dict def user_lectures_typeof_cnt(df, feat_dict): concept = np.zeros(len(df)) solving_question = np.zeros(len(df)) for cnt,(user, content_id, content_type_id)in enumerate(df[['user_id', 'content_id', 'content_type_id']].values): if content_type_id: if user in feat_dict: if content_id in dict_lectures: if dict_lectures[content_id]['type_of'] == 'concept': feat_dict[user][0] += 1 elif dict_lectures[content_id]['type_of'] == 'solving question': feat_dict[user][1] += 1 else: if content_id in dict_lectures: feat_dict[user] = [0, 0] if dict_lectures[content_id]['type_of'] == 'concept': feat_dict[user][0] += 1 elif dict_lectures[content_id]['type_of'] == 'solving question': feat_dict[user][1] += 1 concept[cnt] = 0 solving_question[cnt] = 0 else: if user in feat_dict: concept[cnt] = feat_dict[user][0] solving_question[cnt] = feat_dict[user][1] else: concept[cnt] = 0 solving_question[cnt] = 0 df['lecture_concept_cnt'] = concept.astype(np.uint16) df['lecture_solving_question_cnt'] = solving_question.astype(np.uint16) return df def user_answer_mode_n_get(df_pd, feat_dict, border=10): current_list = np.zeros(len(df_pd), dtype = np.uint8) for cnt,(user, content_type_id, content_id)in enumerate(df_pd[['user_id', 'content_type_id', 'content_id']].values): if content_type_id == 0: if user in feat_dict: if content_id in dict_questions: current_list[cnt] = max(set(feat_dict[user]), key=feat_dict[user].count)== dict_questions[content_id]['correct_answer'] else: current_list[cnt] = 100 else: current_list[cnt] = 100 else: current_list[cnt] = 0 return current_list def user_answer_mode_n_update(df_pd, feat_dict, border=10): for cnt,(user, user_answer, content_type_id, content_id)in enumerate(df_pd[['user_id', 'user_answer', 'content_type_id', 'content_id']].values): if content_type_id == 0: feat_dict[user].append(user_answer) feat_dict[user] = feat_dict[user][-border:] return feat_dict first_bundle_id_map = {7900 : 1, 128 : 2, 5692 : 3, -100 : 4, } def question_bundle_id_get(df_pd, feat_dict): current_list = np.zeros(len(df_pd)) for cnt,(content_type_id, content_id)in enumerate(df_pd[['content_type_id', 'content_id']].values): if content_type_id == 0: if content_id in feat_dict: current_list[cnt] = feat_dict[content_id]['bundle_id'] else: current_list[cnt] = -100 else: current_list[cnt] = 0 df_pd['bundle_id'] = current_list.astype(np.int32) return df_pd def user_bundle_cluster_get_update(df_pd, feat_dict): l = ['user_id', 'content_type_id', 'bundle_id', 'timestamp'] current_list = np.zeros(len(df_pd)) for cnt,(user, content_type_id, bundle, timestamp)in enumerate(df_pd[l].values): if user not in feat_dict: if content_type_id == 0 and timestamp == 0: if bundle in first_bundle_id_map: feat_dict[user] = first_bundle_id_map[bundle] else: feat_dict[user] = 4 else: feat_dict[user] = 4 current_list[cnt] = feat_dict[user] df_pd['first_bundle_id_cluster'] = current_list.astype(np.uint8) return df_pd def question_bundle_accuracy_update(df_pd, feat_dict): l = ['content_type_id', 'bundle_id', 'answered_correctly', 'first_bundle_id_cluster'] for cnt,(content_type_id, bundle_id, answer, cluster)in enumerate(df_pd[l].values): if content_type_id == 0: if 
bundle_id in feat_dict: feat_dict[bundle_id][0][0] += 1 feat_dict[bundle_id][0][1] += answer feat_dict[bundle_id][cluster][0] += 1 feat_dict[bundle_id][cluster][1] += answer else: feat_dict[bundle_id] = [[1, answer], [0, 0], [0, 0], [0, 0], [1, answer]] return feat_dict def question_bundle_accuracy_get(df_pd, feat_dict): current_list = np.zeros(( len(df_pd), 2), dtype = np.float32) l = ['content_type_id', 'bundle_id', 'first_bundle_id_cluster'] for cnt,(content_type_id, bundle_id, cluster)in enumerate(df_pd[l].values): if content_type_id == 0: if bundle_id in feat_dict: current_list[cnt, 0] = feat_dict[bundle_id][0][1] /(feat_dict[bundle_id][0][0] + 0.000001) current_list[cnt, 1] = feat_dict[bundle_id][cluster][1] /(feat_dict[bundle_id][cluster][0] + 0.000001) else: current_list[cnt, 0] = 0.67 current_list[cnt, 1] = 0.67 else: current_list[cnt, 0] = 0 current_list[cnt, 1] = 0 df_pd['bundle_id_all_accuracy'] = current_list[:,0].astype(np.float32) df_pd['bundle_id_cluster_accuracy'] = current_list[:,1].astype(np.float32) return df_pd<define_variables>
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=3, min_lr=0.00001 )
Digit Recognizer
594,887
question_part_map = { 'part_-100_count' : 0, 'part_-100_count_correct' : 1, 'part_-100_accuracy' : 2, 'part_1_count' : 3, 'part_1_count_correct' : 4, 'part_1_accuracy' : 5, 'part_2_count' : 6, 'part_2_count_correct' : 7, 'part_2_accuracy' : 8, 'part_3_count' : 9, 'part_3_count_correct' : 10, 'part_3_accuracy' : 11, 'part_4_count' : 12, 'part_4_count_correct' : 13, 'part_4_accuracy' : 14, 'part_5_count' : 15, 'part_5_count_correct' : 16, 'part_5_accuracy' : 17, 'part_6_count' : 18, 'part_6_count_correct' : 19, 'part_6_accuracy' : 20, 'part_7_count' : 21, 'part_7_count_correct' : 22, 'part_7_accuracy' : 23 } question_part_def_accuracy = { 1 : 0.745, 2 : 0.709, 3 : 0.701, 4 : 0.631, 5 : 0.610, 6 : 0.669, 7 : 0.660, -100 : 0.6, } def dict_user_question_part_accuracy_get(df_pd, feat_dict): current_list = np.zeros(len(df_pd)) for idx,(user, part_q, content_type_id)in enumerate(df_pd[['user_id', 'part_q', 'content_type_id']].values): part_q = int(part_q) if content_type_id == 0: map_accuracy = question_part_map['part_' + str(part_q)+ '_accuracy'] if user in feat_dict: current_list[idx] = feat_dict[user][map_accuracy] else: current_list[idx] = question_part_def_accuracy[part_q] else: current_list[idx] = 0 return current_list def dict_user_question_part_accuracy_update(df_pd, feat_dict, trust_border=10): for _,(user, part_q, content_type_id, ans_corr)in enumerate(df_pd[['user_id', 'part_q', 'content_type_id', 'answered_correctly']].values): part_q = int(part_q) if content_type_id == 0: map_cnt = question_part_map['part_' + str(part_q)+ '_count'] map_cnt_correct = question_part_map['part_' + str(part_q)+ '_count_correct'] map_accuracy = question_part_map['part_' + str(part_q)+ '_accuracy'] if user in feat_dict: feat_dict[user][map_cnt] += 1 feat_dict[user][map_cnt_correct] += ans_corr feat_dict[user][map_accuracy] = feat_dict[user][map_cnt_correct] / feat_dict[user][map_cnt] if feat_dict[user][map_cnt] < trust_border: feat_dict[user][map_accuracy] =(( feat_dict[user][map_accuracy] * feat_dict[user][map_cnt] + question_part_def_accuracy[part_q] *(trust_border - feat_dict[user][map_cnt])) / trust_border) else: feat_dict[user] = [0] * len(question_part_map) for i in range(1, 7): feat_dict[user][question_part_map['part_' + str(i)+ '_accuracy']] = question_part_def_accuracy[i] feat_dict[user][map_cnt] += 1 feat_dict[user][map_cnt_correct] += ans_corr feat_dict[user][map_accuracy] = feat_dict[user][map_cnt_correct] / feat_dict[user][map_cnt] feat_dict[user][map_accuracy] =(( feat_dict[user][map_accuracy] * feat_dict[user][map_cnt] + question_part_def_accuracy[part_q] *(trust_border - feat_dict[user][map_cnt])) / trust_border) return feat_dict<load_pretrained>
datagen = ImageDataGenerator( rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1 ) datagen.fit(split_train_x )
Digit Recognizer
594,887
def load_obj(name): with open('../input/riiid-numpy-df-3/' + name + '.pkl', 'rb') as f: return pickle.load(f)<load_pretrained>
model.fit_generator(datagen.flow(split_train_x,split_train_y, batch_size=64), epochs = 30, validation_data =(split_val_x,split_val_y), verbose = 2, steps_per_epoch=700 , callbacks=[reduce_lr] )
Digit Recognizer
594,887
cat_model = CatBoostClassifier() cat_model.load_model('../input/riiid-lgb-v1/cat_arvis_v4.cbm') lgb_model = lgb.Booster(model_file='../input/riiid-lgb-v1/model_lgb_7946_v8_full_data_arvis.txt')<load_pretrained>
prediction = model.predict_classes(arr_test_x_28x28, verbose=0) data_to_submit = pd.DataFrame({"ImageId": list(range(1,len(prediction)+1)) , "Label": prediction}) data_to_submit.to_csv("result.csv", header=True, index = False )
Digit Recognizer
594,887
dict_lectures = load_obj('dict_lectures') dict_questions = load_obj('dict_questions') dict_question_user_cnt = load_obj('dict_question_user_cnt') dict_correct_answers_user_cnt = load_obj('dict_correct_answers_user_cnt') dict_question_explonation_user_cnt = load_obj('dict_question_explonation_user_cnt') dict_questionid_part_tag12_avgtarget = load_obj('dict_questionid_part_tag12_avgtarget_5') dict_user_question_attempt_cnt = load_obj('dict_user_question_attempt_cnt') dict_user_lectures_part = load_obj('dict_user_lectures_part') dict_user_question_part_accuracy = load_obj('dict_user_question_part_accuracy') dict_user_l_q_tag_equal = load_obj('dict_user_l_q_tag_equal') dict_user_slice_accuracy_5 = load_obj('dict_user_slice_accuracy_5') dict_user_slice_accuracy_20 = load_obj('dict_user_slice_accuracy_20') dict_user_slice_accuracy_50 = load_obj('dict_user_slice_accuracy_50') dict_user_slice_accuracy_session_3 = load_obj('dict_user_slice_accuracy_session_3') dict_user_slice_accuracy_session_12 = load_obj('dict_user_slice_accuracy_session_12') dict_user_slice_accuracy_session_48 = load_obj('dict_user_slice_accuracy_session_48') dict_user_timestampsdelta_3 = load_obj('dict_user_timestampsdelta_3') dict_global_question_tag_accuracy = load_obj('dict_global_question_tag_accuracy') dict_global_question_tag_accuracy[-100] = 0.64 dict_user_question_tag_accuracy = load_obj('dict_user_question_tag_accuracy') dict_user_correct_incorrect_timestamp = load_obj('dict_user_correct_incorrect_timestamp') dict_content_elapsed_time_mean = load_obj('dict_content_elapsed_time_mean') dict_user_slice_question_time_mean_session = load_obj('dict_user_slice_question_time_mean_session') dict_user_priorq_expl_types = load_obj('dict_user_priorq_expl_types') dict_user_lectures_typeof_cnt = load_obj('dict_user_lectures_typeof_cnt') dict_user_answer_mode_10 = load_obj('dict_user_answer_mode_10') dict_user_answer_mode_50 = load_obj('dict_user_answer_mode_50') dict_question_bundle_accuracy = load_obj('dict_question_bundle_accuracy') dict_user_bundle_cluster = load_obj('dict_user_bundle_cluster' )<define_variables>
start_idx = randrange(df_test.shape[0]-10 )
Digit Recognizer
10,242,261
features_map = { 'row_id' : 0, 'timestamp' : 1, 'user_id' : 2, 'content_id' : 3, 'content_type_id' : 4, 'task_container_id' : 5, 'prior_question_elapsed_time' : 6, 'prior_question_had_explanation' : 7, 'prior_group_answers_correct' : 8, 'prior_group_responses' : 9, 'prior_question_1_timedelta_min' : 10, 'prior_lecture_1_timedelta_min' : 11, 'prior_question_2_timedelta_min' : 12, 'prior_lecture_2_timedelta_min' : 13, 'prior_question_3_timedelta_min' : 14, 'prior_lecture_3_timedelta_min' : 15, 'user_slice_accuracy_5' : 16, 'user_slice_accuracy_20' : 17, 'user_slice_accuracy_50' : 18, 'user_slice_accuracy_session_3' : 19, 'user_slice_accuracy_session_12' : 20, 'user_slice_accuracy_session_48' : 21, 'task_container_freq' : 22, 'task_container_counter' : 23, 'user_question_attempt_cnt' : 24, 'lecture_cnt' : 25, 'lecture_concept_cnt' : 26, 'lecture_solving_question_cnt' : 27, 'part_l_q_cnt' : 28, 'tag_l_q_equal_cnt' : 29, 'user_question_part_accuracy' : 30, 'user_question_tag_accuracy' : 31, 'prior_question_incorrect_timedelta_min' : 32, 'prior_question_correct_timedelta_min' : 33, 'user_question_time_mean_n_session' : 34, 'user_question_time_mean_all_session' : 35, 'user_question_time_delta_n_session' : 36, 'user_question_time_delta_all_session' : 37, 'user_prior_correct_expl_prc' : 38, 'user_prior_correct_noexpl_prc' : 39, 'user_prior_wrong_expl_prc' : 40, 'user_prior_wrong_noexpl_prc' : 41, 'user_prior_answer_expl_type' : 42, 'user_answer_mode_10' : 43, 'user_answer_mode_50' : 44, 'first_bundle_id_cluster' : 45, 'bundle_id_all_accuracy' : 46, 'bundle_id_cluster_accuracy' : 47, 'question_user_cnt' : 48, 'correct_answers_user_cnt' : 49, 'correct_answers_user_prc' : 50, 'prior_question_had_explanation_user_cnt' : 51, 'prior_question_had_explanation_user_prc' : 52, 'content_id_mean' : 53, 'part' : 54, 'part_mean' : 55, 'tag_1_mean' : 56, 'tag_2_mean' : 57, 'user_to_question_accuracy' : 58, 'hmean_user_content_accuracy' : 59 } dict_questionid_part_tag12_avgtarget_map = { 'content_id_cnt' : 0, 'content_correct_cnt' : 1, 'answered_correctly_avg_content_smooth' : 2, 'part' : 3, 'answered_correctly_avg_part' : 4, 'answered_correctly_avg_tag_1' : 5, 'answered_correctly_avg_tag_2' : 6, 'answered_correctly_avg_part_tag_1' : 7, 'answered_correctly_avg_part_tag_2' : 8, 'tags_encoded' : 9 } train_cols_clf = [features_map['prior_question_1_timedelta_min'], features_map['prior_question_2_timedelta_min'], features_map['prior_question_3_timedelta_min'], features_map['prior_lecture_1_timedelta_min'], features_map['prior_lecture_2_timedelta_min'], features_map['prior_lecture_3_timedelta_min'], features_map['task_container_freq'], features_map['task_container_counter'], features_map['user_question_attempt_cnt'], features_map['prior_question_elapsed_time'], features_map['prior_question_had_explanation'], features_map['question_user_cnt'], features_map['correct_answers_user_cnt'], features_map['correct_answers_user_prc'], features_map['prior_question_had_explanation_user_cnt'], features_map['prior_question_had_explanation_user_prc'], features_map['user_slice_accuracy_5'], features_map['user_slice_accuracy_20'], features_map['user_slice_accuracy_50'], features_map['user_slice_accuracy_session_3'], features_map['user_slice_accuracy_session_12'], features_map['user_slice_accuracy_session_48'], features_map['lecture_cnt'], features_map['lecture_concept_cnt'], features_map['lecture_solving_question_cnt'], features_map['part_l_q_cnt'], features_map['tag_l_q_equal_cnt'], features_map['user_question_part_accuracy'], 
features_map['user_question_tag_accuracy'], features_map['prior_question_incorrect_timedelta_min'], features_map['prior_question_correct_timedelta_min'], features_map['user_question_time_mean_n_session'], features_map['user_question_time_mean_all_session'], features_map['user_question_time_delta_n_session'], features_map['user_question_time_delta_all_session'], features_map['user_prior_correct_expl_prc'], features_map['user_prior_correct_noexpl_prc'], features_map['user_prior_wrong_expl_prc'], features_map['user_prior_wrong_noexpl_prc'], features_map['user_prior_answer_expl_type'], features_map['user_answer_mode_10'], features_map['user_answer_mode_50'], features_map['first_bundle_id_cluster'], features_map['bundle_id_all_accuracy'], features_map['bundle_id_cluster_accuracy'], features_map['content_id_mean'], features_map['part'], features_map['part_mean'], features_map['tag_1_mean'], features_map['tag_2_mean'], features_map['user_to_question_accuracy'], features_map['hmean_user_content_accuracy'], ] train_cols_lgb = [features_map['prior_question_1_timedelta_min'], features_map['prior_question_2_timedelta_min'], features_map['prior_question_3_timedelta_min'], features_map['prior_lecture_1_timedelta_min'], features_map['prior_lecture_2_timedelta_min'], features_map['prior_lecture_3_timedelta_min'], features_map['task_container_freq'], features_map['task_container_counter'], features_map['user_question_attempt_cnt'], features_map['prior_question_elapsed_time'], features_map['prior_question_had_explanation'], features_map['question_user_cnt'], features_map['correct_answers_user_cnt'], features_map['correct_answers_user_prc'], features_map['prior_question_had_explanation_user_cnt'], features_map['prior_question_had_explanation_user_prc'], features_map['user_slice_accuracy_5'], features_map['user_slice_accuracy_20'], features_map['user_slice_accuracy_50'], features_map['user_slice_accuracy_session_3'], features_map['user_slice_accuracy_session_12'], features_map['user_slice_accuracy_session_48'], features_map['lecture_cnt'], features_map['lecture_concept_cnt'], features_map['lecture_solving_question_cnt'], features_map['part_l_q_cnt'], features_map['tag_l_q_equal_cnt'], features_map['user_question_part_accuracy'], features_map['user_question_tag_accuracy'], features_map['prior_question_incorrect_timedelta_min'], features_map['prior_question_correct_timedelta_min'], features_map['user_question_time_mean_n_session'], features_map['user_question_time_mean_all_session'], features_map['user_question_time_delta_n_session'], features_map['user_question_time_delta_all_session'], features_map['user_prior_correct_expl_prc'], features_map['user_prior_correct_noexpl_prc'], features_map['user_prior_wrong_expl_prc'], features_map['user_prior_wrong_noexpl_prc'], features_map['user_prior_answer_expl_type'], features_map['user_answer_mode_10'], features_map['user_answer_mode_50'], features_map['first_bundle_id_cluster'], features_map['bundle_id_all_accuracy'], features_map['bundle_id_cluster_accuracy'], features_map['content_id_mean'], features_map['part'], features_map['part_mean'], features_map['tag_1_mean'], features_map['tag_2_mean'], features_map['user_to_question_accuracy'], features_map['hmean_user_content_accuracy'], ]<define_variables>
df_train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') df_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
Digit Recognizer
10,242,261
idx = 86867031<split>
x_train = np.array(df_train.iloc[:,1:]) x_train = np.array([np.reshape(i,(28, 28, 1)) for i in x_train]) y_train = np.array(df_train.iloc[:,0] )
Digit Recognizer
10,242,261
env = riiideducation.make_env() iter_test = env.iter_test()<feature_engineering>
x_train = x_train/255.0 y_train = keras.utils.to_categorical(y_train )
Digit Recognizer
10,242,261
previous_test_df = pd.DataFrame() prev_test_df = None for(test_df, sample_prediction_df)in iter_test: test_df_saint = test_df.copy() if(prev_test_df is not None)&(psutil.virtual_memory().percent < 90): prev_test_df['answered_correctly'] = eval(test_df['prior_group_answers_correct'].iloc[0]) prev_test_df = prev_test_df[prev_test_df.content_type_id == False] prev_test_df = feature_time_lag(prev_test_df, time_dict) prev_group = prev_test_df[['user_id', 'content_id', 'answered_correctly', 'part', 'prior_question_elapsed_time', 'time_lag', 'prior_question_had_explanation']].groupby('user_id' ).apply(lambda r:( r['content_id'].values, r['answered_correctly'].values, r['part'].values, r['prior_question_elapsed_time'].values, r['time_lag'].values, r['prior_question_had_explanation'].values)) for prev_user_id in prev_group.index: if prev_user_id in group.index: group[prev_user_id] =( np.append(group[prev_user_id][0], prev_group[prev_user_id][0])[-MAX_SEQ:], np.append(group[prev_user_id][1], prev_group[prev_user_id][1])[-MAX_SEQ:], np.append(group[prev_user_id][2], prev_group[prev_user_id][2])[-MAX_SEQ:], np.append(group[prev_user_id][3], prev_group[prev_user_id][3])[-MAX_SEQ:], np.append(group[prev_user_id][4], prev_group[prev_user_id][4])[-MAX_SEQ:], np.append(group[prev_user_id][5], prev_group[prev_user_id][5])[-MAX_SEQ:] ) else: group[prev_user_id] =( prev_group[prev_user_id][0], prev_group[prev_user_id][1], prev_group[prev_user_id][2], prev_group[prev_user_id][3], prev_group[prev_user_id][4], prev_group[prev_user_id][5] ) test_df_saint.prior_question_elapsed_time = test_df_saint.prior_question_elapsed_time.fillna(0) test_df_saint['prior_question_had_explanation'] = test_df_saint['prior_question_had_explanation'].fillna(value = False ).astype(int) test_df_saint = test_df_saint.merge(questions_df[["question_id","part"]], how = "left",left_on = 'content_id', right_on = 'question_id') prev_test_df = test_df_saint.copy() test_df_saint = test_df_saint[test_df_saint.content_type_id == False] test_df_saint = feature_time_lag(test_df_saint, time_dict) test_dataset = TestDataset(group, test_df_saint, n_skill) test_dataloader = DataLoader(test_dataset, batch_size=51200, shuffle=False) outs1 = [] outs2 = [] outs3 = [] for item in test_dataloader: exercise = item[0].to(device ).long() part = item[1].to(device ).long() response = item[2].to(device ).long() elapsed_time = item[3].to(device ).long() lag_time = item[4].to(device ).long() pri_exp = item[5].to(device ).long() with torch.no_grad() : output1 = model1(exercise, part, response, elapsed_time, lag_time, pri_exp) output2 = model2(exercise, part, response, elapsed_time, lag_time, pri_exp) output3 = model3(exercise, part, response, elapsed_time, lag_time, pri_exp) outs1.extend(torch.sigmoid(output1)[:, -1].view(-1 ).data.cpu().numpy()) outs2.extend(torch.sigmoid(output2)[:, -1].view(-1 ).data.cpu().numpy()) outs3.extend(torch.sigmoid(output3)[:, -1].view(-1 ).data.cpu().numpy()) if previous_test_df.shape[0] != 0: previous_test_df['answered_correctly'] = eval(test_df['prior_group_answers_correct'].iloc[0]) previous_test_df['user_answer'] = eval(test_df['prior_group_responses'].iloc[0]) previous_test_df = previous_test_df[previous_test_df['content_type_id'] == 0] previous_test_df['q_counter'] = 1 update_dict(previous_test_df, dict_question_user_cnt, 'user_id', 'q_counter') update_dict(previous_test_df, dict_correct_answers_user_cnt, 'user_id', 'answered_correctly') previous_test_df['prior_question_had_explanation'] = 
previous_test_df['prior_question_had_explanation'].fillna(0 ).astype(int) update_dict(previous_test_df, dict_question_explonation_user_cnt, 'user_id', 'prior_question_had_explanation') update_dict(previous_test_df, dict_questionid_part_tag12_avgtarget, 'content_id', 'q_counter', 0) update_dict(previous_test_df, dict_questionid_part_tag12_avgtarget, 'content_id', 'answered_correctly', 1) answered_correctly_avg_content_global = 0.5 trust_border = 5 for c in previous_test_df['content_id'].unique() : answered_correctly_avg_content = dict_questionid_part_tag12_avgtarget[c][1] / dict_questionid_part_tag12_avgtarget[c][0] if dict_questionid_part_tag12_avgtarget[c][0] >= trust_border: dict_questionid_part_tag12_avgtarget[c][2] = answered_correctly_avg_content else: border_calc_K = np.minimum(dict_questionid_part_tag12_avgtarget[c][0], trust_border) border_calc_L =(trust_border - border_calc_K)/ trust_border dict_questionid_part_tag12_avgtarget[c][2] =( answered_correctly_avg_content * border_calc_K / trust_border + answered_correctly_avg_content_global * border_calc_L ) dict_user_question_part_accuracy = dict_user_question_part_accuracy_update(previous_test_df, dict_user_question_part_accuracy) dict_user_slice_accuracy_5 = user_slice_accuracy_n_update(previous_test_df, dict_user_slice_accuracy_5, border=5) dict_user_slice_accuracy_20 = user_slice_accuracy_n_update(previous_test_df, dict_user_slice_accuracy_20, border=20) dict_user_slice_accuracy_50 = user_slice_accuracy_n_update(previous_test_df, dict_user_slice_accuracy_50, border=50) dict_user_slice_accuracy_session_3 = user_slice_accuracy_session_update(previous_test_df, dict_user_slice_accuracy_session_3, session_max_time=3) dict_user_slice_accuracy_session_12 = user_slice_accuracy_session_update(previous_test_df, dict_user_slice_accuracy_session_12, session_max_time=12) dict_user_slice_accuracy_session_48 = user_slice_accuracy_session_update(previous_test_df, dict_user_slice_accuracy_session_48, session_max_time=48) dict_user_correct_incorrect_timestamp = user_correct_incorrect_timestamp_update(previous_test_df, dict_user_correct_incorrect_timestamp) dict_user_question_tag_accuracy = user_question_tag_accuracy_update(previous_test_df, dict_user_question_tag_accuracy) dict_user_priorq_expl_types = user_priorq_expl_types_update(previous_test_df, dict_user_priorq_expl_types) dict_user_answer_mode_10 = user_answer_mode_n_update(previous_test_df, dict_user_answer_mode_10, border=10) dict_user_answer_mode_50 = user_answer_mode_n_update(previous_test_df, dict_user_answer_mode_50, border=50) dict_question_bundle_accuracy = question_bundle_accuracy_update(previous_test_df, dict_question_bundle_accuracy) update_user_feats(previous_test_df) for user_id, answered_correctly, t in zip(previous_test_df['user_id'].values, previous_test_df['answered_correctly'].values, previous_test_df['timestamp'].values): if user_id in lt_correct_dict['timestamp']: if t == lt_correct_dict['timestamp'][user_id]: lt_correct_dict['last_timestamp_correct_cnt'][user_id] += 1 lt_correct_dict['last_timestamp_correct_sum'][user_id] += answered_correctly else: lt_correct_dict['timestamp'].update({user_id:t}) lt_correct_dict['last_timestamp_correct_cnt'][user_id] = 1 lt_correct_dict['last_timestamp_correct_sum'][user_id] = answered_correctly else: lt_correct_dict['timestamp'].update({user_id:t}) lt_correct_dict['last_timestamp_correct_cnt'].update({user_id:1}) lt_correct_dict['last_timestamp_correct_sum'].update({user_id:answered_correctly}) 
lt_correct_dict['last_timestamp_correct_pct'][user_id] = lt_correct_dict['last_timestamp_correct_sum'][user_id] / lt_correct_dict['last_timestamp_correct_cnt'][user_id] previous_test_df = test_df.copy() test_df2 = test_df.copy() prior_question_elapsed_time_mean = 25452.541 test_df2 = test_df[test_df['content_type_id'] == 0].reset_index(drop=True) lt_correct_cnt = np.zeros(len(test_df2), dtype=np.int8) lt_correct_sum = np.zeros(len(test_df2), dtype=np.int8) lt_correct_pct = np.zeros(len(test_df2), dtype=np.float16) test_df2 = add_user_feats_without_update(test_df2) test_df2 = add_uq_feats_and_update(test_df2) test_df2 = lagtime_for_test(test_df2) for i,(user_id, t)in enumerate(zip(test_df2['user_id'].values, test_df2['timestamp'].values)) : if user_id in lt_correct_dict['timestamp']: lt_correct_cnt[i] = lt_correct_dict['last_timestamp_correct_cnt'][user_id] lt_correct_sum[i] = lt_correct_dict['last_timestamp_correct_sum'][user_id] lt_correct_pct[i] = lt_correct_dict['last_timestamp_correct_pct'][user_id] else: lt_correct_cnt[i] = -1 lt_correct_sum[i] = -1 lt_correct_pct[i] = -1 test_df2['last_timestamp_correct_cnt'] = lt_correct_cnt test_df2['last_timestamp_correct_sum'] = lt_correct_sum test_df2['last_timestamp_correct_pct'] = lt_correct_pct additional_test_feature = test_df2[['last_timestamp_correct_cnt', 'last_timestamp_correct_sum', 'last_timestamp_correct_pct', 'lag_time', 'lag_time2', 'lag_time3', 'curr_user_time_diff', 'curr_user_time_diff_mean', 'curr_user_elapsed_time_diff', 'curr_uq_time_diff']].values test_df['prior_question_elapsed_time'] = test_df['prior_question_elapsed_time'].fillna(prior_question_elapsed_time_mean) test_df['prior_group_answers_correct'] = 0 test_df['prior_group_responses'] = 0 test_df = test_df.replace([np.inf, -np.inf], np.nan) test_df = test_df.fillna(0) test_df = dict_user_timestampsdelta_get_update_3(test_df, dict_user_timestampsdelta_3) test_df['user_slice_accuracy_5'] = user_slice_accuracy_n_get(test_df, dict_user_slice_accuracy_5) test_df['user_slice_accuracy_20'] = user_slice_accuracy_n_get(test_df, dict_user_slice_accuracy_20) test_df['user_slice_accuracy_50'] = user_slice_accuracy_n_get(test_df, dict_user_slice_accuracy_50) test_df['user_slice_accuracy_session_3'] = user_slice_accuracy_session_get(test_df, dict_user_slice_accuracy_session_3, session_max_time=3) test_df['user_slice_accuracy_session_12'] = user_slice_accuracy_session_get(test_df, dict_user_slice_accuracy_session_12, session_max_time=12) test_df['user_slice_accuracy_session_48'] = user_slice_accuracy_session_get(test_df, dict_user_slice_accuracy_session_48, session_max_time=48) test_df['task_container_freq'] = test_df.groupby(['user_id', 'task_container_id'])['task_container_id'].transform('count') test_df['task_container_counter'] = test_df[['user_id', 'task_container_id', 'content_id']].groupby(['user_id', 'task_container_id'], as_index=False ).agg(['cumcount'])+ 1 test_df['user_question_attempt_cnt'] = user_question_attempt_cnt_get_update(test_df, dict_user_question_attempt_cnt ).astype(np.int16) test_df['lecture_cnt'] = user_lecture_cnt(test_df, dict_user_lectures_part) test_df = user_lectures_typeof_cnt(test_df, dict_user_lectures_typeof_cnt) test_df['part_q'] = get_q_l(test_df, dict_questions, 'part') test_df['tags_list'] = get_q_l(test_df, dict_questions, 'tags_list') test_df['part_l'] = get_q_l(test_df, dict_lectures, 'part') test_df['tag_l'] = get_q_l(test_df, dict_lectures, 'tag') test_df['part_l_q_cnt'] = user_lectures_part(test_df, dict_user_lectures_part 
).astype(np.int16) test_df['tag_l_q_equal_cnt'] = user_l_q_tag_equal(test_df, dict_user_l_q_tag_equal ).astype(np.int16) test_df['user_question_part_accuracy'] = dict_user_question_part_accuracy_get(test_df, dict_user_question_part_accuracy) test_df['user_question_tag_accuracy'] = user_question_tag_accuracy_get(test_df, dict_user_question_tag_accuracy) test_df = user_correct_incorrect_timestamp_get(test_df, dict_user_correct_incorrect_timestamp) test_df = user_slice_question_time_mean_session(test_df, dict_user_slice_question_time_mean_session) test_df = user_priorq_expl_types_get(test_df, dict_user_priorq_expl_types) test_df['user_answer_mode_10'] = user_answer_mode_n_get(test_df, dict_user_answer_mode_10, border=10) test_df['user_answer_mode_50'] = user_answer_mode_n_get(test_df, dict_user_answer_mode_50, border=50) test_df = question_bundle_id_get(test_df, dict_questions) test_df = user_bundle_cluster_get_update(test_df, dict_user_bundle_cluster) test_df = question_bundle_accuracy_get(test_df, dict_question_bundle_accuracy) previous_test_df['part_q'] = test_df['part_q'].values previous_test_df['tags_list'] = test_df['tags_list'].values previous_test_df['task_container_freq'] = test_df['task_container_freq'].values previous_test_df['bundle_id'] = test_df['bundle_id'].values previous_test_df['first_bundle_id_cluster'] = test_df['first_bundle_id_cluster'].values test_df.drop(columns=['part_l', 'part_q', 'tag_l', 'tags_list', 'bundle_id'], inplace=True) test_df = test_df[test_df['content_type_id'] == 0] if test_df.shape[0] > 0: np_test_df = test_df.to_numpy(dtype=np.float64, na_value=0) col_idx = features_map['user_id'] np_test_df = add_feats_from_dict(np_test_df, dict_question_user_cnt, col_idx) np_test_df = add_feats_from_dict(np_test_df, dict_correct_answers_user_cnt, col_idx) col_numerator = features_map['correct_answers_user_cnt'] col_denominator = features_map['question_user_cnt'] np_test_df = np.c_[ np_test_df, np.divide(np_test_df[:,col_numerator], np_test_df[:,col_denominator], out=np.zeros_like(np_test_df[:,col_denominator]), where=np_test_df[:,col_denominator]!=0)] np_test_df = add_feats_from_dict(np_test_df, dict_question_explonation_user_cnt, col_idx) col_numerator = features_map['prior_question_had_explanation_user_cnt'] col_denominator = features_map['question_user_cnt'] np_test_df = np.c_[ np_test_df, np.divide(np_test_df[:,col_numerator], np_test_df[:,col_denominator], out=np.zeros_like(np_test_df[:,col_denominator]), where=np_test_df[:,col_denominator]!=0)] for i in [dict_questionid_part_tag12_avgtarget_map['answered_correctly_avg_content_smooth'], dict_questionid_part_tag12_avgtarget_map['part'], dict_questionid_part_tag12_avgtarget_map['answered_correctly_avg_part'], dict_questionid_part_tag12_avgtarget_map['answered_correctly_avg_tag_1'], dict_questionid_part_tag12_avgtarget_map['answered_correctly_avg_tag_2'], ]: np_test_df = add_feats_from_dict_got_new_user(np_test_df, dict_questionid_part_tag12_avgtarget, col_idx=features_map['content_id'], col_dict=i) col_numerator = features_map['correct_answers_user_prc'] col_denominator = features_map['content_id_mean'] np_test_df = np.c_[ np_test_df, np.divide(np_test_df[:,col_numerator], np_test_df[:,col_denominator], out=np.zeros_like(np_test_df[:,col_denominator]), where=np_test_df[:,col_denominator]!=0)] user_acc = features_map['correct_answers_user_prc'] question_acc = features_map['content_id_mean'] np_test_df = np.c_[ np_test_df, 2*(np_test_df[:,user_acc] * np_test_df[:,question_acc])/(np_test_df[:,user_acc] + 
np_test_df[:,question_acc])] pred_feature = np_test_df[:,train_cols_clf] pred_feature = np.concatenate(( pred_feature, additional_test_feature), axis=1) pred = pd.DataFrame() pred['score_lgbm'] = lgb_model.predict(pred_feature) pred['score_cat'] = cat_model.predict_proba(pred_feature)[:,1] pred['score_saint1'] = np.array(outs1) pred['score_saint2'] = np.array(outs2) pred['score_saint3'] = np.array(outs3) test_df['answered_correctly'] =(c1*pred['score_lgbm'] + c1_2 * pred['score_cat'] + c2*pred['score_saint1'] + c3*pred['score_saint2']+ c4*pred['score_saint3'] ).values else: test_df['answered_correctly'] = np.array([], dtype = np.float32) env.predict(test_df[['row_id', 'answered_correctly']] )<import_modules>
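For readability: the flattened block above follows the standard riiideducation time-series submission loop. A minimal sketch of that skeleton (feature building and the LGBM/CatBoost/SAINT calls elided; the make_env/iter_test setup lines are assumed from the competition API):

import riiideducation

env = riiideducation.make_env()                 # one environment per notebook run
iter_test = env.iter_test()                     # test batches arrive in timestamp order

for test_df, sample_prediction_df in iter_test:
    # labels of the previous batch arrive with the current one; use them to
    # update user/content statistics before building features, e.g.
    # prior = eval(test_df['prior_group_answers_correct'].iloc[0])
    # ... build features, score with each model, blend the probabilities ...
    test_df['answered_correctly'] = 0.5          # placeholder for the blended score
    env.predict(test_df[['row_id', 'answered_correctly']])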
x_test = np.array(df_test) x_test = np.array([np.reshape(i,(28, 28, 1)) for i in x_test]) x_test = x_test/255.0
Digit Recognizer
10,242,261
import torch import pandas as pd from saintmodel import SaintModel, SaintLightningModule, SaintHistory from saintsubmit import load_saint_config, SaintPredictor, make_submission<choose_model_class>
X_train, X_test, Y_train, Y_test = train_test_split(x_train, y_train, test_size=0.2, stratify=y_train )
Digit Recognizer
10,242,261
args = load_saint_config() model = SaintModel( seq_len=args.seq_len, n_dim=args.n_dim, std=args.std, dropout=args.dropout, nhead=args.nhead, n_layers=args.n_layers ) module = SaintLightningModule(args, model )<load_pretrained>
model = keras.models.Sequential() model.add(keras.layers.Conv2D(filters=32, kernel_size=(3,3), kernel_initializer='random_uniform', padding='same', activation='relu', input_shape=(X_train.shape[1:]))) model.add(keras.layers.Conv2D(filters=32, kernel_size=(3,3), kernel_initializer='random_uniform', padding='same', activation='relu')) model.add(keras.layers.MaxPool2D(pool_size=(2,2))) model.add(keras.layers.Conv2D(filters=64, kernel_size=(5,5), kernel_initializer='random_uniform', padding='same', activation='relu')) model.add(keras.layers.Conv2D(filters=64, kernel_size=(5,5), kernel_initializer='random_uniform', padding='same', activation='relu')) model.add(keras.layers.MaxPool2D(pool_size=(2,2))) model.add(keras.layers.Conv2D(filters=128, kernel_size=(7,7), kernel_initializer='random_uniform', padding='same', activation='relu')) model.add(keras.layers.Conv2D(filters=128, kernel_size=(7,7), kernel_initializer='random_uniform', padding='same', activation='relu')) model.add(keras.layers.MaxPool2D(pool_size=(3,3))) model.add(keras.layers.Conv2D(filters=256, kernel_size=(7,7), padding='same')) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(units=256, activation='relu')) model.add(keras.layers.Dropout(0.5)) model.add(keras.layers.Dense(units=100, activation='relu')) model.add(keras.layers.Dropout(0.5)) model.add(keras.layers.Flatten()) model.add(keras.layers.Dense(units=y_train.shape[1], activation='softmax')) print(model.summary()) model.compile(loss='categorical_crossentropy',optimizer='Adam', metrics=['accuracy'] )
Digit Recognizer
10,242,261
%%time module.load_state_dict(torch.load('/kaggle/input/riiid-saintp-solution/saint.ckpt')['state_dict'] )<load_pretrained>
es = EarlyStopping(monitor='loss', mode='min', verbose=1, patience=5) filepath = "model.h5" ckpt = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min') rlp = ReduceLROnPlateau(monitor='loss', patience=2, factor=0.2 )
Digit Recognizer
10,242,261
%%time last_history = pd.read_pickle('/kaggle/input/riiid-saintp-solution/last_history.pickle') last_timestamp = pd.read_pickle('/kaggle/input/riiid-saintp-solution/last_timestamp.pickle') last_user_count = pd.read_pickle('/kaggle/input/riiid-saintp-solution/last_user_count.pickle') dict_lag = pd.read_pickle('/kaggle/input/riiid-saintp-solution/dict_lag.pickle') dict_elapsed = pd.read_pickle('/kaggle/input/riiid-saintp-solution/dict_elapsed.pickle') dict_user_count = pd.read_pickle('/kaggle/input/riiid-saintp-solution/dict_user_count.pickle' )<set_options>
history = model.fit(X_train, Y_train, batch_size=500, callbacks=[es, ckpt, rlp], epochs=100, validation_data=(X_test,Y_test))
Digit Recognizer
10,242,261
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') module.to(device )<choose_model_class>
id_img = [] label = [] for i in range(len(x_test)) : id_img.append(i+1) label.append(np.argmax(model.predict(x_test[i:i+1]))) img_id = np.array(id_img) label = np.array(label )
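The loop above calls model.predict once per test image; a minimal equivalent sketch (assuming the same trained model and the x_test array built earlier) scores the whole test set in one batched call:

predictions = model.predict(x_test)            # shape (n_samples, 10)
label = np.argmax(predictions, axis=1)         # predicted digit per row
img_id = np.arange(1, len(x_test) + 1)         # 1-based ImageId values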
Digit Recognizer
10,242,261
<set_options><EOS>
op_df = pd.DataFrame() op_df['ImageId'] = img_id op_df['Label'] = label op_df.to_csv("submission.csv", index=False )
Digit Recognizer
9,870,532
<compute_test_metric><EOS>
decay=1e-4 xtrain = [] ytrain = [] xtest = [] xval = [] yval = [] for dirname, _, filenames in os.walk('/kaggle/input/digit-recognizer/'): for filename in filenames: print(os.path.join(dirname, filename)) train =pd.read_csv(os.path.join(dirname,'train.csv')) test =pd.read_csv(os.path.join(dirname,'test.csv')) sample_submission =pd.read_csv(os.path.join(dirname,'sample_submission.csv')) val = train[35000:] train = train[:35000] xtrain=train.drop('label',axis=1) ytrain=train.label xtest = test xval=val.drop('label',axis=1) yval=val.label xtrain=xtrain.values xtest=xtest.values xval=xval.values ytrain=ytrain.values yval=yval.values xtrain=xtrain.reshape(-1,28,28,1 ).astype('float32') xtest=xtest.reshape(-1,28,28,1 ).astype('float32') xval=xval.reshape(-1,28,28,1 ).astype('float32') xtrain=(xtrain-np.mean(xtrain)) /255 xtest=(xtest-np.mean(xtest)) /255 xval=(xval-np.mean(xval)) /255 model=Sequential() model.add(layers.Conv2D(64,(3,3), padding='same', input_shape=(28, 28, 1))) model.add(layers.BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform")) model.add(layers.LeakyReLU(alpha=0.1)) model.add(layers.Conv2D(64,(3,3), padding='same')) model.add(layers.BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform")) model.add(layers.LeakyReLU(alpha=0.1)) model.add(layers.Conv2D(64,(3,3), padding='same')) model.add(layers.BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform")) model.add(layers.LeakyReLU(alpha=0.1)) model.add(layers.MaxPooling2D(2, 2)) model.add(layers.Dropout(0.2)) model.add(layers.Conv2D(128,(3,3), padding='same')) model.add(layers.BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform")) model.add(layers.LeakyReLU(alpha=0.1)) model.add(layers.Conv2D(128,(3,3), padding='same')) model.add(layers.BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform")) model.add(layers.LeakyReLU(alpha=0.1)) model.add(layers.Conv2D(128,(3,3), padding='same')) model.add(layers.BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform")) model.add(layers.LeakyReLU(alpha=0.1)) model.add(layers.MaxPooling2D(2,2)) model.add(layers.Dropout(0.2)) model.add(layers.Conv2D(256,(3,3), padding='same')) model.add(layers.BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform")) model.add(layers.LeakyReLU(alpha=0.1)) model.add(layers.Conv2D(256,(3,3), padding='same')) model.add(layers.BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform")) model.add(layers.LeakyReLU(alpha=0.1)) model.add(layers.MaxPooling2D(2,2)) model.add(layers.Dropout(0.2)) model.add(layers.Flatten()) model.add(layers.Dense(256)) model.add(layers.LeakyReLU(alpha=0.1)) model.add(layers.BatchNormalization()) model.add(layers.Dense(256)) model.add(layers.LeakyReLU(alpha=0.1)) model.add(layers.BatchNormalization()) model.add(layers.Dense(10, activation='softmax')) adam_opt = Adam(lr=0.0002, beta_1=0.9, beta_2=0.999, decay=0.0, amsgrad=False) optimizer = RMSprop(learning_rate=0.002,rho=0.9) model.compile(optimizer=optimizer,loss='sparse_categorical_crossentropy',metrics=['accuracy']) datagen = ImageDataGenerator( rotation_range=12, zoom_range=0.35, width_shift_range=0.3, height_shift_range=0.3, ) datagen.fit(xtrain) learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau( monitor='loss', factor=0.2, patience=2, verbose=1, mode="min", min_delta=0.0001, cooldown=0, min_lr=0.00001 ) es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=300, restore_best_weights=True) 
model.fit_generator(datagen.flow(xtrain, ytrain, batch_size=512), steps_per_epoch=len(xtrain)//512, epochs=42, validation_data=(np.array(xval),np.array(yval)) , validation_steps=50, callbacks=[learning_rate_reduction, es]) model.summary() score=model.evaluate(np.array(xval),np.array(yval),batch_size=512) results = model.predict_classes(xtest) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("cnn_mnist_datagen.csv",index=False) print("accuracy",score[1] )
Digit Recognizer
9,011,532
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<install_modules>
import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt
Digit Recognizer
9,011,532
!pip install ../input/python-datatable/datatable-0.11.0-cp37-cp37m-manylinux2010_x86_64.whl > /dev/null 2>&1<import_modules>
df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") df.shape
Digit Recognizer
9,011,532
import numpy as np import random import pandas as pd import joblib<set_options>
train_data = train_data.to_numpy() train_labels = train_labels.to_numpy()
Digit Recognizer
9,011,532
_ = np.seterr(divide='ignore', invalid='ignore' )<define_variables>
train_data = train_data / 255
Digit Recognizer
9,011,532
data_types_dict = { 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id':'int8', 'task_container_id': 'int16', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'bool' } target = 'answered_correctly'<load_from_csv>
filters = 64 model = tf.keras.Sequential([ tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3), activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3), activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Conv2D(filters=filters, kernel_size=(5,5), strides=2, padding='same', activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dropout(0.4), tf.keras.layers.Conv2D(filters=2*filters, kernel_size=(3,3), activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Conv2D(filters=2*filters, kernel_size=(3,3), activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Conv2D(filters=2*filters, kernel_size=(5,5), strides=2, padding='same', activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dropout(0.4), tf.keras.layers.Conv2D(filters=4*filters, kernel_size=(4,4), activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Flatten() , tf.keras.layers.Dropout(0.4), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'] )  # from_logits=False: the last layer already applies softmax
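Note that from_logits must agree with the final activation: pass probabilities with from_logits=False, or raw scores with from_logits=True. A minimal sketch of the two consistent pairings (hypothetical 10-class head):

import tensorflow as tf

# pairing A: softmax in the model, loss consumes probabilities
head_a = tf.keras.layers.Dense(10, activation=tf.nn.softmax)
loss_a = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)

# pairing B: linear head, loss applies softmax internally (numerically more stable)
head_b = tf.keras.layers.Dense(10)
loss_b = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)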
Digit Recognizer
9,011,532
print('start read train data...') train_df = dt.fread('../input/riiid-test-answer-prediction/train.csv', columns=set(data_types_dict.keys())).to_pandas()<train_model>
history = model.fit(train_data, train_labels, epochs=40, batch_size=32, verbose=0 )
Digit Recognizer
9,011,532
print('start handle lecture data...' )<load_from_csv>
df_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv") df_test.shape
Digit Recognizer
9,011,532
lectures_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/lectures.csv' )<categorify>
test_data = df_test.to_numpy() test_data = test_data / 255 test_data = test_data.reshape(( test_data.shape[0], 28, 28, 1)) test_data.shape
Digit Recognizer
9,011,532
lectures_df['type_of'] = lectures_df['type_of'].replace('solving question', 'solving_question') lectures_df = pd.get_dummies(lectures_df, columns=['part', 'type_of']) part_lectures_columns = [column for column in lectures_df.columns if column.startswith('part')] types_of_lectures_columns = [column for column in lectures_df.columns if column.startswith('type_of_')]<merge>
predictions = model.predict(test_data) predictions = np.asarray([np.argmax(prediction)for prediction in predictions]) predictions.shape
Digit Recognizer
9,011,532
train_lectures = train_df[train_df.content_type_id == True].merge(lectures_df, left_on='content_id', right_on='lecture_id', how='left' )<groupby>
df_predictions = pd.DataFrame(predictions ).rename(columns={0: "Label"}) df_predictions.index.names = ['ImageId'] df_predictions.index += 1 df_predictions.head()
Digit Recognizer
9,011,532
user_lecture_stats_part = train_lectures.groupby('user_id',as_index = False)[part_lectures_columns + types_of_lectures_columns].sum()<data_type_conversions>
df_predictions.shape df_predictions.to_csv("predictions.csv" )
Digit Recognizer
9,011,532
lecturedata_types_dict = { 'user_id': 'int32', 'part_1': 'int8', 'part_2': 'int8', 'part_3': 'int8', 'part_4': 'int8', 'part_5': 'int8', 'part_6': 'int8', 'part_7': 'int8', 'type_of_concept': 'int8', 'type_of_intention': 'int8', 'type_of_solving_question': 'int8', 'type_of_starter': 'int8' } user_lecture_stats_part = user_lecture_stats_part.astype(lecturedata_types_dict )<data_type_conversions>
import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt
Digit Recognizer
9,011,532
for column in user_lecture_stats_part.columns: if(column !='user_id'): user_lecture_stats_part[column] =(user_lecture_stats_part[column] > 0 ).astype('int8' )<drop_column>
df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") df.shape
Digit Recognizer
9,011,532
del(train_lectures) gc.collect()<categorify>
train_data = train_data.to_numpy() train_labels = train_labels.to_numpy()
Digit Recognizer
9,011,532
user_lecture_agg = train_df.groupby('user_id')['content_type_id'].agg(['sum', 'count']) user_lecture_agg=user_lecture_agg.astype('int16' )<data_type_conversions>
train_data = train_data / 255
Digit Recognizer
9,011,532
cum = train_df.groupby('user_id')['content_type_id'].agg(['cumsum', 'cumcount']) cum['cumcount']=cum['cumcount']+1 train_df['user_interaction_count'] = cum['cumcount'] train_df['user_interaction_timestamp_mean'] = train_df['timestamp']/cum['cumcount'] train_df['user_lecture_sum'] = cum['cumsum'] train_df['user_lecture_lv'] = cum['cumsum'] / cum['cumcount'] train_df.user_lecture_lv=train_df.user_lecture_lv.astype('float16') train_df.user_lecture_sum=train_df.user_lecture_sum.astype('int16') train_df.user_interaction_count=train_df.user_interaction_count.astype('int16') train_df['user_interaction_timestamp_mean']=train_df['user_interaction_timestamp_mean']/(1000*3600) train_df.user_interaction_timestamp_mean=train_df.user_interaction_timestamp_mean.astype('float32') <set_options>
filters = 64 model = tf.keras.Sequential([ tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3), activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3), activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Conv2D(filters=filters, kernel_size=(5,5), strides=2, padding='same', activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dropout(0.4), tf.keras.layers.Conv2D(filters=2*filters, kernel_size=(3,3), activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Conv2D(filters=2*filters, kernel_size=(3,3), activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Conv2D(filters=2*filters, kernel_size=(5,5), strides=2, padding='same', activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dropout(0.4), tf.keras.layers.Conv2D(filters=4*filters, kernel_size=(4,4), activation=tf.nn.relu), tf.keras.layers.BatchNormalization() , tf.keras.layers.Flatten() , tf.keras.layers.Dropout(0.4), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'] )  # from_logits=False: the last layer already applies softmax
Digit Recognizer
9,011,532
del cum gc.collect()<train_model>
history = model.fit(train_data, train_labels, epochs=40, batch_size=32, verbose=0 )
Digit Recognizer
9,011,532
print('start handle train_df...' )<data_type_conversions>
df_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv") df_test.shape
Digit Recognizer
9,011,532
train_df['prior_question_had_explanation'].fillna(False, inplace=True) train_df = train_df.astype(data_types_dict) train_df = train_df[train_df[target] != -1].reset_index(drop=True )<groupby>
test_data = df_test.to_numpy() test_data = test_data / 255 test_data = test_data.reshape(( test_data.shape[0], 28, 28, 1)) test_data.shape
Digit Recognizer
9,011,532
content_explation_agg=train_df[["content_id","prior_question_had_explanation",target]].groupby(["content_id","prior_question_had_explanation"])[target].agg(['mean'] )<rename_columns>
predictions = model.predict(test_data) predictions = np.asarray([np.argmax(prediction)for prediction in predictions]) predictions.shape
Digit Recognizer
9,011,532
content_explation_agg=content_explation_agg.unstack() content_explation_agg=content_explation_agg.reset_index() content_explation_agg.columns = ['content_id', 'content_explation_false_mean','content_explation_true_mean']<data_type_conversions>
df_predictions = pd.DataFrame(predictions ).rename(columns={0: "Label"}) df_predictions.index.names = ['ImageId'] df_predictions.index += 1 df_predictions.head()
Digit Recognizer
9,011,532
content_explation_agg.content_id=content_explation_agg.content_id.astype('int16') content_explation_agg.content_explation_false_mean=content_explation_agg.content_explation_false_mean.astype('float16') content_explation_agg.content_explation_true_mean=content_explation_agg.content_explation_true_mean.astype('float16' )<train_model>
df_predictions.shape df_predictions.to_csv("predictions.csv" )
Digit Recognizer
9,999,514
print('start handle attempt_no...' )<data_type_conversions>
train=pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test=pd.read_csv("/kaggle/input/digit-recognizer/test.csv") train.head()
Digit Recognizer
9,999,514
train_df["attempt_no"] = 1 train_df.attempt_no=train_df.attempt_no.astype('int8') attempt_no_agg=train_df.groupby(["user_id","content_id"])["attempt_no"].agg(['sum'] ).astype('int8') train_df["attempt_no"] = train_df[["user_id","content_id",'attempt_no']].groupby(["user_id","content_id"])["attempt_no"].cumsum()<data_type_conversions>
X_train=train.drop("label", axis=1) y_train=train["label"] X_test=test
Digit Recognizer
9,999,514
print('start handle timestamp...') prior_question_elapsed_time_mean=train_df['prior_question_elapsed_time'].mean() train_df['prior_question_elapsed_time'].fillna(prior_question_elapsed_time_mean, inplace=True )<data_type_conversions>
X_train/=255.0 X_test/=255.0 X_train=X_train.values.reshape(-1,28,28,1) X_test=X_test.values.reshape(-1,28,28,1) y_train=to_categorical(y_train, num_classes=10 )
Digit Recognizer
9,999,514
max_timestamp_u = train_df[['user_id','timestamp']].groupby(['user_id'] ).agg(['max'] ).reset_index() max_timestamp_u.columns = ['user_id', 'max_time_stamp'] max_timestamp_u.user_id=max_timestamp_u.user_id.astype('int32' )<data_type_conversions>
X_train, x_test, Y_train, y_test= train_test_split(X_train,y_train,test_size=0.1,random_state=0 )
Digit Recognizer
9,999,514
train_df['lagtime'] = train_df.groupby('user_id')['timestamp'].shift() max_timestamp_u2 = train_df[['user_id','lagtime']].groupby(['user_id'] ).agg(['max'] ).reset_index() max_timestamp_u2.columns = ['user_id', 'max_time_stamp2'] max_timestamp_u2.user_id=max_timestamp_u2.user_id.astype('int32' )<feature_engineering>
classifier=Sequential() classifier.add(Conv2D(64,(3,3), input_shape=(28,28,1), activation='relu')) classifier.add(BatchNormalization()) classifier.add(Conv2D(64,(3,3), activation='relu')) classifier.add(BatchNormalization()) classifier.add(Conv2D(64,(3,3), activation='relu')) classifier.add(BatchNormalization()) classifier.add(MaxPooling2D(pool_size=(2,2))) classifier.add(Dropout(0.4)) classifier.add(Conv2D(64,(3,3), activation='relu')) classifier.add(BatchNormalization()) classifier.add(Conv2D(64,(3,3), activation='relu')) classifier.add(BatchNormalization()) classifier.add(Conv2D(64,(3,3), activation='relu')) classifier.add(BatchNormalization()) classifier.add(MaxPooling2D(pool_size=(2,2))) classifier.add(Dropout(0.4)) classifier.add(Flatten()) classifier.add(Dense(256,activation='relu')) classifier.add(Dropout(0.4)) classifier.add(Dense(512,activation='relu')) classifier.add(Dropout(0.4)) classifier.add(Dense(1024,activation='relu')) classifier.add(Dropout(0.5)) classifier.add(Dense(10, activation='softmax'))  # Keras 2 syntax: kernel_size given as a tuple and Dense(units) in place of the removed output_dim argument
Digit Recognizer
9,999,514
train_df['lagtime']=train_df['timestamp']-train_df['lagtime'] lagtime_mean=train_df['lagtime'].mean() train_df['lagtime'].fillna(lagtime_mean, inplace=True )<data_type_conversions>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )
Digit Recognizer
9,999,514
train_df['lagtime']=train_df['lagtime']/(1000*3600) train_df.lagtime=train_df.lagtime.astype('float32' )<data_type_conversions>
classifier.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
9,999,514
<data_type_conversions>
datagen = ImageDataGenerator(zoom_range = 0.1, height_shift_range = 0.1, width_shift_range = 0.1, rotation_range = 10 )
Digit Recognizer
9,999,514
train_df['lagtime2'] = train_df.groupby('user_id')['timestamp'].shift(2) max_timestamp_u3 = train_df[['user_id','lagtime2']].groupby(['user_id'] ).agg(['max'] ).reset_index() max_timestamp_u3.columns = ['user_id', 'max_time_stamp3'] max_timestamp_u3.user_id=max_timestamp_u3.user_id.astype('int32') train_df['lagtime2']=train_df['timestamp']-train_df['lagtime2'] lagtime_mean2=train_df['lagtime2'].mean() train_df['lagtime2'].fillna(lagtime_mean2, inplace=True) <data_type_conversions>
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x )
Digit Recognizer
9,999,514
train_df['lagtime2']=train_df['lagtime2']/(1000*3600) train_df.lagtime2=train_df.lagtime2.astype('float32' )<data_type_conversions>
classifier.fit_generator(datagen.flow(X_train, Y_train, batch_size=16), steps_per_epoch=500, epochs=40, verbose=2, validation_data=(x_test[:400,:], y_test[:400,:]), callbacks=[annealer] )
Digit Recognizer
9,999,514
train_df['lagtime3'] = train_df.groupby('user_id')['timestamp'].shift(3) train_df['lagtime3']=train_df['timestamp']-train_df['lagtime3'] lagtime_mean3=train_df['lagtime3'].mean() train_df['lagtime3'].fillna(lagtime_mean3, inplace=True) train_df['lagtime3']=train_df['lagtime3']/(1000*3600) train_df.lagtime3=train_df.lagtime3.astype('float32' )<data_type_conversions>
result=classifier.predict(X_test) result=pd.Series(np.argmax(result, axis=1), name='Label') result
Digit Recognizer
9,999,514
<data_type_conversions>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),result],axis = 1) submission.to_csv("cnn_mnist_datagen.csv",index=False )
Digit Recognizer
11,220,007
train_df['timestamp']=train_df['timestamp']/(1000*3600) train_df.timestamp=train_df.timestamp.astype('float16' )<feature_engineering>
train = pd.read_csv('../input/digit-recognizer/train.csv') x_test = pd.read_csv('../input/digit-recognizer/test.csv') train.head()
Digit Recognizer
11,220,007
train_df['delta_prior_question_elapsed_time'] = train_df.groupby('user_id')['prior_question_elapsed_time'].shift() train_df['delta_prior_question_elapsed_time']=train_df['prior_question_elapsed_time']-train_df['delta_prior_question_elapsed_time']<data_type_conversions>
x_train = x_train.to_numpy() x_test = x_test.to_numpy() y_train = y_train.to_numpy()
Digit Recognizer
11,220,007
delta_prior_question_elapsed_time_mean=train_df['delta_prior_question_elapsed_time'].mean() train_df['delta_prior_question_elapsed_time'].fillna(delta_prior_question_elapsed_time_mean, inplace=True) train_df.delta_prior_question_elapsed_time=train_df.delta_prior_question_elapsed_time.astype('int32' )<data_type_conversions>
x_train = x_train.reshape(-1,28,28) x_test = x_test.reshape(-1,28,28) print("(Image)Train Inputs: " , x_train.shape) print("(Image)Test Inputs: " , x_test.shape )
Digit Recognizer
11,220,007
train_df['lag'] = train_df.groupby('user_id')[target].shift() cum = train_df.groupby('user_id')['lag'].agg(['cumsum', 'cumcount']) user_agg = train_df.groupby('user_id')['lag'].agg(['sum', 'count'] ).astype('int16') cum['cumsum'].fillna(0, inplace=True) train_df['user_correctness'] = cum['cumsum'] / cum['cumcount'] train_df['user_correct_count'] = cum['cumsum'] train_df['user_uncorrect_count'] = cum['cumcount']-cum['cumsum'] train_df.drop(columns=['lag'], inplace=True) train_df['user_correctness'].fillna(0.67, inplace=True) train_df.user_correctness=train_df.user_correctness.astype('float16') train_df.user_correct_count=train_df.user_correct_count.astype('int16') train_df.user_uncorrect_count=train_df.user_uncorrect_count.astype('int16') <set_options>
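The shift-then-accumulate pattern above gives every interaction the user's accuracy over previous answers only, so the current label never leaks into its own feature. A toy illustration (hypothetical four-row history for one user):

import pandas as pd

toy = pd.DataFrame({'user_id': [7, 7, 7, 7],
                    'answered_correctly': [1, 0, 1, 1]})
toy['lag'] = toy.groupby('user_id')['answered_correctly'].shift()   # [NaN, 1, 0, 1]
g = toy.groupby('user_id')['lag']
toy['user_correctness'] = g.cumsum() / g.cumcount()                 # [NaN, 1.0, 0.5, 0.67]
# the first interaction has no history (NaN); the cell above fills it with 0.67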
from PIL import Image, ImageFilter
def sharpner(img): img = Image.fromarray(img.astype('uint8')) img = img.filter(ImageFilter.UnsharpMask(radius=2, percent=150)) return np.array(img) for i in range(x_train.shape[0]): x_train[i] = sharpner(x_train[i]) for i in range(x_test.shape[0]): x_test[i] = sharpner(x_test[i]) print(x_train.shape) print(x_test.shape) plt.imshow(x_train[3] )
Digit Recognizer
11,220,007
del cum gc.collect()<data_type_conversions>
def one_hottie(labels,C): One_hot_matrix = tf.one_hot(labels,C) return tf.keras.backend.eval(One_hot_matrix) y_train = one_hottie(y_train, 10) print("Y shape: " + str(y_train.shape))
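one_hottie wraps tf.one_hot plus a backend eval; the built-in Keras utility produces the same (N, 10) one-hot matrix directly (assuming integer labels 0-9):

y_train_onehot = tf.keras.utils.to_categorical(y_train, num_classes=10)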
Digit Recognizer
11,220,007
<data_type_conversions>
model = tf.keras.Sequential([ tf.keras.layers.Conv2D(64, 3, activation='relu', input_shape=(28,28,1),padding="same"), tf.keras.layers.MaxPool2D(strides=2), tf.keras.layers.Conv2D(128, 3, activation='relu',padding="same"), tf.keras.layers.MaxPool2D(strides=2), tf.keras.layers.Dropout(0.2), tf.keras.layers.Conv2D(256, 3, activation='relu',padding="same"), tf.keras.layers.MaxPool2D(strides=2), tf.keras.layers.Conv2D(256, 3, activation='relu',padding="same"), tf.keras.layers.MaxPool2D(strides=2), tf.keras.layers.Flatten() , tf.keras.layers.Dense(100,kernel_regularizer=tf.keras.regularizers.l2(0.01), activation='relu'), tf.keras.layers.Dense(50,kernel_regularizer=tf.keras.regularizers.l2(0.01), activation='relu'), tf.keras.layers.Dense(10, kernel_regularizer=tf.keras.regularizers.l2(0.01),activation='softmax') ]) model.summary()
Digit Recognizer
11,220,007
train_df.prior_question_had_explanation=train_df.prior_question_had_explanation.astype('int8') explanation_agg = train_df.groupby('user_id')['prior_question_had_explanation'].agg(['sum', 'count']) explanation_agg=explanation_agg.astype('int16') <data_type_conversions>
model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy() , metrics=['accuracy'] )
Digit Recognizer
11,220,007
cum = train_df.groupby('user_id')['prior_question_had_explanation'].agg(['cumsum', 'cumcount']) cum['cumcount']=cum['cumcount']+1 train_df['explanation_mean'] = cum['cumsum'] / cum['cumcount'] train_df['explanation_true_count'] = cum['cumsum'] train_df['explanation_false_count'] = cum['cumcount']-cum['cumsum'] train_df.explanation_mean=train_df.explanation_mean.astype('float16') train_df.explanation_true_count=train_df.explanation_true_count.astype('int16') train_df.explanation_false_count=train_df.explanation_false_count.astype('int16' )<set_options>
datagen = tf.keras.preprocessing.image.ImageDataGenerator( rotation_range=45, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, fill_mode='nearest') datagen.fit(x_train) result = model.fit_generator(datagen.flow(x_train, y_train, batch_size=64), epochs=50, workers=4 )
Digit Recognizer
11,220,007
del cum gc.collect()<categorify>
model.compile(optimizer=tf.keras.optimizers.Nadam(learning_rate=0.006), loss=tf.keras.losses.CategoricalCrossentropy() , metrics=['accuracy'])
Digit Recognizer
11,220,007
content_agg = train_df.groupby('content_id')[target].agg(['sum', 'count','var']) task_container_agg = train_df.groupby('task_container_id')[target].agg(['sum', 'count','var']) content_agg=content_agg.astype('float32') task_container_agg=task_container_agg.astype('float32' )<data_type_conversions>
result = model.fit(x=x_train, y=y_train, batch_size=64, epochs=50, verbose=1, shuffle=False, initial_epoch=20, validation_split=0.2 )
Digit Recognizer
11,220,007
train_df['task_container_uncor_count'] = train_df['task_container_id'].map(task_container_agg['count']-task_container_agg['sum'] ).astype('int32') train_df['task_container_cor_count'] = train_df['task_container_id'].map(task_container_agg['sum'] ).astype('int32') train_df['task_container_std'] = train_df['task_container_id'].map(task_container_agg['var'] ).astype('float16') train_df['task_container_correctness'] = train_df['task_container_id'].map(task_container_agg['sum'] / task_container_agg['count']) train_df.task_container_correctness=train_df.task_container_correctness.astype('float16' )<groupby>
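The agg-then-map calls above are a plain mean target encoding: per-id sums and counts are computed once and broadcast back onto every row. A toy sketch of the same idea (hypothetical five-interaction frame):

import pandas as pd

toy = pd.DataFrame({'content_id': [1, 1, 2, 2, 2],
                    'answered_correctly': [1, 0, 1, 1, 0]})
content_agg = toy.groupby('content_id')['answered_correctly'].agg(['sum', 'count'])
toy['content_correctness'] = toy['content_id'].map(content_agg['sum'] / content_agg['count'])
# content 1 -> 0.50, content 2 -> 0.67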
model.compile(optimizer=tf.keras.optimizers.Nadam(learning_rate=0.0001), loss=tf.keras.losses.CategoricalCrossentropy() , metrics=['accuracy'] )
Digit Recognizer
11,220,007
content_elapsed_time_agg=train_df.groupby('content_id')['prior_question_elapsed_time'].agg(['mean']) content_had_explanation_agg=train_df.groupby('content_id')['prior_question_had_explanation'].agg(['mean'] )<train_model>
result = model.fit(x=x_train, y=y_train, batch_size=64, epochs=50, verbose=1, shuffle=False, initial_epoch=20, validation_split=0.2 )
Digit Recognizer
11,220,007
print('start questions data...' )<load_from_csv>
model.compile(optimizer=tf.keras.optimizers.Nadam(learning_rate=0.00006), loss=tf.keras.losses.CategoricalCrossentropy() , metrics=['accuracy'] )
Digit Recognizer
11,220,007
questions_df = pd.read_csv( '../input/riiid-test-answer-prediction/questions.csv', usecols=[0, 1,3,4], dtype={'question_id': 'int16','bundle_id': 'int16', 'part': 'int8','tags': 'str'} )<groupby>
result = model.fit(x=x_train, y=y_train,batch_size=64, epochs=90, verbose=1, shuffle=False, initial_epoch=50, validation_split=0.2 )
Digit Recognizer
11,220,007
bundle_agg = questions_df.groupby('bundle_id')['question_id'].agg(['count'] )<data_type_conversions>
check = model.evaluate(x_train,y_train )
Digit Recognizer
11,220,007
questions_df['content_sub_bundle'] = questions_df['bundle_id'].map(bundle_agg['count'] ).astype('int8' )<set_options>
preds = model.predict_classes(x_train) preds.shape
Digit Recognizer
11,220,007
questions_df['tags'].fillna('188', inplace=True )<string_transform>
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") y_train = train.iloc[:,0] y_train = y_train.to_numpy()
Digit Recognizer
11,220,007
def gettags(tags,num): tags_splits=tags.split(" ") result='' for t in tags_splits: x=int(t) if(x<32*(num+1)and x>=32*num): result=result+' '+t return result<categorify>
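gettags keeps only the tag ids that fall in the band [32*num, 32*(num+1)), so each call extracts one band from the space-separated tag string; a few hypothetical calls:

gettags("5 37 70 100", 0)   # -> ' 5'    (band 0:   0 <= t <  32)
gettags("5 37 70 100", 1)   # -> ' 37'   (band 1:  32 <= t <  64)
gettags("5 37 70 100", 2)   # -> ' 70'   (band 2:  64 <= t <  96)
gettags("5 37 70 100", 3)   # -> ' 100'  (band 3:  96 <= t < 128)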
preds = model.predict_classes(x_test )
Digit Recognizer
11,220,007
for num in range(0,6): questions_df["tags"+str(num)] = questions_df["tags"].apply(lambda row: gettags(row,num)) le = LabelEncoder() le.fit(np.unique(questions_df['tags'+str(num)].values)) questions_df['tags'+str(num)]=questions_df[['tags'+str(num)]].apply(le.transform )<data_type_conversions>
arr = [x for x in range(1,28001)] label = pd.DataFrame(arr,columns = ["ImageId"]) label["Label"] = pd.DataFrame(preds) label.head()
Digit Recognizer
11,220,007
questions_df_dict = { 'tags0': 'int8', 'tags1': 'int8', 'tags2': 'int8', 'tags3': 'int8', 'tags4': 'int8', 'tags5': 'int8', } questions_df = questions_df.astype(questions_df_dict )<drop_column>
label.to_csv('Y_test.csv',header=True,index = False )
Digit Recognizer
11,220,007
questions_df.drop(columns=['tags'], inplace=True )<data_type_conversions>
model.save("MNIST_CNN_model_dataaug" )
Digit Recognizer
12,201,447
questions_df['part_bundle_id']=questions_df['part']*100000+questions_df['bundle_id'] questions_df.part_bundle_id=questions_df.part_bundle_id.astype('int32') <load_from_csv>
data_dir='/kaggle/input/digit-recognizer/'
Digit Recognizer
12,201,447
<rename_columns>
train=pd.read_csv(data_dir+'train.csv') test=pd.read_csv(data_dir+'test.csv' )
Digit Recognizer
12,201,447
questions_df.rename(columns={'question_id':'content_id'}, inplace=True )<merge>
y_train=train['label'] x_train=train.drop('label',axis=1 )
Digit Recognizer
12,201,447
questions_df = pd.merge(questions_df, content_explation_agg, on='content_id', how='left',right_index=True) <drop_column>
def image_printer(i,df): idx=i data=df.iloc[idx].to_numpy().reshape(28,28 ).astype('uint8') plt.imshow(data )
Digit Recognizer
12,201,447
del content_explation_agg<data_type_conversions>
x_test=test
Digit Recognizer
12,201,447
questions_df['content_correctness'] = questions_df['content_id'].map(content_agg['sum'] / content_agg['count']) questions_df.content_correctness=questions_df.content_correctness.astype('float16') questions_df['content_correctness_std'] = questions_df['content_id'].map(content_agg['var']) questions_df.content_correctness_std=questions_df.content_correctness_std.astype('float16') questions_df['content_uncorrect_count'] = questions_df['content_id'].map(content_agg['count']-content_agg['sum'] ).astype('int32') questions_df['content_correct_count'] = questions_df['content_id'].map(content_agg['sum'] ).astype('int32' )<data_type_conversions>
import tensorflow as tf import keras from keras import backend as k from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Activation, BatchNormalization from keras.layers.convolutional import Conv2D, MaxPooling2D from keras.preprocessing.image import ImageDataGenerator from keras.utils import plot_model from keras.callbacks import EarlyStopping
Digit Recognizer
12,201,447
questions_df['content_elapsed_time_mean'] = questions_df['content_id'].map(content_elapsed_time_agg['mean']) questions_df.content_elapsed_time_mean=questions_df.content_elapsed_time_mean.astype('float16') questions_df['content_had_explanation_mean'] = questions_df['content_id'].map(content_had_explanation_agg['mean']) questions_df.content_had_explanation_mean=questions_df.content_had_explanation_mean.astype('float16' )<drop_column>
img_cols=28 img_rows=28
Digit Recognizer
12,201,447
del content_elapsed_time_agg del content_had_explanation_agg gc.collect()<categorify>
if k.image_data_format()=='channels_first': x_train=x_train.values.reshape(x_train.shape[0],1,img_rows,img_cols) test=test.values.reshape(test.shape[0],1,img_rows,img_cols) x_train=x_train/255.0 test=test/255.0 input_shape=(1,img_rows,img_cols) else: x_train=x_train.values.reshape(x_train.shape[0],img_rows,img_cols,1) test=test.values.reshape(test.shape[0],img_rows,img_cols,1) x_train=x_train/255.0 test=test/255.0 input_shape=(img_rows,img_cols,1)  # image_data_format() must be called to get the 'channels_first'/'channels_last' string
Digit Recognizer
12,201,447
part_agg = questions_df.groupby('part')['content_correctness'].agg(['mean', 'var']) questions_df['part_correctness_mean'] = questions_df['part'].map(part_agg['mean']) questions_df['part_correctness_std'] = questions_df['part'].map(part_agg['var']) questions_df.part_correctness_mean=questions_df.part_correctness_mean.astype('float16') questions_df.part_correctness_std=questions_df.part_correctness_std.astype('float16' )<data_type_conversions>
earlystopping=EarlyStopping(monitor='val_accuracy',mode='auto',patience=10,restore_best_weights=True) modelacc=[] nfilters=[64,128,256] conv_layers=[1,2,3,4,5] dense_layers=[0,1,2,3,4] dropouts=[0.5] for filters in nfilters: for conl in conv_layers: for densel in dense_layers: for dp in dropouts: cnnsays='Feature Maps: {} Convlayers: {} Denselayers: {} Dropouts: {}'.format(filters,conl,densel,dp) print(cnnsays) model=Sequential() model.add(Conv2D(filters,(3,3),input_shape=input_shape)) model.add(BatchNormalization()) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(dp)) for i in range(conl-1): model.add(Conv2D(filters,(3,3),padding='same')) model.add(BatchNormalization()) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2,2),padding='same')) model.add(Dropout(dp)) model.add(Flatten()) for i in range(densel): model.add(Dense(256)) model.add(BatchNormalization()) model.add(Activation("relu")) model.add(Dropout(dp)) model.add(Dense(10,activation='softmax')) model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy']) EPOCHS=30 history=model.fit(x_train,y_train,batch_size=32,epochs=EPOCHS,validation_split=0.2,callbacks=[earlystopping]) modelacc.append([round(100*max(history.history['val_accuracy']),2),cnnsays])
Digit Recognizer
12,201,447
part_agg = questions_df.groupby('part')['content_uncorrect_count'].agg(['sum']) questions_df['part_uncor_count'] = questions_df['part'].map(part_agg['sum'] ).astype('int32') part_agg = questions_df.groupby('part')['content_correct_count'].agg(['sum']) questions_df['part_cor_count'] = questions_df['part'].map(part_agg['sum'] ).astype('int32' )<categorify>
print('Highest validation accuracy {}'.format(round(100*max(history.history['val_accuracy']),2)) )
Digit Recognizer
12,201,447
bundle_agg = questions_df.groupby('bundle_id')['content_correctness'].agg(['mean']) questions_df['bundle_correctness_mean'] = questions_df['bundle_id'].map(bundle_agg['mean']) questions_df.bundle_correctness_mean=questions_df.bundle_correctness_mean.astype('float16') <data_type_conversions>
modelacc.sort(reverse=True) modelacc
Digit Recognizer
12,201,447
<drop_column>
pred=model.predict([test]) soln=[] for i in range(len(pred)) : soln.append(np.argmax(pred[i]))
Digit Recognizer
12,201,447
<define_variables><EOS>
final=pd.DataFrame() final['ImageId']=[i+1 for i in x_test.index] final['Label']=soln final.to_csv('newmnistcnn',index=False )
Digit Recognizer
12,294,222
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<init_hyperparams>
import numpy as np import pandas as pd import os import matplotlib.pyplot as plt import seaborn as sns import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator, load_img from keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, GlobalAveragePooling2D, Dense, Flatten, Dropout from keras.optimizers import RMSprop, Adam, SGD from keras import regularizers from keras.callbacks import CSVLogger, ModelCheckpoint, ReduceLROnPlateau
Digit Recognizer
12,294,222
flag_lgbm=True clfs = list() params = { 'num_leaves': 400, 'max_bin':500, 'min_child_weight': 0.03454472573214212, 'feature_fraction': 0.52, 'bagging_fraction': 0.52, 'objective': 'binary', 'learning_rate': 0.2, "boosting_type": "gbdt", "metric": 'auc', 'reg_alpha': 0.3899927210061127, 'reg_lambda': 0.6485237330340494, } trains=list() valids=list() num=1 for i in range(0,num): train_df_clf=train_df[1200*10000:2*1200*10000] print('sample end') del train_df users=train_df_clf['user_id'].drop_duplicates() users=users.sample(frac=0.08) users_df=pd.DataFrame() users_df['user_id']=users.values valid_df_newuser = pd.merge(train_df_clf, users_df, on=['user_id'], how='inner',right_index=True) del users_df del users gc.collect() train_df_clf.drop(valid_df_newuser.index, inplace=True) print('pd.merge(train_df_clf, questions_df)') train_df_clf = pd.merge(train_df_clf, questions_df, on='content_id', how='left',right_index=True) valid_df_newuser = pd.merge(valid_df_newuser, questions_df, on='content_id', how='left',right_index=True) print('valid_df') valid_df=train_df_clf.sample(frac=0.1) train_df_clf.drop(valid_df.index, inplace=True) valid_df = valid_df.append(valid_df_newuser) del valid_df_newuser gc.collect() trains.append(train_df_clf) valids.append(valid_df) print('train_df_clf length:',len(train_df_clf)) print('valid_df length:',len(valid_df)) <drop_column>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') train.head(5)
Digit Recognizer
12,294,222
del train_df_clf del valid_df gc.collect()<prepare_x_and_y>
y = train['label'] train.drop('label', axis=1, inplace=True )
Digit Recognizer