kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completetion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57)
---|---|---|---|
4,081,536 | train_user_df.secs_elapsed.fillna(-1,inplace=True)
train_user_df.action.fillna(-1,inplace=True)
train_user_df.iloc[:,-11:]=train_user_df.iloc[:,-11:].fillna(-1)
train_user_df['secs_elapsed']=train_user_df['secs_elapsed'].astype('int64')
train_user_df['action']=train_user_df['action'].astype('int64' )<data_type_conversions> | x_train,x_val,y_train,y_val=train_test_split(x,y,test_size=0.1,random_state=0 ) | Digit Recognizer |
4,081,536 | test_user_df.secs_elapsed.fillna(-1,inplace=True)
test_user_df.action.fillna(-1,inplace=True)
test_user_df.iloc[:,-11:]=test_user_df.iloc[:,-11:].fillna(-1)
test_user_df['secs_elapsed']=test_user_df['secs_elapsed'].astype('int64')
test_user_df['action']=test_user_df['action'].astype('int64' )<count_missing_values> | x_train=x_train.values
y_train=y_train.values
x_val=x_val.values
y_val=y_val.values
x_test=x_test.values | Digit Recognizer |
4,081,536 | train_user_df.isnull().sum() /train_user_df.shape[0] *100<define_variables> | x_train=x_train.reshape(37800,28,28,1)
x_val=x_val.reshape(4200,28,28,1)
x_test=x_test.reshape(28000,28,28,1 ) | Digit Recognizer |
4,081,536 | categorical_cols=[cname for cname in train_user_df.columns if cname not in ['id','date_account_created','date_first_booking','first_device_type','first_browser',
'timestamp_first_active','country_destination'] and
train_user_df[cname].dtype == "object"]
numerical_cols=[cname for cname in train_user_df.columns if cname not in ['id','date_account_created','date_first_booking','first_device_type','first_browser',
'timestamp_first_active','country_destination'] and
train_user_df[cname].dtype != "object"]
print("Categorical_cols -
",categorical_cols)
print("Numerical_cols -
",numerical_cols )<categorify> | model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax")) | Digit Recognizer |
4,081,536 | categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant')) ,
('onehot', OneHotEncoder(handle_unknown='ignore'))
])
numerical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant'))
])
preprocessor = ColumnTransformer(
transformers=[
('cat', categorical_transformer, categorical_cols),
('num', numerical_transformer, numerical_cols)
] )<drop_column> | model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta() ,
metrics=['accuracy'] ) | Digit Recognizer |
4,081,536 | test_id = test_user_df.id
test_X = test_user_df.drop(['id'], axis='columns' )<categorify> | model.fit(x_train, y_train,
batch_size=128,
epochs=15,
verbose=1,
validation_data=(x_val, y_val)) | Digit Recognizer |
4,081,536 | labels = train_user_df.country_destination
le = LabelEncoder()
train_y = le.fit_transform(labels )<drop_column> | pred=model.predict(x_test,verbose=0)
new_pred = [np.argmax(y, axis=None, out=None)for y in pred]
output=pd.DataFrame({'ImageId':sub['ImageId'],'Label':new_pred})
output.to_csv('Digit_recognizer.csv', index=False ) | Digit Recognizer |
1,029,364 | train_X = train_user_df.drop(['id','country_destination'], axis='columns')<find_best_model_class> | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
| Digit Recognizer |
1,029,364 | def cross_validation_with_ndcg(pipe, X, y, scorer, cv=5):
skf = StratifiedKFold(n_splits=cv, shuffle=True, random_state=100)
scores = []
for train_index, holdout_index in skf.split(X, y):
X_train, X_test = X.iloc[train_index], X.iloc[holdout_index]
y_train, y_test = y[train_index], y[holdout_index]
pipe.fit(X_train, y_train)
predict = pipe.predict_proba(X_test)
y_test = pd.get_dummies(y_test ).to_numpy()
score = scorer(y_test, predict)
scores.append(round(score, 6))
print(f'{len(scores)} / {cv} DONE!', end='\r')
return scores<choose_model_class> | label = np.array(train.iloc[:,0], str)
data = np.array(train.iloc[:,1:],np.float32)
label_test = np.array([])
data_test = np.array(test.iloc[:,:],np.float32)
| Digit Recognizer |
1,029,364 | n_estimaters_param=[50, 100, 200]
max_depth_param=[3,4,5]
learning_rate_param=[0.1,0.2]
params = [(x, y, z)for x in learning_rate_param for y in n_estimaters_param for z in max_depth_param]
result_list=[]
for learning_rates,n_estimaters, max_depth in params:
xg_model_ = XGBClassifier(max_depth=max_depth,learning_rate=learning_rates, n_estimators=n_estimaters,verbosity=0,
objective='multi:softprob',n_jobs=-1)
search_pipe = Pipeline([
('customproccess',Custom_Proccess()),
('preprocessor', preprocessor),
("model", xg_model_)
])
print(f'learning_rate: {learning_rates}, n_estimaters: {n_estimaters}, max_depth: {max_depth}')
scores = cross_validation_with_ndcg(search_pipe, train_X, train_y, ndcg_score)
result_list.append([learning_rates,n_estimaters,max_depth,np.mean(scores)] )<prepare_output> | data = data.reshape(data.shape[0],28,28,1)
data_test = data_test.reshape(data_test.shape[0],28,28,1)
data = data/255
data_test = data_test/255 | Digit Recognizer |
1,029,364 | result_df=pd.DataFrame(result_list,columns=['learning_rate','n_estimator','max_depth','mean_score'])
result_df.sort_values(by='mean_score',ascending=False).head(5)<train_model> | print("Before encoding")
print(label[:10])
labels = np_utils.to_categorical(label,10)
print("Encoded Data")
print(labels[:10] ) | Digit Recognizer |
1,029,364 | xg_model = XGBClassifier(max_depth=5,learning_rate=0.1, n_estimators=200,verbosity=0,objective='multi:softprob',n_jobs=-1)
pipe = Pipeline([
('customproccess',Custom_Proccess()),
('preprocessor', preprocessor),
("model", xg_model)
])
pipe.fit(train_X, train_y)
predict = pipe.predict_proba(test_X )<feature_engineering> | def model_generator(dropout=[0.25],denses=[512,10],activation="relu"):
model = Sequential()
model.add(Conv2D(filters=32,kernel_size=3,padding='same', activation='relu', input_shape=(28, 28,1)))
model.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=3))
model.add(Dropout(0.20))
model.add(Conv2D(filters=64,kernel_size=3,padding='same', activation='relu'))
model.add(Conv2D(filters=64, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=2))
model.add(Dropout(0.20))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
return model | Digit Recognizer |
1,029,364 | ids = []
cts = []
for i in range(len(test_id)) :
idx = test_id[i]
ids += [idx] * 5
cts += le.inverse_transform(np.argsort(predict[i])[::-1])[:5].tolist()<save_to_csv> | def model_generator2(dropout=[0.25],denses=[512,10],activation="relu"):
model = Sequential()
model.add(Conv2D(filters=16,kernel_size=2,padding='same', activation='relu', input_shape=(28, 28,1)))
model.add(MaxPool2D(pool_size=2))
model.add(Dropout(0.20))
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=2))
model.add(Dropout(0.20))
model.add(Conv2D(filters=64,kernel_size=2,padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=2))
model.add(Dropout(0.15))
model.add(Flatten())
model.add(Dense(512, name='aux_output'))
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Dense(10, name='aux_output2'))
model.add(Activation('softmax'))
model.summary()
return model | Digit Recognizer |
1,029,364 | sub_df = pd.DataFrame(np.column_stack(( ids, cts)) , columns=['id', 'country'])
sub_df.to_csv('sub-03.csv',index=False )<load_from_csv> | def model_fit(model,batch_size=64,epochs=10):
optimizer = Adam(lr=0.0001)
model.compile(loss="categorical_crossentropy",optimizer=optimizer,metrics=['accuracy'])
checkpointer = ModelCheckpoint(filepath='mnist.model.best', verbose=1, monitor='val_loss', save_best_only=True)
training = model.fit(data, labels,batch_size=batch_size, epochs=epochs,validation_split=0.25, callbacks=[checkpointer],verbose=1, shuffle=True)
return training | Digit Recognizer |
1,029,364 | train = pd.read_csv('../input/airbnb-recruiting-new-user-bookings/train_users_2.csv.zip')
age_gender = pd.read_csv('../input/airbnb-recruiting-new-user-bookings/age_gender_bkts.csv.zip')
countries_df = pd.read_csv('../input/airbnb-recruiting-new-user-bookings/countries.csv.zip')
session_df = pd.read_csv('../input/airbnb-recruiting-new-user-bookings/sessions.csv.zip')
test = pd.read_csv('../input/airbnb-recruiting-new-user-bookings/test_users.csv.zip')
print('train shape: ', train.shape)
print('test shape: ', test.shape)
print('age_gender_bkts: ', age_gender.shape)
print('countries: ', countries_df.shape)
print('session: ', session_df.shape )<categorify> | model1 = model_generator(dropout=[0.25],denses=[128,10],activation="relu")
training = model_fit(model1,batch_size=128,epochs=100 ) | Digit Recognizer |
1,029,364 | def glimpse(df, maxvals=10, maxlen=110):
print('Shape: ', df.shape)
def pad(y):
max_len = max([len(x)for x in y])
return [x.ljust(max_len)for x in y]
toprnt = pad(df.columns.tolist())
toprnt = pad([toprnt[i] + ' ' + str(df.iloc[:,i].dtype)for i in range(df.shape[1])])
num_nas = [df.iloc[:,i].isnull().sum() for i in range(df.shape[1])]
num_nas_ratio = [int(round(x*100/df.shape[0])) for x in num_nas]
num_nas_str = [str(x)+ '(' + str(y)+ '%)' for x,y in zip(num_nas, num_nas_ratio)]
max_len = max([len(x)for x in num_nas_str])
num_nas_str = [x.rjust(max_len)for x in num_nas_str]
toprnt = [x + ' ' + y + ' NAs' for x,y in zip(toprnt, num_nas_str)]
toprnt = [x + ' : ' for x in toprnt]
toprnt = [toprnt[i] + ', '.join([str(y)for y in df.iloc[:min([maxvals,df.shape[0]]), i]])for i in range(df.shape[1])]
toprnt = [x[:min(maxlen, len(x)) ] for x in toprnt]
for x in toprnt:
print(x )<count_values> | def scoring(model):
model.load_weights('mnist.model.best')
score = model.evaluate(data[:2000], labels[:2000], verbose=0)
accuracy = 100*score[1]
print('Test accuracy: %.4f%%' % accuracy)
label_test = model.predict_classes(data_test)
print("Sample of the prdiction",label_test[:10])
return label_test | Digit Recognizer |
1,029,364 | train.first_affiliate_tracked.value_counts()<count_values> | label_test = scoring(model1 ) | Digit Recognizer |
1,029,364 | <count_duplicates><EOS> | np.savetxt("submission.csv", np.dstack(( np.arange(1, label_test.size+1),label_test)) [0],"%d,%d",header="ImageId,Label",comments="" ) | Digit Recognizer |
1,278,424 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<count_values> | %matplotlib inline
np.random.seed()
sns.set(style='white', context='notebook', palette='deep')
print("Done" ) | Digit Recognizer |
1,278,424 | train['gender'].value_counts()<feature_engineering> | dataset = pd.read_csv("../input/train.csv")
competition_dataset = pd.read_csv("../input/test.csv")
dataset.describe()
| Digit Recognizer |
1,278,424 | train['age'] = train['age'].apply(lambda x: 122 if x > 122 else x)
train['age'] = train['age'].apply(lambda x: 18 if x < 18 else x )<feature_engineering> | del dataset
print("Done" ) | Digit Recognizer |
1,278,424 | test['age'] = test['age'].apply(lambda x: 122 if x > 122 else x)
test['age'] = test['age'].apply(lambda x: 18 if x < 18 else x )<count_values> | label = to_categorical(label, num_classes = 10)
feature = feature / 255.0
competition_dataset = competition_dataset / 255.0
print("Done" ) | Digit Recognizer |
1,278,424 | train['signup_flow'].value_counts()<count_values> | feature_train, feature_val, label_train, label_val = train_test_split(feature, label, test_size = 0.1, stratify=label ) | Digit Recognizer |
1,278,424 | train['signup_method'].value_counts()<count_duplicates> | model_1 = Sequential()
model_1.add(Dense(200, activation = "relu", input_shape =(784,)))
model_1.add(Dense(100, activation = "relu"))
model_1.add(Dense(60, activation = "relu"))
model_1.add(Dense(30, activation = "relu"))
model_1.add(Dense(10, activation = "softmax"))
optimizer = optimizers.SGD(lr=0.03, clipnorm=5.)
model_1.compile(optimizer= optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
print(model_1.summary() ) | Digit Recognizer |
1,278,424 | train[train.duplicated() ]<count_values> | history = model_1.fit(feature_train, label_train, batch_size = 100, epochs = 8,
validation_data =(feature_val, label_val), verbose = 1)
| Digit Recognizer |
1,278,424 | session_df['device_type'].value_counts()<count_values> | feature_train, feature_val, label_train, label_val = train_test_split(feature, label, test_size = 0.1, stratify=label ) | Digit Recognizer |
1,278,424 | session_df['action_detail'].value_counts() [:10]<filter> | model_2 = Sequential()
model_2.add(Conv2D(filters = 4, kernel_size =(5,5), strides = 1, padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model_2.add(Conv2D(filters = 8, kernel_size =(4,4), strides = 2, padding = 'Same',
activation ='relu'))
model_2.add(Conv2D(filters = 12, kernel_size =(4,4), strides = 2, padding = 'Same',
activation ='relu'))
model_2.add(Flatten())
model_2.add(Dense(200, activation = "relu"))
model_2.add(Dense(10, activation = "softmax"))
optimizer = optimizers.SGD(lr=0.03, clipnorm=5.)
model_2.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
print(model_2.summary())
| Digit Recognizer |
1,278,424 | view_search_time = session_df[session_df.action_detail == 'view_search_results']
view_search_time<drop_column> | history = model_2.fit(feature_train, label_train, batch_size = 100, epochs = 16,
validation_data =(feature_val, label_val), verbose = 1)
| Digit Recognizer |
1,278,424 | labels = train['country_destination']
train.drop('country_destination', inplace = True, axis = 1 )<concatenate> | datagen = ImageDataGenerator(
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(feature_train)
print("Done" ) | Digit Recognizer |
1,278,424 | data = pd.concat(( train, test), axis=0, ignore_index=True)
data = data.drop(['id', 'date_first_booking'], axis=1 )<categorify> | model_3 = Sequential()
model_3.add(Conv2D(filters = 6, kernel_size =(6,6), strides = 1, padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model_3.add(Conv2D(filters = 12, kernel_size =(5,5), strides = 2, padding = 'Same',
activation ='relu'))
model_3.add(Conv2D(filters = 24, kernel_size =(4,4), strides = 2, padding = 'Same',
activation ='relu'))
model_3.add(Flatten())
model_3.add(Dense(200, activation = "relu"))
model_3.add(Dropout(0.40))
model_3.add(Dense(10, activation = "softmax"))
model_3.compile(optimizer = 'adam' , loss = "categorical_crossentropy", metrics=["accuracy"])
print(model_3.summary() ) | Digit Recognizer |
1,278,424 | cat_features = ['gender', 'signup_method', 'signup_flow', 'language', 'affiliate_channel',
'affiliate_provider', 'first_affiliate_tracked', 'signup_app', 'first_device_type', 'first_browser']
for f in cat_features:
data_dummy = pd.get_dummies(data[f], prefix=f)
data.drop([f], axis=1, inplace = True)
data = pd.concat(( data, data_dummy), axis=1 )<import_modules> | history = model_3.fit(datagen.flow(feature_train,label_train, batch_size=100),
epochs = 8, validation_data =(feature_val, label_val),
verbose = 2 ) | Digit Recognizer |
1,278,424 | from datetime import datetime
from sklearn.preprocessing import LabelEncoder<data_type_conversions> | model_4 = Sequential()
model_4.add(Conv2D(filters = 32, kernel_size =(5,5), strides = 1, padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model_4.add(BatchNormalization())
model_4.add(Conv2D(filters = 32, kernel_size =(5,5), strides = 1, padding = 'Same',
activation ='relu'))
model_4.add(BatchNormalization())
model_4.add(Dropout(0.4))
model_4.add(Conv2D(filters = 64, kernel_size =(3,3), strides = 2, padding = 'Same',
activation ='relu'))
model_4.add(BatchNormalization())
model_4.add(Conv2D(filters = 64, kernel_size =(3,3), strides = 2, padding = 'Same',
activation ='relu'))
model_4.add(BatchNormalization())
model_4.add(Dropout(0.4))
model_4.add(Flatten())
model_4.add(Dense(256, activation = "relu"))
model_4.add(Dropout(0.4))
model_4.add(Dense(10, activation = "softmax"))
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model_4.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
print(model_4.summary() ) | Digit Recognizer |
1,278,424 | data['date_account_created'] = pd.to_datetime(data['date_account_created'] )<feature_engineering> | history = model_4.fit(datagen.flow(feature_train,label_train, batch_size=100),
epochs = 35, validation_data =(feature_val, label_val),
verbose = 2, callbacks=[learning_rate_reduction] ) | Digit Recognizer |
1,278,424 | data['ac_year'] = data['date_account_created'].dt.year
data['ac_month'] = data['date_account_created'].dt.month
data['ac_day'] = data['date_account_created'].dt.day<drop_column> | results = model_4.predict(competition_dataset)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" ) | Digit Recognizer |
1,278,424 | <data_type_conversions><EOS> | submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("submission_MNIST.csv",index=False)
| Digit Recognizer |
797,209 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering> | %matplotlib inline
| Digit Recognizer |
797,209 | data['ts_fa_year'] = data['timestamp_first_active'].dt.year
data['ts_fa_month'] = data['timestamp_first_active'].dt.month
data['ts_fa_day'] = data['timestamp_first_active'].dt.day<drop_column> | train = pd.read_csv("../input/train.csv").values
test = pd.read_csv("../input/test.csv").values
X_train = train[:, 1:].astype('float32')
Y_train = train[:, 0].astype('int32')
X_test = test[:, :].astype('float32' ) | Digit Recognizer |
797,209 | data.drop('timestamp_first_active', inplace = True, axis = 1 )<categorify> | X_train = X_train/255.
X_test = X_test/255.
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
Y_train = to_categorical(Y_train, num_classes=10)
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.15, random_state=23)
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range=0.15,
width_shift_range=0.15,
height_shift_range=0.15)
datagen.fit(X_train ) | Digit Recognizer |
797,209 | le = LabelEncoder()
y = le.fit_transform(labels )<prepare_x_and_y> | model = Sequential()
model.add(Conv2D(16, kernel_size=(5, 5),
activation='relu',
input_shape=(28, 28, 1)))
model.add(Conv2D(32, kernel_size=(3, 3),
padding='same',
activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=(3, 3),
padding='same',
activation='relu'))
model.add(Conv2D(64, kernel_size=(3, 3),
padding='same',
activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss=categorical_crossentropy,
optimizer=RMSprop(lr=1e-3),
metrics=['accuracy'] ) | Digit Recognizer |
797,209 | X = data[:train.shape[0]]
X_test = data[train.shape[0]:]<train_model> | reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=0.2,
patience=2)
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=70),
epochs=20,
validation_data=(X_val, Y_val),
verbose=2,
callbacks=[reduce_lr] ) | Digit Recognizer |
797,209 | xgb = XGBClassifier(use_label_encoder=False)
xgb.fit(X, y)
<predict_on_test> | Y_val_pred = model.predict_classes(X_val)
Y_val_true = np.argmax(Y_val, axis=1)
print(confusion_matrix(Y_val_true, Y_val_pred)) | Digit Recognizer |
797,209 | y_pred = xgb.predict_proba(X_test )<define_variables> | Y_test_pred = model.predict_classes(X_test)
submission = pd.DataFrame({ 'ImageId': range(1, 28001), 'Label': Y_test_pred })
submission.to_csv("submission.csv", index=False ) | Digit Recognizer |
4,899,984 | ids = []
countries = []
test_id = test['id']<categorify> | from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop,Adam
from keras.preprocessing.image import ImageDataGenerator | Digit Recognizer |
4,899,984 | for i in range(len(test_id)) :
idx = test_id[i]
ids += [idx] * 5
countries += le.inverse_transform(np.argsort(y_pred[i])[::-1])[:5].tolist()<save_to_csv> | from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix | Digit Recognizer |
4,899,984 | submission = pd.DataFrame(np.column_stack(( ids, countries)) , columns=['id', 'country'])
submission.to_csv('submission.csv',index=False)<set_options> | train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv') | Digit Recognizer |
4,899,984 | pd.set_option('display.max_columns', 500)
warnings.filterwarnings("ignore" )<load_from_csv> | X_train = train_data.copy()
y_train = train_data['label']
del X_train['label']
X_test = test_data.copy()
y_train = to_categorical(y_train, num_classes = 10 ) | Digit Recognizer |
4,899,984 | %%time
train_cudf = cudf.read_csv('/kaggle/input/jane-street-market-prediction/train.csv')
train = train_cudf.to_pandas()
del train_cudf
features = pd.read_csv('../input/jane-street-market-prediction/features.csv')
example_test = pd.read_csv('../input/jane-street-market-prediction/example_test.csv')
sample_prediction_df = pd.read_csv('../input/jane-street-market-prediction/example_sample_submission.csv')
print("Data is loaded!" )<count_missing_values> | X_train = X_train.astype('float32')/255
X_test = X_test.astype('float32')/255 | Digit Recognizer |
4,899,984 | missing_values_count = train.isnull().sum()
print(missing_values_count)
total_cells = np.product(train.shape)
total_missing = missing_values_count.sum()
print("% of missing data = ",(total_missing/total_cells)* 100 )<prepare_x_and_y> | X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1, random_state=2020 ) | Digit Recognizer |
4,899,984 | train = train[train['weight'] != 0]
train = train.query('date > 85' ).reset_index(drop = True)
train = train.astype({c: np.float32 for c in train.select_dtypes(include='float64' ).columns})
train['action'] =(( train['weight'].values * train['resp'].values)> 0 ).astype('int')
train.fillna(train.mean() ,inplace=True)
cols = [c for c in train.columns if 'feature' in c]
X_train = train.loc[:, train.columns.str.contains('feature')]
y_train = train.loc[:, 'action']<drop_column> | model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax")) | Digit Recognizer |
4,899,984 | del x, y, train, features, example_test, sample_prediction_df<import_modules> | optimizer = RMSprop(lr=0.001,rho=0.9, epsilon=1e-08, decay=0.0 ) | Digit Recognizer |
4,899,984 | print("XGBoost version:", xgb.__version__ )<choose_model_class> | model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'] ) | Digit Recognizer |
4,899,984 | clf = xgb.XGBClassifier(
n_estimators=500,
max_depth=11,
learning_rate=0.05,
subsample=0.9,
colsample_bytree=0.7,
missing=-999,
random_state=2020,
tree_method='gpu_hist'
)<train_model> | epochs = 50
batch_size = 378 | Digit Recognizer |
4,899,984 | %time clf.fit(X_train, y_train )<predict_on_test> | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train ) | Digit Recognizer |
4,899,984 | TRAINING = True
start_time = time.time()
if TRAINING:
env = janestreet.make_env()
th = 0.5
for(test_df, pred_df)in tqdm(env.iter_test()):
if test_df['weight'].item() > 0:
x_tt = test_df.loc[:, test_df.columns.str.contains('feature')].values
if np.isnan(x_tt[:, 1:].sum()):
x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:])+ np.isnan(x_tt[:, 1:])
pred = clf.predict(x_tt)
pred_df.action = np.where(pred >= th, 1, 0 ).astype(int)
else:
pred_df.action = 0
env.predict(pred_df)
print(f"took: {time.time() - start_time} seconds" )<feature_engineering> | history = model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val, y_val),
steps_per_epoch=X_train.shape[0] // batch_size ) | Digit Recognizer |
4,899,984 | train = pd.read_csv('../input/jane-street-market-prediction/train.csv')
train = train.query('weight>0' ).reset_index(drop=True)
train.fillna(train.median() ,inplace=True)
train['feature_stock_id_sum'] = train['feature_41'] + train['feature_42'] + train['feature_43']
train['feature_1_2_cross'] = train['feature_1']/(train['feature_2']+1e-5)
NUM_TRAIN_EXAMPLES = len(train )<feature_engineering> | y_test = model.predict(X_test ) | Digit Recognizer |
4,899,984 | features = [c for c in train.columns if 'feature' in c]
f_mean = np.nanmean(train[features[1:]].values,axis=0 )<define_variables> | y_test_classes = np.argmax(y_test, axis = 1 ) | Digit Recognizer |
PATH = '../input/neutralizing2'
<define_variables> | num = range(1, len(y_test)+1)
output = pd.DataFrame({'ImageId': num,
'Label': y_test_classes})
output.to_csv('submission.csv', index=False ) | Digit Recognizer |
4,899,984 | <choose_model_class><EOS> | y_val_pred = model.predict(X_val ) | Digit Recognizer |
10,029,170 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering> | %matplotlib inline
| Digit Recognizer |
10,029,170 | def set_all_seeds(seed):
np.random.seed(seed)
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed )<normalization> | mnist_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
mnist_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" ) | Digit Recognizer |
10,029,170 | class NeutralizeTransform:
def __init__(self,proportion=1.0):
self.proportion = proportion
def fit(self,X,y):
self.lms = []
self.mean_exposure = np.mean(y,axis=0)
self.y_shape = y.shape[-1]
for x in X.T:
scores = x.reshape(( -1,1))
exposures = y
exposures = np.hstack(( exposures, np.array([np.mean(scores)] * len(exposures)).reshape(-1, 1)))
transform = np.linalg.lstsq(exposures, scores, rcond=None)[0]
self.lms.append(transform)
def transform(self,X,y=None):
out = []
for i,transform in enumerate(self.lms):
x = X[:,i]
scores = x.reshape(( -1,1))
exposures = np.repeat(self.mean_exposure,len(x),axis=0 ).reshape(( -1,self.y_shape))
exposures = np.concatenate([exposures,np.array([np.mean(scores)] * len(exposures)).reshape(( -1,1)) ],axis=1)
correction = self.proportion * exposures.dot(transform)
out.append(x - correction.ravel())
return np.asarray(out ).T
def fit_transform(self,X,y):
self.fit(X,y)
return self.transform(X,y)
<define_variables> | mnist_train.isna().any().any() | Digit Recognizer |
10,029,170 | TRAINING = False<categorify> | mnist_train_data = mnist_train.loc[:, "pixel0":]
mnist_train_label = mnist_train.loc[:, "label"]
mnist_train_data = mnist_train_data/255.0
mnist_test = mnist_test/255.0 | Digit Recognizer |
10,029,170 | %%time
if TRAINING:
mask = train[features].isna()
train.fillna(0,inplace=True)
for feature in features:
nt = NeutralizeTransform(proportion=0.25)
train[feature] = nt.fit_transform(train[feature].values.reshape(( -1,1)) ,
train['resp'].values.reshape(( -1,1)))
pd.to_pickle(nt,f'NeutralizeTransform_{feature}.pkl')
train[mask] = np.nan
else:
nts = []
for feature in features:
nt = pd.read_pickle(f'{PATH}/NeutralizeTransform_{feature}.pkl')
nts.append(nt )<set_options> | standardized_scalar = StandardScaler()
standardized_data = standardized_scalar.fit_transform(mnist_train_data)
standardized_data.shape | Digit Recognizer |
10,029,170 | gc.collect()<define_variables> | cov_matrix = np.matmul(standardized_data.T, standardized_data)
cov_matrix.shape | Digit Recognizer |
10,029,170 | TRAINING = False<prepare_x_and_y> | lambdas, vectors = eigh(cov_matrix, eigvals=(782, 783))
vectors.shape | Digit Recognizer |
10,029,170 | X_tr = train.query('date<350')[features].values
y_tr =(train.query('date<350')[resp_cols].values > 0 ).astype(int)
X_val = train.query('date>400')[features].values
y_val =(train.query('date>400')[resp_cols].values > 0 ).astype(int)
del train
gc.collect()
if TRAINING:
metric = {}
for seed in [2020,1982]:
set_all_seeds(seed)
model = create_model(X_tr.shape[-1])
hist = model.fit(X_tr,y_tr,
validation_data=(X_val,y_val),
epochs=200,
batch_size=8192,
callbacks=[tf.keras.callbacks.EarlyStopping('val_binary_accuracy',mode='max',patience=20,restore_best_weights=True),
tf.keras.callbacks.ReduceLROnPlateau('val_binary_accuracy',mode='max',patience=10,cooldown=5)])
model.save_weights(f'model_{seed}.tf')
metric[seed] = max(hist.history['val_binary_accuracy'])
tf.keras.backend.clear_session()
print(metric)
else:
models = []
for seed in [2020,1982]:
model = create_model(X_tr.shape[-1])
model.load_weights(f'{PATH}/model_{seed}.tf')
model.call = tf.function(model.call, experimental_relax_shapes=True)
models.append(model)
<feature_engineering> | new_coordinates = np.matmul(vectors, standardized_data.T)
print(new_coordinates.shape)
new_coordinates = np.vstack(( new_coordinates, mnist_train_label)).T | Digit Recognizer |
10,029,170 | if not TRAINING:
f = np.median
env = janestreet.make_env()
th = 0.495
for(test_df, pred_df)in tqdm(env.iter_test()):
if test_df['weight'].values[0] > 0:
test_df['feature_stock_id_sum'] = test_df['feature_41'] + test_df['feature_42'] + test_df['feature_43']
test_df['feature_1_2_cross'] = test_df['feature_1']/(test_df['feature_2']+1e-5)
x_tt = test_df.loc[:, features].values
if np.isnan(x_tt[:, 1:].sum()):
x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:])+ np.isnan(x_tt[:, 1:])* f_mean
for i in range(len(nts)) :
x_tt[:,i] = nts[i].transform(np.expand_dims(x_tt[:,i],0))
p = f(np.mean([model(x_tt,training=False ).numpy() for model in models],axis=0))
pred_df.action = np.where(p > th, 1, 0 ).astype(int)
else:
pred_df["action"].values[0] = 0
env.predict(pred_df )<import_modules> | df_new = pd.DataFrame(new_coordinates, columns=["f1", "f2", "labels"])
df_new.head() | Digit Recognizer |
10,029,170 |
<load_from_csv> | pca = decomposition.PCA()
pca.n_components = 2
pca_data = pca.fit_transform(standardized_data)
pca_data.shape | Digit Recognizer |
10,029,170 | start_time = time.time()
train = pd.read_csv('../input/jane-street-market-prediction/train.csv')
print('Load in data successful!')
print('-----------------------------------------------------------------------')
time_passed_m = int(( time.time() - start_time)// 60)
time_passed_s = int(( time.time() - start_time)% 60)
print(f'Time usage: {time_passed_m}m.{time_passed_s}s.' )<define_variables> | pca_data = np.vstack(( pca_data.T, mnist_train_label)).T | Digit Recognizer |
10,029,170 | SEED = 42
np.random.seed(SEED)
TRAINING_PGTS = True
MANUAL_VALIDATING = False
TRAINING = True
print(f'TRAINING_PGTS = {TRAINING_PGTS}\nMANUAL_VALIDATING = {MANUAL_VALIDATING}\nTRAINING = {TRAINING}')<prepare_x_and_y> | df_PCA = pd.DataFrame(new_coordinates, columns=["f1", "f2", "labels"])
df_PCA.head() | Digit Recognizer |
10,029,170 | train = train.query('date > 85' ).reset_index(drop = True)
train = train[train['weight'] != 0]
train.fillna(train.mean() , inplace=True)
train['action'] =(( train['resp'].values)> 0 ).astype(int)
features = [c for c in train.columns if 'feature' in c]
f_mean = np.mean(train[features[1:]].values, axis=0)
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4']
X_train = train.loc[:, train.columns.str.contains('feature')]
y_train = np.stack([(train[c] > 0 ).astype('int')for c in resp_cols] ).T
print(X_train.shape, y_train.shape)
if MANUAL_VALIDATING:
X_train_v = train[train['date'] <= 416]
y_train_v = np.stack([(X_train_v[c] > 0 ).astype('int')for c in resp_cols] ).T
X_train_v = X_train_v.loc[:, features].values
X_test_v = train[train['date'] > 416]
y_test_v = np.stack([(X_test_v[c] > 0 ).astype('int')for c in resp_cols] ).T
X_test_v = X_test_v.loc[:, features].values
print(X_test_v.shape, y_test_v.shape)
print('Done data preprocessing!' )<define_search_model> | mnist_train_data = np.array(mnist_train_data)
mnist_train_label = np.array(mnist_train_label ) | Digit Recognizer |
10,029,170 | def create_mlp(num_columns, num_labels, hidden_units,
dropout_rates, label_smoothing, learning_rate):
inp = Input(shape=(num_columns,))
x = BatchNormalization()(inp)
x = Dropout(dropout_rates[0] )(x)
for i in range(len(hidden_units)) :
x = Dense(hidden_units[i] )(x)
x = BatchNormalization()(x)
x = Activation(tf.keras.activations.swish )(x)
x = Dropout(dropout_rates[i+1] )(x)
x = Dense(num_labels )(x)
out = Activation('sigmoid' )(x)
model = tf.keras.models.Model(inputs=inp, outputs=out)
model.compile(
optimizer = tfa.optimizers.RectifiedAdam(learning_rate=learning_rate),
loss = tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing),
metrics = tf.keras.metrics.AUC(name='AUC')
)
return model<import_modules> | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Lambda, Flatten, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPool2D, AvgPool2D
from tensorflow.keras.optimizers import Adadelta
from keras.utils.np_utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import LearningRateScheduler | Digit Recognizer |
10,029,170 | class PurgedGroupTimeSeriesSplit(_BaseKFold):
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
max_train_group_size=np.inf,
max_test_group_size=np.inf,
group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.group_gap = group_gap
self.max_test_group_size = max_test_group_size
self.verbose = verbose
def split(self, X, y=None, groups=None):
if groups is None:
raise ValueError(
"The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
group_gap = self.group_gap
max_test_group_size = self.max_test_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if(groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}" ).format(n_folds,
n_groups))
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size,
n_groups, group_test_size)
for group_test_start in group_test_starts:
train_array = []
test_array = []
group_st = max(0, group_test_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(
np.concatenate(( train_array,
train_array_tmp)) ,
axis=None), axis=None)
train_end = train_array.size
for test_group_idx in unique_groups[group_test_start:
group_test_start +
group_test_size]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(
np.concatenate(( test_array,
test_array_tmp)) ,
axis=None), axis=None)
test_array = test_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i)for i in train_array], [int(i)for i in test_array]<prepare_x_and_y> | nclasses = mnist_train_label.max() - mnist_train_label.min() + 1
mnist_train_label = to_categorical(mnist_train_label, num_classes = nclasses)
print("Shape of ytrain after encoding: ", mnist_train_label.shape ) | Digit Recognizer |
10,029,170 | n_samples = 2000
n_groups = 20
assert n_samples % n_groups == 0
idx = np.linspace(0, n_samples-1, num=n_samples)
X_train_pgts = np.random.random(size=(n_samples, 5))
y_train_pgts = np.random.choice([0, 1], n_samples)
groups = np.repeat(np.linspace(0, n_groups-1, num=n_groups), n_samples/n_groups)
groups.shape<split> | def build_model(input_shape=(28, 28, 1)) :
model = Sequential()
model.add(Conv2D(32, kernel_size = 3, activation='relu', input_shape = input_shape))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, kernel_size = 4, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
return model
def compile_model(model, optimizer='adam', loss='categorical_crossentropy'):
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
def train_model(model, train, test, epochs, split):
history = model.fit(train, test, shuffle=True, epochs=epochs, validation_split=split)
return history | Digit Recognizer |
10,029,170 | FOLDS = 5
models = []
if TRAINING_PGTS:
gkf = PurgedGroupTimeSeriesSplit(n_splits=FOLDS, group_gap=20)
splits = list(gkf.split(y_train, groups=train['date'].values))
for fold,(train_indices, test_indices)in tqdm(enumerate(splits)) :
X_train_pgts, X_test_pgts = X_train.iloc[train_indices, :], X_train.iloc[test_indices, :]
y_train_pgts, y_test_pgts = y_train[train_indices], y_train[test_indices]
tf.keras.backend.clear_session()
model = create_mlp(len(features), 5, hidden_units,
dropout_rates, label_smoothing, learning_rate)
er = EarlyStopping(patience = 8,
restore_best_weights = True,
monitor = 'val_loss')
ReduceLR = tf.keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss',
factor = 0.1,
patience = 8,
verbose = 1,
mode = 'min')
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath = f'model3-1_{SEED}_{fold}.hdf5',
save_weights_only = True,
verbose = 0,
monitor = 'val_loss',
save_best_only = True)
nn_callbacks = [er, ReduceLR, model_checkpoint_callback]
model.fit(X_train_pgts, y_train_pgts,
validation_data =(X_test_pgts, y_test_pgts),
epochs = epochs[0],
batch_size = batch_size[1],
verbose=2,
callbacks = nn_callbacks)
models.append(model)
else:
for fold in tqdm(range(FOLDS)) :
tf.keras.backend.clear_session()
model = create_mlp(len(features), 5, hidden_units,
dropout_rates, label_smoothing, learning_rate)
model.load_weights(f'../input/js-model-v1/model4_{SEED}_{fold}.hdf5')
models.append(model )<choose_model_class> | cnn_model = build_model(( 28, 28, 1))
compile_model(cnn_model, 'adam', 'categorical_crossentropy')
model_history = train_model(cnn_model, mnist_train_data, mnist_train_label, 80, 0.2 ) | Digit Recognizer |
10,029,170 | if MANUAL_VALIDATING:
er = EarlyStopping(patience = 8,
restore_best_weights = True,
monitor = 'val_loss')
ReduceLR = tf.keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss',
factor = 0.1,
patience = 8,
verbose = 1,
mode = 'min')
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath = 'js_model_v4_weights.h5',
save_weights_only = True,
verbose = 0,
monitor = 'val_loss',
save_best_only = True)
nn_callbacks = [er, ReduceLR, model_checkpoint_callback]
clf = create_mlp(len(features), 5, hidden_units,
dropout_rates, label_smoothing, learning_rate)
history = clf.fit(
X_train_v, y_train_v,
epochs = epochs[0],
batch_size = batch_size,
verbose = 2,
validation_data =(X_test_v, y_test_v),
callbacks = nn_callbacks
)
del X_train_v, y_train_v, X_test_v, y_test_v
if TRAINING:
tf.keras.backend.clear_session()
tf.random.set_seed(SEED)
clf = create_mlp(len(features), 5, hidden_units,
dropout_rates, label_smoothing, learning_rate)
clf.fit(X_train, y_train, epochs=epochs[0],
batch_size=batch_size[1], verbose=2,
)
clf.save_weights('js_model_v2-1_weights.h5')
print('Training succeeded!\n')
else:
clf = create_mlp(len(features), 5, hidden_units,
dropout_rates, label_smoothing, learning_rate)
clf.load_weights('../input/js-model-v1/model3-1_1111_9.hdf5')
print('Loading succeeded!\n')
clf.summary()
models.append(clf )<feature_engineering> | predictions = cnn_model.predict(mnist_test_arr ) | Digit Recognizer |
10,029,170 | class JSMP_Dataset(Dataset):
def __init__(self, file_path, window_size):
self.file_path = file_path
self.window_size = window_size
train = pd.read_csv(file_path)
train = train.query('date > 85' ).reset_index(drop = True)
train.fillna(train.mean() ,inplace=True)
train['action'] =(( train['resp'].values)> 0 ).astype(int)
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4']
self.features = [c for c in train.columns if "feature" in c]
self.f_mean = np.mean(train[self.features[1:]].values,axis=0)
self.X_train = train.loc[:, train.columns.str.contains('feature')].values
self.y_train = np.stack([(train[c] > 0 ).astype('int')for c in resp_cols] ).T
self.X_train = torch.from_numpy(self.X_train ).float()
self.y_train = torch.from_numpy(self.y_train ).float()
del train
gc.collect()
def __len__(self):
return len(self.X_train)- self.window_size
def __getitem__(self, i):
data = self.X_train[i:(i+ self.window_size), :]
label = self.y_train[i + self.window_size - 1]
return data, label<create_dataframe> | predictions_test = []
for i in predictions:
predictions_test.append(np.argmax(i)) | Digit Recognizer |
10,029,170 | window_size = 5
file_path = '/kaggle/input/jane-street-market-prediction/train.csv'
ds = JSMP_Dataset(file_path, window_size )<normalization> | submission = pd.DataFrame({
"ImageId": mnist_test.index+1,
"Label": predictions_test
})
submission.to_csv('my_submission.csv', index=False ) | Digit Recognizer |
10,029,170 | class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self ).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
super(TemporalBlock, self ).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1)if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
super(TemporalConvNet, self ).__init__()
layers = []
num_levels = len(num_channels)
for i in range(num_levels):
dilation_size = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i-1]
out_channels = num_channels[i]
layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1)* dilation_size, dropout=dropout)]
self.network = nn.Sequential(*layers)
def forward(self, x):
return self.network(x )<choose_model_class> | %matplotlib inline
| Digit Recognizer |
10,029,170 | class TCN(nn.Module):
def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
super(TCN, self ).__init__()
self.tcn = TemporalConvNet(input_size, num_channels, kernel_size=kernel_size, dropout=dropout)
self.fc1 = nn.Linear(130 * num_channels[-1], 128)
self.dropout1 = nn.Dropout(dropout)
self.batch_norm1 = nn.BatchNorm1d(128)
self.LeakyReLU1 = nn.LeakyReLU(negative_slope=0.01, inplace=True)
self.fc2 = nn.Linear(128, 128)
self.dropout2 = nn.Dropout(dropout)
self.batch_norm2 = nn.BatchNorm1d(128)
self.LeakyReLU2 = nn.LeakyReLU(negative_slope=0.01, inplace=True)
self.fc3 = nn.Linear(128, output_size)
def forward(self, inputs):
y1 = self.tcn(inputs)
y1 = torch.flatten(y1, start_dim=1)
y1 = self.fc1(y1)
y1 = self.batch_norm1(y1)
y1 = self.LeakyReLU1(y1)
y1 = self.dropout1(y1)
y1 = self.fc2(y1)
y1 = self.batch_norm2(y1)
y1 = self.LeakyReLU2(y1)
y1 = self.dropout2(y1)
o = self.fc3(y1)
return torch.sigmoid(o )<find_best_params> | mnist_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
mnist_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" ) | Digit Recognizer |
10,029,170 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('use devise:', device)
net1 = TCN(input_size=5, output_size=5, num_channels=[16, 8, 4, 2], kernel_size=2, dropout=0.5)
net2 = TCN(input_size=5, output_size=5, num_channels=[16, 8, 4, 2], kernel_size=2, dropout=0.5)
net1.load_state_dict(torch.load('/kaggle/input/jsmp-tcn-pytorch/best_accuracy_model.mdl', map_location=torch.device(device)))
net2.load_state_dict(torch.load('/kaggle/input/jsmp-tcn-pytorch/best_loss_model.mdl', map_location=torch.device(device)))
net1.eval()
net2.eval()<feature_engineering> | mnist_train.isna().any().any() | Digit Recognizer |
10,029,170 | th = 0.5
env = janestreet.make_env()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('use devise:', device)
for i,(test_df, pred_df)in enumerate(env.iter_test()):
x_tt = test_df.loc[:, ds.features].values
if np.isnan(x_tt[:, 1:].sum()):
x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:])+ np.isnan(x_tt[:, 1:])* ds.f_mean
if i == 0:
x_window = x_tt.copy()
elif i < window_size:
x_window = np.concatenate([x_window, x_tt], axis=0)
else:
x_window = np.concatenate([x_window[1:, :], x_tt], axis=0)
if i < window_size - 1:
pred_df.action = 0
else:
if test_df['weight'].item() > 0:
inputs = torch.Tensor(x_window ).unsqueeze(0 ).to(device)
outputs =(net1(inputs)+ net2(inputs)) / 2
pred =(torch.median(outputs, axis=1 ).values > th ).long()
pred_df.action = pred.item()
else:
pred_df.action = 0
env.predict(pred_df )<import_modules> | mnist_train_data = mnist_train.loc[:, "pixel0":]
mnist_train_label = mnist_train.loc[:, "label"]
mnist_train_data = mnist_train_data/255.0
mnist_test = mnist_test/255.0 | Digit Recognizer |
10,029,170 | import os
import time
import gc
import random
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import pathlib
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import KFold
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args
import tensorflow as tf
from tensorflow import keras
import tensorflow_addons as tfa
from tensorflow.keras import layers<set_options> | standardized_scalar = StandardScaler()
standardized_data = standardized_scalar.fit_transform(mnist_train_data)
standardized_data.shape | Digit Recognizer |
10,029,170 | GPUs = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in GPUs:
tf.config.experimental.set_memory_growth(gpu, True )<import_modules> | cov_matrix = np.matmul(standardized_data.T, standardized_data)
cov_matrix.shape | Digit Recognizer |
10,029,170 | print('tensorflow_version_is',tf.__version__ )<define_variables> | lambdas, vectors = eigh(cov_matrix, eigvals=(782, 783))
vectors.shape | Digit Recognizer |
10,029,170 | SEED=42
def seed_everything(seed):
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
tf.random.set_seed(seed)
seed_everything(seed=SEED )<load_from_disk> | new_coordinates = np.matmul(vectors, standardized_data.T)
print(new_coordinates.shape)
new_coordinates = np.vstack(( new_coordinates, mnist_train_label)).T | Digit Recognizer |
10,029,170 | %%time
print('Loading data...')
train = pd.read_feather('../input/janestreet-save-as-feather/train.feather')
print('Done!' )<normalization> | df_new = pd.DataFrame(new_coordinates, columns=["f1", "f2", "labels"])
df_new.head() | Digit Recognizer |
10,029,170 | class Mish(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(Mish, self ).__init__(**kwargs)
self.supports_masking = True
def call(self, inputs):
return inputs * K.tanh(K.softplus(inputs))
def get_config(self):
base_config = super(Mish, self ).get_config()
return dict(list(base_config.items()))
def compute_output_shape(self, input_shape):
return input_shape
def mish(x):
return tf.keras.layers.Lambda(lambda x: x*K.tanh(K.softplus(x)) )(x)
tf.keras.utils.get_custom_objects().update({'mish': tf.keras.layers.Activation(mish)} )<data_type_conversions> | pca = decomposition.PCA()
pca.n_components = 2
pca_data = pca.fit_transform(standardized_data)
pca_data.shape | Digit Recognizer |
10,029,170 | train = train.query('date > 85' ).reset_index(drop = True)
train = train.query('weight > 0' ).reset_index(drop = True)
train.fillna(train.mean() ,inplace=True)
base_features = [c for c in train.columns if "feature" in c]
f_mean = np.mean(train[base_features[1:]].values,axis=0)
train['action'] =(train['resp'] > 0 ).astype('int')
train['action_1'] =(train['resp_1'] > 0 ).astype('int')
train['action_2'] =(train['resp_2'] > 0 ).astype('int')
train['action_3'] =(train['resp_3'] > 0 ).astype('int')
train['action_4'] =(train['resp_4'] > 0 ).astype('int')
def add_features(train_df):
train_df['feature_cross_41_42_43'] = train_df['feature_41']+train_df['feature_42']+train_df['feature_43']
train_df['feature_cross_1_2'] = train_df['feature_1']/(train_df['feature_2']+2e-5)
return train_df
train = add_features(train)
features = [c for c in train.columns if "feature" in c]
target_cols = ['action', 'action_1', 'action_2', 'action_3', 'action_4']
X = train[features].values
y = np.stack([(train[c] > 0 ).astype('int')for c in target_cols] ).T
X.shape, y.shape<choose_model_class> | pca_data = np.vstack(( pca_data.T, mnist_train_label)).T | Digit Recognizer |
10,029,170 | def RestNet_(num_columns,
num_labels,
hidden_size,
dropout_rate,
label_smoothing,
learning_rate):
inp = layers.Input(shape=(num_columns,))
x = layers.BatchNormalization()(inp)
x = layers.Dense(132 )(x)
x = layers.LeakyReLU()(x)
x = layers.Dropout(0.315 )(x)
x1 = layers.Dense(hidden_size*1.2 )(x)
x1 = layers.BatchNormalization()(x1)
x1 = layers.Activation('mish' )(x1)
x1 = layers.Dropout(dropout_rate )(x1)
x = layers.concatenate([x, x1], axis=1)
x2 = layers.Dense(hidden_size*1.1 )(x)
x2 = layers.BatchNormalization(axis=1 )(x2)
x2 = layers.Activation('mish' )(x2)
x2 = layers.Dropout(dropout_rate )(x2)
x = layers.concatenate([x1, x2], axis=1)
x3 = layers.Dense(hidden_size*1.0 )(x)
x3 = layers.BatchNormalization(axis=1 )(x3)
x3 = layers.Activation('mish' )(x3)
x3 = layers.Dropout(dropout_rate )(x3)
x = layers.concatenate([x2, x3], axis=1)
x4 = layers.Dense(hidden_size*0.9 )(x)
x4 = layers.BatchNormalization(axis=1 )(x4)
x4 = layers.Activation('mish' )(x4)
x4 = layers.Dropout(dropout_rate )(x4)
x = layers.concatenate([x3, x4], axis=1)
x5 = layers.Dense(hidden_size*0.8 )(x)
x5 = layers.BatchNormalization(axis=1 )(x5)
x5 = layers.LeakyReLU()(x5)
x5 = layers.Dropout(dropout_rate )(x5)
x = layers.concatenate([x1, x3, x5], axis=1)
x = layers.Dense(num_labels )(x)
out = layers.Activation("sigmoid" )(x)
model = tf.keras.models.Model(inputs=inp, outputs=out)
model.compile(optimizer=tfa.optimizers.RectifiedAdam(learning_rate=learning_rate,weight_decay=1e-5),
loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing),
metrics=tf.keras.metrics.AUC(name="AUC")
)
return model<choose_model_class> | df_PCA = pd.DataFrame(new_coordinates, columns=["f1", "f2", "labels"])
df_PCA.head() | Digit Recognizer |
10,029,170 | hidden_units = 256
dropout_rates = 0.3
label_smoothing = 5e-3
learning_rate = 1e-3
model = RestNet_(X.shape[1],
y.shape[1],
hidden_units,
dropout_rates,
label_smoothing,
learning_rate)
model.summary()<set_options> | mnist_train_data = np.array(mnist_train_data)
mnist_train_label = np.array(mnist_train_label ) | Digit Recognizer |
10,029,170 | del model
gc.collect()<define_search_model> | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Lambda, Flatten, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPool2D, AvgPool2D
from tensorflow.keras.optimizers import Adadelta
from keras.utils.np_utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import LearningRateScheduler | Digit Recognizer |
10,029,170 | class PurgedGroupTimeSeriesSplit(_BaseKFold):
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
max_train_group_size=np.inf,
max_test_group_size=np.inf,
group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.group_gap = group_gap
self.max_test_group_size = max_test_group_size
self.verbose = verbose
def split(self, X, y=None, groups=None):
if groups is None:
raise ValueError(
"The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
group_gap = self.group_gap
max_test_group_size = self.max_test_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if(groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}" ).format(n_folds,
n_groups))
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size,
n_groups, group_test_size)
for group_test_start in group_test_starts:
train_array = []
test_array = []
group_st = max(0, group_test_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(
np.concatenate(( train_array,
train_array_tmp)) ,
axis=None), axis=None)
train_end = train_array.size
for test_group_idx in unique_groups[group_test_start:
group_test_start +
group_test_size]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(
np.concatenate(( test_array,
test_array_tmp)) ,
axis=None), axis=None)
test_array = test_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i)for i in train_array], [int(i)for i in test_array]<define_search_model> | nclasses = mnist_train_label.max() - mnist_train_label.min() + 1
mnist_train_label = to_categorical(mnist_train_label, num_classes = nclasses)
print("Shape of ytrain after encoding: ", mnist_train_label.shape ) | Digit Recognizer |
10,029,170 | NUM_FOLDS = 7
EPOCHS = 500
BATCH_SIZE = 6500
TRAINING = False
CV = True
if TRAINING:
if CV:
gkf = PurgedGroupTimeSeriesSplit(n_splits = NUM_FOLDS, group_gap=15)
splits = list(gkf.split(y, groups=train['date'].values))
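# one model per fold: checkpoint the best val AUC weights during training and also
# save the final weights for this seed/fold pair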
for fold,(train_indices, test_indices)in enumerate(splits):
keras.backend.clear_session()
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
verbose=1,
factor=0.2,
patience=12, mode='min')
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=25)
checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath='logs/BestVal_model_{}.h5'.format(fold),
save_weights_only=True,
monitor='val_AUC',
mode='max',
verbose=1,
save_best_only=True)
model = RestNet_(X.shape[1],
y.shape[1],
hidden_units,
dropout_rates,
label_smoothing,
learning_rate)
X_train, X_test = X[train_indices], X[test_indices]
y_train, y_test = y[train_indices], y[test_indices]
model.fit(X_train,
y_train,
validation_data=(X_test,y_test),
epochs=EPOCHS,
batch_size=BATCH_SIZE,
callbacks=[reduce_lr,
early_stop,
checkpoint_callback])
model.save_weights(f'logs/model_{SEED}_{fold}.h5')
del model
gc.collect()
else:
keras.backend.clear_session()
model = RestNet_(X.shape[1],
y.shape[1],
hidden_units,
dropout_rates,
label_smoothing,
learning_rate)
model.fit(X,
y,
epochs=EPOCHS,
batch_size=BATCH_SIZE)
model.save_weights(f'logs/model_{SEED}_NONE.h5')
else:
SEED1=42
Full_models_42 = []
for fold in range(NUM_FOLDS):
tf.keras.backend.clear_session()
model1 = RestNet_(X.shape[1],
y.shape[1],
hidden_units,
dropout_rates,
label_smoothing,
learning_rate)
model1.load_weights(pathlib.Path(f'../input/drop0315/model_{SEED1}_{fold}.h5'))
Full_models_42.append(model1)
SEED2=73
Full_models_73 = []
for fold in range(NUM_FOLDS):
tf.keras.backend.clear_session()
model2 = RestNet_(X.shape[1],
y.shape[1],
hidden_units,
dropout_rates,
label_smoothing,
learning_rate)
model2.load_weights(pathlib.Path(f'../input/73cv7/model_{SEED2}_{fold}.h5'))
Full_models_73.append(model2)
SEED3=2021
Full_models_2021 = []
for fold in range(NUM_FOLDS):
tf.keras.backend.clear_session()
model3 = RestNet_(X.shape[1],
y.shape[1],
hidden_units,
dropout_rates,
label_smoothing,
learning_rate)
model3.load_weights(pathlib.Path(f'../input/2021cv7/model_{SEED3}_{fold}.h5'))
Full_models_2021.append(model3)
<categorify> | def build_model(input_shape=(28, 28, 1)) :
model = Sequential()
model.add(Conv2D(32, kernel_size = 3, activation='relu', input_shape = input_shape))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, kernel_size = 4, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
return model
def compile_model(model, optimizer='adam', loss='categorical_crossentropy'):
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
def train_model(model, train, test, epochs, split):
history = model.fit(train, test, shuffle=True, epochs=epochs, validation_split=split)
return history | Digit Recognizer |
10,029,170 | %%time
class LiteModel:
@classmethod
def from_file(cls, model_path):
return LiteModel(tf.lite.Interpreter(model_path=model_path))
@classmethod
def from_keras_model(cls, kmodel):
converter = tf.lite.TFLiteConverter.from_keras_model(kmodel)
tflite_model = converter.convert()
return LiteModel(tf.lite.Interpreter(model_content=tflite_model))
def __init__(self, interpreter):
self.interpreter = interpreter
self.interpreter.allocate_tensors()
input_det = self.interpreter.get_input_details() [0]
output_det = self.interpreter.get_output_details() [0]
self.input_index = input_det["index"]
self.output_index = output_det["index"]
self.input_shape = input_det["shape"]
self.output_shape = output_det["shape"]
self.input_dtype = input_det["dtype"]
self.output_dtype = output_det["dtype"]
def predict(self, inp):
inp = inp.astype(self.input_dtype)
count = inp.shape[0]
out = np.zeros(( count, self.output_shape[1]), dtype=self.output_dtype)
for i in range(count):
self.interpreter.set_tensor(self.input_index, inp[i:i+1])
self.interpreter.invoke()
out[i] = self.interpreter.get_tensor(self.output_index)[0]
return out
def predict_single(self, inp):
inp = np.array([inp], dtype=self.input_dtype)
self.interpreter.set_tensor(self.input_index, inp)
self.interpreter.invoke()
out = self.interpreter.get_tensor(self.output_index)
return out[0]
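# each Keras fold model is converted to a TFLite interpreter below, which avoids most of
# the per-call overhead of model.predict in the row-by-row inference loop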
tflite_models_42=[]
for i in range(len(Full_models_42)) :
tflite_model_ = LiteModel.from_keras_model(Full_models_42[i])
tflite_models_42.append(tflite_model_)
tflite_models_73=[]
for i in range(len(Full_models_73)) :
tflite_model_ = LiteModel.from_keras_model(Full_models_73[i])
tflite_models_73.append(tflite_model_)
tflite_models_2021=[]
for i in range(len(Full_models_2021)) :
tflite_model_ = LiteModel.from_keras_model(Full_models_2021[i])
tflite_models_2021.append(tflite_model_)
print(len(tflite_models_42)+ len(tflite_models_73)+ len(tflite_models_2021))<define_search_space> | cnn_model = build_model(( 28, 28, 1))
compile_model(cnn_model, 'adam', 'categorical_crossentropy')
model_history = train_model(cnn_model, mnist_train_data, mnist_train_label, 80, 0.2 ) | Digit Recognizer |
10,029,170 | f = np.median
th = 0.502
# note: np.average at inference needs one weight per fold model(NUM_FOLDS = 7); the added leading 1 is an assumed value to make the lengths match
weight_model = [1,1,1,1,2,2,2]<feature_engineering> | predictions = cnn_model.predict(mnist_test_arr )
10,029,170 | env = janestreet.make_env()
for(test_df, pred_df)in tqdm(env.iter_test()):
if test_df['weight'].item() > 0:
test_df_ = test_df.loc[:, base_features].values
if np.isnan(test_df_[:, 1:].sum()):
test_df_[:, 1:] = np.nan_to_num(test_df_[:, 1:])+ np.isnan(test_df_[:, 1:])* f_mean
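# rebuild the two cross features used at training time(sum of features 41-43 and the
# ratio feature_1 / feature_2)and append them so the row matches the model's input layout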
cross_41_42_43 = test_df_[:, 41] + test_df_[:, 42] + test_df_[:, 43]
cross_1_2 = test_df_[:, 1] /(test_df_[:, 2] + 2e-5)
x_tt = np.concatenate((
test_df_,
np.array(cross_41_42_43 ).reshape(test_df_.shape[0], 1),
np.array(cross_1_2 ).reshape(test_df_.shape[0], 1),
), axis=1)
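# per seed: weighted average over that seed's fold models, reduced with f(np.median);
# the three seed predictions are then blended 0.3/0.3/0.4 and thresholded at th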
pred_42 = np.average([clf.predict(x_tt)for clf in tflite_models_42], axis=0, weights=np.array(weight_model))
pred_42 = f(pred_42)
pred_73 = np.average([clf.predict(x_tt)for clf in tflite_models_73], axis=0, weights=np.array(weight_model))
pred_73 = f(pred_73)
pred_2021 = np.average([clf.predict(x_tt)for clf in tflite_models_2021], axis=0, weights=np.array(weight_model))
pred_2021 = f(pred_2021)
pred = pred_42*0.3 + pred_73*0.3 + pred_2021*0.4
pred_df.action = np.where(pred >= th, 1, 0 ).astype(int)
else:
pred_df.action = 0
env.predict(pred_df )<set_options> | predictions_test = []
for i in predictions:
predictions_test.append(np.argmax(i)) | Digit Recognizer |
10,029,170 | !mkdir cache<import_modules> | submission = pd.DataFrame({
"ImageId": mnist_test.index+1,
"Label": predictions_test
})
submission.to_csv('my_submission.csv', index=False ) | Digit Recognizer |
10,029,170 | class PurgedGroupTimeSeriesSplit(_BaseKFold):
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
max_train_group_size=np.inf,
max_test_group_size=np.inf,
group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.group_gap = group_gap
self.max_test_group_size = max_test_group_size
self.verbose = verbose
def split(self, X, y=None, groups=None):
if groups is None:
raise ValueError(
"The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
group_gap = self.group_gap
max_test_group_size = self.max_test_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if(groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}" ).format(n_folds,
n_groups))
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size,
n_groups, group_test_size)
for group_test_start in group_test_starts:
train_array = []
test_array = []
group_st = max(0, group_test_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(
np.concatenate(( train_array,
train_array_tmp)) ,
axis=None), axis=None)
train_end = train_array.size
for test_group_idx in unique_groups[group_test_start:
group_test_start +
group_test_size]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(
np.concatenate(( test_array,
test_array_tmp)) ,
axis=None), axis=None)
test_array = test_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i)for i in train_array], [int(i)for i in test_array]
<import_modules> | %matplotlib inline
| Digit Recognizer |
10,029,170 | class FinData(Dataset):
def __init__(self, data, target, date, mode='train', transform=None, cache_dir=None, multi=False):
self.data = data
self.target = target
self.mode = mode
self.transform = transform
self.cache_dir = cache_dir
self.date = date
self.multi = multi
def __getitem__(self, index):
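# accepts either a single index or a list of indices(the BatchSampler-based loaders
# below pass whole batches of indices at once); `multi` switches between the single
# action target and the multi-label resp targets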
if torch.is_tensor(index):
index = index.tolist()
if self.transform:
return self.transform(self.data.iloc[index].values)
else:
if type(index)is list:
if self.multi == False:
sample = {
'target': torch.Tensor(self.target.iloc[index].values),
'data': torch.FloatTensor(self.data[index]),
'date': torch.Tensor(self.date.iloc[index].values)
}
elif self.multi == True:
sample = {
'target': torch.Tensor(self.target[index]),
'data': torch.FloatTensor(self.data[index]),
'date': torch.Tensor(self.date.iloc[index].values)
}
else:
if self.multi == False:
sample = {
'target': torch.Tensor(self.target.iloc[index]),
'data': torch.FloatTensor(self.data[index]),
'date': torch.Tensor(self.date.iloc[index])
}
elif self.multi == True:
sample = {
'target': torch.Tensor(self.target[index]),
'data': torch.FloatTensor(self.data[index]),
'date': torch.Tensor(self.date.iloc[index])
}
return sample
def __len__(self):
return len(self.data)
def load_data(root_dir, mode, overide=None):
if overide:
data = dt.fread(overide ).to_pandas()
elif mode == 'train':
data = dt.fread(root_dir + 'train.csv' ).to_pandas()
elif mode == 'test':
data = dt.fread(root_dir + 'example_test.csv' ).to_pandas()
elif mode == 'sub':
data = dt.fread(root_dir + 'example_sample_submission.csv' ).to_pandas()
return data
def preprocess_data(data: pd.DataFrame, scale: bool = False, nn: bool = False,
action: str = 'weight'):
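# drops zero-weight rows and early dates, builds the binary action target(s)from the
# resp columns according to `action`, keeps the feature_* columns plus weight, and
# optionally standard-scales or converts to a numpy array for the NN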
data = data.query('weight > 0' ).reset_index(drop=True)
data = data.query('date > 85' ).reset_index(drop=True)
if action == 'weight':
data['action'] =(
(data['weight'].values * data['resp'].values)> 0 ).astype('float32')
if action == 'combined':
data['action'] =(
(data['resp'].values > 0)&(data['resp_1'].values > 0)&(data['resp_2'].values > 0)&(
data['resp_3'].values > 0)&(data['resp_4'].values > 0)).astype('float32')
if action == 'multi':
resp_cols = ['resp', 'resp_1', 'resp_2', 'resp_3', 'resp_4']
for i in range(len(resp_cols)) :
data['action_' + str(i)] =(data['weight'] * data[resp_cols[i]] > 0 ).astype('int')
features = [col for col in data.columns if 'feature' in col] + ['weight']
date = data['date']
if action == 'multi':
target = np.array([data['action_' + str(i)]
for i in range(len(resp_cols)) ] ).T
else:
target = data['action']
data = data[features]
if scale:
scaler = StandardScaler()
data = scaler.fit_transform(data)
if not scale and nn:
data = data.values
return data, target, features, date
def calc_data_mean(array, cache_dir=None, fold=None, train=True, mode='mean'):
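# fills NaNs column-wise with the mean, median or zero; at train time the statistics are
# saved under cache_dir so the identical fill values can be reloaded at inference time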
if train:
if mode == 'mean':
f_mean = np.nanmean(array, axis=0)
if cache_dir and fold:
np.save(f'{cache_dir}/f_{fold}_mean.npy', f_mean)
elif cache_dir:
np.save(f'{cache_dir}/f_mean.npy', f_mean)
array = np.nan_to_num(array)+ np.isnan(array)* f_mean
if mode == 'median':
f_med = np.nanmedian(array, axis=0)
if cache_dir and fold:
np.save(f'{cache_dir}/f_{fold}_median.npy', f_med)
elif cache_dir:
np.save(f'{cache_dir}/f_median.npy', f_med)
array = np.nan_to_num(array)+ np.isnan(array)* f_med
if mode == 'zero':
array = np.nan_to_num(array)+ np.isnan(array)* 0
if not train:
if mode == 'mean':
f_mean = np.load(f'{cache_dir}/f_mean.npy')
array = np.nan_to_num(array)+ np.isnan(array)* f_mean
if mode == 'median':
f_med = np.load(f'{cache_dir}/f_median.npy')
array = np.nan_to_num(array)+ np.isnan(array)* f_med
if mode == 'zero':
array = np.nan_to_num(array)+ np.isnan(array)* 0
return array
def weighted_mean(scores, sizes):
largest = np.max(sizes)
weights = [size / largest for size in sizes]
return np.average(scores, weights=weights)
def create_dataloaders(dataset: Dataset, indexes: dict, batch_size):
train_idx = indexes.get('train', None)
val_idx = indexes.get('val', None)
test_idx = indexes.get('test', None)
dataloaders = {}
if train_idx:
train_set = Subset(
dataset, train_idx)
train_sampler = BatchSampler(SequentialSampler(
train_set), batch_size=batch_size, drop_last=False)
dataloaders['train'] = DataLoader(
dataset, sampler=train_sampler, num_workers=10, pin_memory=True)
if val_idx:
val_set = Subset(dataset, val_idx)
val_sampler = BatchSampler(SequentialSampler(
val_set), batch_size=batch_size, drop_last=False)
dataloaders['val'] = DataLoader(
dataset, sampler=val_sampler, num_workers=10, pin_memory=True)
if test_idx:
test_set = Subset(dataset, test_idx)
test_sampler = BatchSampler(SequentialSampler(
test_set), batch_size=batch_size, drop_last=False)
dataloaders['test'] = DataLoader(
dataset, sampler=test_sampler, num_workers=10, pin_memory=True)
return dataloaders
def seed_everything(seed):
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def load_model(path, input_size, output_size, p, pl_lightning):
if os.path.isdir(path):
models = []
for file in os.listdir(path):
if pl_lightning:
model = Regressor.load_from_checkpoint(checkpoint_path=file, input_size=input_size,
output_size=output_size, params=p)
else:
model = Regressor(input_size, output_size, params=p)
model.load_state_dict(torch.load(f'{path}/{file}'))
models.append(model)
return models
elif os.path.isfile(path):
if pl_lightning:
return Regressor.load_from_checkpoint(checkpoint_path=path, input_size=input_size,
output_size=output_size, params=p)
else:
model = Regressor(input_size, output_size, params=p)
model.load_state_dict(torch.load(path))
return model
<choose_model_class> | mnist_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
mnist_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" ) | Digit Recognizer |