kernel_id: int64 (range 24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
3,811,526
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'): all_data[col] = all_data[col].fillna('None')<data_type_conversions>
X_val_check[Ypred_val_check != Y_val_check.values].shape[0] / X_val_check.shape[0]
Digit Recognizer
3,811,526
for col in ('GarageYrBlt', 'GarageCars'): all_data[col] = all_data[col].fillna(0)<data_type_conversions>
display_digits(dim=(2, 3), X=X_val_check[Ypred_val_check != Y_val_check.values], Y_true=Y_val_check.values[Ypred_val_check != Y_val_check.values], pred=Ypred_val_check[Ypred_val_check != Y_val_check.values])
Digit Recognizer
4,285,477
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'): all_data[col] = all_data[col].fillna(0)<data_type_conversions>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv')
Digit Recognizer
4,285,477
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'): all_data[col] = all_data[col].fillna('None')<data_type_conversions>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv')
Digit Recognizer
4,285,477
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None") all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0 )<data_type_conversions>
def clean_inputs(train, test, img_shape=(-1, 28, 28, 1), num_classes=10): t_X = train.drop("label", axis=1) t_Y = train["label"] t_X = t_X / 255 test_x = test.values / 255 t_X = np.reshape(t_X.values, img_shape) test_x = np.reshape(test_x, img_shape) t_Y = keras.utils.to_categorical(t_Y, num_classes=num_classes) train_x, dev_x, train_y, dev_y = train_test_split(t_X, t_Y, test_size=0.15, random_state=0) return train_x, train_y, dev_x, dev_y, test_x
Digit Recognizer
4,285,477
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])<drop_column>
train_x, train_y, dev_x, dev_y, test_x = clean_inputs(train, test)
Digit Recognizer
4,285,477
all_data = all_data.drop(['Utilities'], axis=1)<data_type_conversions>
def model(inp_shape): X = Input(inp_shape, name='input') A = Conv2D(6, (7, 7), strides=(1, 1), padding='Same', activation='relu', name='C1')(X) A = MaxPooling2D(pool_size=2, padding='valid')(A) A = Conv2D(16, (5, 5), strides=(1, 1), padding='Same', activation='relu', name='C2')(A) A = MaxPooling2D(pool_size=2, padding='valid')(A) A = Flatten()(A) A = BatchNormalization()(A) A = Dense(120, activation='relu', kernel_regularizer=regularizers.l2(0.01), name='FC1')(A) A = Dense(84, activation='relu', kernel_regularizer=regularizers.l2(0.01), name='FC2')(A) A = Dense(10, activation='softmax', name='Final')(A) model = Model(inputs=X, outputs=A, name='LeNet') return model
Digit Recognizer
4,285,477
all_data["Functional"] = all_data["Functional"].fillna("Typ" )<data_type_conversions>
datagen = ImageDataGenerator(rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1) datagen.fit(train_x)
Digit Recognizer
4,285,477
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])<data_type_conversions>
train_x_pad = np.pad(train_x, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='constant', constant_values=0).astype(float) dev_x_pad = np.pad(dev_x, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='constant', constant_values=0).astype(float) test_x_pad = np.pad(test_x, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='constant', constant_values=0).astype(float) learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001) model = model(train_x_pad.shape[1:]) model.summary() model.compile('adam', 'categorical_crossentropy', metrics=['accuracy']) history = model.fit_generator(datagen.flow(train_x_pad, train_y, batch_size=32), validation_data=(dev_x_pad, dev_y), steps_per_epoch=len(train_x_pad)//32, epochs=25, callbacks=[learning_rate_reduction])
Digit Recognizer
4,285,477
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])<categorify>
def model2(num_classes=10): model = Sequential() model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation="softmax")) return model
Digit Recognizer
4,285,477
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0]) all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])<data_type_conversions>
start = time.time() model2 = model2(10) learning_rate_reduction2 = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001) model2.summary() model2.compile('adam', 'categorical_crossentropy', metrics=['accuracy']) history2 = model2.fit_generator(datagen.flow(train_x, train_y, batch_size=32), validation_data=(dev_x, dev_y), steps_per_epoch=len(train_x)//32, epochs=25, callbacks=[learning_rate_reduction2]) timeRecord = time.time() - start print("--- %s seconds ---" % (timeRecord))
Digit Recognizer
4,285,477
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])<data_type_conversions>
def model3(num_classes=10): model = Sequential() model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(28, 28, 1))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(256, activation="relu")) model.add(Dense(num_classes, activation="softmax")) return model
Digit Recognizer
4,285,477
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")<sort_values>
start = time.time() model3 = model3(10) learning_rate_reduction3 = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001) model3.summary() model3.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) history3 = model3.fit_generator(datagen.flow(train_x, train_y, batch_size=64), validation_data=(dev_x, dev_y), steps_per_epoch=len(train_x)//64, epochs=25, callbacks=[learning_rate_reduction3]) timeRecord = time.time() - start print("--- %s seconds ---" % (timeRecord))
Digit Recognizer
4,285,477
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100 all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False) missing_data = pd.DataFrame({'Missing Ratio': all_data_na}) missing_data.head()<data_type_conversions>
prediction = model2.predict(test_x) prediction = np.argmax(prediction, axis=1) prediction = pd.Series(prediction, name="Label") submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), prediction], axis=1) submission.to_csv('mnist-submission.csv', index=False) print(submission)
Digit Recognizer
4,315,566
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str) all_data['OverallCond'] = all_data['OverallCond'].astype(str) all_data['YrSold'] = all_data['YrSold'].astype(str) all_data['MoSold'] = all_data['MoSold'].astype(str)<categorify>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
4,315,566
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond', 'ExterQual', 'ExterCond', 'HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1', 'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope', 'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond', 'YrSold', 'MoSold') for c in cols: lbl = LabelEncoder() lbl.fit(list(all_data[c].values)) all_data[c] = lbl.transform(list(all_data[c].values)) print('Shape all_data: {}'.format(all_data.shape))<feature_engineering>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
4,315,566
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']<feature_engineering>
Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1) del train g = sb.countplot(Y_train) Y_train.value_counts() print(X_train.shape) print(test.shape )
Digit Recognizer
4,315,566
y_train = np.log(y_train)<feature_engineering>
X_train = X_train / 255.0 test = test / 255.0 X_train = X_train.values.reshape(-1, 28, 28, 1) test = test.values.reshape(-1, 28, 28, 1) Y_train = to_categorical(Y_train, num_classes=10) X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1)  # test_size=1 in the original would hold out a single sample; 0.1 is the conventional split and is almost certainly what was intended
Digit Recognizer
4,315,566
skewness = skewness[abs(skewness) > 0.75] print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0])) skewed_features = skewness.index lam = 0.15 for feat in skewed_features: all_data[feat] = boxcox1p(all_data[feat], lam)<categorify>
model = Sequential() model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu', input_shape=(28, 28, 1))) model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu')) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='Same', activation='relu')) model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='Same', activation='relu')) model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation="softmax"))
Digit Recognizer
4,315,566
all_data = pd.get_dummies(all_data) print(all_data.shape)<split>
optimizer = Adam() model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
4,315,566
train = all_data[:ntrain] test = all_data[ntrain:]<import_modules>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
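Note: ntrain in the cell above is never defined anywhere in this excerpt; in the usual form of this notebook it is captured before the train/test frames are concatenated into all_data. A hypothetical reconstruction of the missing cell, not the notebook's actual code:
ntrain = train.shape[0]  # assumed: row count of the training frame, taken before concatenation
all_data = pd.concat((train, test)).reset_index(drop=True)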
4,315,566
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.kernel_ridge import KernelRidge from sklearn.pipeline import make_pipeline from sklearn.preprocessing import RobustScaler from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone from sklearn.model_selection import KFold, cross_val_score from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split import xgboost as xgb import lightgbm as lgb<compute_train_metric>
epochs = 30 batch_size = 86 history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, validation_data=(X_val, Y_val), verbose=2, steps_per_epoch=X_train.shape[0] // batch_size)
Digit Recognizer
4,315,566
<choose_model_class><EOS>
predictions = model.predict_classes(test, verbose=0) submissions = pd.DataFrame({"ImageId": list(range(1, len(predictions)+1)), "Label": predictions}) submissions.to_csv("vvcp2.csv", index=False, header=True)
Digit Recognizer
1,562,612
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
%matplotlib inline
Digit Recognizer
1,562,612
ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))<choose_model_class>
%time dfLabel = pd.read_csv('../input/digit-recognizer/train.csv')
Digit Recognizer
1,562,612
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)<choose_model_class>
%time dfPredict = pd.read_csv('../input/digit-recognizer/test.csv')
Digit Recognizer
1,562,612
GBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=4, max_features='sqrt', min_samples_leaf=15, min_samples_split=10, loss='huber', random_state=5)<choose_model_class>
dfTmp = dfLabel.copy(deep=True) tmpLabel = dfTmp['label'] label = to_categorical(tmpLabel, num_classes=10) del dfTmp['label'] dfTmp = dfTmp/255 labeledImage = dfTmp.values.reshape(-1, 28, 28, 1) assert labeledImage.shape == (dfTmp.shape[0], 28, 28, 1), "The tensor shape {} is not equal to expected tensor size {}".format(labeledImage.shape, (dfTmp.shape[0], 28, 28, 1)) assert len(label) == dfTmp.shape[0], "The size of the labels {} is not equal to the labeled image size {}".format(len(label), dfTmp.shape[0])
Digit Recognizer
1,562,612
model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468, learning_rate=0.05, max_depth=3, min_child_weight=1.7817, n_estimators=2200, reg_alpha=0.4640, reg_lambda=0.8571, subsample=0.5213, silent=1, random_state=7, nthread=-1)<choose_model_class>
random_state = 42 X_train, X_valid, y_train, y_valid = train_test_split(labeledImage, label, test_size=0.1, random_state=random_state, stratify=label)
Digit Recognizer
1,562,612
model_lgb = lgb.LGBMRegressor(objective='regression', num_leaves=5, learning_rate=0.05, n_estimators=720, max_bin=55, bagging_fraction=0.8, bagging_freq=5, feature_fraction=0.2319, feature_fraction_seed=9, bagging_seed=9, min_data_in_leaf=6, min_sum_hessian_in_leaf=11)<compute_test_metric>
model = models.Sequential() model.add(layers.Conv2D(filters=32, kernel_size=(5, 5), padding='Same', activation='relu', input_shape=(28, 28, 1))) model.add(layers.Conv2D(filters=32, kernel_size=(5, 5), padding='Same', activation='relu')) model.add(layers.MaxPool2D(pool_size=(2, 2))) model.add(layers.Dropout(0.25)) model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu')) model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu')) model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))) model.add(layers.Dropout(0.25)) model.add(layers.Flatten()) model.add(layers.Dense(256, activation="relu")) model.add(layers.Dropout(0.5)) model.add(layers.Dense(10, activation="softmax"))
Digit Recognizer
1,562,612
score = rmsle_cv(lasso) print("Lasso score: {:.4f} ({:.4f})".format(score.mean(), score.std()))<compute_test_metric>
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
Digit Recognizer
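This and the following cells score models with rmsle_cv and also reference lasso, neither of which is defined anywhere in this excerpt. A minimal sketch consistent with the sklearn imports shown earlier (KFold, cross_val_score, make_pipeline, RobustScaler, Lasso) — an assumption about the missing cells, not a verbatim reconstruction:
n_folds = 5
def rmsle_cv(model):
    kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(train.values)
    # cross-validated RMSE on the log-transformed SalePrice target prepared above
    return np.sqrt(-cross_val_score(model, train.values, y_train, scoring="neg_mean_squared_error", cv=kf))
lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=1))  # hypothetical; alpha is illustrative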
1,562,612
score = rmsle_cv(ENet) print("ElasticNet score: {:.4f} ({:.4f})".format(score.mean(), score.std()))<compute_test_metric>
learning_rate_reduction = keras.callbacks.ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
1,562,612
score = rmsle_cv(KRR) print("Kernel Ridge score: {:.4f} ({:.4f})".format(score.mean(), score.std()))<compute_test_metric>
epochs = 30 batch_size = 512
Digit Recognizer
1,562,612
score = rmsle_cv(GBoost) print("Gradient Boosting score: {:.4f} ({:.4f})".format(score.mean(), score.std()))<compute_test_metric>
datagen = keras.preprocessing.image.ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
1,562,612
score = rmsle_cv(model_xgb) print("Xgboost score: {:.4f} ({:.4f})".format(score.mean(), score.std()))<compute_test_metric>
history = model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size), epochs=epochs, validation_data=(X_valid, y_valid), verbose=2, steps_per_epoch=X_train.shape[0] // batch_size, callbacks=[learning_rate_reduction])
Digit Recognizer
1,562,612
score = rmsle_cv(model_lgb) print("LGBM score: {:.4f} ({:.4f})".format(score.mean(), score.std()))<predict_on_test>
test_loss, test_acc = model.evaluate(X_valid, y_valid) print("The test accuracy is {}".format(test_acc))
Digit Recognizer
1,562,612
class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin): def __init__(self, models): self.models = models def fit(self, X, y): self.models_ = [clone(x) for x in self.models] for model in self.models_: model.fit(X, y) return self def predict(self, X): predictions = np.column_stack([model.predict(X) for model in self.models_]) return np.mean(predictions, axis=1)<compute_train_metric>
results = model.predict(testImage) results = np.argmax(results, axis=1) results = pd.Series(results, name="Label")
Digit Recognizer
1,562,612
averaged_models = AveragingModels(models=(ENet, GBoost, lasso, model_xgb, model_lgb)) score = rmsle_cv(averaged_models) print("Averaged base models score: {:.4f} ({:.4f})".format(score.mean(), score.std()))<compute_test_metric>
submission_result = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1) submission_result.to_csv("result.csv", index=False)
Digit Recognizer
6,066,161
def rmsle(y, y_pred): return np.sqrt(mean_squared_error(y, y_pred))<predict_on_test>
import time import warnings import numpy as np import pandas as pd
Digit Recognizer
6,066,161
averaged_models.fit(train.values, y_train) av_train_pred = averaged_models.predict(train.values) print(rmsle(y_train, av_train_pred))<predict_on_test>
sns.set_style("whitegrid") warnings.filterwarnings('ignore' )
Digit Recognizer
6,066,161
av_test_pred = np.expm1(averaged_models.predict(test.values))<load_from_csv>
train=pd.read_csv(".. /input/digit-recognizer/train.csv") submit=pd.read_csv(".. /input/digit-recognizer/test.csv") print(typeInfo(train)) print(typeInfo(submit))
Digit Recognizer
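typeInfo, called twice above, is not defined anywhere in this excerpt; presumably it is a small helper that summarizes a frame's dtypes. A hypothetical stand-in, purely illustrative:
def typeInfo(df):
    # assumed helper: counts how many columns there are of each dtype
    return df.dtypes.value_counts()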
6,066,161
sample_submission = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv") sample_submission<save_to_csv>
x_train = train.drop('label', axis=1) y_train = train['label']
Digit Recognizer
6,066,161
sub = pd.DataFrame() sub['Id'] = test_ID sub['SalePrice'] = av_test_pred sub.to_csv('submission.csv', index=False)<load_from_csv>
x = x_train y = y_train x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.15, random_state=0)
Digit Recognizer
6,066,161
X_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id') X_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv', index_col='Id') X_train.dropna(axis=0, subset=['SalePrice'], inplace=True) y_train = X_train.SalePrice X_train.drop(['SalePrice'], axis=1, inplace=True) categorical_cols = [cname for cname in X_train.columns if X_train[cname].nunique() < 20 and X_train[cname].dtype == "object"] numerical_cols = [cname for cname in X_train.columns if X_train[cname].dtype in ['int64', 'float64']] my_cols = categorical_cols + numerical_cols X_train = X_train[my_cols].copy() X_test = X_test[my_cols].copy()<categorify>
x_train = x_train.values.reshape(-1, 28, 28, 1) x_test = x_test.values.reshape(-1, 28, 28, 1) submit = submit.values.reshape(-1, 28, 28, 1)
Digit Recognizer
6,066,161
numerical_transformer = SimpleImputer(strategy='constant') categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))]) preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols)])<compute_train_metric>
model = Sequential() model.add(Conv2D(64, (3, 3), input_shape=(28, 28, 1), padding="SAME")) model.add(BatchNormalization(axis=-1)) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (3, 3), padding="SAME")) model.add(BatchNormalization(axis=-1)) model.add(Activation('relu')) model.add(Conv2D(128, (3, 3), padding="SAME")) model.add(BatchNormalization(axis=-1)) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128, (3, 3), padding="SAME")) model.add(BatchNormalization(axis=-1)) model.add(Activation('relu')) model.add(Conv2D(192, (3, 3), padding="SAME")) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(192, (5, 5), strides=2, padding="SAME")) model.add(Activation('relu')) model.add(Flatten())
Digit Recognizer
6,066,161
def get_score(n_estimators): my_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', XGBRegressor(n_estimators=n_estimators, objective='reg:linear', colsample_bytree=0.3, learning_rate=0.1, max_depth=5, alpha=10))]) scores = -1 * cross_val_score(my_pipeline, X_train, y_train, cv=3, scoring='neg_mean_absolute_error') return scores.mean() results = {} n_estimators = [50, 100, 250, 300, 350, 400] for i in n_estimators: results[i] = get_score(i) %matplotlib inline plt.plot(list(results.keys()), list(results.values())) plt.show()<save_to_csv>
model.add(Dense(256)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dropout(0.3)) model.add(Dense(10)) model.add(Activation('softmax'))
Digit Recognizer
6,066,161
clf = Pipeline(steps=[('preprocessor', preprocessor), ('model', XGBRegressor(objective='reg:linear', colsample_bytree=0.3, learning_rate=0.1, max_depth=5, alpha=10, n_estimators=250))]) clf.fit(X_train, y_train) preds_test = clf.predict(X_test) output = pd.DataFrame({'Id': X_test.index, 'SalePrice': preds_test}) output.to_csv('submission.csv', index=False)<set_options>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001) best_model = ModelCheckpoint('mnist_weights.h5', monitor='val_acc', verbose=1, save_best_only=True, mode='max') early_stopping = EarlyStopping(monitor='val_loss', min_delta=1e-10, patience=10, restore_best_weights=True)
Digit Recognizer
6,066,161
warnings.filterwarnings('ignore')<load_from_csv>
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
Digit Recognizer
6,066,161
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id') df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')<load_from_csv>
aug = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, rotation_range=10, zoom_range=0., width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) aug.fit(x_train)
Digit Recognizer
6,066,161
def load_data(): data_dir = Path('../input/house-prices-advanced-regression-techniques/') df_train = pd.read_csv(data_dir / 'train.csv', index_col='Id') df_test = pd.read_csv(data_dir / 'test.csv', index_col='Id') df = pd.concat([df_train, df_test]) df = clean(df) df = encode(df) df = impute(df) df_train = df.loc[df_train.index, :] df_test = df.loc[df_test.index, :] return df_train, df_test<feature_engineering>
h = model.fit_generator(aug.flow(x_train, y_train, batch_size=64), validation_data=(x_test, y_test), steps_per_epoch=len(x_train)//64, epochs=20, verbose=1, callbacks=[learning_rate_reduction, best_model, early_stopping])
Digit Recognizer
6,066,161
def clean(df): df['Exterior2nd'] = df['Exterior2nd'].replace({'Brk Cmn': 'BrkComm'}) df['GarageYrBlt'] = df['GarageYrBlt'].where(df.GarageYrBlt <= 2010, df.YearBuilt) df.rename(columns={'1stFlrSF': 'FirstFlrSF', '2ndFlrSF': 'SecondFlrSF', '3SsnPorch': 'Threeseasonporch'}, inplace=True) return df<define_variables>
y_pred = model.predict(x_test) y_pred = np.argmax(y_pred, axis=1) accuracy_score(y_test, y_pred)
Digit Recognizer
6,066,161
features_nom = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LandContour', 'LotConfig', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation', 'Heating', 'CentralAir', 'GarageType', 'MiscFeature', 'SaleType', 'SaleCondition'] five_levels = ['Po', 'Fa', 'TA', 'Gd', 'Ex'] ten_levels = list(range(10)) ordered_levels = {'OverallQual': ten_levels, 'OverallCond': ten_levels, 'ExterQual': five_levels, 'ExterCond': five_levels, 'BsmtQual': five_levels, 'BsmtCond': five_levels, 'HeatingQC': five_levels, 'KitchenQual': five_levels, 'FireplaceQu': five_levels, 'GarageQual': five_levels, 'GarageCond': five_levels, 'PoolQC': five_levels, 'LotShape': ['Reg', 'IR1', 'IR2', 'IR3'], 'LandSlope': ['Sev', 'Mod', 'Gtl'], 'BsmtExposure': ['No', 'Mn', 'Av', 'Gd'], 'BsmtFinType1': ['Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'], 'BsmtFinType2': ['Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'], 'Functional': ['Sal', 'Sev', 'Maj1', 'Maj2', 'Mod', 'Min2', 'Min1', 'Typ'], 'GarageFinish': ['Unf', 'RFn', 'Fin'], 'PavedDrive': ['N', 'P', 'Y'], 'Utilities': ['NoSeWa', 'NoSewr', 'AllPub'], 'CentralAir': ['N', 'Y'], 'Electrical': ['Mix', 'FuseP', 'FuseF', 'FuseA', 'SBrkr'], 'Fence': ['MnWw', 'GdWo', 'MnPrv', 'GdPrv']} ordered_levels = {key: ['None'] + value for key, value in ordered_levels.items()} def encode(df): for name in features_nom: df[name] = df[name].astype('category') if 'None' not in df[name].cat.categories: df[name].cat.add_categories('None', inplace=True) for name, levels in ordered_levels.items(): df[name] = df[name].astype(CategoricalDtype(levels, ordered=True)) return df<categorify>
result = model.predict(submit) results = np.argmax(result,axis = 1) results
Digit Recognizer
6,066,161
def impute(df): for name in df.select_dtypes('number'): df[name] = df[name].fillna(0) for name in df.select_dtypes('category'): df[name] = df[name].fillna('None') return df<compute_train_metric>
Label = pd.Series(results, name='Label') ImageId = pd.Series(range(1, 28001), name='ImageId') submission = pd.concat([ImageId, Label], axis=1) submission.to_csv('submission.csv', index=False)
Digit Recognizer
7,960,918
def score_dataset(X,y,model = XGBRegressor()): for colname in X.select_dtypes(['category']): X[colname] = X[colname].cat.codes log_y = np.log(y) score = cross_val_score(model, X, log_y, cv=5, scoring = 'neg_mean_squared_error') score = -1 * score.mean() score = np.sqrt(score) return score <create_dataframe>
from keras.layers import * from keras.models import Model
Digit Recognizer
7,960,918
X = df_train.copy() y = X.pop('SalePrice') baseline_score = score_dataset(X, y) print(baseline_score)<statistical_test>
def normalize(x): return x /(K.sqrt(K.mean(K.square(x)))+ K.epsilon()) def deprocess_image(x): x -= x.mean() x /=(x.std() + K.epsilon()) x *= 0.25 x += 0.5 x = np.clip(x, 0, 1) x *= 255 if K.image_data_format() == 'channels_first': x = x.transpose(( 1, 2, 0)) x = np.clip(x, 0, 255 ).astype('uint8') return x def process_image(x, former): if K.image_data_format() == 'channels_first': x = x.transpose(( 2, 0, 1)) return(x / 255 - 0.5)* 4 * former.std() + former.mean() def visualize_layer(model, layer_name, step=1., epochs=15, upscaling_steps=9, upscaling_factor=1.2, output_dim=(412, 412), filter_range=(0, None)) : def _generate_filter_image(input_img, layer_output, filter_index): s_time = time.time() if K.image_data_format() == 'channels_first': loss = K.mean(layer_output[:, filter_index, :, :]) else: loss = K.mean(layer_output[:, :, :, filter_index]) grads = K.gradients(loss, input_img)[0] grads = normalize(grads) iterate = K.function([input_img], [loss, grads]) intermediate_dim = tuple( int(x /(upscaling_factor ** upscaling_steps)) for x in output_dim) if K.image_data_format() == 'channels_first': input_img_data = np.random.random( (1, 3, intermediate_dim[0], intermediate_dim[1])) else: input_img_data = np.random.random( (1, intermediate_dim[0], intermediate_dim[1], 3)) input_img_data =(input_img_data - 0.5)* 20 + 128 for up in reversed(range(upscaling_steps)) : for _ in range(epochs): loss_value, grads_value = iterate([input_img_data]) input_img_data += grads_value * step if loss_value <= K.epsilon() : return None intermediate_dim = tuple( int(x /(upscaling_factor ** up)) for x in output_dim) img = deprocess_image(input_img_data[0]) img = np.array(pil_image.fromarray(img ).resize(intermediate_dim, pil_image.BICUBIC)) input_img_data = np.expand_dims( process_image(img, input_img_data[0]), 0) img = deprocess_image(input_img_data[0]) e_time = time.time() print('Costs of filter {:3}: {:5.0f}({:4.2f}s)'.format(filter_index, loss_value, e_time - s_time)) return img, loss_value def _draw_filters(filters, n=None): if n is None: n = int(np.floor(np.sqrt(len(filters)))) filters.sort(key=lambda x: x[1], reverse=True) filters = filters[:n * n] MARGIN = 5 width = n * output_dim[0] +(n - 1)* MARGIN height = n * output_dim[1] +(n - 1)* MARGIN stitched_filters = np.zeros(( width, height, 3), dtype='uint8') for i in range(n): for j in range(n): img, _ = filters[i * n + j] width_margin =(output_dim[0] + MARGIN)* i height_margin =(output_dim[1] + MARGIN)* j stitched_filters[ width_margin: width_margin + output_dim[0], height_margin: height_margin + output_dim[1], :] = img save_img('vgg_{0:}_{1:}x{1:}.png'.format(layer_name, n), stitched_filters) assert len(model.inputs)== 1 input_img = model.inputs[0] layer_dict = dict([(layer.name, layer)for layer in model.layers[1:]]) output_layer = layer_dict[layer_name] assert isinstance(output_layer, layers.Conv2D) filter_lower = filter_range[0] filter_upper =(filter_range[1] if filter_range[1] is not None else len(output_layer.get_weights() [1])) assert(filter_lower >= 0 and filter_upper <= len(output_layer.get_weights() [1]) and filter_upper > filter_lower) print('Compute filters {:} to {:}'.format(filter_lower, filter_upper)) processed_filters = [] for f in range(filter_lower, filter_upper): img_loss = _generate_filter_image(input_img, output_layer.output, f) if img_loss is not None: processed_filters.append(img_loss) print('{} filter processed.'.format(len(processed_filters))) _draw_filters(processed_filters)
Digit Recognizer
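visualize_layer above is defined but never called in this excerpt; the hard-coded 'vgg_...' output filename and the 3-channel random input suggest it was adapted from the Keras VGG16 filter-visualization example. A hedged usage sketch under that assumption (the VGG16 model and layer name are illustrative, not from the original notebook):
from keras.applications import vgg16
vgg = vgg16.VGG16(weights='imagenet', include_top=False)  # assumed target network; the function requires a Conv2D layer
visualize_layer(vgg, 'block5_conv1', filter_range=(0, 16))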
7,960,918
def make_mi_scores(X, y): X = X.copy() for colname in X.select_dtypes(['object', 'category']): X[colname], _ = X[colname].factorize() discrete_features = [pd.api.types.is_integer_dtype(t) for t in X.dtypes] mi_scores = mutual_info_regression(X, y, discrete_features=discrete_features, random_state=0) mi_scores = pd.Series(mi_scores, name='MI Scores', index=X.columns) mi_scores = mi_scores.sort_values(ascending=False) return mi_scores<prepare_x_and_y>
train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
Digit Recognizer
7,960,918
X = df_train.copy() y = X.pop('SalePrice') mi_scores = make_mi_scores(X,y) mi_scores<drop_column>
y=train_data["label"] X=train_data.copy() del X["label"]
Digit Recognizer
7,960,918
def drop_uninformative(df, mi_scores): return df.loc[:, mi_scores > 0.0]<create_dataframe>
SIZE=32
Digit Recognizer
7,960,918
X = df_train.copy() y = X.pop('SalePrice') X = drop_uninformative(X, mi_scores) score_dataset(X, y)<categorify>
def reshape32(img): img = img.reshape((28, 28)) img = np.pad(img, ((SIZE-28)//2, (SIZE-28)//2)) img = img.reshape((SIZE, SIZE, 1)) return img
Digit Recognizer
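A quick sanity check of the padding arithmetic in reshape32 above (this check is mine, not the notebook's): with SIZE=32, np.pad adds (32-28)//2 = 2 zero rows and columns on each side of the 28x28 digit, taking a flat 784-pixel vector to a 32x32x1 tensor:
assert reshape32(np.zeros(784, dtype=np.uint8)).shape == (32, 32, 1)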
7,960,918
def label_encode(df): X = df.copy() for colname in X.select_dtypes(['category']): X[colname] = X[colname].cat.codes return X <feature_engineering>
new_X=[] for i,img in enumerate(X.values): new_X.append(reshape32(img)) new_X=np.array(new_X) new_X[new_X<50]=0
Digit Recognizer
7,960,918
def mathematical_transforms(df): X = pd.DataFrame() X['LivLotRatio'] = df.GrLivArea / df.LotArea X['Spaciousness'] = (df.FirstFlrSF + df.SecondFlrSF) / df.TotRmsAbvGrd X['Feet'] = np.sqrt(df.GrLivArea) X['TotalSF'] = df.TotalBsmtSF + df.FirstFlrSF + df.SecondFlrSF X['TotalBathrooms'] = df.FullBath + 0.5 * df.HalfBath + df.BsmtFullBath + 0.5 * df.BsmtHalfBath X['TotalPorchSF'] = df.OpenPorchSF + df.Threeseasonporch + df.EnclosedPorch + df.ScreenPorch + df.WoodDeckSF return X def interactions(df): X = pd.get_dummies(df.BldgType, prefix='Bldg') X = X.mul(df.GrLivArea, axis=0) return X def counts(df): X = pd.DataFrame() X['PorchTypes'] = df[['WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'Threeseasonporch', 'ScreenPorch']].gt(0.0).sum(axis=1) return X def group_transforms(df): X = pd.DataFrame() X['MedNhbdArea'] = df.groupby('Neighborhood')['GrLivArea'].transform('median') return X def break_down(df): X = pd.DataFrame() X['MSClass'] = df.MSSubClass.str.split('_', n=1, expand=True)[0] return X<create_dataframe>
train_X, val_X, train_y, val_y = train_test_split(new_X/255, y, test_size=0.1)
Digit Recognizer
7,960,918
def apply_pca(X, standardize=True): if standardize: X = (X - X.mean(axis=0)) / X.std(axis=0) pca = PCA() X_pca = pca.fit_transform(X) component_names = [f'PC{i+1}' for i in range(X_pca.shape[1])] X_pca = pd.DataFrame(X_pca, columns=component_names) loadings = pd.DataFrame(pca.components_.T, columns=component_names, index=X.columns) return pca, X_pca, loadings def pca_inspired(df): X = pd.DataFrame() X['Feature1'] = df.GrLivArea + df.TotalBsmtSF X['Feature2'] = df.YearRemodAdd * df.TotalBsmtSF return X def pca_components(df, features): X = df.loc[:, features] _, X_pca, _ = apply_pca(X) return X_pca pca_features = ['GarageArea', 'YearRemodAdd', 'TotalBsmtSF', 'GrLivArea']<create_dataframe>
inp=Input(shape=(32,32,1)) model = Conv2D(filters=32, kernel_size=(2, 2), padding='SAME', activation='relu',name="conv32" )(inp) model = Conv2D(filters=32, kernel_size=(2, 2), padding='SAME', activation='relu' )(model) model = Conv2D(filters=32, kernel_size=(2, 2), padding='SAME', activation='relu' )(model) model = BatchNormalization(momentum=0.15 )(model) model = MaxPool2D(pool_size=(2, 2))(model) model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu' )(model) model = Dropout(rate=0.3 )(model) model = Conv2D(filters=64, kernel_size=(5, 5), padding='SAME', activation='relu',name="conv64" )(model) model = Conv2D(filters=64, kernel_size=(5, 5), padding='SAME', activation='relu' )(model) model = Conv2D(filters=64, kernel_size=(5, 5), padding='SAME', activation='relu' )(model) model = BatchNormalization(momentum=0.15 )(model) model = MaxPool2D(pool_size=(2, 2))(model) model = Conv2D(filters=64, kernel_size=(5, 5), padding='SAME', activation='relu' )(model) model = BatchNormalization(momentum=0.15 )(model) model = Dropout(rate=0.2 )(model) model = Conv2D(filters=128, kernel_size=(7, 7), padding='SAME', activation='relu',name="conv128" )(model) model = Conv2D(filters=128, kernel_size=(7, 7), padding='SAME', activation='relu' )(model) model = Conv2D(filters=128, kernel_size=(7, 7), padding='SAME', activation='relu' )(model) model = BatchNormalization(momentum=0.15 )(model) model = MaxPool2D(pool_size=(2, 2))(model) model = Conv2D(filters=128, kernel_size=(7, 7), padding='SAME', activation='relu' )(model) model = BatchNormalization(momentum=0.15 )(model) model = Dropout(rate=0.2 )(model) model = Conv2D(filters=256, kernel_size=(15, 15), padding='SAME', activation='relu',name="conv256" )(model) model = Conv2D(filters=256, kernel_size=(15, 15), padding='SAME', activation='relu' )(model) model = Conv2D(filters=256, kernel_size=(15, 15), padding='SAME', activation='relu' )(model) model = BatchNormalization(momentum=0.15 )(model) model = MaxPool2D(pool_size=(2, 2))(model) model = Dropout(rate=0.2 )(model) my_x=Flatten()(model) my_x=Dense(256,activation='relu',kernel_initializer='he_uniform',bias_initializer='zeros' )(my_x) my_x=Dropout(0.2 )(my_x) my_x=Dense(128,activation='relu',kernel_initializer='he_uniform',bias_initializer='zeros' )(my_x) my_x=Dropout(0.2 )(my_x) my_x=Dense(64,activation='relu',name='my' )(my_x) my_x=Dropout(0.2 )(my_x) my_x=Dense(32,activation='relu',name='my2' )(my_x) my_x=Dropout(0.2 )(my_x) preds=Dense(10,activation='softmax',kernel_initializer='he_uniform',bias_initializer='zeros',name='output' )(my_x) my_model=Model(inputs=inp,outputs=preds)
Digit Recognizer
7,960,918
def indicate_outliers(df): X_new = pd.DataFrame() X_new['Outlier'] = (df.Neighborhood == 'Edwards') & (df.SaleCondition == 'Partial') return X_new<categorify>
my_model.compile(optimizer=Adadelta(), loss='categorical_crossentropy', metrics=['accuracy', 'mse'])
Digit Recognizer
7,960,918
class CrossFoldEncoder: def __init__(self, encoder, **kwargs): self.encoder_ = encoder self.kwargs_ = kwargs self.cv_ = KFold(n_splits=5) def fit_transform(self, X, y, cols): self.fitted_encoders_ = [] self.cols_ = cols X_encoded = [] for idx_encode, idx_train in self.cv_.split(X): fitted_encoder = self.encoder_(cols=cols, **self.kwargs_) fitted_encoder.fit(X.iloc[idx_encode, :], y.iloc[idx_encode]) X_encoded.append(fitted_encoder.transform(X.iloc[idx_train, :])[cols]) self.fitted_encoders_.append(fitted_encoder) X_encoded = pd.concat(X_encoded) X_encoded.columns = [name + '_encoded' for name in X_encoded.columns] return X_encoded def transform(self, X): X_encoded_list = [] for fitted_encoder in self.fitted_encoders_: X_encoded = fitted_encoder.transform(X) X_encoded_list.append(X_encoded[self.cols_]) X_encoded = reduce(lambda x, y: x.add(y, fill_value=0), X_encoded_list) / len(X_encoded_list) X_encoded.columns = [name + '_encoded' for name in X_encoded.columns] return X_encoded<merge>
rlrp = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2, min_delta=1E-30, verbose=1) history = my_model.fit(x=train_X, y=pd.get_dummies(train_y), validation_data=(val_X, pd.get_dummies(val_y)), epochs=100, batch_size=1024, callbacks=[rlrp])
Digit Recognizer
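CrossFoldEncoder above is instantiated with MEstimateEncoder in the create_features cell that follows; that class comes from the category_encoders package, whose import is not shown in this excerpt. The assumed import and the call as used later:
from category_encoders import MEstimateEncoder  # assumed import, not shown in the excerpt
encoder = CrossFoldEncoder(MEstimateEncoder, m=1)
X_encoded = encoder.fit_transform(X, y, cols=['MSSubClass'])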
7,960,918
def create_features(df, df_test=None): X = df.copy() y = X.pop('SalePrice') mi_scores = make_mi_scores(X, y) if df_test is not None: X_test = df_test.copy() X_test.pop('SalePrice') X = pd.concat([X, X_test]) X = drop_uninformative(X, mi_scores) X = X.join(mathematical_transforms(X)) X = X.join(interactions(X)) X = X.join(counts(X)) X = X.join(break_down(X)) X = X.join(group_transforms(X)) X = X.join(cluster_labels(X, cluster_features, n_clusters=20)) X = X.join(pca_inspired(X)) X = label_encode(X) if df_test is not None: X_test = X.loc[df_test.index, :] X.drop(df_test.index, inplace=True) encoder = CrossFoldEncoder(MEstimateEncoder, m=1) X = X.join(encoder.fit_transform(X, y, cols=['MSSubClass'])) if df_test is not None: X_test = X_test.join(encoder.transform(X_test)) if df_test is not None: return X, X_test else: return X df_train, df_test = load_data() X_train = create_features(df_train) y_train = df_train.loc[:, 'SalePrice'] score_dataset(X_train, y_train)<prepare_x_and_y>
for layer in my_model.layers: print(layer.name) if 'conv' not in layer.name: continue filters, biases = layer.get_weights() f_min, f_max = filters.min(), filters.max() filters = (filters - f_min) / (f_max - f_min) n_filters, ix = 6, 1 for i in range(n_filters): f = filters[:, :, :, i] for j in range(1): ax = plt.subplot(n_filters, 3, ix) ax.set_xticks([]) ax.set_yticks([]) plt.imshow(f[:, :, j], cmap='gray') ix += 1 plt.show()
Digit Recognizer
7,960,918
X_train = create_features(df_train) y_train = df_train.loc[:, 'SalePrice'] xgb_params = dict(max_depth=6, learning_rate=0.01, n_estimators=1000, min_child_weight=1, colsample_bytree=0.7, subsample=0.7, reg_alpha=0.5, reg_lambda=1, num_parallel_tree=1) xgb = XGBRegressor(**xgb_params) score_dataset(X_train, y_train, xgb)<find_best_params>
for i in range(len(val_X)): if np.argmax(my_model.predict(val_X[i].reshape(1, 32, 32, 1)), axis=1) != val_y.values[i]: plt.imshow(val_X[i].reshape(32, 32)) plt.show() print("Label : ", val_y.values[i]) print("Prediction : ", np.argmax(my_model.predict(val_X[i].reshape(1, 32, 32, 1)), axis=1))
Digit Recognizer
7,960,918
def objective(trial): xgb_params = dict( max_depth = trial.suggest_int('max_depth',2,10), learning_rate = trial.suggest_float('learning_rate',1e-4, 1e-1, log=True), n_estimators = trial.suggest_int('n_estimators',1000,8000), min_child_weight = trial.suggest_int('min_child_weight', 1,10), colsample_bytree = trial.suggest_float('colsample_bytree',0.2,1), subsample = trial.suggest_float('subsample',0.2,1), reg_alpha = trial.suggest_float('reg_alpha',1e-4, 1e2, log=True), reg_lambda = trial.suggest_float('reg_lambda', 1e-4, 1e2, log=True)) xgb = XGBRegressor(**xgb_params) return score_dataset(X_train, y_train, xgb) study = optuna.create_study(direction = 'minimize') study.optimize(objective, n_trials = 20) xgb_params = study.best_params <save_to_csv>
test_X=[] for i,img in enumerate(test_data.values): z=reshape32(img) test_X.append(z) test_X=np.array(test_X) test_X[test_X<50]=0 test_X=test_X/255
Digit Recognizer
7,960,918
<load_from_csv><EOS>
sol = np.argmax(my_model.predict(test_X), axis=1) df = pd.DataFrame(sol) df.index += 1 df.to_csv("/kaggle/working/sol_final.csv", index=True, header=["Label"], index_label=["ImageId"])
Digit Recognizer
6,805,765
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering>
%matplotlib inline from keras.layers import (Dense, Flatten, Dropout, Conv2D, MaxPooling2D, Activation, BatchNormalization)
Digit Recognizer
6,805,765
train['SalePrice'] = np.log1p(train['SalePrice'])<sort_values>
config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True session = tf.compat.v1.Session(config=config)
Digit Recognizer
6,805,765
corr["SalePrice"].sort_values(ascending = False )<drop_column>
sample_submission = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv") train = pd.read_csv(".. /input/digit-recognizer/train.csv" )
Digit Recognizer
6,805,765
data = pd.concat([train, test], axis=0, sort=False) data = data.drop(['Id', 'SalePrice'], axis=1) data<sort_values>
X_train = train.loc[:, train.columns != 'label'].values.astype('uint8') y_train = train['label'].values X_train = X_train.reshape((X_train.shape[0], 28, 28))
Digit Recognizer
6,805,765
missing = data.isnull().sum().sort_values(ascending = False) missing<concatenate>
X_test = test.loc[:, test.columns != 'label'].values.astype('uint8') X_test = X_test.reshape((X_test.shape[0], 28, 28))
Digit Recognizer
6,805,765
missingg = missing*100/len(data) missing_data = pd.concat([missing, missingg], axis=1, keys=['missing', 'missing_%']) missing_data<drop_column>
X_train = X_train[:,:,:,None] X_test = X_test[:,:,:,None]
Digit Recognizer
6,805,765
data.drop((missing_data[missing_data['missing'] > 5]).index, axis=1, inplace=True)<categorify>
batch_size = 32 num_samples = X_train.shape[0] num_classes = np.unique(y_train).shape[0] num_epochs = 50 img_rows, img_cols = X_train[0, :, :, 0].shape img_channels = 1 classes = np.unique(y_train)
Digit Recognizer
6,805,765
numeric = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'GarageArea', 'GarageCars'] for feature in numeric: data[feature] = data[feature].fillna(0) categorical = ['Exterior1st', 'Exterior2nd', 'SaleType', 'MSZoning', 'Electrical', 'KitchenQual'] for feature in categorical: data[feature] = data[feature].fillna(data[feature].mode()[0])<data_type_conversions>
y_train = np_utils.to_categorical(y_train, num_classes)
Digit Recognizer
6,805,765
data['Functional'] = data['Functional'].fillna('Typ')<drop_column>
X_train_norm = X_train.astype('float32') X_test_norm = X_test.astype('float32') X_train_norm /= 255 X_test_norm /= 255
Digit Recognizer
6,805,765
data.drop(['Utilities'], axis=1, inplace=True)<sort_values>
model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Digit Recognizer
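A note on the compile step above, which recurs in every model variant in this notebook: pairing binary_crossentropy with a 10-way softmax treats each output unit as an independent binary problem, so the reported 'accuracy' is Keras' binary accuracy and reads much higher than true top-1 accuracy. categorical_crossentropy is the conventional pairing for one-hot 10-class targets:
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])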
6,805,765
numeric_feats = data.dtypes[data.dtypes != 'object'].index skewed_feats = data[numeric_feats].apply(lambda x: x.skew()).sort_values(ascending=False) high_skew = skewed_feats[abs(skewed_feats) > 0.5] high_skew<feature_engineering>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', patience=5, verbose=1, factor=0.2) es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)
Digit Recognizer
6,805,765
for feature in high_skew.index: data[feature] = np.log1p(data[feature])<categorify>
history = model.fit(X_train_norm, y_train, batch_size=batch_size, epochs=num_epochs, validation_split=0.1, shuffle=True, callbacks=[learning_rate_reduction, es])
Digit Recognizer
6,805,765
data = pd.get_dummies(data) data<prepare_x_and_y>
! mkdir newer
Digit Recognizer
6,805,765
y_train = train["SalePrice"] x_train = data[:len(y_train)] x_test = data[len(y_train):]<compute_train_metric>
model.save('newer/simple.h5')
Digit Recognizer
6,805,765
scorer = make_scorer(mean_squared_error, greater_is_better=False) def rmse_CV_train(model): kf = KFold(5, shuffle=True, random_state=42).get_n_splits(x_train.values) rmse = np.sqrt(-cross_val_score(model, x_train, y_train, scoring="neg_mean_squared_error", cv=kf)) return rmse def rmse_CV_test(model): kf = KFold(5, shuffle=True, random_state=42).get_n_splits(train.values) rmse = np.sqrt(-cross_val_score(model, x_test, y_test, scoring="neg_mean_squared_error", cv=kf)) return rmse<train_model>
model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Digit Recognizer
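Two quirks in rmse_CV_train/rmse_CV_test above are worth flagging: KFold(...).get_n_splits() returns the integer 5, so cv=kf is equivalent to cv=5 and the shuffle/random_state settings are silently discarded; passing the KFold object itself would apply them, e.g.:
rmse = np.sqrt(-cross_val_score(model, x_train, y_train, scoring="neg_mean_squared_error", cv=KFold(5, shuffle=True, random_state=42)))
Also, rmse_CV_test references y_test, which is never defined in this excerpt.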
6,805,765
model = XGB.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468, learning_rate=0.05, max_depth=3, min_child_weight=1.7817, n_estimators=2200, reg_alpha=0.4640, reg_lambda=0.8571, subsample=0.5213, random_state=7, nthread=-1) model.fit(x_train, y_train)<predict_on_test>
history1 = model.fit(X_train_norm, y_train, batch_size=batch_size, epochs=num_epochs, validation_split=0.1, shuffle=True, callbacks=[learning_rate_reduction, es])
Digit Recognizer
6,805,765
prediction = np.floor(np.expm1(model.predict(x_test)))<create_dataframe>
model.save('newer/simple_batch.h5')
Digit Recognizer
6,805,765
submission = pd.DataFrame({'Id': test.Id, 'SalePrice': prediction}) submission<save_to_csv>
model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64, kernel_size=(3, 3), activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(128, (3, 3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Digit Recognizer
6,805,765
submission.to_csv('submission.csv', index=False)<load_from_csv>
history2 = model.fit(X_train_norm, y_train, batch_size=batch_size, epochs=num_epochs, validation_split=0.1, shuffle=True, callbacks=[learning_rate_reduction, es])
Digit Recognizer
6,805,765
if tuning or training: train_data = pd.read_csv('/kaggle/input/jane-street-market-prediction/train.csv') train_data.fillna(train_data.mean(), inplace=True) start_date = 86 feature_columns = [col for col in train_data.columns.values if 'feature' in col] corr = abs(train_data[feature_columns].corr()) ordered_feature_columns = [feature_columns.pop(0)] for col in feature_columns: corr_max = corr[col][ordered_feature_columns].idxmax() corr_max_idx = ordered_feature_columns.index(corr_max) ordered_feature_columns.insert(corr_max_idx+1, col) f_mean = train_data[ordered_feature_columns].mean() f_mean.to_csv('f_mean.csv', index=False) with open("ordered_columns.txt", 'wb') as f: pickle.dump(ordered_feature_columns, f) resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4'] train_data = train_data[train_data['date'] >= start_date] X = train_data[ordered_feature_columns].values y = np.stack([(train_data[c] > 0).astype('int') for c in resp_cols]).T max_date = train_data['date'].max() split_date = int(0.7*(max_date-start_date)) train_test_split = train_data['date'][train_data['date'] == split_date].index[0] del train_data X_train = X[:train_test_split, :] X_test = X[train_test_split:, :] y_train = y[:train_test_split, :] y_test = y[train_test_split:, :]<choose_model_class>
model.save('newer/32x64_64x128.h5')
Digit Recognizer
6,805,765
tf.random.set_seed(SEED) def create_model(hp, num_columns, num_labels,encoder): inp = tf.keras.layers.Input(shape =(num_columns, 1)) x1 = encoder(inp) x = tf.keras.layers.BatchNormalization()(inp) x = tf.keras.layers.Conv1D(filters=8, kernel_size=hp.Int('kernel_size',5,10,step=5), strides=1, activation='relu' )(x) x = tf.keras.layers.MaxPooling1D(pool_size=2 )(x) x = tf.keras.layers.Flatten()(x) x = tf.keras.layers.Concatenate()([x,x1]) for i in range(hp.Int('num_layers',4,6)) : x = tf.keras.layers.Dense(hp.Int(f'num_units_{i}',256,512))(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation(tf.keras.activations.swish )(x) x = tf.keras.layers.Dropout(hp.Float(f'dropout_{i}',0,0.5))(x) x = tf.keras.layers.Dense(num_labels )(x) out = tf.keras.layers.Activation('sigmoid' )(x) model = tf.keras.models.Model(inputs = inp, outputs = out) model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = hp.Float('lr',0.00001,0.1,default=0.001)) , loss = tf.keras.losses.BinaryCrossentropy(label_smoothing = 0.01), metrics = tf.keras.metrics.AUC(name = 'AUC'), ) return model def create_autoencoder(input_dim,output_dim): i = tf.keras.layers.Input(input_dim) encoded = tf.keras.layers.BatchNormalization()(i) encoded = tf.keras.layers.GaussianNoise(0.05 )(encoded) encoded = tf.keras.layers.Dense(64,activation='relu' )(encoded) decoded = tf.keras.layers.Dropout(0.2 )(encoded) decoded = tf.keras.layers.Dense(input_dim,name='decoded' )(decoded) x = tf.keras.layers.Dense(32,activation='relu' )(decoded) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Dropout(0.2 )(x) x = tf.keras.layers.Dense(output_dim,activation='sigmoid',name='label_output' )(x) encoder = Model(inputs=i,outputs=encoded) autoencoder = Model(inputs=i,outputs=[decoded,x]) autoencoder.compile(optimizer=Adam(0.001),loss={'decoded':'mse','label_output':'binary_crossentropy'}) return autoencoder, encoder<categorify>
model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, (5, 5), activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Digit Recognizer
6,805,765
autoencoder, encoder = create_autoencoder(130,5) if training: autoencoder.fit(X,(X,y), epochs=1000, batch_size=4096, validation_split=0.1, callbacks=[EarlyStopping('val_loss',patience=10,restore_best_weights=True)]) encoder.save_weights('JS_CNN_encoder.hdf5') else: encoder.load_weights('/kaggle/input/jscnn/JS_CNN_encoder_seed_111.hdf5') encoder.trainable = False<train_model>
history3 = model.fit(X_train_norm, y_train, batch_size=batch_size, epochs=num_epochs, validation_split=0.1, shuffle=True, callbacks=[learning_rate_reduction, es])
Digit Recognizer
6,805,765
if tuning: model_fn = lambda hp: create_model(hp, X_train.shape[-1], y_train.shape[-1], encoder) tuner = kt.tuners.bayesian.BayesianOptimization(hypermodel=model_fn, objective=kt.Objective('val_AUC', direction='max'), num_initial_points=4, max_trials=20) tuner.search(X_train, y_train, batch_size=4096, epochs=20, validation_data=(X_test, y_test), callbacks=[EarlyStopping('val_AUC', mode='max', patience=5)]) hp = tuner.get_best_hyperparameters(1)[0] pd.to_pickle(hp, 'best_hp_cnn_day_86_encoder_seed_111.pkl')<train_model>
model.save('newer/32x64x64.h5')
Digit Recognizer
6,805,765
if training: hp = pd.read_pickle('best_hp_cnn_day_86_encoder_seed_111.pkl') model_fn = lambda hp: create_model(hp, X_train.shape[-1], y_train.shape[-1], encoder) model = model_fn(hp) model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=4096, callbacks=[EarlyStopping('val_AUC', mode='max', patience=10, restore_best_weights=True)]) model.save_weights('JS_CNN_day_86_encoder_seed_111.hdf5')<load_from_csv>
model = Sequential() model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(128, (3, 3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Digit Recognizer
6,805,765
if not training or tuning: model_fn = lambda hp: create_model(hp,130,5,encoder) hp = pd.read_pickle('/kaggle/input/jscnn/best_hp_cnn_day_86_encoder_seed_111.pkl') model = model_fn(hp) model.load_weights('/kaggle/input/jscnn/JS_CNN_day_86_encoder_seed_111.hdf5') samples_mean = pd.read_csv('/kaggle/input/jscnn/f_mean.csv') env = janestreet.make_env() iter_test = env.iter_test() for(test_df, sample_prediction_df)in iter_test: weight = test_df.weight.iloc[0] with open("/kaggle/input/jscnn/ordered_columns.txt", 'rb')as f: ordered_cols = pickle.load(f) test_df = test_df[ordered_cols] X_test = test_df.values[0] if weight==0: sample_prediction_df.action = 0 else: for index, x in np.ndenumerate(X_test): idx=index[0] if np.isnan(x): X_test[idx] = samples_mean.iloc[idx] X=X_test.reshape(( 1,-1)) prediction = np.median(model(X, training=False ).numpy() [0]) sample_prediction_df.action = int(round(prediction)) env.predict(sample_prediction_df) <import_modules>
history4 = model.fit(X_train_norm, y_train, batch_size=batch_size, epochs=num_epochs, validation_split=0.1, shuffle=True, callbacks=[learning_rate_reduction, es])
Digit Recognizer
6,805,765
from tensorflow.keras.layers import Input, Dense, BatchNormalization, Dropout, Concatenate, Lambda, GaussianNoise, Activation from tensorflow.keras.models import Model, Sequential from tensorflow.keras.losses import BinaryCrossentropy from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.layers.experimental.preprocessing import Normalization import tensorflow as tf import numpy as np import pandas as pd from sklearn.model_selection import GroupKFold import tensorflow_addons as tfa from tqdm import tqdm from random import choices import kerastuner as kt<import_modules>
model.save('newer/64x128.h5')
Digit Recognizer
6,805,765
class PurgedGroupTimeSeriesSplit(_BaseKFold): @_deprecate_positional_args def __init__(self, n_splits=5, *, max_train_group_size=np.inf, max_test_group_size=np.inf, group_gap=None, verbose=False ): super().__init__(n_splits, shuffle=False, random_state=None) self.max_train_group_size = max_train_group_size self.group_gap = group_gap self.max_test_group_size = max_test_group_size self.verbose = verbose def split(self, X, y=None, groups=None): if groups is None: raise ValueError( "The 'groups' parameter should not be None") X, y, groups = indexable(X, y, groups) n_samples = _num_samples(X) n_splits = self.n_splits group_gap = self.group_gap max_test_group_size = self.max_test_group_size max_train_group_size = self.max_train_group_size n_folds = n_splits + 1 group_dict = {} u, ind = np.unique(groups, return_index=True) unique_groups = u[np.argsort(ind)] n_samples = _num_samples(X) n_groups = _num_samples(unique_groups) for idx in np.arange(n_samples): if(groups[idx] in group_dict): group_dict[groups[idx]].append(idx) else: group_dict[groups[idx]] = [idx] if n_folds > n_groups: raise ValueError( ("Cannot have number of folds={0} greater than" " the number of groups={1}" ).format(n_folds, n_groups)) group_test_size = min(n_groups // n_folds, max_test_group_size) group_test_starts = range(n_groups - n_splits * group_test_size, n_groups, group_test_size) for group_test_start in group_test_starts: train_array = [] test_array = [] group_st = max(0, group_test_start - group_gap - max_train_group_size) for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]: train_array_tmp = group_dict[train_group_idx] train_array = np.sort(np.unique( np.concatenate(( train_array, train_array_tmp)) , axis=None), axis=None) train_end = train_array.size for test_group_idx in unique_groups[group_test_start: group_test_start + group_test_size]: test_array_tmp = group_dict[test_group_idx] test_array = np.sort(np.unique( np.concatenate(( test_array, test_array_tmp)) , axis=None), axis=None) test_array = test_array[group_gap:] if self.verbose > 0: pass yield [int(i)for i in train_array], [int(i)for i in test_array]<split>
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# categorical_crossentropy matches the 10-way softmax output
# (the original used binary_crossentropy, which inflates reported accuracy)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
Digit Recognizer
6,805,765
class CVTuner(kt.engine.tuner.Tuner):
    """Keras Tuner subclass that scores each trial with cross-validation."""
    def run_trial(self, trial, X, y, splits, batch_size=32, epochs=1, callbacks=None):
        val_losses = []
        for train_indices, test_indices in splits:
            X_train, X_test = [x[train_indices] for x in X], [x[test_indices] for x in X]
            y_train, y_test = [a[train_indices] for a in y], [a[test_indices] for a in y]
            # Unwrap the lists for single-input / single-output models
            if len(X_train) < 2:
                X_train = X_train[0]
                X_test = X_test[0]
            if len(y_train) < 2:
                y_train = y_train[0]
                y_test = y_test[0]
            model = self.hypermodel.build(trial.hyperparameters)
            hist = model.fit(X_train, y_train,
                             validation_data=(X_test, y_test),
                             epochs=epochs,
                             batch_size=batch_size,
                             callbacks=callbacks)
            val_losses.append([hist.history[k][-1] for k in hist.history])
        # Report the mean of each tracked metric across folds back to the oracle
        val_losses = np.asarray(val_losses)
        self.oracle.update_trial(
            trial.trial_id,
            {k: np.mean(val_losses[:, i]) for i, k in enumerate(hist.history.keys())})
        self.save_model(trial.trial_id, model)
<data_type_conversions>
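For context, a CVTuner is wired to a KerasTuner oracle plus a hypermodel-building function; a hedged construction sketch follows, where the trial counts are assumptions and `create_model`/`encoder` are defined further below.

tuner = CVTuner(
    hypermodel=lambda hp: create_model(hp, X.shape[-1], y.shape[-1], encoder),
    oracle=kt.oracles.BayesianOptimization(
        objective=kt.Objective('val_auc', direction='max'),
        num_initial_points=4,   # assumption
        max_trials=20))         # assumption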
history5 = model.fit(X_train_norm, y_train,
                     batch_size=batch_size,
                     epochs=num_epochs,
                     validation_split=0.1,
                     shuffle=True,
                     callbacks=[learning_rate_reduction, es])
Digit Recognizer
6,805,765
TRAINING = True
USE_FINETUNE = True
FOLDS = 5
SEED = 1111

tf.random.set_seed(SEED)
np.random.seed(SEED)

train = pd.read_csv('../input/jane-street-market-prediction/train.csv')
train = train.query('date > 85').reset_index(drop=True)
train = train.astype({c: np.float32 for c in train.select_dtypes(include='float64').columns})
train.fillna(train.mean(), inplace=True)
train = train.query('weight > 0').reset_index(drop=True)
train['action'] = ((train['resp_1'] > 0) & (train['resp_2'] > 0) & (train['resp_3'] > 0)
                   & (train['resp_4'] > 0) & (train['resp'] > 0)).astype('int')

features = [c for c in train.columns if 'feature' in c]
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4']

X = train[features].values
y = np.stack([(train[c] > 0).astype('int') for c in resp_cols]).T
f_mean = np.mean(train[features[1:]].values, axis=0)
<categorify>
model.save('newer/64x128_256x512.h5')
Digit Recognizer
6,805,765
def create_autoencoder(input_dim, output_dim, noise=0.05):
    i = Input(input_dim)
    encoded = BatchNormalization()(i)
    encoded = GaussianNoise(noise)(encoded)
    encoded = Dense(150, activation='relu')(encoded)
    encoded = BatchNormalization()(encoded)
    encoded = Dropout(0.1)(encoded)
    encoded = Dense(80, activation='relu')(encoded)
    encoded = BatchNormalization()(encoded)
    encoded = Dropout(0.1)(encoded)
    encoded = Dense(25, activation='relu', name='encoder')(encoded)
    encoded = BatchNormalization()(encoded)
    decoded = Dropout(0.1)(encoded)
    decoded = Dense(80, activation='relu')(decoded)
    decoded = BatchNormalization()(decoded)
    decoded = Dropout(0.1)(decoded)
    decoded = Dense(150, activation='relu')(decoded)
    decoded = BatchNormalization()(decoded)
    decoded = Dropout(0.1)(decoded)
    decoded = Dense(input_dim, name='decoded')(decoded)
    encoder = Model(inputs=i, outputs=encoded)
    autoencoder = Model(inputs=i, outputs=decoded)
    autoencoder.compile(optimizer=tfa.optimizers.RectifiedAdam(0.001), loss={'decoded': 'mse'})
    return autoencoder, encoder
<categorify>
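Once the autoencoder has been fit (see below), the encoder half can be used on its own to produce the 25-dimensional bottleneck features; a minimal sketch:

Z = encoder.predict(X, batch_size=4096)  # shape: (n_samples, 25)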
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
# Note: these Dense layers have no activation, so together with the
# BatchNormalization layers the head is effectively a linear stack.
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Dense(128))
model.add(BatchNormalization())
model.add(Dense(num_classes, activation='softmax'))
# categorical_crossentropy matches the 10-way softmax output
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
Digit Recognizer
6,805,765
def create_model(hp, input_dim, output_dim, encoder):
    inputs = Input(input_dim)
    x = encoder(inputs)
    x = Concatenate()([x, inputs])  # bottleneck features alongside the raw inputs
    x = BatchNormalization()(x)
    x = Dropout(hp.Float('init_dropout', 0.0, 0.5))(x)
    for i in range(hp.Int('num_layers', 1, 3)):
        # f-string fixed so each layer gets its own tunable width
        x = Dense(hp.Int(f'num_units_{i}', 64, 256))(x)
        x = BatchNormalization()(x)
        x = Lambda(tf.keras.activations.swish)(x)
        x = Dropout(hp.Float(f'dropout_{i}', 0.0, 0.5))(x)
    x = Dense(output_dim, activation='sigmoid')(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(optimizer=tfa.optimizers.RectifiedAdam(hp.Float('lr', 0.00001, 0.1, default=0.001)),
                  loss=BinaryCrossentropy(label_smoothing=hp.Float('label_smoothing', 0.0, 0.1)),
                  metrics=[tf.keras.metrics.AUC(name='auc')])
    return model
<categorify>
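Putting the pieces together, the tuner sketched earlier would search over purged CV splits roughly as below; the gap, epoch, and batch-size values are assumptions, and the encoder must already be trained (see the autoencoder fit further down).

gkf = PurgedGroupTimeSeriesSplit(n_splits=FOLDS, group_gap=20)  # gap is an assumption
splits = list(gkf.split(X, groups=train['date'].values))
# CVTuner.run_trial expects tuples of arrays, hence (X,) and (y,)
tuner.search((X,), (y,), splits=splits, batch_size=4096, epochs=100,
             callbacks=[EarlyStopping('val_auc', mode='max', patience=5)])
hp = tuner.get_best_hyperparameters(1)[0]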
history6 = model.fit(X_train_norm, y_train,
                     batch_size=batch_size,
                     epochs=num_epochs,
                     validation_split=0.1,
                     shuffle=True,
                     callbacks=[learning_rate_reduction, es])
Digit Recognizer
6,805,765
autoencoder, encoder = create_autoencoder(X.shape[-1], y.shape[-1], noise=0.1)

if TRAINING:
    autoencoder.fit(X, X,
                    epochs=1000,
                    batch_size=4096,
                    validation_split=0.1,
                    callbacks=[EarlyStopping('val_loss', patience=10, restore_best_weights=True)])
    encoder.save_weights('./encoder.hdf5')
else:
    encoder.load_weights('../input/ver-0-seed-1111/encoder.hdf5')
    encoder.trainable = False
<feature_engineering>
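The inference loop below references a `models` list that is never built in this excerpt; a hedged sketch of the per-fold training that would produce it, reusing the best hyperparameters `hp` from the tuner sketch above (epoch and batch-size values are assumptions, not the author's):

models = []
if TRAINING:
    for fold, (tr, va) in enumerate(splits):
        clf = create_model(hp, X.shape[-1], y.shape[-1], encoder)
        clf.fit(X[tr], y[tr], validation_data=(X[va], y[va]),
                epochs=100, batch_size=4096,  # assumptions
                callbacks=[EarlyStopping('val_auc', mode='max', patience=10,
                                         restore_best_weights=True)])
        models.append(clf)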
model.save('newer/64x128_256x512_diff_fcnn.h5')
Digit Recognizer
6,805,765
if not TRAINING:
    f = np.median
    models = models[-2:]  # keep only the last two trained folds for the ensemble
    env = janestreet.make_env()
    th = 0.5
    for (test_df, pred_df) in tqdm(env.iter_test()):
        if test_df['weight'].item() > 0:
            x_tt = test_df.loc[:, features].values
            if np.isnan(x_tt[:, 1:].sum()):
                # Fill NaNs with the precomputed feature means
                x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:]) + np.isnan(x_tt[:, 1:]) * f_mean
            pred = np.mean([model(x_tt, training=False).numpy() for model in models], axis=0)
            pred = f(pred)
            pred_df.action = np.where(pred >= th, 1, 0).astype(int)
        else:
            pred_df.action = 0
        env.predict(pred_df)
<import_modules>
model = load_model('newer/64x128_256x512_diff_fcnn.h5')
Digit Recognizer