kernel_id (int64, values 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57) |
---|---|---|---|
6,805,765 | warnings.filterwarnings("ignore")
VERSION = ''<define_variables> | pred = model.predict_classes(X_test_norm) | Digit Recognizer |
6,805,765 | SEED = 420
N_ESTIMATORS = 250
DEVICE = torch.device("cpu" )<compute_test_metric> | sample_submission['Label'] = pred | Digit Recognizer |
6,805,765 | <load_pretrained><EOS> | sample_submission.to_csv("submission.csv", index=False ) | Digit Recognizer |
1,909,759 | <SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<choose_model_class> | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.misc import toimage
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator | Digit Recognizer |
1,909,759 | class Best_clf_cv_transformer(BaseEstimator, TransformerMixin):
def __init__(self, myparams={'name':'LSvc', 'C':1}, **other_params):
self.myparams = myparams
self.myinit(**other_params)
return
def myinit(self, **other_params):
self.cv = 5
if 'cv' in self.myparams:
self.cv= self.myparams['cv']
clf = None
name = self.myparams['name']
if name == 'Logit':
clf = LogisticRegression(random_state=0)
elif name == 'DT':
clf = DecisionTreeClassifier(random_state=0)
elif name == 'RidgClf':
clf = RidgeClassifier(random_state=0)
elif name == 'Prcpt':
clf = Perceptron(random_state=0)
elif name == 'PssAggClf':
clf = PassiveAggressiveClassifier(random_state=0)
elif name == 'Knn':
# KNeighborsClassifier, NearestCentroid and the naive Bayes models take no random_state argument
clf = KNeighborsClassifier()
elif name == 'RF':
clf = RandomForestClassifier(random_state=0)
elif name == 'NearCent':
clf = NearestCentroid()
elif name == 'MultNB':
clf = MultinomialNB()
elif name == 'BernNB':
clf = BernoulliNB()
elif name == 'Svc':
clf = SVC(probability=True, random_state=0)
elif name == 'LSvc':
clf = LinearSVC(random_state=0)
elif name == 'Xgb':
clf = xgb.XGBClassifier(random_state=0)
elif name == 'Catb' :
clf = CatBoostClassifier(verbose=False, random_state=0)
elif name == 'FCNN':
clf = None
else:
print('ERROR Best_clf_cv_transformer: invalid @param name')
clf = None
self.isCV = True
if 'isCV' in self.myparams:
self.isCV = self.myparams['isCV']
self.n_estimators = 1
if 'n_estimators' in self.myparams:
self.n_estimators = self.myparams['n_estimators']
if 'params' in self.myparams:
clf.set_params(**self.myparams['params'])
if other_params:
clf.set_params(**other_params)
self.param_grid = None
if 'param_grid' in self.myparams:
self.param_grid = self.myparams['param_grid']
self.clf = clf
self.cv_score = 0
self.name = name
self._estimator_type='classifier'
return
def fit(self, X, Y, **FIT_PARAMS):
train_len = len(Y[pd.isnull(Y)==False])
X, Y = X[:train_len], Y[:train_len]
print('training', self.name, 'for X.shape =', X.shape)
n_jobs = -1
if self.name == 'FCNN':
lrScheduler = LRScheduler(
CyclicLR,
base_lr=0.0001,
max_lr=0.05,
step_every='batch'
)
self.clf = NeuralNetClassifier(
module=MyModule,
module__inputCount=X.shape[1],
module__outputCount=2,
module__hiddenLayerCounts=[15],
max_epochs=1000,
verbose=0,
iterator_train__shuffle=True,
callbacks=[('LRScheduler', lrScheduler),('EarlyStopping', EarlyStopping(patience=20)) ]
)
X = X.astype(np.float32)
Y = Y.astype(np.int64)
n_jobs = None
if self.isCV:
if self.param_grid:
gridSearchCV = GridSearchCV(
self.clf, self.param_grid, iid=False, cv=self.cv, scoring=CV_SCORERS,
refit='f1_score', n_jobs=n_jobs
)
gridSearchCV.fit(X, Y)
print(self.name, ": Best_clf_cv_transformer: Best parameter(CV score=%0.3f):" % gridSearchCV.best_score_)
print(gridSearchCV.best_params_)
self.clf = gridSearchCV.best_estimator_
self.cv_score = gridSearchCV.best_score_
if self.name == 'LSvc':
self.clf = CalibratedClassifierCV(self.clf)
else:
if self.name == 'LSvc':
self.clf = CalibratedClassifierCV(self.clf)
print(self.name, ': Best_clf_cv_transformer: starting CV =', self.cv)
if self.name not in {'fss'}:
cv_results = cross_validate(self.clf, X, Y, cv=int(self.cv))
else:
voting_clf = VotingClassifier(estimators=[(self.name, self.clf)])
cv_results = cross_validate(voting_clf, X, Y, cv=self.cv)
self.cv_score = np.mean(cv_results['test_score'])
print(self.name, ": cv_score: %0.3f" % self.cv_score)
else:
if self.name == 'LSvc':
self.clf = CalibratedClassifierCV(self.clf)
if self.n_estimators > 1:
self.clf = BaggingClassifier(base_estimator=self.clf, n_estimators=self.n_estimators)
self.clf.fit(X, Y)
print("Done Fitting", self.name)
return self
def get_cv_score(self):
return self.cv_score
def transform(self, X, Y=None, **FIT_PARAMS):
if self.name == 'FCNN':
X = X.astype(np.float32)
if not Y is None:
Y = Y.astype(np.int64)
return self.clf.transform(X, Y)
def predict(self, X, **FIT_PARAMS):
if self.name == 'FCNN':
X = X.astype(np.float32)
return self.clf.predict(X)
def predict_proba(self, X):
if self.name == 'FCNN':
X = X.astype(np.float32)
return self.clf.predict_proba(X)
def predict_log_proba(self, X):
if self.name == 'FCNN':
X = X.astype(np.float32)
return self.clf.predict_log_proba(X)
def score(self, X, Y, **FIT_PARAMS):
if self.name == 'FCNN':
X = X.astype(np.float32)
return self.clf.score(X, Y, **FIT_PARAMS)
def decision_function(self, X, **FIT_PARAMS):
if self.name == 'FCNN':
X = X.astype(np.float32)
return self.clf.decision_function(X)
def set_params(self, **params):
self.myparams = params['myparams']
params.pop('myparams')
self.myinit(**params)
return self
def get_params(self, deep=True):
params = {'myparams': self.myparams}
return params
def apply(self, X):
return self.clf.apply(X)
def decision_path(self, X):
return self.clf.decision_path(X)
def staged_decision_function(self, X):
return self.clf.staged_decision_function(X)
def staged_predict(self, X):
return self.clf.staged_predict(X)
def staged_predict_proba(self, X):
return self.clf.staged_predict_proba(X)
def staged_score(self, X):
return self.clf.staged_score(X)<define_variables> | df_train = pd.read_csv("../input/train.csv", encoding='ISO-8859-1')
df_subm = pd.read_csv("../input/test.csv", encoding='ISO-8859-1') | Digit Recognizer |
1,909,759 | def seed_all():
random.seed(SEED)
np.random.seed(SEED)
seed_all()<load_from_csv> | df_train.isnull().sum().sum(), df_subm.isnull().sum().sum() | Digit Recognizer |
1,909,759 | def load_preprocess_data(filename='../input/jane-street-market-prediction/train.csv', isTrainData=True):
dtype = None
if isTrainData:
dtype = {
'date' : 'int64',
'weight' : 'float64',
'resp' : 'float64',
'ts_id' : 'int64',
'feature_0' : 'float64'
}
else:
dtype = {
'date' : 'int64',
'weight' : 'float64',
'feature_0' : 'float64'
}
for i in range(1, 130):
k = 'feature_' + str(i)
dtype[k] = 'float32'
X = pd.read_csv(filename, dtype=dtype)
resp_cols = ['resp_1', 'resp_2', 'resp_3','resp_4', 'resp']
X = X.query('date > 85')
X = X[X['weight'] != 0].reset_index(drop = True)
y = np.stack([(X[c] > 0).astype('int') for c in resp_cols]).T
f_columns = [c for c in X.columns if "feature" in c]
Weights = X['weight'].values.reshape((-1, 1))
if isTrainData:
X.drop(columns=['date', 'weight', 'resp', 'resp_1', 'resp_2', 'resp_3', 'resp_4', 'ts_id'], inplace=True)
else:
X.drop(columns=['date', 'weight'], inplace=True)
preprocess_pipe = Pipeline([
("imputer", SimpleImputer(missing_values=np.nan, strategy='mean')),
])
X = preprocess_pipe.fit_transform(X)
X = np.hstack((X, Weights))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05)
del X, y
gc.collect()
W_train = X_train[:, -1]
X_train = X_train[:, :-1]
W_test = X_test[:, -1]
X_test = X_test[:, :-1]
return X_train, X_test, y_train, y_test, W_train, W_test, preprocess_pipe<train_model> | X_train = df_train[df_train.columns[1:]]
y_train = df_train['label'] | Digit Recognizer |
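`load_preprocess_data` above builds a multi-label target with `np.stack([(X[c] > 0).astype('int') for c in resp_cols]).T`: one binary column per `resp` column. A tiny self-contained check of that line (a toy frame with two columns instead of the real five):

```python
import numpy as np
import pandas as pd

# Toy frame with two "resp"-style columns; positive returns become 1s.
df = pd.DataFrame({'resp_1': [0.2, -0.1, 0.0], 'resp': [-0.3, 0.4, 0.1]})
y = np.stack([(df[c] > 0).astype('int') for c in ['resp_1', 'resp']]).T
print(y.shape)  # (3, 2): one row per sample, one binary label per column
print(y)        # [[1 0], [0 1], [0 1]]
```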
1,909,759 | X_TRAIN, X_TEST, Y_TRAIN, Y_TEST, W_train, W_test, preprocess_pipe = load_preprocess_data()
gc.collect()
X_TRAIN.shape, Y_TRAIN.shape<normalization> | X_train, X_test, y_train, y_test = train_test_split(X_train, y_train) | Digit Recognizer |
1,909,759 | def learning_rate_010_decay_power_09(current_iter):
base_learning_rate = 0.1
lr = base_learning_rate * np.power(0.995, current_iter)
return lr if lr > 1e-2 else 1e-2
<init_hyperparams> | y_train = to_categorical(y_train)
y_test = to_categorical(y_test ) | Digit Recognizer |
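`learning_rate_010_decay_power_09` above decays the LightGBM learning rate by 0.5% per boosting iteration (the factor is 0.995 despite the function name) and floors it at 0.01. A quick check of where the floor kicks in:

```python
import numpy as np

# lr_i = max(0.1 * 0.995**i, 0.01); 0.995**i drops below 0.1 near i = 460,
# so the schedule is effectively constant at 0.01 from that point on.
for i in (0, 100, 459, 460, 1000):
    lr = 0.1 * np.power(0.995, i)
    print(i, max(lr, 1e-2))
```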
1,909,759 | FIT_PARAMS= {
"early_stopping_rounds":30,
"eval_metric" : 'auc',
"eval_set" : [(X_TEST, Y_TEST[:,-1])],
'eval_names': ['valid'],
'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_010_decay_power_09)],
'verbose': 50,
'categorical_feature': 'auto'
}<init_hyperparams> | X_train = X_train/255
X_test = X_test/255 | Digit Recognizer |
1,909,759 |
<init_hyperparams> | model = Sequential() | Digit Recognizer |
1,909,759 | OPT_PARAMS_1 = {'n_estimators': N_ESTIMATORS, 'colsample_bytree': 0.668, 'min_child_samples': 150, 'min_child_weight': 1, 'num_leaves': 80, 'reg_alpha': 0, 'reg_lambda': 0.002, 'subsample': 0.87}
OPT_PARAMS_2 = {'n_estimators': N_ESTIMATORS, 'colsample_bytree': 0.668, 'min_child_samples': 190, 'min_child_weight': 1, 'num_leaves': 90, 'reg_alpha': 0, 'reg_lambda': 0.002, 'subsample': 0.87}<train_model> | model.add(Conv2D(32,(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(32,(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3, 3), activation='relu'))
model.add(Conv2D(64,(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25)) | Digit Recognizer |
1,909,759 | def create_train_lgbm(X_train, y_train, component):
if component == 1:
opt_params = deepcopy(OPT_PARAMS_1)
else:
opt_params = deepcopy(OPT_PARAMS_2)
lgb_clf_1 = lgb.LGBMClassifier(**opt_params)
lgb_clf_1.fit(X_train, y_train, **FIT_PARAMS)
if lgb_clf_1.best_iteration_ != N_ESTIMATORS:
opt_params['n_estimators'] = lgb_clf_1.best_iteration_
lgb_clf_1 = lgb.LGBMClassifier(**opt_params)
lgb_clf_1.fit(X_train, y_train, **FIT_PARAMS)
X_temp = X_train[10].reshape((1, -1))
np.round(lgb_clf_1.predict(X_temp)).astype(int)
return lgb_clf_1<train_model> | model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax')) | Digit Recognizer |
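`create_train_lgbm` above trains with early stopping against a held-out eval set, then, if training stopped early, refits with `n_estimators` pinned to `best_iteration_`. A minimal sketch of that pattern on synthetic data; it assumes a recent lightgbm where early stopping is passed as a callback (older versions took an `early_stopping_rounds` fit argument, as `FIT_PARAMS` above does):

```python
import lightgbm as lgb
import numpy as np
from sklearn.model_selection import train_test_split

X = np.random.rand(2000, 10)
y = (X[:, 0] + 0.1 * np.random.randn(2000) > 0.5).astype(int)
X_tr, X_va, y_tr, y_va = train_test_split(X, y, test_size=0.2, random_state=0)

clf = lgb.LGBMClassifier(n_estimators=500)
clf.fit(X_tr, y_tr, eval_set=[(X_va, y_va)],
        callbacks=[lgb.early_stopping(30), lgb.log_evaluation(0)])

# Refit with exactly best_iteration_ trees, as create_train_lgbm does.
final = lgb.LGBMClassifier(n_estimators=clf.best_iteration_).fit(X_tr, y_tr)
```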
1,909,759 | def getLgbs() :
LGBS = []
for model_id in range(5):
y_train = Y_TRAIN[:, model_id]
lgbm_1 = create_train_lgbm(X_TRAIN, y_train, 1)
lgbm_2 = create_train_lgbm(X_TRAIN, y_train, 2)
LGBS.append((lgbm_1, lgbm_2))
pickleSave(LGBS, 'lgbs.bin')
return LGBS<choose_model_class> | sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) | Digit Recognizer |
1,909,759 | def getSclfs() :
SCLFS = []
for model_id in range(5):
sclf = StackingClassifier(classifiers=LGBS[model_id], fit_base_estimators=False,
use_probas=True, average_probas=False,
meta_classifier=Best_clf_cv_transformer({ 'name': 'LSvc', 'params': {'penalty': 'l2', 'class_weight': 'balanced'}, 'param_grid': {'C' : [0.01, 0.05, 0.1, 1]} }))
sclf.name = 'sclf_' + str(model_id)
y_train = Y_TRAIN[:, model_id]
sclf.fit(X_TRAIN, y_train)
SCLFS.append(sclf)
pickleSave(SCLFS, 'sclfs.bin')
X_temp = X_TRAIN[10].reshape((1, -1))
np.round(SCLFS[0].predict(X_temp)).astype(int)
return SCLFS<load_pretrained> | model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) | Digit Recognizer |
1,909,759 | LGBS = unpickle('../input/jane-lgbm-stackedlsvc/lgbs.bin')
SCLFS = unpickle('../input/jane-lgbm-stackedlsvc/sclfs.bin')<predict_on_test> | datagen = ImageDataGenerator(
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
) | Digit Recognizer |
1,909,759 | def predict(test_df, isRetProb=False):
test_df.drop(columns=['weight', 'date'], inplace=True)
test_df.reset_index(drop=True, inplace=True)
X_test = preprocess_pipe.transform(test_df).reshape((-1, 130))
y_probs = []
for sclf in SCLFS:
y_p = sclf.predict_proba(X_test).reshape((-1, 2))[:, 1].reshape((-1, 1))
y_probs.append(y_p)
y_probs = np.hstack(y_probs)
pred_pr = np.median(y_probs, axis=1)
y_pred = (pred_pr >= 0.5).astype(int)
if isRetProb:
return y_pred, pred_pr
else:
return y_pred<define_variables> | datagen.fit(X_train) | Digit Recognizer |
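The `predict` helper above ensembles the five stacked classifiers by taking the row-wise median of their positive-class probabilities and thresholding at 0.5. The mechanics in isolation:

```python
import numpy as np

# Rows are samples, columns are models' positive-class probabilities.
y_probs = np.array([[0.2, 0.6, 0.7],
                    [0.9, 0.4, 0.8]])
pred_pr = np.median(y_probs, axis=1)   # [0.6, 0.8]
y_pred = (pred_pr >= 0.5).astype(int)  # [1, 1]
```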
1,909,759 | env = janestreet.make_env()
env_iter = env.iter_test()<predict_on_test> | model.fit_generator(datagen.flow(X_train, y_train, batch_size=128),
steps_per_epoch=int(len(X_train)/ 128), epochs=30 ) | Digit Recognizer |
1,909,759 | for test_df, pred_df in env_iter:
if test_df["weight"].item() > 0:
predictions = predict(test_df)
pred_df.action = predictions
else:
pred_df.action = 0
env.predict(pred_df)<train_model> | test_data = df_subm.values | Digit Recognizer |
1,909,759 | print('Done!')<load_from_csv> | test_data = test_data.reshape(test_data.shape[0], 28, 28, 1) | Digit Recognizer |
1,909,759 | if tuning or training:
train_data = pd.read_csv('/kaggle/input/jane-street-market-prediction/train.csv')
train_data.fillna(train_data.mean() ,inplace=True)
metadata = pd.read_csv('/kaggle/input/jane-street-market-prediction/features.csv')
metadata.drop(['feature'],axis=1,inplace=True)
def replace_bool(tf):
if tf:
return 1
else:
return 0
metadata = metadata.applymap(replace_bool)
metadata_norm = metadata/metadata.sum()
metadata_norm = metadata_norm.applymap(np.sqrt)
features_transform = metadata_norm.values
start_date=86
feature_columns = [col for col in train_data.columns.values if 'feature' in col]
X_m = np.matmul(train_data[train_data['date']>=start_date][feature_columns].values,features_transform)
corr=abs(train_data[feature_columns].corr())
ordered_feature_columns=[feature_columns.pop(0)]
for col in feature_columns:
corr_max = corr[col][ordered_feature_columns].idxmax()
corr_max_idx = ordered_feature_columns.index(corr_max)
ordered_feature_columns.insert(corr_max_idx+1,col)
f_mean = train_data[ordered_feature_columns].mean()
f_mean.to_csv('f_mean.csv',index=False)
with open("ordered_columns.txt", 'wb')as f:
pickle.dump(( ordered_feature_columns), f)
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4']
train_data = train_data[train_data['date']>=start_date]
X = np.concatenate(( train_data[ordered_feature_columns].values,X_m),axis=1)
y = np.stack([(train_data[c] > 0 ).astype('int')for c in resp_cols] ).T
max_date = train_data['date'].max()
split_date = int(0.7*(max_date-start_date))
train_test_split = train_data['date'][train_data['date']==split_date].index[0]
del train_data
X_train=X[:train_test_split,:]
X_test=X[train_test_split:,:]
y_train=y[:train_test_split,:]
y_test=y[train_test_split:,:]
<choose_model_class> | test_data = test_data/255 | Digit Recognizer |
1,909,759 | tf.random.set_seed(42)
SEED=42
def create_model(hp, num_columns, num_labels):
inp = tf.keras.layers.Input(shape =(num_columns, 1))
x = tf.keras.layers.BatchNormalization()(inp)
x = tf.keras.layers.Conv1D(filters=8,
kernel_size=hp.Int('kernel_size',5,10,step=5),
strides=1,
activation='relu' )(x)
x = tf.keras.layers.MaxPooling1D(pool_size=2 )(x)
x = tf.keras.layers.Flatten()(x)
for i in range(hp.Int('num_layers',12,16)) :
x = tf.keras.layers.Dense(hp.Int(f'num_units_{i}',32,64))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(tf.keras.activations.swish )(x)
x = tf.keras.layers.Dropout(hp.Float(f'dropout_{i}',0,0.5))(x)
x = tf.keras.layers.Dense(num_labels )(x)
out = tf.keras.layers.Activation('sigmoid' )(x)
model = tf.keras.models.Model(inputs = inp, outputs = out)
model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = hp.Float('lr',0.00001,0.1,default=0.001)) ,
loss = tf.keras.losses.BinaryCrossentropy(label_smoothing = 0.01),
metrics = tf.keras.metrics.AUC(name = 'AUC'),
)
return model<train_model> | predictions = model.predict(test_data ) | Digit Recognizer |
1,909,759 | if tuning:
model_fn = lambda hp: create_model(hp,X_train.shape[-1],y_train.shape[-1])
tuner = kt.tuners.bayesian.BayesianOptimization(
hypermodel=model_fn,
objective= kt.Objective('val_AUC', direction='max'),
num_initial_points=4,
max_trials=20)
tuner.search(X_train,y_train,batch_size=4096,epochs=20, validation_data =(X_test, y_test), callbacks=[EarlyStopping('val_AUC', mode='max',patience=5)])
hp = tuner.get_best_hyperparameters(1)[0]
pd.to_pickle(hp,'best_hp_cnn_day_86_metadata_deep.pkl' )<train_model> | predictions = np.argmax(predictions, axis=1, out=None ) | Digit Recognizer |
1,909,759 | if training:
hp = pd.read_pickle('best_hp_cnn_day_86_metadata_deep.pkl')
model_fn = lambda hp: create_model(hp,X_train.shape[-1],y_train.shape[-1])
model = model_fn(hp)
model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=100,batch_size=4096,
callbacks=[EarlyStopping('val_AUC',mode='max',patience=10,restore_best_weights=True)])
model.save_weights('JS_CNN_day_86_metadata_deep.hdf5')<load_from_csv> | with open("resultCNNwithPrepros.csv", "wb") as f:
f.write(b'ImageId,Label\n')
np.savetxt(f, np.hstack([(np.array(range(28000)) + 1).reshape(-1, 1), predictions.astype(int).reshape(-1, 1)]), fmt='%i', delimiter=",") | Digit Recognizer |
4,940,763 | if not training or tuning:
model_fn = lambda hp: create_model(hp,159,5)
hp = pd.read_pickle('/kaggle/input/jscnn/best_hp_cnn_day_86.pkl')
model = model_fn(hp)
model.load_weights('/kaggle/input/jscnn/JS_CNN_day_86.hdf5')
samples_mean = pd.read_csv('/kaggle/input/jscnn/f_mean.csv')
features_transform = np.load('/kaggle/input/jscnn/features_transform_130.npy')
env = janestreet.make_env()
iter_test = env.iter_test()
for (test_df, sample_prediction_df) in iter_test:
weight = test_df.weight.iloc[0]
with open("/kaggle/input/jscnn/ordered_columns.txt", 'rb') as f:
ordered_cols = pickle.load(f)
test_df = test_df[ordered_cols]
X_test = test_df.values[0]
if weight == 0:
sample_prediction_df.action = 0
else:
for index, x in np.ndenumerate(X_test):
idx = index[0]
if np.isnan(x):
X_test[idx] = samples_mean.iloc[idx]
X_test = X_test.reshape((1, -1))
X_metadata = np.matmul(X_test, features_transform)
X = np.concatenate((X_test, X_metadata), axis=1)
prediction = np.median(model(X, training=False).numpy()[0])
sample_prediction_df.action = int(round(prediction))
env.predict(sample_prediction_df)
<set_options> | %matplotlib inline
| Digit Recognizer |
4,940,763 | plt.style.use('fivethirtyeight')
y_ = Fore.YELLOW
r_ = Fore.RED
g_ = Fore.GREEN
b_ = Fore.BLUE
m_ = Fore.MAGENTA
c_ = Fore.CYAN
sr_ = Style.RESET_ALL
warnings.filterwarnings('ignore')
<load_from_csv> | base = Path('../input') | Digit Recognizer |
4,940,763 | folder_path = '../input/jane-street-market-prediction/'
sample = pd.read_csv(folder_path + 'example_sample_submission.csv')
test_data = pd.read_csv(folder_path + 'example_test.csv' )<set_options> | data_df = pd.read_csv(base/'train.csv')
data_df.head() | Digit Recognizer |
4,940,763 | def seed_everything(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(seed=42 )<load_from_csv> | trn_df = data_df.drop(val_df.index)
trn_df.shape | Digit Recognizer |
4,940,763 | features = [f'feature_{i}' for i in range(130)]
config = {
"epochs":100,
"train_batch_size":1024,
"valid_batch_size":1024,
"test_batch_size":64,
"nfolds":5,
"learning_rate":0.0005,
'encoder_input':len(features),
"input_size1":len(features),
"input_size2":128,
'output_size':5,
}
data_path = '../input/jsmp-pytorch-bottelneck-model-train'
train_data_mean = pd.read_csv(f"{data_path}/train_data_mean.csv").to_numpy()<normalization> | trn_x, trn_y = trn_df.loc[:, 'pixel0':'pixel783'], trn_df['label']
val_x, val_y = val_df.loc[:, 'pixel0':'pixel783'], val_df['label'] | Digit Recognizer |
4,940,763 | class GaussianNoise(nn.Module):
def __init__(self,device,sigma=0.1, is_relative_detach=True):
super().__init__()
self.sigma = sigma
self.is_relative_detach = is_relative_detach
self.noise = torch.tensor(0,dtype=torch.float ).to(device)
def forward(self, x):
if self.training and self.sigma != 0:
scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x
sampled_noise = self.noise.repeat(*x.size() ).normal_() * scale
x = x + sampled_noise
return x
class Autoencoder(nn.Module):
def __init__(self,input_size,gaussian_noise,noise_level= 0.1):
super(Autoencoder,self ).__init__()
self.noise_level = noise_level
self.gaussian_noise = gaussian_noise
self.layer1 = self.batch_linear(input_size,768,nn.ReLU)
self.layer2 = self.batch_linear(768,768,nn.ReLU)
self.layer3 = self.batch_linear(768,128,nn.ReLU)
self.layer4 = self.batch_linear(128,768,nn.ReLU)
self.layer5 = self.batch_linear(768,768,nn.ReLU)
self.layer6 = self.batch_linear(768,input_size)
def swap_noise(self,x):
batch_size = x.shape[0]
num_columns = x.shape[1]
random_rows = torch.randint(low = 0,high = batch_size,size=(batch_size,))
t = x[random_rows]
random_swap = torch.rand(num_columns)< self.noise_level
x[:,random_swap] = t[:,random_swap]
return x
def batch_linear(self,inp,out,activation=None):
if activation:
return nn.Sequential(nn.BatchNorm1d(inp),nn.Linear(inp,out),activation())
else:
return nn.Sequential(nn.BatchNorm1d(inp),nn.Linear(inp,out))
def forward(self,x):
x = self.gaussian_noise(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.layer6(x)
return x
def get_encoder(self,x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
return x<choose_model_class> | def reshape(dt_x, dt_y):
dt_x = np.array(dt_x, dtype=np.uint8).reshape(-1, 28, 28)
dt_x = np.stack((dt_x,)*3, axis=-1)
dt_y = np.array(dt_y)
return dt_x, dt_y | Digit Recognizer |
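`Autoencoder.swap_noise` above implements the "swap noise" used in denoising-autoencoder feature learning: for a random subset of columns, each row takes that column's value from another randomly chosen row, so the corruption is always a plausible value. A standalone sketch of the same idea (assumes only torch):

```python
import torch

def swap_noise(x, noise_level=0.5):
    # Pick one donor row per row, and a random subset of columns to swap.
    rows = torch.randint(0, x.shape[0], (x.shape[0],))
    swap = torch.rand(x.shape[1]) < noise_level
    x = x.clone()
    x[:, swap] = x[rows][:, swap]
    return x

x = torch.arange(12, dtype=torch.float).reshape(4, 3)
print(swap_noise(x))  # same shape; swapped entries come from other rows
```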
4,940,763 | class Model(nn.Module):
def __init__(self,input_size1,input_size2,output_size):
super(Model,self ).__init__()
self.layer1 = self.batch_linear_drop(input_size1,256,0.3,activation=nn.ELU)
self.layer2 = self.batch_linear(256,128,activation= nn.ELU)
self.layer3 = self.batch_linear_drop(input_size1+128,256,0.1,nn.ReLU)
self.layer4 = self.batch_linear_drop(input_size1+384,256,0.1,nn.ELU)
self.layer5 = self.batch_linear(256,128,nn.ReLU)
self.layer6 = self.batch_linear_drop(384,256,0.1,nn.ELU)
self.layer7 = self.batch_linear(256,128,nn.ReLU)
self.layer8 = self.batch_linear_drop(512,256,0.1,nn.ELU)
self.layer9 = self.batch_linear(256,128,nn.ReLU)
self.layer10 = self.batch_linear_drop(384,256,0.1,nn.ELU)
self.layer11 = self.batch_linear(256,128,nn.ReLU)
self.layer12 = self.batch_linear(768,256,nn.SELU)
self.layer13 = self.batch_linear(256,128,nn.SELU)
self.layer14 = nn.Sequential(nn.BatchNorm1d(128),nn.Linear(128,output_size))
def batch_linear_drop(self,inp,out,drop,activation=None):
if activation:
return nn.Sequential(nn.BatchNorm1d(inp),nn.Dropout(drop),nn.Linear(inp,out),activation())
else:
return nn.Sequential(nn.BatchNorm1d(inp),nn.Dropout(drop),nn.Linear(inp,out))
def batch_linear(self,inp,out,activation=None):
if activation:
return nn.Sequential(nn.BatchNorm1d(inp),nn.Linear(inp,out),activation())
else:
return nn.Sequential(nn.BatchNorm1d(inp),nn.Linear(inp,out))
def forward(self,input1,input2):
x1 = self.layer1(input1)
x1 = self.layer2(x1)
x2 = torch.cat([input1,x1],1)
x2 = self.layer3(x2)
x3 = torch.cat([input1,x1,x2],1)
x3 = self.layer4(x3)
x3 = self.layer5(x3)
x4 = torch.cat([x2,x3],1)
x4 = self.layer6(x4)
x4 = self.layer7(x4)
x5 = torch.cat([x2,x3,x4],1)
x5 = self.layer8(x5)
x5 = self.layer9(x5)
x6 = torch.cat([x3,x4,x5],1)
x6 = self.layer10(x6)
x6 = self.layer11(x6)
x7 = torch.cat([x1,x2,x3,x5,x6],1)
x7 = self.layer12(x7)
x7 = self.layer13(x7)
x7 = self.layer14(x7)
return x7
<categorify> | trn_x, trn_y = reshape(trn_x, trn_y)
val_x, val_y = reshape(val_x, val_y ) | Digit Recognizer |
4,940,763 | data_path = '../input/jsmp-pytorch-bottelneck-model-train'
models = list()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
for i in range(config['nfolds']):
model = Model(config['input_size1'],config['input_size2'],config['output_size'])
model.load_state_dict(torch.load(f"{data_path}/model{i}.bin",map_location=device))
model.to(device)
model.eval()
models.append(model)
gaussian_noise = GaussianNoise(device)
encoder = Autoencoder(config['input_size1'],gaussian_noise)
encoder.load_state_dict(torch.load(f'{data_path}/encoder.bin',map_location=device))
encoder.to(device)
encoder.eval();<categorify> | train = Path('../working/data/train')
save(train, trn_x, trn_y)
valid = Path('../working/data/valid')
save(valid, val_x, val_y) | Digit Recognizer |
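The `save(train, trn_x, trn_y)` helper called above is not defined in this excerpt. For the `ImageList.from_folder(...).label_from_folder()` pipeline below to work, it would have to write each array as an image into a per-class subfolder; a hypothetical stand-in under that assumption:

```python
import matplotlib.image as mpimg
from pathlib import Path

# Hypothetical stand-in for the undefined save(): writes each 28x28x3
# uint8 array as a JPEG under path/<label>/, the layout that fastai's
# label_from_folder() expects. Not the notebook's actual code.
def save(path: Path, xs, ys):
    for i, (x, y) in enumerate(zip(xs, ys)):
        d = path / str(y)
        d.mkdir(parents=True, exist_ok=True)
        mpimg.imsave(str(d / f'{i}.jpeg'), x)
```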
4,940,763 | def inference(test):
all_prediction = np.zeros(( test.shape[0],5))
inputs = torch.tensor(test,dtype=torch.float)
for model in models:
inputs = inputs.to(device,dtype=torch.float)
encoder_inp = encoder.get_encoder(inputs)
outputs = model(inputs,encoder_inp)
all_prediction += outputs.sigmoid().detach().cpu().numpy()
return all_prediction/len(models)<load_from_csv> | path = Path('../working/data/')
data = (ImageList.from_folder(path)
.split_by_folder(train='train', valid='valid')
.label_from_folder()
.transform(get_transforms(do_flip=False), size=28)
.databunch(bs=256).normalize(imagenet_stats)) | Digit Recognizer |
4,940,763 | test_data = pd.read_csv(folder_path + 'example_test.csv')
test_data.fillna(0,inplace=True)
test_data = test_data[features].to_numpy()
predictions = inference(test_data)
predictions = predictions.mean(axis=1)
sns.distplot(predictions);<split> | learn = cnn_learner(data, models.resnet34, loss_func=nn.CrossEntropyLoss() , metrics=accuracy ) | Digit Recognizer |
4,940,763 | env = janestreet.make_env()
iter_test = env.iter_test()<predict_on_test> | learn.fit_one_cycle(3, 1e-2 ) | Digit Recognizer |
4,940,763 | %%time
all_predictions = list()
for(test_df, sample_prediction_df)in iter_test:
if test_df['weight'].item() != 0:
test_df.fillna(0,inplace=True)
predictions = inference(test_df[features].to_numpy())
prediction = np.mean(predictions)
all_predictions.append(prediction)
sample_prediction_df.action = np.where(prediction >= 0.5, 1, 0 ).astype(int)
else:
sample_prediction_df.action = 0
env.predict(sample_prediction_df )<load_from_csv> | learn.save('stage1' ) | Digit Recognizer |
4,940,763 | submission = pd.read_csv('./submission.csv')
submission.head()<install_modules> | learn.unfreeze()
learn.lr_find()
learn.recorder.plot() | Digit Recognizer |
4,940,763 | !pip install -q git+https://github.com/mljar/mljar-supervised.git@dev<import_modules> | learn.fit_one_cycle(15, slice(5e-5)) | Digit Recognizer |
4,940,763 | import pandas as pd
from supervised.automl import AutoML<load_from_csv> | learn.save('stage2' ) | Digit Recognizer |
4,940,763 | train = pd.read_csv(".. /input/bnp-paribas-cardif-claims-management/train.csv.zip")
test = pd.read_csv(".. /input/bnp-paribas-cardif-claims-management/test.csv.zip")
sub = pd.read_csv(".. /input/bnp-paribas-cardif-claims-management/sample_submission.csv.zip")
x_cols = [f for f in train.columns if "v" in f]<train_model> | learn.unfreeze()
learn.lr_find()
learn.recorder.plot() | Digit Recognizer |
4,940,763 | automl = AutoML(
total_time_limit=8*3600,
optuna_time_budget=1800,
mode="Optuna",
)
automl.fit(train[x_cols], train["target"] )<save_to_csv> | learn.fit_one_cycle(5, 5e-5 ) | Digit Recognizer |
4,940,763 | pred = automl.predict_proba(test)
sub["PredictedProb"] = pred[:, 1]
sub.to_csv("./1_submission.csv", index=False )<import_modules> | def create_tst(path:Path, test):
path.mkdir(parents=True, exist_ok=True)
for i in range(len(test)) :
matplotlib.image.imsave(str(path/(str(i)+ '.jpeg')) , test[i] ) | Digit Recognizer |
4,940,763 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.preprocessing import MinMaxScaler<load_from_csv> | test_df = pd.read_csv(base/'test.csv')
test_df = np.array(test_df, dtype=np.uint8).reshape(-1, 28, 28)
test_df = np.stack((test_df,)*3, axis=-1)
test_df.shape | Digit Recognizer |
4,940,763 | data = pd.read_csv('../input/ghouls-goblins-and-ghosts-boo/train.csv.zip')
data<load_from_csv> | tst_path = Path('../working/test')
create_tst(tst_path, test_df) | Digit Recognizer |
4,940,763 | validate_data = pd.read_csv('../input/ghouls-goblins-and-ghosts-boo/test.csv.zip')
validate_data<define_variables> | preds = []
ImageId = []
for i in range(len(test_df)) :
img = open_image(tst_path/str(str(i)+'.jpeg'))
pred_cls, pred_idx, pred_img = learn.predict(img)
preds.append(int(pred_idx))
ImageId.append(i+1 ) | Digit Recognizer |
4,940,763 | validate_data_ids = validate_data['id']<count_missing_values> | submission = pd.DataFrame({'ImageId':ImageId, 'Label':preds} ) | Digit Recognizer |
4,940,763 | data.isnull().any()<data_type_conversions> | submission.to_csv('submission.csv',index=False ) | Digit Recognizer |
4,940,763 | <compute_test_metric><EOS> | shutil.rmtree(tst_path)
path_val = Path('../working/data/valid')
shutil.rmtree(path_val)
path_trn = Path('../working/data/train')
shutil.rmtree(path_trn) | Digit Recognizer |
800,022 | <SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<compute_test_metric> | print(os.listdir("../input"))
| Digit Recognizer |
800,022 | data, validate_data<prepare_x_and_y> | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv") | Digit Recognizer |
800,022 | train_set_x, train_set_y = data.drop('type', 1), data['type']<choose_model_class> | Y_train = train["label"]
X_train = train.drop(labels="label",axis=1)
Y_train.value_counts() | Digit Recognizer |
800,022 | classifier = GridSearchCV(
KNeighborsClassifier() ,
param_grid={
'n_neighbors': np.arange(1, 100),
'p': np.arange(1, 10)
},
scoring='accuracy',
cv=3
)<train_model> | X_train = X_train/255.0
test = test/255.0 | Digit Recognizer |
800,022 | classifier.fit(train_set_x, train_set_y )<find_best_params> | X_train = X_train.values.reshape(-1,28,28,1)
test = test.values.reshape(-1,28,28,1 ) | Digit Recognizer |
800,022 | scores = classifier.cv_results_['mean_test_score']
scores, scores.mean() , scores.max()<find_best_params> | Y_train = to_categorical(Y_train,num_classes=10 ) | Digit Recognizer |
800,022 | classifier.best_params_<predict_on_test> | X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size = 0.05 ) | Digit Recognizer |
800,022 | np.mean(classifier.predict(train_set_x)== train_set_y )<predict_on_test> | def conv_layer(x,concat_axis,nb_filter,dropout_rate=None,weight_decay=1E-4):
x = BatchNormalization(axis=concat_axis,
gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = Activation('relu' )(x)
x = Conv2D(nb_filter,(3,3),padding='same',kernel_regularizer=l2(weight_decay),use_bias=False )(x)
if dropout_rate:
x = Dropout(dropout_rate )(x)
return x
def transition_layer(x,concat_axis,nb_filter,dropout_rate=None,weight_decay=1E-4):
x = BatchNormalization(axis=concat_axis,
gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = Activation('relu' )(x)
x = Conv2D(nb_filter,(1,1),padding='same',kernel_regularizer=l2(weight_decay),use_bias=False )(x)
if dropout_rate:
x = Dropout(dropout_rate )(x)
x = AveragePooling2D(( 2,2),strides=(2,2))(x)
return x
def denseblock(x,concat_axis,nb_filter,nb_layers,growth_rate,dropout_rate=None,weight_decay=1E-4):
list_features = [x]
for i in range(nb_layers):
x = conv_layer(x,concat_axis,growth_rate,dropout_rate=None,weight_decay=1E-4)
list_features.append(x)
x = Concatenate(axis=concat_axis )(list_features)
nb_filter += growth_rate
return x,nb_filter
def Densenet(nb_classes,img_dim,depth,nb_dense_block,nb_filter,growth_rate,
dropout_rate=None,weight_decay=1E-4):
if K.image_dim_ordering() == "th":
concat_axis = 1
elif K.image_dim_ordering() == "tf":
concat_axis = -1
model_input = Input(shape=img_dim)
assert(depth-4)%3 == 0 , "Depth must be 4*N +3"
nb_layers = int(( depth-4)/ 3)
x = Conv2D(nb_filter,(3,3),padding='same',use_bias=False,
kernel_regularizer=l2(weight_decay))(model_input)
for block_id in range(nb_dense_block-1):
x,nb_filter = denseblock(x,concat_axis,nb_filter,nb_layers,growth_rate,
dropout_rate=None,weight_decay=1E-4)
x = transition_layer(x,concat_axis,nb_filter,dropout_rate=None,weight_decay=1E-4)
x = BatchNormalization(axis=concat_axis,
gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = Activation('relu' )(x)
x = GlobalAveragePooling2D(data_format=K.image_data_format() )(x)
x = Dense(nb_classes,activation='softmax',kernel_regularizer=l2(weight_decay),
bias_regularizer=l2(weight_decay))(x)
densenet = Model(inputs=[model_input], outputs=[x], name="DenseNet")
return densenet
| Digit Recognizer |
800,022 | submission = classifier.predict(validate_data )<save_to_csv> | model = Densenet(nb_classes=10,
img_dim=(28,28,1),
depth = 34,
nb_dense_block = 5,
growth_rate=12,
nb_filter=32,
dropout_rate=0.2,
weight_decay=1E-4)
model.summary()
| Digit Recognizer |
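Channel bookkeeping for the model above, assuming the standard DenseNet layout where each of a block's conv outputs is concatenated onto the running feature map:

```python
# depth=34 gives (34-4)/3 = 10 conv layers per dense block; each layer
# appends growth_rate=12 channels, so one block grows 32 initial filters
# to 32 + 10*12 = 152 concatenated channels.
depth, growth_rate, nb_filter = 34, 12, 32
nb_layers = (depth - 4) // 3
print(nb_layers, nb_filter + nb_layers * growth_rate)  # 10 152
```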
800,022 | pd.DataFrame({'id': validate_data_ids, 'type': submission} ).to_csv('submission.csv', index=False )<load_pretrained> | model_filepath = 'model.h5'
batch_size=64
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
lr_reduce = ReduceLROnPlateau(monitor='val_acc', factor=0.1, epsilon=1e-5, patience=2, verbose=1)
msave = ModelCheckpoint(model_filepath, save_best_only=True ) | Digit Recognizer |
800,022 | warnings.filterwarnings('ignore');
zipfile.ZipFile('/kaggle/input/ghouls-goblins-and-ghosts-boo/train.csv.zip' ).extractall()
zipfile.ZipFile('/kaggle/input/ghouls-goblins-and-ghosts-boo/test.csv.zip' ).extractall()
%matplotlib inline
<train_model> | model.compile(loss='categorical_crossentropy',
optimizer = Adamax() ,
metrics=['accuracy'])
model.fit(X_train ,Y_train,
batch_size = 64,
validation_data =(X_test,Y_test),
epochs = 20,
callbacks=[lr_reduce,msave,annealer],
verbose = 1 ) | Digit Recognizer |
800,022 | def N_net(train, test, target):
hidden_layer_sizes=(100,)
activation = 'relu'
solver = 'adam'
batch_size = 'auto'
alpha = 0.0001
random_state = 0
max_iter = 10000
early_stopping = True
clf = MLPClassifier(
hidden_layer_sizes=hidden_layer_sizes,
activation=activation,
solver=solver,
batch_size=batch_size,
alpha=alpha,
random_state=random_state,
max_iter=max_iter,
)
clf.fit(train, target)
SAVE_TRAINED_DATA_PATH = 'train1.learn'
joblib.dump(clf, SAVE_TRAINED_DATA_PATH)
clf1 = joblib.load(SAVE_TRAINED_DATA_PATH)
predict_no = clf1.predict(test)
predict = np.round(np.round(predict_no, decimals=1))
return predict<train_model> | results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" ) | Digit Recognizer |
800,022 | <split><EOS> | submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False ) | Digit Recognizer |
1,636,227 | <SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<train_on_grid> | import numpy as np
import pandas as pd
import seaborn as sns
from seaborn import countplot
import matplotlib.pyplot as plt
from keras import optimizers
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Activation, BatchNormalization
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix | Digit Recognizer |
1,636,227 | def SVM(train, test, target):
clf_result=svm.SVC(kernel='rbf', gamma=1/2 , C=1.0,class_weight='balanced', random_state=0)
clf_result.fit(train, target)
predict= np.array(clf_result.predict(test))
return predict<categorify> | TRAIN_PATH = '../input/train.csv'
TEST_PATH = '../input/test.csv'
SUBMISSION_NAME = 'submission.csv'
BATCH_SIZE = 64
EPOCHS = 45
LEARNING_RATE = 0.001
HEIGHT = 28
WIDTH = 28
CANAL = 1
N_CLASSES = 10 | Digit Recognizer |
1,636,227 | def LogRes(ghost,ghoul,goblin, test, target,HowDo):
vsnp = np.empty((529, 6), dtype="float64")
submission = np.empty((529), dtype="int")
ghost0 = np.zeros(len(ghost)); ghost1 = np.ones(len(ghost))
ghoul0 = np.zeros(len(ghoul)); ghoul1 = np.ones(len(ghoul))
goblin0 = np.zeros(len(goblin)); goblin1 = np.ones(len(goblin))
vs1 = ghost.append(ghoul, ignore_index=True);vs1t = np.append(ghost0,ghoul1)
vs2 = ghoul.append(goblin, ignore_index=True);vs2t = np.append(ghoul0,goblin1)
vs3 = goblin.append(ghost, ignore_index=True);vs3t = np.append(goblin0,ghost1)
if HowDo == "log":
model1 = LogisticRegression() ;model1.fit(vs1,vs1t);vsnp[:,0:2]=model1.predict_proba(test)
model2 = LogisticRegression() ;model2.fit(vs2,vs2t);vsnp[:,2:4]=model2.predict_proba(test)
model3 = LogisticRegression() ;model3.fit(vs3,vs3t);vsnp[:,4:6]=model3.predict_proba(test)
if HowDo == "net":
vsnp[:,0]=1-N_netlog(vs1, test, vs1t);vsnp[:,1]=1-vsnp[:,0]
vsnp[:,2]=1-N_netlog(vs2, test, vs2t);vsnp[:,3]=1-vsnp[:,2]
vsnp[:,4]=1-N_netlog(vs3, test, vs3t);vsnp[:,5]=1-vsnp[:,4]
for n in range(len(vsnp)) :
if np.argmax(vsnp[n,:])==0:
submission[n]=0
if np.argmax(vsnp[n,:])==1:
submission[n]=2
if np.argmax(vsnp[n,:])==2:
submission[n]=2
if np.argmax(vsnp[n,:])==3:
submission[n]=1
if np.argmax(vsnp[n,:])==4:
submission[n]=1
if np.argmax(vsnp[n,:])==5:
submission[n]=0
print(vsnp)
return submission<categorify> | train = pd.read_csv(TRAIN_PATH)
test = pd.read_csv(TEST_PATH)
labels = train['label']
train = train.drop(['label'], axis=1 ) | Digit Recognizer |
1,636,227 | def LogRes2(ghost,ghoul,goblin, test, target,HowDo):
vsnp = np.empty((529, 3), dtype="float64")
submission = np.empty((529), dtype="int")
ghost0 = np.zeros(len(ghost)); ghost1 = np.ones(len(ghost))
ghoul0 = np.zeros(len(ghoul)); ghoul1 = np.ones(len(ghoul))
goblin0 = np.zeros(len(goblin)); goblin1 = np.ones(len(goblin))
vs1 = ghost.append(ghoul, ignore_index=True)
vs1 = vs1.append(goblin, ignore_index=True)
vs1t = np.append(ghost0,ghoul1)
vs1t = np.append(vs1t,goblin1)
vs2 = ghoul.append(goblin, ignore_index=True)
vs2 = vs2.append(ghost, ignore_index=True)
vs2t = np.append(ghoul0,goblin1)
vs2t = np.append(vs2t,ghost1)
vs3 = goblin.append(ghost, ignore_index=True)
vs3 = vs3.append(ghoul, ignore_index=True)
vs3t = np.append(goblin0,ghost1)
vs3t = np.append(vs3t,ghoul1)
if HowDo == "log":
model1 = LogisticRegression() ;model1.fit(vs1,vs1t);vsnp[:,0]=model1.predict_proba(test)[:,0]
model2 = LogisticRegression() ;model2.fit(vs2,vs2t);vsnp[:,1]=model2.predict_proba(test)[:,0]
model3 = LogisticRegression() ;model3.fit(vs3,vs3t);vsnp[:,2]=model3.predict_proba(test)[:,0]
if HowDo == "net":
vsnp[:,0]=1-N_netlog(vs1, test, vs1t)
vsnp[:,1]=1-N_netlog(vs2, test, vs2t)
vsnp[:,2]=1-N_netlog(vs3, test, vs3t)
for n in range(len(vsnp)) :
if np.argmax(vsnp[n,:])==0:
submission[n]=0
if np.argmax(vsnp[n,:])==1:
submission[n]=2
if np.argmax(vsnp[n,:])==2:
submission[n]=1
print(vsnp)
return submission<create_dataframe> | labels.value_counts() | Digit Recognizer |
1,636,227 | def syuunou(vote, ID):
pred = []
for n in range(len(ID)) :
if np.argmax(vote[:,n])==0:
pred.append('Ghost')
if np.argmax(vote[:,n])==2:
pred.append('Ghoul')
if np.argmax(vote[:,n])==1:
pred.append('Goblin')
s_c= pd.DataFrame({"id": ID, "type": pred})
return s_c<compute_test_metric> | train = train.values.reshape(-1,HEIGHT,WIDTH,CANAL)
test = test.values.reshape(-1,HEIGHT,WIDTH,CANAL)
labels = labels.values | Digit Recognizer |
1,636,227 | def tohyo(predict, vote):
vote[0] += (predict==0); vote[1] += (predict==1); vote[2] += (predict==2)<load_from_csv> | labels = pd.get_dummies(labels).values | Digit Recognizer |
1,636,227 | def main_n() :
train = pd.read_csv('./train.csv')
test = pd.read_csv('./test.csv')
type_array = pd.get_dummies(train['type']); del train['type']
COLOR = pd.get_dummies(train['color']); del train['color']; del train['id']
COLOR2 = pd.get_dummies(test['color']); del test['color']; ID = test["id"]; del test['id']
vote = np.zeros((3, 529), dtype='int')
target = pd.DataFrame(type_array['Ghost']* 0 + type_array['Ghoul'] * 2 + type_array['Goblin'] * 1)
ghost=train[type_array['Ghost']==1]
ghoul=train[type_array['Ghoul']==1]
goblin=train[type_array['Goblin']==1]
tohyo(N_net(train, test, target), vote)
tohyo(lightgbm(train, test, target), vote)
tohyo(SVM(train, test, target), vote)
tohyo(LogRes2(ghost,ghoul,goblin, test, target,"log"), vote)
tohyo(LogRes2(ghost,ghoul,goblin, test, target,"net"), vote)
train = train.join(COLOR)
test = test.join(COLOR2)
ghost=train[type_array['Ghost']==1]
ghoul=train[type_array['Ghoul']==1]
goblin=train[type_array['Goblin']==1]
tohyo(LogRes2(ghost,ghoul,goblin, test, target,"net"), vote)
tohyo(N_net(train, test, target), vote)
submission = syuunou(vote, ID)
return submission<save_to_csv> | train = train / 255.0
test = test / 255.0 | Digit Recognizer |
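The `tohyo`/`syuunou` pair above implements hard majority voting: each model's class predictions (0/1/2) increment per-class counters, and the final label per sample is the argmax of the counts. The mechanics in isolation:

```python
import numpy as np

vote = np.zeros((3, 4), dtype=int)        # 3 classes, 4 samples
for pred in (np.array([0, 1, 2, 2]),      # three toy "models"
             np.array([0, 2, 2, 1]),
             np.array([1, 1, 2, 2])):
    for k in range(3):
        vote[k] += (pred == k)
print(vote.argmax(axis=0))                # [0 1 2 2]
```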
1,636,227 | submission = main_n()
submission.to_csv("submission6.csv", index=False)<choose_model_class> | x_train, x_val, y_train, y_val = train_test_split(train, labels, test_size=0.1, random_state=1) | Digit Recognizer |
1,636,227 |
<categorify> | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
horizontal_flip=False,
vertical_flip=False,
rotation_range=15,
zoom_range = 0.15,
width_shift_range=0.15,
height_shift_range=0.15)
datagen.fit(x_train ) | Digit Recognizer |
1,636,227 |
<load_from_csv> | model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5,5),padding='Same', input_shape=(HEIGHT, WIDTH, CANAL)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters=32, kernel_size=(5,5),padding='Same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3,3),padding='Same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters=64, kernel_size=(3,3),padding='Same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(N_CLASSES, activation = "softmax"))
optimizer = optimizers.adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer , loss="categorical_crossentropy", metrics=["accuracy"] ) | Digit Recognizer |
1,636,227 | train_data=pd.read_csv('/kaggle/input/ghouls-goblins-and-ghosts-boo/train.csv.zip')
train_data.head()<count_values> | print('Dataset size: %s' % train.shape[0])
print('Epochs: %s' % EPOCHS)
print('Learning rate: %s' % LEARNING_RATE)
print('Batch size: %s' % BATCH_SIZE)
print('Input dimension:(%s, %s, %s)' %(HEIGHT, WIDTH, CANAL)) | Digit Recognizer |
1,636,227 | train_data['type'].value_counts()<load_from_csv> | history = model.fit_generator(datagen.flow(x_train,y_train, batch_size=BATCH_SIZE),
epochs=EPOCHS, validation_data=(x_val, y_val),
verbose=2, steps_per_epoch=x_train.shape[0] // BATCH_SIZE ) | Digit Recognizer |
1,636,227 | test_data=pd.read_csv('/kaggle/input/ghouls-goblins-and-ghosts-boo/test.csv.zip')
test_data.head()<count_values> | predictions = model.predict_classes(test ) | Digit Recognizer |
1,636,227 | train_data['color'].value_counts()<count_values> | submission = pd.DataFrame({"ImageId": list(range(1, len(predictions) + 1)), "Label": predictions})
submission.to_csv(SUBMISSION_NAME, index=False)
submission.head(10) | Digit Recognizer |
2,158,474 | test_data['color'].value_counts()<categorify> | sns.set() | Digit Recognizer |
2,158,474 | train_data=pd.concat([train_data,pd.get_dummies(train_data['color'])],axis=1)
train_data.drop('color',axis=1,inplace=True)
train_data.head()<categorify> | train_data = pd.read_csv(".. /input/train.csv")
test_data = pd.read_csv(".. /input/test.csv")
sample_submission = pd.read_csv(".. /input/sample_submission.csv" ) | Digit Recognizer |
2,158,474 | test_data=pd.concat([test_data,pd.get_dummies(test_data['color'])],axis=1)
test_data.drop('color',axis=1,inplace=True)
test_data.head()<prepare_x_and_y> | X_train = train_data.drop("label", axis=1)
y_train = train_data[["label"]]
X_test = test_data.copy() | Digit Recognizer |
2,158,474 | X=train_data.drop(['id','type'],axis=1)
y=pd.get_dummies(train_data['type'] )<split> | X_train = X_train.astype("float32")/ 255
X_test = X_test.astype("float32")/ 255
X_train = X_train.values.reshape(( len(X_train), 28, 28, 1))
X_test = X_test.values.reshape(( len(X_test), 28, 28, 1))
y_train = pd.get_dummies(y_train, columns=["label"] ) | Digit Recognizer |
2,158,474 | X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.1,random_state=42)
print(X_train.shape,y_train.shape)
print(X_test.shape,y_test.shape )<import_modules> | model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), padding="valid", input_shape=X_train.shape[1:]))
model.add(Activation("relu"))
model.add(Conv2D(64, kernel_size=(3, 3), padding="valid"))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, kernel_size=(3, 3), padding="valid"))
model.add(Activation("relu"))
model.add(Conv2D(128, kernel_size=(3, 3), padding="valid"))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(Dropout(0.4))
model.add(Dense(10))
model.add(Activation("softmax"))
model.compile(optimizer="adadelta", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary() | Digit Recognizer |
2,158,474 | from tensorflow import keras
from keras.layers import Dense,Dropout
from keras.models import Sequential<choose_model_class> | epochs = 50
batch_size = 256
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2 ) | Digit Recognizer |
2,158,474 | model=Sequential()
model.add(Dense(100,input_shape=(X.shape[1],)))
model.add(Dense(100,activation='relu'))
model.add(Dense(100,activation='relu'))
model.add(Dense(3,activation='softmax'))
model.summary()<choose_model_class> | y_pred = np.argmax(model.predict(X_test), axis=1 ) | Digit Recognizer |
2,158,474 | model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'] )<train_model> | result = pd.Series(y_pred, name="Label" ).to_frame().reset_index().rename(columns={"index": "ImageId"})
result["ImageId"] += 1
result.head() | Digit Recognizer |
2,158,474 | <import_modules><EOS> | result.to_csv("out.csv", index=False ) | Digit Recognizer |
5,082,686 | <SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<predict_on_test> | %matplotlib inline
| Digit Recognizer |
5,082,686 | pred=model.predict(test_data.drop('id',axis=1))
pred_final=[np.argmax(i)for i in pred]
submission = pd.DataFrame({'id':test_data['id'], 'type':pred_final})
submission.head()<categorify> | train_file = ".. /input/train.csv"
test_file = ".. /input/test.csv"
output_file = "submission.csv"
raw_data = np.loadtxt(train_file, skiprows=1, dtype='int', delimiter=',')
x_train, y_train = raw_data[:, 1:], raw_data[:, 0]
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32")/255
y_train = keras.utils.to_categorical(y_train) | Digit Recognizer |
5,082,686 | submission['type'].replace(to_replace=[0,1,2],value=['Ghost','Ghoul','Goblin'],inplace=True)
submission.head()<save_to_csv> | model = keras.models.Sequential([
keras.layers.Conv2D(32, kernel_size=3, activation='relu',
input_shape=(28, 28, 1)) ,
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(32, kernel_size=3, activation='relu'),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(32, kernel_size=5, strides=2, padding='same',
activation='relu'),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(rate=0.4),
keras.layers.Conv2D(64, kernel_size=3, activation='relu'),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(64, kernel_size=3, activation='relu'),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(64, kernel_size=5, strides=2, padding='same',
activation='relu'),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(rate=0.4),
keras.layers.Flatten() ,
keras.layers.Dense(128, activation='relu'),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(rate=0.4),
keras.layers.Dense(10, activation='softmax')
])
model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=["accuracy"] ) | Digit Recognizer |
5,082,686 | submission.to_csv('.. /working/submission.csv', index=False )<load_pretrained> | def elastic_transform(image, alpha_range, sigma, random_state=None):
random_state = np.random.RandomState(random_state)
if np.isscalar(alpha_range):
alpha = alpha_range
else:
alpha = np.random.uniform(low=alpha_range[0], high=alpha_range[1])
shape = image.shape
dx = gaussian_filter(random_state.rand(*shape)* 2 - 1, sigma)* alpha
dy = gaussian_filter(random_state.rand(*shape)* 2 - 1, sigma)* alpha
x, y, z = np.meshgrid(np.arange(shape[0]),
np.arange(shape[1]),
np.arange(shape[2]), indexing='ij')
indices =(np.reshape(x + dx,(-1, 1)) ,
np.reshape(y + dy,(-1, 1)) ,
np.reshape(z,(-1, 1)))
return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape) | Digit Recognizer |
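A quick smoke test of `elastic_transform` above on a dummy image; the `alpha_range` and `sigma` values match what the `ImageDataGenerator` below passes in, and the function relies on `gaussian_filter`/`map_coordinates` from `scipy.ndimage` (imported above):

```python
import numpy as np

# Same shape in, same shape out: pixels are displaced along a smooth
# random field rather than independently.
img = np.random.rand(28, 28, 1).astype('float32')
warped = elastic_transform(img, alpha_range=[8, 10], sigma=3)
print(warped.shape)  # (28, 28, 1)
```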
5,082,686 | zf1 = zipfile.ZipFile('/kaggle/input/ghouls-goblins-and-ghosts-boo/train.csv.zip')
print(zf1.namelist())
zf2 = zipfile.ZipFile('/kaggle/input/ghouls-goblins-and-ghosts-boo/test.csv.zip')
print(zf2.namelist())
zf3 = zipfile.ZipFile('/kaggle/input/ghouls-goblins-and-ghosts-boo/sample_submission.csv.zip')
print(zf3.namelist() )<load_pretrained> | datagen = keras.preprocessing.image.ImageDataGenerator(
zoom_range=0.0,
height_shift_range=2,
width_shift_range=2,
preprocessing_function=lambda x: elastic_transform(x, alpha_range=[8, 10], sigma=3))
datagen.fit(x_train ) | Digit Recognizer |
5,082,686 | zf1.extractall()
zf2.extractall()
zf3.extractall()<load_from_csv> | batch_size = 32
epochs = 30
learning_rate_reduction = keras.callbacks.ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
epochs=epochs, verbose=2,
callbacks=[learning_rate_reduction],
steps_per_epoch=x_train.shape[0] // batch_size ) | Digit Recognizer |
5,082,686 | train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv' )<load_from_csv> | raw_data_test = np.loadtxt(test_file, skiprows=1, dtype='int', delimiter=',')
x_test = raw_data_test.reshape(-1, 28, 28, 1 ).astype("float32")/255 | Digit Recognizer |
5,082,686 | <categorify><EOS> | results = model.predict_classes(x_test)
results = pd.Series(results, name='Label')
submission = pd.concat([pd.Series(range(1, x_test.shape[0] + 1), name='ImageId'), results], axis=1)
submission.to_csv(output_file, index=False ) | Digit Recognizer |
2,075,583 | <SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<import_modules> | np.random.seed(13 ) | Digit Recognizer |
2,075,583 | from sklearn.neural_network import MLPClassifier
from sklearn import metrics
import joblib<prepare_x_and_y> | num_classes = 10
batch_size = 128
epochs = 700
img_rows, img_cols = 28, 28
input_shape =(img_rows, img_cols,1 ) | Digit Recognizer |
2,075,583 | hidden_layer_sizes=(100,)
activation = 'relu'
solver = 'adam'
batch_size = 'auto'
alpha = 0.0001
random_state = 0
max_iter = 10000
x = ['bone_length', 'rotting_flesh', 'hair_length', 'has_soul']
train_X = training[x]
y1 = ['type']
train_y1 = training[y1]
clf = MLPClassifier(
hidden_layer_sizes=hidden_layer_sizes,
activation=activation,
solver=solver,
batch_size=batch_size,
alpha=alpha,
random_state=random_state,
max_iter=max_iter,
)
clf.fit(train_X, train_y1)
SAVE_TRAINED_DATA_PATH = 'train1.learn'
joblib.dump(clf, SAVE_TRAINED_DATA_PATH)
clf1 = joblib.load(SAVE_TRAINED_DATA_PATH)
test_X1 = testing[x]
predict_y1 = clf1.predict_proba(test_X1)
predict_Y1 = clf1.predict(test_X1)
print(predict_Y1)
print("学習データの精度: {:.3f}".format(clf.score(train_X, train_y1)))
print("テスト結果の精度: {:.3f}".format(clf.score(test_X1, predict_Y1)) )<save_to_csv> | train = pd.read_csv(".. /input/train.csv")
test = pd.read_csv(".. /input/test.csv" ) | Digit Recognizer |
2,075,583 | sub = pd.read_csv('sample_submission.csv')
sub['type'] = list(predict_Y1)
sub.to_csv('sample_submission.csv', index=False )<set_options> | y_train = train["label"]
x_train = train.drop(labels = ["label"],axis = 1 ) | Digit Recognizer |
2,075,583 | warnings.filterwarnings('ignore' )<load_pretrained> | x_train /= 255
test /= 255 | Digit Recognizer |
2,075,583 | zipfile.ZipFile('/kaggle/input/ghouls-goblins-and-ghosts-boo/train.csv.zip' ).extractall()
zipfile.ZipFile('/kaggle/input/ghouls-goblins-and-ghosts-boo/test.csv.zip' ).extractall()
<load_from_csv> | x_train = x_train.values.reshape(-1,img_rows,img_cols,1 ).astype('float32')
test = test.values.reshape(-1,img_rows,img_cols,1 ).astype('float32' ) | Digit Recognizer |
2,075,583 | train = pd.read_csv('./train.csv')
test = pd.read_csv('./test.csv')
<categorify> | y_train = keras.utils.to_categorical(y_train, num_classes = num_classes ) | Digit Recognizer |
2,075,583 | train2 = pd.get_dummies(train['type'])
del train['type']
COLOR = pd.get_dummies(train['color'])
del train['color']
del train['id']
target = pd.DataFrame(train2['Ghost']* 0 + train2['Ghoul'] * 2 + train2['Goblin'] * 1)
target_GOB = pd.DataFrame(train2['Ghost']* 0 + train2['Ghoul'] * 0 + train2['Goblin'] * 1)
target = target.rename(columns={'0': 'target'})
COLOR2 = pd.get_dummies(test['color'])
del test['color']
ID = test["id"]
del test['id']<set_options> | x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size = 0.1 ) | Digit Recognizer |