kernel_id     int64           24.2k – 23.3M
prompt        string lengths  8 – 1.85M
completion    string lengths  1 – 182k
comp_name     string lengths  5 – 57
6,789,424
img_size = 768 def decode_image(filename, label=None, image_size=(img_size, img_size)) : bits = tf.io.read_file(filename) image = tf.image.decode_jpeg(bits, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.image.resize(image, image_size) if label is None: return image else: return image, label def data_augment(image, label=None, seed=2020): image = tf.image.random_flip_left_right(image, seed=seed) image = tf.image.random_flip_up_down(image, seed=seed) if label is None: return image else: return image, label<categorify>
interp = ClassificationInterpretation.from_learner(learn) losses,idxs = interp.top_losses() len(data.valid_ds)==len(losses)==len(idxs )
Digit Recognizer
6,789,424
train_dataset =( tf.data.Dataset .from_tensor_slices(( train_paths, train_labels)) .map(decode_image, num_parallel_calls=AUTO) .map(data_augment, num_parallel_calls=AUTO) .repeat() .shuffle(512) .batch(BATCH_SIZE) .prefetch(AUTO) ) valid_dataset =( tf.data.Dataset .from_tensor_slices(( valid_paths, valid_labels)) .map(decode_image, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .cache() .prefetch(AUTO) ) test_dataset =( tf.data.Dataset .from_tensor_slices(test_paths) .map(decode_image, num_parallel_calls=AUTO) .batch(BATCH_SIZE) )<init_hyperparams>
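These tf.data cells reference AUTO, BATCH_SIZE, strategy, and EPOCHS without defining them; a minimal sketch of the usual setup, with assumed values:

import tensorflow as tf

AUTO = tf.data.experimental.AUTOTUNE             # let tf.data tune parallelism
strategy = tf.distribute.get_strategy()          # or a TPUStrategy when on TPU
BATCH_SIZE = 16 * strategy.num_replicas_in_sync  # per-replica batch of 16 (assumed)
EPOCHS = 20                                      # assumed epoch count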
tmp_df = pd.read_csv(path+'sample_submission.csv') tmp_df.head()
Digit Recognizer
6,789,424
LR_START = 0.00001 LR_MAX = 0.0001 * strategy.num_replicas_in_sync LR_MIN = 0.00001 LR_RAMPUP_EPOCHS = 5 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY =.8 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) rng = [i for i in range(EPOCHS)] y = [lrfn(x)for x in rng] plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))<choose_model_class>
for i in range(28000): img = learn.data.test_ds[i][0] tmp_array[i,1] = int(learn.predict(img)[1] )
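tmp_array is filled in the loop above but never initialized in the cells shown; a plausible setup, assuming 28,000 test images and 1-based ImageIds:

import numpy as np

tmp_array = np.zeros((28000, 2), dtype=int)
tmp_array[:, 0] = np.arange(1, 28001)  # ImageId column; Label is filled in the loop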
Digit Recognizer
6,789,424
def get_model(use_model): base_model = use_model(weights='noisy-student', include_top=False, pooling='avg', input_shape=(img_size, img_size, 3)) x = base_model.output predictions = Dense(train_labels.shape[1], activation="softmax" )(x) return Model(inputs=base_model.input, outputs=predictions) with strategy.scope() : model = get_model(EfficientNetB7) model.compile(optimizer='nadam', loss='categorical_crossentropy',metrics=['categorical_accuracy']) model.summary()<train_model>
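The 'noisy-student' weights come from the standalone efficientnet package rather than tf.keras, so this cell presumably relies on imports along these lines (assumed, not shown):

from efficientnet.tfkeras import EfficientNetB7  # pip package 'efficientnet'
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model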
tmp_df = pd.DataFrame(tmp_array,columns = ['ImageId','Label']) tmp_df
Digit Recognizer
6,789,424
history = model.fit( train_dataset, steps_per_epoch=train_labels.shape[0] // BATCH_SIZE, callbacks=[lr_callback, ModelCheckpoint(filepath='pretrained_EfficientNetB7.h5', monitor='val_loss', save_best_only=True)], validation_data=valid_dataset, epochs=EPOCHS )<load_pretrained>
tmp_df.to_csv('submission.csv',index=False )
Digit Recognizer
6,789,424
<save_to_csv><EOS>
mnist_test = pd.read_csv("../input/mnist-in-csv/mnist_test.csv") mnist_train = pd.read_csv("../input/mnist-in-csv/mnist_train.csv")
Digit Recognizer
10,425,390
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<install_modules>
import numpy as np import sklearn as sk import sklearn.metrics import tensorflow as tf import matplotlib.pyplot as plt import pandas as pd import sklearn.model_selection as ms import sklearn.preprocessing as p import math
Digit Recognizer
10,425,390
!pip install scorecardpy<import_modules>
tf.version.VERSION
Digit Recognizer
10,425,390
import numpy as np import pandas as pd from scipy.special import logit import lightgbm as lgb import scorecardpy as sc<load_from_csv>
mnist = pd.read_csv('../input/digit-recognizer/train.csv')
Digit Recognizer
10,425,390
train = pd.read_csv(".. /input/santander-customer-transaction-prediction/train.csv") test = pd.read_csv(".. /input/santander-customer-transaction-prediction/test.csv") train = train.drop('ID_code', axis = 1) train.head()<drop_column>
height = 28 width = 28 channels = 1
Digit Recognizer
10,425,390
test_id = test.ID_code test = test.drop('ID_code', axis = 1) test.head()<count_missing_values>
n_outputs = 10
Digit Recognizer
10,425,390
print(f"The number of missing values in the training set is: {np.sum(np.sum(pd.isnull(train)))}") print(f"The number of missing values in the test set is: {np.sum(np.sum(pd.isnull(test)))}" )<sort_values>
mnist.loc[:3].apply(show_digit_and_print_label, axis=1)
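show_digit_and_print_label is applied row-wise but never defined in the cells shown; one plausible implementation, assuming a 'label' column followed by 784 pixel columns:

import matplotlib.pyplot as plt

def show_digit_and_print_label(row):
    # hypothetical helper: render one 28x28 digit and print its label
    plt.figure()
    plt.imshow(row.drop('label').values.reshape(28, 28), cmap='gray')
    print('label:', row['label'])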
Digit Recognizer
10,425,390
correlations = train.drop("target", axis = 1 ).corr().abs().unstack().sort_values(kind = "quicksort" ).reset_index() correlations = correlations[correlations['level_0'] != correlations['level_1']] correlations.head(10 )<compute_test_metric>
X_data = mnist.drop(columns='label') y_data = mnist['label']
Digit Recognizer
10,425,390
variables = train.drop("target", axis = 1 ).columns.values.tolist() corr_pre_res = np.zeros(len(variables)) i = 0 for var in variables: corr_pre_res[i] = np.corrcoef(train[var], train["target"])[0, 1] i += 1<create_dataframe>
y_data = tf.keras.utils.to_categorical(y_data, num_classes = n_outputs) y_data.shape
Digit Recognizer
10,425,390
corr_pre_res = abs(pd.DataFrame(corr_pre_res)) corr_pre_res.columns = ['corr_pre_res'] corr_pre_res.sort_values(by = 'corr_pre_res' )<groupby>
X_train, X_val, y_train, y_val = ms.train_test_split(X_data, y_data, test_size=0.15 )
Digit Recognizer
10,425,390
features = [x for x in train.columns if x.startswith("var")] hist_df = pd.DataFrame() for var in features: var_stats = train[var].append(test[var] ).value_counts() hist_df[var] = pd.Series(test[var] ).map(var_stats) hist_df[var] = hist_df[var] > 1 ind = hist_df.sum(axis = 1)!= 200 var_stats = {var: train[var].append(test[ind][var] ).value_counts() for var in features} pred = 0 for var in features: model = lgb.LGBMClassifier(**{'learning_rate': 0.05, 'max_bin': 165, 'max_depth': 5, 'min_child_samples': 150, 'min_child_weight': 0.1, 'min_split_gain': 0.0018, 'n_estimators': 41, 'num_leaves': 6, 'reg_alpha': 2.0, 'reg_lambda': 2.54, 'objective': 'binary', 'n_jobs': -1}) model = model.fit(np.hstack([train[var].values.reshape(-1, 1), train[var].map(var_stats[var] ).values.reshape(-1, 1)]), train["target"].values) pred += logit(model.predict_proba(np.hstack([test[var].values.reshape(-1, 1), test[var].map(var_stats[var] ).values.reshape(-1, 1)])) [:, 1]) pd.DataFrame({"ID_code": test_id, "target": pred} ).to_csv("submission.csv", index = False )<define_variables>
scaler = p.StandardScaler() X_train = scaler.fit_transform(X_train) X_train = X_train.reshape(-1, height, width, channels) X_val = scaler.transform(X_val) X_val = X_val.reshape(-1, height, width, channels )
Digit Recognizer
10,425,390
input_path = '/kaggle/input/severstal-steel-defect-detection/' base = '/kaggle/input/severstal-inference-base' requirements_dir = base + '/requirements/'<install_modules>
batch_size = 250
Digit Recognizer
10,425,390
!pip -q config set global.disable-pip-version-check true !pip -q install {requirements_dir}Keras_Applications-1.0.8-py3-none-any.whl !pip -q install {requirements_dir}efficientnet-1.1.1-py3-none-any.whl<set_options>
train_data_gen = image_gen.flow(X_train, y=y_train, batch_size=batch_size )
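image_gen is used here but created in a cell that is not shown; it is presumably a Keras ImageDataGenerator along these lines (the augmentation settings are assumptions):

import tensorflow as tf

image_gen = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=10,       # mild augmentation suitable for digits
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1)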
Digit Recognizer
10,425,390
!cp -r {base}/tpu_segmentation ./ !cp -r {base}/*.py ./ !rm -r tpu_segmentation *.py AUTO = tf.data.experimental.AUTOTUNE strategy = tf.distribute.get_strategy() start_notebook = time() print('Notebook started at: ', current_time_str()) print('Tensorflow version: ', tf.__version__)<define_variables>
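current_time_str and time_passed are defined in the *.py helpers copied above; plausible one-liner implementations (assumptions, not the actual helper code):

from time import time
from datetime import datetime

def current_time_str():
    # timestamp string for logging (format assumed)
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')

def time_passed(start):
    # elapsed seconds since `start`, formatted for the print calls below
    return '{:.1f}s'.format(time() - start)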
class CosineAnnealingLearningRateCallback(tf.keras.callbacks.Callback): def __init__(self, n_epochs, n_cycles, lrate_max, n_epochs_for_saving, verbose=0): self.epochs = n_epochs self.cycles = n_cycles self.lr_max = lrate_max self.n_epochs_for_saving = n_epochs_for_saving self.best_val_acc_per_cycle = float('-inf') def is_save_range(self, epoch, epochs_per_cycle, n_epochs_for_saving): epoch += 1 f, d = math.modf(epoch / epochs_per_cycle) next_end = epochs_per_cycle *(d +(1 if f > 0 else 0)) need_to_save = epoch >(next_end - n_epochs_for_saving) return need_to_save def cosine_annealing(self, epoch, n_epochs, n_cycles, lrate_max): epochs_per_cycle = math.floor(n_epochs/n_cycles) cos_inner =(math.pi *(epoch % epochs_per_cycle)) /(epochs_per_cycle) return lrate_max/2 *(math.cos(cos_inner)+ 1) def on_epoch_begin(self, epoch, logs=None): lr = self.cosine_annealing(epoch, self.epochs, self.cycles, self.lr_max) tf.keras.backend.set_value(self.model.optimizer.lr, lr) def on_epoch_end(self, epoch, logs={}): epochs_per_cycle = math.floor(self.epochs / self.cycles) if epoch % epochs_per_cycle == 0: self.best_val_acc_per_cycle = float('-inf') isr = self.is_save_range(epoch, epochs_per_cycle, self.n_epochs_for_saving) last_val_acc = logs['val_accuracy'] if epoch != 0 and isr and last_val_acc > self.best_val_acc_per_cycle: self.best_val_acc_per_cycle = last_val_acc filename = f'snapshot_model_{epoch // epochs_per_cycle}.h5' self.model.save(filename) print(f'saved snapshot {filename}, epoch: {epoch}, val_accuracy: {last_val_acc:.5f}') n_epochs = 300 n_cycles = n_epochs / 50 n_epochs_for_saving = 20 calrc = CosineAnnealingLearningRateCallback(n_epochs, n_cycles, 0.01, n_epochs_for_saving )
Digit Recognizer
10,425,390
IMAGE_SIZE =(256, 1600) target_size =(128, 800) input_shape =(*target_size, 3) N_CLASSES = 4<define_variables>
model = tf.keras.models.Sequential() model.add(tf.keras.layers.Conv2D(32, 3, 1, padding='same', activation='relu', input_shape=(height, width, channels))) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.Conv2D(32, 3, 1, padding='same', activation='relu', input_shape=(height, width, channels))) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.Dropout(0.20)) model.add(tf.keras.layers.Conv2D(64, 3, 1, padding='same', activation='relu')) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.Conv2D(64, 3, 1, padding='same', activation='relu')) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.Dropout(0.20)) model.add(tf.keras.layers.Conv2D(128, 3, 1, padding='same', activation='relu')) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(128, activation='relu')) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.Dropout(0.20)) model.add(tf.keras.layers.Dense(10, activation='softmax')) model.compile(optimizer='Adam', loss="categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
10,425,390
test_fnames = tf.io.gfile.glob(input_path + 'test_images/*') test_ids = [x.split('/')[-1].split('.')[0] for x in test_fnames] get_test_path = lambda x: input_path + 'test_images/' + x + '.jpg'<categorify>
model.fit(train_data_gen, batch_size=batch_size, epochs = n_epochs, validation_data =(X_val, y_val), callbacks=[calrc], verbose=2 )
Digit Recognizer
10,425,390
def normalize_and_reshape(img, target_size): img = tf.image.resize(img, target_size) img = tf.cast(img, tf.float32)/ 255.0 img = tf.reshape(img, [*target_size, 3]) return img def get_image_and_id(file_name, target_size): img = tf.io.read_file(file_name) img = tf.image.decode_jpeg(img, channels=3) img = normalize_and_reshape(img, target_size) img_id = tf.strings.split(file_name, os.path.sep)[-1] img_id = tf.strings.split(img_id, '.')[0] return img, img_id def get_test_dataset(fnames, target_size, batch_size): dataset = tf.data.Dataset.from_tensor_slices(fnames) dataset = dataset.map(lambda file_name: get_image_and_id(file_name, target_size), num_parallel_calls=AUTO) dataset = dataset.batch(batch_size=batch_size, drop_remainder=False) dataset = dataset.prefetch(AUTO) return dataset<load_pretrained>
def load_all_models(n_models): all_models = list() for i in range(n_models): filename = f'snapshot_model_{str(i)}.h5' model = tf.keras.models.load_model(filename) all_models.append(model) return all_models def ensemble_predictions(models, testX): yhats = [model.predict(testX)for model in models] yhats = np.array(yhats) summed = np.sum(yhats, axis=0) result = np.argmax(summed, axis=1) return result def evaluate_n_models(models, n_models, testX, testy): subset = models[:n_models] yhat = ensemble_predictions(subset, testX) return sk.metrics.accuracy_score(testy, yhat )
Digit Recognizer
10,425,390
df = pd.read_csv(base + '/weights_meta.csv') df1 = df[df.source == 1] df2 = df[df.source == 2] bin1 = df1[df1.type == 'bin'] bin1 = get_best_weights(bin1, 1) seg1 = df1[df1.type != 'bin'] seg1 = get_best_weights(seg1, 1) bin2 = df2[df2.type == 'bin'] seg2 = df2[df2.type != 'bin'] bin_weights = list(bin2.filename)+ list(bin1.filename) seg_weights = list(seg2.filename)+ list(seg1.filename )<load_pretrained>
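get_best_weights is not defined anywhere in the cells shown; a sketch of the assumed behavior — keep the top checkpoints of a group ranked by a validation-score column (the 'score' column name is hypothetical):

def get_best_weights(df, n):
    # hypothetical: weights_meta.csv is assumed to carry a per-checkpoint score
    return df.sort_values('score', ascending=False).head(n)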
X_pred = pd.read_csv('../input/digit-recognizer/test.csv') X_pred = scaler.transform(X_pred) X_pred = X_pred.reshape(-1, height, width, channels)
Digit Recognizer
10,425,390
<create_dataframe><EOS>
y_pred = pd.DataFrame() y_pred['ImageId'] = pd.Series(range(1,X_pred.shape[0] + 1)) y_pred['Label'] = ensemble_predictions(models, X_pred) y_pred.to_csv("submission.csv", index=False )
Digit Recognizer
9,149,413
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
Path.ls = lambda x: list(x.iterdir()) path = Path('/kaggle/input/digit-recognizer/') def get_data(path,fn='train.csv'): df = pd.read_csv(path/fn) if 'label' not in df.columns: vals = np.ones_like(df.iloc[:,0].values)*-1 df.insert(0,'label',vals) X = df.iloc[:,1:].values y = df.iloc[:,0].values return X,y class Dataset: def __init__(self,X,y): self.X, self.y = X,y def __len__(self): return len(self.X) def __getitem__(self,idx): return torch.tensor(self.X[idx],dtype=torch.float),torch.tensor(self.y[idx],dtype=torch.long) def get_dls(train_ds,test_ds,bs=64): return(DataLoader(train_ds,batch_size = bs, shuffle=True, drop_last=True), DataLoader(test_ds, batch_size = bs*2, shuffle=False)) def init_cnn(m, uniform=False): f = init.kaiming_uniform_ if uniform else init.kaiming_normal_ for l in m: if isinstance(l, nn.Sequential): f(l[0].weight, a=0.1) if l[0].bias is not None: l[0].bias.data.zero_() class Lambda(nn.Module): def __init__(self,func): super().__init__() self.func = func def forward(self,x): return self.func(x) def flatten(x): return x.view(x.shape[0],-1) def mnist_resize(x): return x.view(-1,1,28,28) def get_model(layers,pct_sparsity=0.,use_gpu=True): model = layers; if use_gpu: return model.cuda() return model def get_optimizer(model,lr=0.1): return optim.SGD(model.parameters() , lr=lr, momentum=0.9) def accuracy(preds, y): return(torch.argmax(preds, dim=1)== y ).float().mean() def average_metrics(dataloader,model,metrics=[],use_gpu=True): with torch.no_grad() : count = 0 tot_metrics = [0.for _ in metrics] for xb,yb in dataloader: if use_gpu: xb, yb = xb.cuda() , yb.cuda() bs=len(xb) for idx, metric in enumerate(metrics): tot_metrics[idx] += metric(model(xb),yb)* bs count += bs avg_metrics = list() for metric in tot_metrics: avg_metrics.append(metric/count) return avg_metrics def fit_one_cycle(epochs,sched_func,use_gpu=True): n_epochs = 0; for epoch in range(epochs): n_epochs = epoch iters = len(train_dl) model.train() ; for xb,yb in train_dl: if use_gpu: xb, yb = xb.cuda() , yb.cuda() preds = model(xb) loss = loss_func(preds,yb) loss.backward() sched_params(opt, n_epochs, epochs,sched_func) opt.step() opt.zero_grad() n_epochs += 1./iters print(f"Epoch {epoch} completed") def sched_params(opt, n_epochs, epoch,sched_func): for pg in opt.param_groups: pg['lr'] = sched_func(n_epochs/epoch) def annealer(f): def _inner(start, end): return partial(f, start, end) return _inner @annealer def sched_cos(start, end, pos): return start +(1+math.cos(math.pi*(1-pos)))*(end-start)/2 def combine_scheds(pcts, scheds): assert sum(pcts)== 1 pcts = tensor([0]+ list(pcts)) assert torch.all(pcts>=0) pcts = torch.cumsum(pcts,0) def _inner(pos): idx =(pos>=pcts ).nonzero().max() actual_pos =(pos-pcts[idx])/(pcts[idx+1]-pcts[idx]) return scheds[idx](actual_pos) return _inner def normalize(x,m,s): return(x-m)/s def normalize_to(data, train): mean, std = train.mean() , train.std() return normalize(data, mean, std), normalize(train, mean, std) def get_normalized_data() : X_train, y_train = get_data(path, 'train.csv') X_test, y_test = get_data(path,'test.csv') X_test, X_train = normalize_to(X_test, X_train) return X_train, y_train, X_test, y_test def get_stats(x): return f'mean :{x.mean() } , std : {x.std() }'; def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def get_preds(model,dl): model.eval() preds = [] for xb,yb in dl: xb,yb = xb.cuda() , yb.cuda() pred= model(xb) preds += pred.cpu().detach() return preds
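This from-scratch training cell, and the GeneralReLU/conv_layer cells that follow it, lean on imports that are not shown; a minimal set that would make them run (assumed):

import math
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from torch import nn, optim, tensor
from torch.nn import init
import torch.nn.functional as F
from torch.utils.data import DataLoader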
Digit Recognizer
9,149,413
ensemble_outputs = [] with strategy.scope() : X = L.Input(shape=input_shape) for i, w in enumerate(bin_weights): base_name = w.split('-bin')[0] model = build_classifier(base_name, n_classes = 1, input_shape=input_shape, weights = None, name_suffix='-M{}'.format(i+1)) model.load_weights('weights/' + w) model_output = model(X) ensemble_outputs.append(model_output) Y = L.Average()(ensemble_outputs) binary_ensemble = tf.keras.Model(inputs=X, outputs=Y, name='Binary_Classification_Ensemble') binary_ensemble.compile(optimizer='adam', loss='binary_crossentropy', metrics=[]) del model, ensemble_outputs, model_output<predict_on_test>
class GeneralReLU(nn.Module): def __init__(self, leak=None, sub=None, maxv=None): super().__init__() self.leak, self.sub, self.maxv = leak, sub, maxv; def forward(self, x): x = F.leaky_relu(x, self.leak)if self.leak is not None else F.relu(x) if self.sub is not None: x.sub_(self.sub); if self.maxv is not None: x.clamp_max_(self.maxv) return x
Digit Recognizer
9,149,413
start_preds = time() binary_predictions = binary_ensemble.predict(test_dataset_bin) del binary_ensemble K.clear_session() gc.collect() print('Elapsed time(binary predictions){}'.format(time_passed(start_preds)) )<choose_model_class>
def conv_layer(f_in, f_out, ks, s, p): return nn.Sequential(nn.Conv2d(f_in, f_out, kernel_size=ks, stride=s, padding=p,bias=False), nn.BatchNorm2d(f_out), GeneralReLU(sub=0.5))
Digit Recognizer
9,149,413
ensemble_outputs = [] with strategy.scope() : X = L.Input(shape=input_shape) for i, w in enumerate(seg_weights): backbone_name = w.split('-unetpp')[0] model = xnet(backbone_name, num_classes = 4, input_shape=input_shape, weights = None) model._name = '{}-M{}'.format(model.name, i+1) model.load_weights('weights/' + w) model_output = model(X) ensemble_outputs.append(model_output) Y = L.Average()(ensemble_outputs) seg_ensemble = tf.keras.Model(inputs=X, outputs=Y, name='Mask_Segmentation_Ensemble') seg_ensemble.compile(optimizer='adam', loss='binary_crossentropy', metrics=[]) del model, ensemble_outputs, model_output !rm -r weights<define_variables>
class ResBlock(nn.Module): def __init__(self, nf): super().__init__() self.nf = nf self.conv1 = conv_layer(nf,nf,3,1,1) self.conv2 = conv_layer(nf,nf,3,1,1) def forward(self, X): return X + self.conv2(self.conv1(X))
Digit Recognizer
9,149,413
THRESHOLD = 0.80 masked_indexes = np.where(binary_predictions>=THRESHOLD)[0] unmasked_indexes = np.where(binary_predictions<THRESHOLD)[0] seg_ids = list(np.array(test_ids)[masked_indexes]) no_seg_ids = list(np.array(test_ids)[unmasked_indexes]) print(len(seg_ids), len(no_seg_ids), len(seg_ids)+ len(no_seg_ids), len(test_ids))<create_dataframe>
class DenseBlock(nn.Module): def __init__(self, ni, nf): super().__init__() self.ni, self.nf = ni, nf self.conv1 = conv_layer(ni, nf,3,1,1) self.conv2 = conv_layer(nf, nf,3,1,1) def forward(self, X): return torch.cat([X,self.conv2(self.conv1(X)) ],dim=1 )
Digit Recognizer
9,149,413
fnames_seg = [get_test_path(i)for i in seg_ids] batch_size = 8 test_dataset_seg = get_test_dataset(fnames_seg, target_size=target_size, batch_size=batch_size) num_batches = tf.data.experimental.cardinality(test_dataset_seg); print('num of batches', num_batches.numpy() )<predict_on_test>
layers = nn.Sequential(Lambda(mnist_resize), conv_layer(1,8,5,1,2), nn.Dropout2d(p=0.05), ResBlock(8), nn.Dropout2d(p=0.05), nn.MaxPool2d(3,2,1), DenseBlock(8,8), nn.Dropout2d(p=0.05), nn.MaxPool2d(3,2,1), DenseBlock(16,16), nn.Dropout2d(p=0.05), nn.AdaptiveAvgPool2d(1), Lambda(flatten), nn.Linear(32,10), nn.BatchNorm1d(10) )
Digit Recognizer
9,149,413
n_batches = 1 sample_preds = seg_ensemble.predict(test_dataset_seg.take(n_batches)) examples = retrieve_examples(test_dataset_seg, batch_size*n_batches) idx = -1 mask_rgb = [(230, 184, 0),(0, 128, 0),(102, 0, 204),(204, 0, 102)]<predict_on_test>
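retrieve_examples is another copied helper; a rough sketch of the assumed behavior (grab the first n images and their ids from the dataset):

def retrieve_examples(dataset, n):
    # hypothetical reconstruction: pull one batch of n (image, id) pairs
    for img_batch, id_batch in dataset.unbatch().batch(n).take(1):
        return img_batch.numpy(), [i.decode('utf-8') for i in id_batch.numpy()]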
X_train, y_train, X_test, y_test = get_normalized_data() train_dl, valid_dl = get_dls(Dataset(X_train,y_train), Dataset(X_test,y_test)) model = get_model(layers=layers) opt = get_optimizer(model) loss_func = nn.CrossEntropyLoss() init_cnn(model )
Digit Recognizer
9,149,413
thresh_upper = [0.7,0.7,0.7,0.7] thresh_lower = [0.4,0.5,0.4,0.5] min_area = [180, 260, 200, 500] empty_mask = np.zeros(target_size, int) rles_dict = {} for img_prefix in no_seg_ids: for c in range(N_CLASSES): row_name = '{}.jpg_{}'.format(img_prefix, c+1) rles_dict[row_name] = '' start_preds = time() for item in test_dataset_seg: mask_predictions = seg_ensemble.predict(item[0]) for k, p in enumerate(mask_predictions): for ch in range(N_CLASSES): ch_probs = p[..., ch] ch_pred =(ch_probs > thresh_upper[ch]) if ch_pred.sum() < min_area[ch]: ch_pred = empty_mask.copy() else: ch_pred =(ch_probs > thresh_lower[ch]) mask_predictions[k,:,:,ch] = ch_pred img_ids = item[1] ids = [l.decode('utf-8')for l in img_ids.numpy() ] rles = [create_rles(p, IMAGE_SIZE)for p in mask_predictions] for i in range(len(ids)) : rle = rles[i] img_prefix = ids[i] for c in range(N_CLASSES): row_name = '{}.jpg_{}'.format(img_prefix, c+1) rles_dict[row_name] = rle[c].numpy().decode('utf-8') print('Elapsed time(mask predictions){}'.format(time_passed(start_preds)) )<save_to_csv>
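create_rles is imported from the copied helper scripts; the core of Kaggle-style run-length encoding for one mask channel looks roughly like this (a sketch, not the exact helper):

import numpy as np

def rle_encode(mask):
    pixels = mask.T.flatten()                  # Kaggle RLE is column-major
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]                    # turn end positions into run lengths
    return ' '.join(str(x) for x in runs)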
count_parameters(model )
Digit Recognizer
9,149,413
df = pd.DataFrame.from_dict(rles_dict, orient='index') df.reset_index(level=0, inplace=True) df.columns = ['ImageId_ClassId', 'EncodedPixels'] df.to_csv('submission.csv', index=False )<load_from_csv>
one_cycle_sched= combine_scheds([0.3,0.7], [sched_cos(1e-3,1e-1), sched_cos(0.1,1e-6)]) fit_one_cycle(30,one_cycle_sched )
Digit Recognizer
9,149,413
train_data=pd.read_csv('/kaggle/input/forest-cover-type-prediction/train.csv') train_data.head()<load_from_csv>
preds = get_preds(model,valid_dl) res = [] for t in preds: r = t.argmax().item() res.append(r )
Digit Recognizer
9,149,413
<count_values><EOS>
submission = pd.read_csv(path/'sample_submission.csv') submission['Label'] = res submission.to_csv('subs.csv',index=False )
Digit Recognizer
8,428,136
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<import_modules>
%matplotlib inline
Digit Recognizer
8,428,136
from sklearn.model_selection import train_test_split<import_modules>
class TrainDataset(Dataset): def __init__(self, file_path, transform=None): self.data = pd.read_csv(file_path) self.transform = transform def __len__(self): return len(self.data) def __getitem__(self, index): images = self.data.iloc[index, 1:].values.astype(np.uint8 ).reshape(( 28, 28, 1)) labels = self.data.iloc[index, 0] if self.transform is not None: images = self.transform(images) return images, labels transform = transforms.ToTensor() train_data = TrainDataset('../input/digit-recognizer/train.csv', transform=transform) valid_size = 0.15 num_train = len(train_data) indices = list(range(num_train)) torch.manual_seed(0) np.random.shuffle(indices) split = int(np.floor(valid_size * num_train)) train_idx, valid_idx = indices[split:], indices[:split] train_sampler = SubsetRandomSampler(train_idx) valid_sampler = SubsetRandomSampler(valid_idx) batch_size = 20 train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=train_sampler) valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler) class TestDataset(Dataset): def __init__(self, file_path, transform=None): self.data = pd.read_csv(file_path) self.transform = transform def __len__(self): return len(self.data) def __getitem__(self, index): images = self.data.iloc[index, :].values.astype(np.uint8 ).reshape(( 28, 28, 1)) if self.transform is not None: images = self.transform(images) return images test_data = TestDataset('../input/digit-recognizer/test.csv', transform=transform) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False )
Digit Recognizer
8,428,136
from sklearn.model_selection import train_test_split<prepare_x_and_y>
class Net(nn.Module): def __init__(self): super(Net, self ).__init__() self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.pool = nn.MaxPool2d(kernel_size=2, stride=2) hidden_1 = 1024 hidden_2 = 512 self.fc1 = nn.Linear(128*7*7, hidden_1) self.fc2 = nn.Linear(hidden_1, hidden_2) self.fc3 = nn.Linear(hidden_2, 10) self.dropout = nn.Dropout(p=0.5) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 128*7*7) x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = F.relu(self.fc2(x)) x = self.dropout(x) x = self.fc3(x) return x
Digit Recognizer
8,428,136
X=train_data.drop(labels=['Id','Cover_Type'],axis=1) y=train_data['Cover_Type']<split>
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = Net() criterion = nn.CrossEntropyLoss() lr = 0.001 optimizer = optim.Adam(model.parameters() , lr = lr) model.to(device )
Digit Recognizer
8,428,136
X_train,X_val,y_train,y_val=train_test_split(X,y,random_state=12 )<import_modules>
t0 = time.time() n_epochs = 50 valid_loss_min = np.Inf for epoch in range(n_epochs): train_loss = 0.0 model.train() for data, target in train_loader: data, target = data.to(device),target.to(device) optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss += loss.item() *data.size(0) valid_loss = 0.0 model.eval() with torch.no_grad() : for data, target in valid_loader: data, target = data.to(device),target.to(device) output = model(data) loss = criterion(output, target) valid_loss += loss.item() *data.size(0) train_loss = train_loss/len(train_loader.dataset) valid_loss = valid_loss/len(valid_loader.dataset) print("Epoch: {}/{}.. ".format(epoch+1, n_epochs), "Training Loss: {:.6f}.. ".format(train_loss), "Validation Loss: {:.6f}.. ".format(valid_loss)) if valid_loss <= valid_loss_min: print('Validation loss decreased({:.6f} --> {:.6f} ).Saving model...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict() , 'model.pt') valid_loss_min = valid_loss print('Training and validation executed in {:.1f} minutes.'.format(( time.time() - t0)/60))
Digit Recognizer
8,428,136
from sklearn.ensemble import RandomForestClassifier<train_model>
model.load_state_dict(torch.load('model.pt'))
Digit Recognizer
8,428,136
rfc=RandomForestClassifier(n_estimators=70) rfc.fit(X_train,y_train )<compute_test_metric>
class_correct = list(0.for i in range(10)) class_total = list(0.for i in range(10)) model.eval() with torch.no_grad() : for data, target in valid_loader: data, target = data.to(device),target.to(device) output = model(data) _, pred = torch.max(output, dim=1) correct = pred == target.view_as(pred) for i in range(len(target)) : label = target.data[i] class_correct[label] += correct[i].item() class_total[label] += 1 for i in range(10): if class_total[i] > 0: print('Validation Accuracy of %5s: %.1f%%(%2d/%2d)' %( str(i), 100 * class_correct[i] / class_total[i], np.sum(class_correct[i]), np.sum(class_total[i]))) else: print('Validation Accuracy of %5s: N/A(no training examples)' % str(i)) print(' Validation Accuracy(Overall): %.1f%%(%2d/%2d)' %( 100.* np.sum(class_correct)/ np.sum(class_total), np.sum(class_correct), np.sum(class_total)) )
Digit Recognizer
8,428,136
rfc.score(X_val,y_val )<predict_on_test>
test_preds = torch.LongTensor() model.eval() with torch.no_grad() : for data in test_loader: data = data.to(device) output = model(data) _, pred = torch.max(output, dim=1) test_preds = torch.cat(( test_preds.cpu() , pred.cpu()), dim=0) submission = pd.DataFrame({"ImageId":list(range(1, len(test_preds)+1)) , "Label":test_preds.numpy() }) submission
Digit Recognizer
8,428,136
predict=rfc.predict(test_data.drop(labels=['Id'],axis=1))<prepare_output>
submission.to_csv("my_submission.csv", index=False, header=True )
Digit Recognizer
8,428,136
Submission=pd.DataFrame(data=predict,columns=['Cover_Type']) Submission.head()<prepare_output>
submission = pd.read_csv("my_submission.csv") submission
Digit Recognizer
8,134,868
Submission['Id']=test_data['Id'] Submission.set_index('Id',inplace=True )<save_to_csv>
root = Path('../input') train_path = Path('train') rseed = 7 val_size = 0.05
Digit Recognizer
8,134,868
Submission.to_csv('Submission.csv' )<load_from_csv>
def save_imgs(path:Path, data, labels): path.mkdir(parents=True,exist_ok=True) for label in np.unique(labels): (path/str(label)).mkdir(parents=True,exist_ok=True) for i in range(len(data)) : if(len(labels)!=0): imageio.imsave(str(path/str(labels[i])/(str(i)+'.jpg')) , data[i]) else: imageio.imsave(str(path/(str(i)+'.jpg')) , data[i] )
Digit Recognizer
8,134,868
SEED = 1111 tf.random.set_seed(SEED) np.random.seed(SEED) train = pd.read_csv('../input/jane-street-market-prediction/train.csv') train = train.query('date > 85' ).reset_index(drop = True) train = train[train['weight'] != 0] train.fillna(train.mean() ,inplace=True) train['action'] =(( train['resp'].values)> 0 ).astype(int) features = [c for c in train.columns if "feature" in c] f_mean = np.mean(train[features[1:]].values,axis=0) resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4'] X_train = train.loc[:, train.columns.str.contains('feature')] y_train = np.stack([(train[c] > 0 ).astype('int')for c in resp_cols] ).T def create_mlp( num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate ): inp = tf.keras.layers.Input(shape=(num_columns,)) x = tf.keras.layers.BatchNormalization()(inp) x = tf.keras.layers.Dropout(dropout_rates[0] )(x) for i in range(len(hidden_units)) : x = tf.keras.layers.Dense(hidden_units[i] )(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation(tf.keras.activations.swish )(x) x = tf.keras.layers.Dropout(dropout_rates[i + 1] )(x) x = tf.keras.layers.Dense(num_labels )(x) out = tf.keras.layers.Activation("sigmoid" )(x) model = tf.keras.models.Model(inputs=inp, outputs=out) model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing), metrics=tf.keras.metrics.AUC(name="AUC"), ) return model batch_size = 5000 hidden_units = [150, 150, 150] dropout_rates = [0.2, 0.2, 0.2, 0.2] label_smoothing = 1e-2 learning_rate = 1e-3 clf = create_mlp( len(features), 5, hidden_units, dropout_rates, label_smoothing, learning_rate ) clf.fit(X_train, y_train, epochs=200, batch_size=5000) models = [] models.append(clf) th = 0.502 f = np.median models = models[-3:] env = janestreet.make_env() for(test_df, pred_df)in tqdm(env.iter_test()): if test_df['weight'].item() > 0: x_tt = test_df.loc[:, features].values if np.isnan(x_tt[:, 1:].sum()): x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:])+ np.isnan(x_tt[:, 1:])* f_mean pred = np.mean([model(x_tt, training = False ).numpy() for model in models],axis=0) pred = f(pred) pred_df.action = np.where(pred >= th, 1, 0 ).astype(int) else: pred_df.action = 0 env.predict(pred_df )<define_variables>
train_csv = pd.read_csv(root/'train.csv' )
Digit Recognizer
8,134,868
pca_components = 60<choose_model_class>
test_csv = pd.read_csv(root/'test.csv' )
Digit Recognizer
8,134,868
e_size = 64 fc_input = pca_components h_dims = [512,512,256,128] dropout_rate = 0.5 epochs = 200 minibatch_size = 100000 class MarketPredictor(nn.Module): def __init__(self): super(MarketPredictor, self ).__init__() self.e = nn.Embedding(2,e_size) self.deep = nn.Sequential( nn.Linear(fc_input,h_dims[0]), nn.BatchNorm1d(h_dims[0]), nn.LeakyReLU() , nn.Dropout(dropout_rate), nn.Linear(h_dims[0],h_dims[1]), nn.BatchNorm1d(h_dims[1]), nn.LeakyReLU() , nn.Dropout(dropout_rate), nn.Linear(h_dims[1],h_dims[2]), nn.BatchNorm1d(h_dims[2]), nn.LeakyReLU() , nn.Dropout(dropout_rate), nn.Linear(h_dims[2],h_dims[3]), nn.BatchNorm1d(h_dims[3]), nn.LeakyReLU() , nn.Dropout(dropout_rate), nn.Linear(h_dims[3],e_size), nn.BatchNorm1d(e_size), nn.LeakyReLU() , nn.Dropout(dropout_rate) ) self.reduce = nn.utils.weight_norm(nn.Linear(e_size,1)) self.sig = nn.Sigmoid() def forward(self,xi,xf): e_out = self.e(xi) f_out = self.deep(xf) ef_out = self.reduce(e_out+f_out) sig_out = self.sig(ef_out) return sig_out <load_pretrained>
data_X, data_y = train_csv.loc[:,'pixel0':'pixel783'], train_csv['label']
Digit Recognizer
8,134,868
epochs = 200 path = '/kaggle/input/pytorch-nn-model/marketpredictor_state_dict_'+str(epochs)+'epochs.pt' model = MarketPredictor() model.load_state_dict(torch.load(path,map_location=dev)) model.to(dev) model.eval()<load_pretrained>
train_X, val_X, train_y, val_y = train_test_split(data_X, data_y, test_size=val_size,random_state=rseed,stratify=data_y )
Digit Recognizer
8,134,868
with open('/kaggle/input/pytorch-nn-model/feature_processing.pkl', 'rb')as f: sc, pca, maxindex, fill_val = pickle.load(f )<define_variables>
def to_img_shape(data_X, data_y=[]): data_X = np.array(data_X ).reshape(-1,28,28) data_X = np.stack(( data_X,)*3, axis=-1) data_y = np.array(data_y) return data_X,data_y
Digit Recognizer
8,134,868
feature_names = ['feature_'+str(i)for i in range(1,130)] exclude = np.where([maxindex[i,1] > 100 and maxindex [i,2] > 1 for i in range(129)])[0]<split>
train_X,train_y = to_img_shape(data_X, data_y )
Digit Recognizer
8,134,868
env = janestreet.make_env() iter_test = env.iter_test()<data_type_conversions>
val_X,val_y = to_img_shape(val_X,val_y )
Digit Recognizer
8,134,868
for(test_df, sample_prediction_df)in iter_test: if test_df['weight'].item() == 0: sample_prediction_df.action = 0 else: test_df_features = test_df[feature_names].to_numpy() for i in exclude: if test_df_features[0,i] == maxindex[i,0]: test_df_features[0,i] = fill_val[i] test_df_int_features = test_df['feature_0'].to_numpy() nans = np.isnan(test_df_features) for i in range(129): if nans[0,i]: test_df_features[0,i] = fill_val[i] test_df_features_scaled = sc.transform(test_df_features) test_df_features_pca=pca.transform(test_df_features_scaled) itensor = torch.tensor(( test_df_int_features+1)//2,dtype=torch.long,device=dev) ftensor = torch.tensor(test_df_features_pca,dtype=torch.float,device=dev) s = model(itensor,ftensor ).item() sample_prediction_df.action = int(np.round(s)) env.predict(sample_prediction_df) <import_modules>
save_imgs(Path('/data/train'),train_X,train_y )
Digit Recognizer
8,134,868
import numpy as np import pandas as pd from tensorflow.keras.callbacks import TensorBoard from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder<import_modules>
save_imgs(Path('/data/valid'),val_X,val_y )
Digit Recognizer
8,134,868
from tensorflow import keras from tensorflow.keras.layers import MaxPooling1D, Dense, LeakyReLU, Conv1D from tensorflow.keras.layers import Flatten, Activation, BatchNormalization, Dropout from tensorflow.keras.losses import BinaryCrossentropy from tensorflow.keras import layers from kerastuner.tuners import RandomSearch from kerastuner.engine.hyperparameters import HyperParameters import time import pickle<import_modules>
data = ImageDataBunch.from_folder('/data/',bs=256,size=28,ds_tfms=get_transforms(do_flip=False),num_workers=0 ).normalize(imagenet_stats )
Digit Recognizer
8,134,868
import tensorflow as tf<load_from_csv>
data.show_batch(3,figsize=(6,6))
Digit Recognizer
8,134,868
%%time train = pd.read_csv('../input/jane-street-market-prediction/train.csv') train = train.query('date > 85' ).reset_index(drop = True) train = train[train['weight'] != 0] train.fillna(train.mean() ,inplace=True )<prepare_x_and_y>
learn = cnn_learner(data,models.resnet18,metrics=accuracy,path='.') learn.lr_find() learn.recorder.plot()
Digit Recognizer
8,134,868
SEED = 1111 tf.random.set_seed(SEED) np.random.seed(SEED) train['action'] =(( train['resp'].values)> 0 ).astype(int) features = [c for c in train.columns if "feature" in c] f_mean = np.mean(train[features[1:]].values,axis=0) resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4'] X = train.loc[:, train.columns.str.contains('feature')] Y = np.stack([(train[c] > 0 ).astype('int')for c in resp_cols] ).T leaky_relu_alpha =0.05 LeakyReLU(alpha=leaky_relu_alpha) <choose_model_class>
learn.fit_one_cycle(1,1e-02 )
Digit Recognizer
8,134,868
def build_model() : model = keras.models.Sequential() model.add(Conv1D(180, 2, input_shape=x_train.shape[1:])) model.add(BatchNormalization()) model.add(LeakyReLU(alpha=leaky_relu_alpha)) model.add(MaxPooling1D(pool_size=2)) model.add(Dropout(0.15)) model.add(Flatten()) model.add(Dense(180)) model.add(LeakyReLU(alpha=leaky_relu_alpha)) model.add(Dropout(0.15)) model.add(Dense(5)) model.add(Activation("sigmoid")) model.compile(optimizer=keras.optimizers.Adam(lr=1e-3), loss=BinaryCrossentropy(label_smoothing=0.095), metrics=[tf.keras.metrics.AUC(name = 'auc'), "accuracy"]) return model<choose_model_class>
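x_train is referenced by build_model and the fit call that follows but is never defined in these cells; for a Conv1D over the 130 Jane Street features it is presumably the feature matrix with a trailing channel axis (an assumption):

x_train = X.values.reshape(-1, X.shape[1], 1)  # (samples, 130, 1) for Conv1D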
learn.save('s1' )
Digit Recognizer
8,134,868
model = build_model()<train_model>
learn.load('s1');
Digit Recognizer
8,134,868
model.fit(x=x_train, y=Y, epochs=10, batch_size=1024 )<import_modules>
learn.lr_find()
Digit Recognizer
8,134,868
from tqdm import tqdm<feature_engineering>
learn.fit_one_cycle(10,max_lr=slice(1e-6,1e-5))
Digit Recognizer
8,134,868
f = np.median th = 0.5000 env = janestreet.make_env() for(test_df, pred_df)in tqdm(env.iter_test()): if test_df['weight'].item() > 0: x_tt = test_df.loc[:, features].values if np.isnan(x_tt[:, 1:].sum()): x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:])+ np.isnan(x_tt[:, 1:])* f_mean pred = np.mean([model(x_tt.reshape(-1, x_tt.shape[1], 1), training = False ).numpy() ],axis=0) pred = f(pred) pred_df.action = np.where(pred >= th, 1, 0 ).astype(int) else: pred_df.action = 0 env.predict(pred_df )<define_variables>
interp = ClassificationInterpretation.from_learner(learn )
Digit Recognizer
8,134,868
START_TIME = time.time() <data_type_conversions>
learn1 = learn.load('s1') sub_df = pd.DataFrame(columns=['ImageId','Label'] )
Digit Recognizer
8,134,868
train = pd.read_csv('../input/jane-street-market-prediction/train.csv') train = train.astype({c: np.float32 for c in train.select_dtypes(include='float64' ).columns}) train.fillna(train.median() , inplace=True) train = train.query('weight > 0' ).reset_index(drop = True) train['action'] =(train['resp'] > 0 ).astype('int') features = [c for c in train.columns if 'feature' in c] resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4'] X = train[features].values y = np.stack([(train[c] > 0 ).astype('int')for c in resp_cols] ).T f_median = np.median(train[features[1:]].values, axis=0) <prepare_x_and_y>
def get_img(data): t1 = data.reshape(28,28)/255 t1 = np.stack([t1]*3,axis=0) img = Image(FloatTensor(t1)) return img
Digit Recognizer
8,134,868
y_resps = train[resp_cols].values y_actions = np.stack([(train[c] > 0 ).astype('int')for c in resp_cols] ).T<choose_model_class>
from fastprogress import progress_bar
Digit Recognizer
8,134,868
def create_model(input_dim, output_dims, add_models=0): input_layer_0 = Input(input_dim) bn_0 = BatchNormalization()(input_layer_0) outputs_layer_0 = [] for m in range(2+add_models): x = Dropout(0.2 )(bn_0) for i in range(m+1): x = Dense(64 )(x) x = BatchNormalization()(x) x = Lambda(tf.keras.activations.swish )(x) x = Dropout(0.1 )(x) output = Dense(output_dims[3], activation='linear', name=f'level_0_output_{m}' )(x) outputs_layer_0.append(output) output_layer_0_average = Average(name='output_layer_0_average' )(outputs_layer_0) bn_1 = BatchNormalization()(output_layer_0_average) input_layer_1 = Concatenate()([bn_0] + [bn_1]) outputs_layer_1 = [] for m in range(2+add_models): x = Dropout(0.2 )(input_layer_1) for i in range(m+1): x = Dense(64 )(x) x = BatchNormalization()(x) x = Lambda(tf.keras.activations.swish )(x) x = Dropout(0.1 )(x) output = Dense(output_dims[2], activation='sigmoid', name=f'level_1_output_{m}' )(x) outputs_layer_1.append(output) output_layer_1_average = Average(name='output_layer_1_average' )(outputs_layer_1) bn_2 = BatchNormalization()(output_layer_1_average) input_layer_2 = Concatenate()([bn_1] + [bn_2]) outputs_layer_2 = [] for m in range(2+add_models): x = Dropout(0.2 )(input_layer_2) for i in range(m+1): x = Dense(64 )(x) x = BatchNormalization()(x) x = Lambda(tf.keras.activations.swish )(x) x = Dropout(0.1 )(x) output = Dense(output_dims[1], activation='linear', name=f'level_2_output_{m}' )(x) outputs_layer_2.append(output) output_layer_2_average = Average(name='output_layer_2_average' )(outputs_layer_2) bn_3 = BatchNormalization()(output_layer_2_average) input_layer_3 = Concatenate()([bn_2] + [bn_3]) outputs_layer_3 = [] for m in range(2+add_models): x = Dropout(0.2 )(input_layer_3) for i in range(m+1): x = Dense(64 )(x) x = BatchNormalization()(x) x = Lambda(tf.keras.activations.swish )(x) x = Dropout(0.1 )(x) output = Dense(output_dims[1], activation='sigmoid', name=f'level_3_output_{m}' )(x) outputs_layer_3.append(output) output_layer_3_average = Average(name='output_layer_3_average' )(outputs_layer_3) model = Model(inputs=input_layer_0, outputs=[output_layer_3_average, output_layer_2_average, output_layer_1_average, output_layer_0_average]) loss = {} loss['output_layer_3_average'] = BinaryCrossentropy(label_smoothing = 0.1) loss['output_layer_2_average'] = 'mse' loss['output_layer_1_average'] = BinaryCrossentropy(label_smoothing = 0.1) loss['output_layer_0_average'] = 'mse' loss_weights={} loss_weights['output_layer_3_average'] =.25 loss_weights['output_layer_2_average'] =.25 loss_weights['output_layer_1_average'] =.25 loss_weights['output_layer_0_average'] =.25 metrics = {} metrics['output_layer_3_average'] = tf.keras.metrics.AUC(name = 'auc') metrics['output_layer_2_average'] = 'mse' metrics['output_layer_1_average'] = tf.keras.metrics.AUC(name = 'auc') metrics['output_layer_0_average'] = 'mse' model.compile(optimizer = Adam() , loss = loss, metrics = metrics, loss_weights=loss_weights) return model <prepare_x_and_y>
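The bare layer and loss names in create_model (Input, Dense, Dropout, BatchNormalization, Lambda, Average, Concatenate, Model, Adam, BinaryCrossentropy) are imported in a cell that is not shown; a plausible import block (assumed):

import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Dropout, BatchNormalization, Lambda, Average, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import BinaryCrossentropy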
sub_df.to_csv('submission.csv',index=False )
Digit Recognizer
4,408,047
epochs = 50 batch_size = 1024 * 4 verbose = True objective = 'val_output_layer_3_average_auc' objective = 'output_layer_3_average_auc' direction = 'max' tr =(0, 400) te =(420, 500) train_indices = train[(train.date >= tr[0])&(train.date < tr[1])].index test_indices = train[(train.date >= te[0])&(train.date < te[1])].index model = create_model(input_dim=130, output_dims=(1,1,5,5), add_models=3) X_train, X_test = X[train_indices], X[test_indices] y_train =(y_actions[train_indices][:,3], y_resps[train_indices][:,3], y_actions[train_indices], y_resps[train_indices]) y_test =(y_actions[test_indices][:,3], y_resps[test_indices][:,3], y_actions[test_indices], y_resps[test_indices]) rlr = ReduceLROnPlateau(monitor = objective, factor = 0.5, patience = 4, verbose = 1, min_delta = 1e-4, mode = direction) es = EarlyStopping(objective, patience=21, restore_best_weights=True, mode=direction) h = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=verbose, callbacks = [es, rlr]) model.save_weights('./model.hdf5') metrics = model.evaluate(X_test, y_test, batch_size=batch_size) print(metrics )<train_model>
import pandas as pd import numpy as np import matplotlib.pyplot as plt from keras.utils import to_categorical import os import time from keras.models import Sequential from keras.layers import Dense,Conv2D,Flatten,Dropout,MaxPooling2D,BatchNormalization from keras.callbacks import EarlyStopping from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix import seaborn as sns from keras import optimizers from sklearn.metrics import classification_report
Digit Recognizer
4,408,047
tf.keras.utils.plot_model(model, to_file=f'model.png', show_shapes=True )<predict_on_test>
path_train='../input/train.csv' path_test="../input/test.csv" train=pd.read_csv(path_train) test=pd.read_csv(path_test) X_train=train.drop("label",axis=1 ).values Y_train=train["label"].values X_test=test.values X_train=X_train/X_train.max() X_test=X_test/X_test.max()
Digit Recognizer
4,408,047
pred = model.predict(X_test, batch_size=batch_size, verbose=True )<split>
label=[0,1,2,3,4,5,6,7,8,9] nc=10 Y_train_d=to_categorical(Y_train,10) X_train_c=X_train.reshape(-1,28,28,1) X_test_c=X_test.reshape(-1,28,28,1 )
Digit Recognizer
4,408,047
env = janestreet.make_env() iter_test = env.iter_test()<define_variables>
np.random.seed(2) m=Sequential() m.add(Conv2D(filters=128,kernel_size=4,padding="same",activation="relu",input_shape=(28,28,1))) m.add(Conv2D(filters=128,kernel_size=4,padding="same",activation="relu")) m.add(MaxPooling2D(pool_size=2,strides=2)) m.add(Dropout(0.2)) m.add(Conv2D(filters=64,kernel_size=4,padding="same",activation="relu",)) m.add(Conv2D(filters=64,kernel_size=4,padding="same",activation="relu")) m.add(MaxPooling2D(pool_size=2,strides=2)) m.add(Dropout(0.2)) m.add(Flatten()) m.add(Dense(1024,activation="relu")) m.add(Dropout(0.2)) m.add(Dense(512,activation="relu")) m.add(Dropout(0.4)) m.add(Dense(256,activation="relu")) m.add(Dropout(0.6)) m.add(Dense(128,activation="relu")) m.add(Dense(nc,activation='softmax')) m.summary()
Digit Recognizer
4,408,047
selected_models = [model]<feature_engineering>
el=EarlyStopping(monitor='val_loss',min_delta=0.001,patience=5,restore_best_weights=True) ad=optimizers.Adam(lr=0.002,beta_1=0.9,beta_2=0.999,decay=0.004) m.compile(loss="categorical_crossentropy",optimizer=ad,metrics=["accuracy"]) s=time.time() h=m.fit(X_train_c,Y_train_d,batch_size=32,validation_split=0.4,epochs=50,callbacks=[el]) e=time.time() t=e-s print("Training completed in %d minutes and %d seconds" %(t/60,t%60))
Digit Recognizer
4,408,047
start = time.time() th = 0.5 j = 0 for(test_df, pred_df)in tqdm(iter_test): if test_df['weight'].item() > 0: x_tt = test_df.loc[:, features].values if np.isnan(x_tt[:, 1:].sum()): x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:])+ np.isnan(x_tt[:, 1:])* f_median try: pred = model(x_tt, training=False)[2].numpy().flatten() pred = np.median(pred) pred_df.action = np.where(pred >= th, 1, 0 ).astype(int) except: pred_df.action = 0 else: pred_df.action = 0 env.predict(pred_df) j +=1 total = time.time() - start print(f'Expected time for 1M: { total * 1000000 /(j*60*60+1):.2} hours') print(f'Iters per second: {j/total:.1f} iter/s') print(f'Global time: {(time.time() - START_TIME)/ 60:.1f} minutes' )<load_pretrained>
acc=h.history['acc'] val_acc=h.history['val_acc'] loss=h.history['loss'] val_loss=h.history['val_loss']
Digit Recognizer
4,408,047
<import_modules><EOS>
y_test=m.predict(X_test_c) y_test = np.argmax(y_test,axis = 1) out=pd.DataFrame({"ImageId": list(range(1,len(y_test)+1)) ,"Label": y_test}) out.to_csv("Submission_cnn.csv", index=False, header=True )
Digit Recognizer
5,380,372
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_from_csv>
os.listdir('../input/digit-recognizer')
Digit Recognizer
5,380,372
train = pd.read_csv('../input/tabular-playground-series-feb-2021/train.csv') test = pd.read_csv('../input/tabular-playground-series-feb-2021/test.csv') sample_sub = pd.read_csv('../input/tabular-playground-series-feb-2021/sample_submission.csv')<drop_column>
PATH = '../input/digit-recognizer' df_train = pd.read_csv(os.path.join(PATH, 'train.csv')) train_y = df_train['label'].values train_x = df_train.drop(['label'], axis=1 ).values df_test = pd.read_csv(os.path.join(PATH, 'test.csv')) test_x = df_test.values print(train_x.shape) print(train_y.shape) print(test_x.shape )
Digit Recognizer
5,380,372
delete_columns = ['id'] train.drop(delete_columns, axis=1, inplace=True) test.drop(delete_columns, axis=1, inplace=True )<define_variables>
IMG_SIZE = 32
Digit Recognizer
5,380,372
categorical_features = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6','cat7', 'cat8', 'cat9']<categorify>
def resize(img_array): tmp = np.empty(( img_array.shape[0], IMG_SIZE, IMG_SIZE)) for i in range(len(img_array)) : img = img_array[i].reshape(28, 28 ).astype('uint8') img = cv2.resize(img,(IMG_SIZE, IMG_SIZE)) img = img.astype('float32')/255 tmp[i] = img return tmp train_x_resize = resize(train_x) test_x_resize = resize(test_x )
Digit Recognizer
5,380,372
for c in train.columns: if train[c].dtype == 'object': lbl = LabelEncoder() lbl.fit(list(train[c].values)+list(test[c].values)) train[c] = lbl.transform(train[c].values) test[c] = lbl.transform(test[c].values) display(train.head()) <prepare_x_and_y>
train_y_final = to_categorical(train_y, num_classes=10) print(train_y_final.shape )
Digit Recognizer
5,380,372
y_train = train['target'] X_train = train.drop('target', axis = 1) X_test = test<init_hyperparams>
vgg16 = VGG16(weights = 'imagenet', include_top = False, input_shape=(IMG_SIZE, IMG_SIZE, 3) ) model = Sequential() model.add(vgg16) model.add(Flatten()) model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) model.summary()
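train_x_final and test_x_final are consumed below but never built in the cells shown; since VGG16 expects 3-channel input, they are presumably the resized grayscale arrays stacked along the channel axis (an assumption), with the import also assumed:

import numpy as np
from tensorflow.keras.applications import VGG16  # assumed source of VGG16

train_x_final = np.stack([train_x_resize] * 3, axis=-1)  # (n, 32, 32, 3)
test_x_final = np.stack([test_x_resize] * 3, axis=-1)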
Digit Recognizer
5,380,372
y_preds = [] models = [] oof_train = np.zeros(len(X_train)) cv = KFold(n_splits=5, shuffle=True, random_state=0) params = { 'random_state':42, 'metric': 'rmse', 'n_jobs': -1, 'cat_feature': [x for x in range(len(categorical_features)) ], 'bagging_seed':42, 'feature_fraction_seed':42, 'learning_rate': 0.0011992715138089741, 'max_depth': 101, 'num_leaves': 86, 'reg_alpha': 7.504329214783163, 'reg_lambda': 1.5631184517427836, 'colsample_bytree': 0.22354989226986266, 'min_child_samples': 149, 'subsample_freq': 4, 'subsample': 0.5143496951794435, 'max_bin': 720, 'min_data_per_group': 55, 'cat_smooth': 78, 'cat_l2': 7 } y_preds = 0 for fold_id,(train_index, valid_index)in enumerate(cv.split(X_train, y_train)) : X_tr = X_train.loc[train_index, :] X_val = X_train.loc[valid_index, :] y_tr = y_train[train_index] y_val = y_train[valid_index] lgb_train = lgb.Dataset(X_tr, y_tr, categorical_feature=categorical_features) lgb_eval = lgb.Dataset(X_val, y_val, reference=lgb_train, categorical_feature=categorical_features) model = lgb.train(params, lgb_train, valid_sets=[lgb_train, lgb_eval], verbose_eval = 1000, num_boost_round = 30000, early_stopping_rounds=1000) oof_train[valid_index] = model.predict(X_val, num_iteration=model.best_iteration) y_pred = model.predict(X_test, num_iteration=model.best_iteration) y_preds += y_pred/5 models.append(model )<save_to_csv>
x_train, x_test, y_train, y_test = train_test_split(train_x_final, train_y_final, test_size=0.2, random_state=2019) print(x_train.shape) print(x_test.shape) print(y_train.shape) print(y_test.shape)
Digit Recognizer
5,380,372
pd.DataFrame(oof_train ).to_csv('oof_train_kfold.csv', index=False )<create_dataframe>
es = EarlyStopping(monitor='val_acc', verbose=1, patience=5) mc = ModelCheckpoint(filepath='mnist-vgg13.h5', verbose=1, monitor='val_acc') cb = [es, mc]
Digit Recognizer
5,380,372
y_preds = pd.DataFrame(y_preds )<prepare_output>
history = model.fit(x_train, y_train, epochs=100, batch_size=128, validation_data=(x_test, y_test), callbacks=cb )
Digit Recognizer
5,380,372
y_subs = y_preds<save_to_csv>
preds = model.predict(test_x_final, batch_size=128 )
Digit Recognizer
5,380,372
sample_sub['target'] = y_subs sample_sub.to_csv('submission_CV.csv', index=False )<import_modules>
results = np.argmax(preds, axis=-1) results.shape
Digit Recognizer
5,380,372
<load_from_csv><EOS>
sub = pd.read_csv(os.path.join(PATH, 'sample_submission.csv')) sub.head() df = pd.DataFrame({'ImageId': sub['ImageId'], 'Label': results}) df.to_csv('submission.csv', index=False) os.listdir('./' )
Digit Recognizer
4,143,339
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<drop_column>
%matplotlib inline
Digit Recognizer
4,143,339
train_id = df_train["id"] test_id = df_test["id"] df_train.drop("id", axis=1, inplace=True) df_test.drop("id", axis=1, inplace=True )<define_variables>
train_data=pd.read_csv("../input/train.csv") test_data=pd.read_csv('../input/test.csv')
Digit Recognizer
4,143,339
cat_features = [f"cat{i}" for i in range(9 + 1)]<categorify>
y_label=train_data['label']
Digit Recognizer
4,143,339
onehot_encoder = ce.one_hot.OneHotEncoder() onehot_encoder.fit(pd.concat([df_train[cat_features], df_test[cat_features]], axis=0)) train_ohe = onehot_encoder.transform(df_train[cat_features]) test_ohe = onehot_encoder.transform(df_test[cat_features]) train_ohe.columns = [f"OHE_{col}" for col in train_ohe] test_ohe.columns = [f"OHE_{col}" for col in test_ohe]<define_variables>
img_rows, img_cols = 28, 28 num_classes = 10
Digit Recognizer
4,143,339
numerical_features = [f"cont{i}" for i in range(13 + 1)]<concatenate>
def data_prep(raw): out_y = keras.utils.to_categorical(raw.label, num_classes) num_images = raw.shape[0] x_as_array = raw.values[:,1:] x_shaped_array = x_as_array.reshape(num_images, img_rows, img_cols, 1) out_x = x_shaped_array / 255 return out_x, out_y
Digit Recognizer
4,143,339
train_x = pd.concat([ df_train[numerical_features], train_ohe ], axis=1 )<concatenate>
train_size =len(train_data )
Digit Recognizer
4,143,339
test_x = pd.concat([ df_test[numerical_features], test_ohe ], axis=1 )<prepare_x_and_y>
x,y = data_prep(train_data )
Digit Recognizer
4,143,339
train_y = df_train["target"]<choose_model_class>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=20, zoom_range = 0.18, width_shift_range=0.15, height_shift_range=0.15, horizontal_flip=False, vertical_flip=False) datagen.fit(x )
Digit Recognizer
4,143,339
folds = KFold(n_splits=5, shuffle=True, random_state=2021 )<train_model>
X_train, X_val, Y_train, Y_val = train_test_split(x, y, test_size = 0.1, random_state=2 )
Digit Recognizer
4,143,339
class FoldsAverageLGBM: def __init__(self, folds): self.folds = folds self.models = [] def fit(self, lgb_params, train_x, train_y): oof_preds = np.zeros_like(train_y) self.train_x = train_x self.train_y = train_y.values for tr_idx, va_idx in tqdm(self.folds.split(train_x)) : tr_x, va_x = self.train_x.iloc[tr_idx], self.train_x.iloc[va_idx] tr_y, va_y = self.train_y[tr_idx], self.train_y[va_idx] lgb_train_dataset = lgb.Dataset(tr_x, tr_y) lgb_valid_dataset = lgb.Dataset(va_x, va_y) model = lgb.train(lgb_params, lgb_train_dataset, valid_sets=[lgb_valid_dataset], verbose_eval=100) self.models.append(model) oof_pred = model.predict(va_x) oof_preds[va_idx] = oof_pred self.oof_preds = oof_preds def predict(self, test_x): preds = [] for model in tqdm(self.models): pred = model.predict(test_x) preds.append(pred) preds = np.mean(preds, axis=0) return preds def get_feature_importance(self, importance_type="gain"): feature_names = self.models[0].feature_name() feature_importances_list = [model.feature_importance(importance_type)for model in self.models] out_df = pd.DataFrame() for i, name in enumerate(feature_names): out_df[name] = [v[i] for v in feature_importances_list] return out_df<init_hyperparams>
class myCallback(keras.callbacks.Callback): def on_epoch_end(self,epoch,logs={}): if(logs.get('acc')>0.997): print(" Reached 99.7% accuracy so cancelling training") self.model.stop_training=True
Digit Recognizer
4,143,339
lgb_params = { 'objective': 'regression', 'metric': 'rmse', 'verbosity': -1, 'learning_rate': 0.01, 'feature_pre_filter': False, 'lambda_l1': 6.271548464074981, 'lambda_l2': 6.442666191955093e-05, 'num_leaves': 244, 'feature_fraction': 0.4, 'bagging_fraction': 0.6165715549446614, 'bagging_freq': 6, 'min_child_samples': 100 } lgb_params["learning_rate"] = 0.001 lgb_params["early_stopping_round"] = 1000 lgb_params["num_iterations"] = 20000<statistical_test>
model = Sequential()
Digit Recognizer
4,143,339
folds_average_lgbm = FoldsAverageLGBM(folds )<train_model>
model.add(Conv2D(filters = 16, kernel_size =(3,3),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Conv2D(filters = 32, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2))) model.add(Flatten()) model.add(Dense(512, activation = "relu")) model.add(Dense(10, activation = "softmax"))
Digit Recognizer