kernel_id: int64 (values 24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
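The four columns above describe each row of the dump: a Kaggle kernel id, a prompt cell, its completion cell, and the competition name. A minimal sketch of how such a dump could be inspected with pandas, assuming a hypothetical kernels.parquet export that uses these column names (the file name and format are assumptions, not part of the source):
import pandas as pd
# Hypothetical export of this dump; file name and format are assumptions.
df = pd.read_parquet("kernels.parquet", columns=["kernel_id", "prompt", "completion", "comp_name"])
print(df.dtypes)                       # kernel_id should be int64, the rest strings
print(df["comp_name"].value_counts())  # e.g. 'Digit Recognizer'
print(df.iloc[0]["prompt"][:200])      # peek at the first prompt cell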
11,978,455
x1=0 x2=0 print("Classifying Kaggle's 'test.csv' using KNN where K=1 and MNIST 70k images.. ") for i in range(0,28000): for j in range(0,70000): if np.absolute(X_test[i,:]-mnist_image[j,:] ).sum() ==0: predictions[i]=mnist_label[j] if i%1000==0: print(" %d images classified perfectly"%(i),end="") if j<60000: x1+=1 else: x2+=1 break if x1+x2==28000: print(" 28000 images classified perfectly.") print("All 28000 images are contained in MNIST.npz Dataset.") print("%d images are in MNIST.npz train and %d images are in MNIST.npz test"%(x1,x2))<define_variables>
class MNISTDataset(Dataset): def __init__(self, feature, target=None, transform=None): self.X = feature self.y = target self.transform = transform def __len__(self): return len(self.X) def __getitem__(self, idx): if self.transform is not None: return self.transform(self.X[idx]), self.y[idx] elif self.y is None: return [self.X[idx]] return self.X[idx], self.y[idx]
Digit Recognizer
11,978,455
final_pred = predictions[0:28000]<prepare_output>
data_transform = transforms.Compose([ transforms.ToPILImage() , transforms.RandomAffine(degrees=45, translate=(0.1, 0.1), scale=(0.8, 1.2)) , transforms.ToTensor() ]) train_set = MNISTDataset(featuresTrain.float() , targetsTrain, transform=data_transform) validate_set = MNISTDataset(featuresValidation.float() , targetsValidation) test_set = MNISTDataset(Test.float() )
Digit Recognizer
11,978,455
my_submission = pd.DataFrame({'ImageId': np.arange(28000), 'Label': final_pred.squeeze().astype(int)}) my_submission.head()<feature_engineering>
train_set = torch.utils.data.TensorDataset(featuresTrain.float() , targetsTrain) validate_set = torch.utils.data.TensorDataset(featuresValidation.float() , targetsValidation) test_set = torch.utils.data.TensorDataset(Test.float() )
Digit Recognizer
11,978,455
my_submission["ImageId"]=my_submission["ImageId"]+1<save_to_csv>
train_loader = torch.utils.data.DataLoader(train_set, batch_size = batch_size, shuffle = True) validate_loader = torch.utils.data.DataLoader(validate_set, batch_size = batch_size, shuffle = False) test_loader = torch.utils.data.DataLoader(test_set, batch_size = batch_size, shuffle = False )
Digit Recognizer
11,978,455
my_submission.to_csv('best_submission.csv', index=False )<install_modules>
class CNNModel(nn.Module): def __init__(self): super(CNNModel, self ).__init__() self.cnn = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5), nn.ReLU(inplace=True), nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2), nn.Dropout(0.25), nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3), nn.ReLU(inplace=True), nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Dropout(0.25)) self.classifier = nn.Sequential(nn.Linear(576, 256), nn.Dropout(0.5), nn.Linear(256, 10)) def forward(self, x): x = self.cnn(x) x = x.view(x.size(0), -1) x = self.classifier(x) return x
Digit Recognizer
11,978,455
!pip install ../input/mlcollection/ml_collections-0.1.0-py3-none-any.whl<import_modules>
model = CNNModel() optimizer = optim.RMSprop(model.parameters() , lr=0.001, alpha=0.9) criterion = nn.CrossEntropyLoss() lr_reduction = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0.00001) if torch.cuda.is_available() : model = model.cuda() criterion = criterion.cuda()
Digit Recognizer
11,978,455
from glob import glob from sklearn.model_selection import GroupKFold, StratifiedKFold import cv2 from skimage import io import torch from torch import nn import os from datetime import datetime import time import random import torchvision from torchvision import transforms import pandas as pd import numpy as np from tqdm import tqdm from fastai.vision.all import * import matplotlib.pyplot as plt from torch.utils.data import Dataset, DataLoader from torch.utils.data.sampler import SequentialSampler, RandomSampler from torch.cuda.amp import autocast, GradScaler import sklearn import warnings import joblib from sklearn.metrics import roc_auc_score, log_loss from sklearn import metrics import pydicom import timm from scipy.ndimage.interpolation import zoom <init_hyperparams>
count = 0 loss_list = [] iteration_list = [] average_training_accuracy = [] average_validation_accuracy = [] average_training_loss = [] average_validation_loss = []
Digit Recognizer
11,978,455
CFG = { 'fold_num': 5, 'seed': 719, 'model_arch': 'resnext101_ibn_a', 'model_arch_eff':'tf_efficientnet_b4_ns', 'img_size': 512, 'epochs': 10, 'train_bs': 32, 'valid_bs': 32, 'lr': 1e-4, 'num_workers': 4, 'accum_iter': 1, 'verbose_step': 1, 'device': 'cuda' if torch.cuda.is_available() else 'cpu', 'tta': 4, } ckpt_paths = ['.. /input/tf-efficientnet-b4-ns-fold-0-8-089533/tf_efficientnet_b4_ns_fold_0_4_0.89299', '.. /input/tf-efficientnet-b4-ns-fold-0-8-089533/tf_efficientnet_b4_ns_fold_0_6_0.89065', '.. /input/tf-efficientnet-b4-ns-fold-0-8-089533/tf_efficientnet_b4_ns_fold_0_8_0.89533', '.. /input/tf-efficientnet-b4-ns-fold-0-8-089533/tf_efficientnet_b4_ns_fold_0_9_0.88995' ] ckpt_paths = [ '.. /input/resnext101-ibn-a-01-22-10-05-bit-ls015-896/resnext101_ibn_a_fold_0_6_0.88435', '.. /input/resnext101-ibn-a-01-22-10-05-bit-ls015-896/resnext101_ibn_a_fold_0_7_0.89299', '.. /input/resnext101-ibn-a-01-22-10-05-bit-ls015-896/resnext101_ibn_a_fold_0_8_0.88995', '.. /input/resnext101-ibn-a-01-22-10-05-bit-ls015-896/resnext101_ibn_a_fold_0_9_0.89603'] ckpt_weights = [1,1,1,1] ckpt_dir = '.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds' ckpt_paths = [os.path.join(ckpt_dir, sub_path)for sub_path in os.listdir(ckpt_dir)if not ckpt_dir.endswith('.txt')] ckpt_paths = ['.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds/resnext101_ibn_a_fold_0_9_0.89439', '.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds/resnext101_ibn_a_fold_0_19_0.89393', '.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds/resnext101_ibn_a_fold_1_9_0.89603', '.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds/resnext101_ibn_a_fold_1_8_0.89486', '.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds/resnext101_ibn_a_fold_2_9_0.88759', '.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds/resnext101_ibn_a_fold_2_17_0.88619', '.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds/resnext101_ibn_a_fold_3_16_0.89928', '.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds/resnext101_ibn_a_fold_3_19_0.89904', '.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds/resnext101_ibn_a_fold_4_18_0.89857', '.. /input/resnext101-ibn-a-01-26-01-27-bit-ema-5folds/resnext101_ibn_a_fold_4_9_0.89787', ] ckpt_paths = [ '.. /input/vit-convert-tta-seed719/vit__1920-fold0_checkpoint-17.pth', '.. /input/vit-convert-tta-seed719/vit__1920-fold1_checkpoint-14.pth', '.. /input/vit-convert-tta-seed719/vit__1920-fold2_checkpoint-16.pth', '.. /input/vit-convert-tta-seed719/vit__1920-fold3_checkpoint-8.pth', '.. /input/vit-convert-tta-seed719/vit__1920-fold4_checkpoint-9.pth', '.. /input/alldata-ema-resnext101-ibn-a/resnext101_ibn_a_fold_0_11_0.89603', '.. /input/alldata-ema-resnext101-ibn-a/resnext101_ibn_a_fold_1_10_0.89369', '.. /input/alldata-ema-resnext101-ibn-a/resnext101_ibn_a_fold_2_13_0.88899', '.. /input/alldata-ema-resnext101-ibn-a/resnext101_ibn_a_fold_3_12_0.89834', '.. /input/alldata-ema-resnext101-ibn-a/resnext101_ibn_a_fold_4_11_0.89741', '.. /input/alldata-ema-resnext101-32x4d-swsl-adam-02-05-17-28/resnext101_32x4d_swsl_fold_0_12_0.89369', '.. /input/alldata-ema-resnext101-32x4d-swsl-adam-02-05-17-28/resnext101_32x4d_swsl_fold_1_12_0.89416', '.. /input/alldata-ema-resnext101-32x4d-swsl-adam-02-05-17-28/resnext101_32x4d_swsl_fold_2_8_0.89203', '.. /input/alldata-ema-resnext101-32x4d-swsl-adam-02-05-17-28/resnext101_32x4d_swsl_fold_3_12_0.90115', '.. 
/input/alldata-ema-resnext101-32x4d-swsl-adam-02-05-17-28/resnext101_32x4d_swsl_fold_4_14_0.89530', ] model_names = [ 'vit', 'vit', 'vit', 'vit', 'vit', 'resnext101_ibn_a', 'resnext101_ibn_a', 'resnext101_ibn_a', 'resnext101_ibn_a', 'resnext101_ibn_a', 'resnext101', 'resnext101', 'resnext101', 'resnext101', 'resnext101', ] assert len(model_names)== len(ckpt_paths) print(len(ckpt_paths)) ckpt_weights = [1 for i in range(len(ckpt_paths)) ] print(ckpt_paths) model_wts={} model_wts['vit']=0.6582084173812914 model_wts['resnext101']=0.16946139705016672 model_wts['resnext101_ibn_a']=0.6465801475470692 <load_from_csv>
def train(epoch): global count model.train() for batch_idx,(data, target)in enumerate(train_loader): data, target = Variable(data), Variable(target) if torch.cuda.is_available() : data = data.cuda() target = target.cuda() optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() if(batch_idx + 1)% 100 == 0: loss_list.append(loss.item()) iteration_list.append(count) count += 1
Digit Recognizer
11,978,455
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv') train.head()<count_values>
def evaluate(data_loader, validate=False): model.eval() loss = 0 correct = 0 for data, target in data_loader: data, target = Variable(data), Variable(target) if torch.cuda.is_available() : data = data.cuda() target = target.cuda() output = model(data) loss += F.cross_entropy(output, target, reduction='sum' ).item() pred = output.data.max(1, keepdim=True)[1] correct += pred.eq(target.data.view_as(pred)).cpu().sum() loss /= len(data_loader.dataset) accuracy = 100.* correct / len(data_loader.dataset) if not validate: lr_reduction.step(loss) average_training_accuracy.append(accuracy) average_training_loss.append(loss) else: average_validation_accuracy.append(accuracy) average_validation_loss.append(loss) print('Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(loss, correct, len(data_loader.dataset), accuracy))
Digit Recognizer
11,978,455
train.label.value_counts()<load_from_csv>
def prediction(data_loader): model.eval() test_pred = torch.LongTensor() for i, data in enumerate(data_loader): data = Variable(data[0]) if torch.cuda.is_available() : data = data.cuda() output = model(data) pred = output.cpu().data.max(1, keepdim=True)[1] test_pred = torch.cat(( test_pred, pred), dim=0) return test_pred test_pred = prediction(test_loader )
Digit Recognizer
11,978,455
submission = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv') submission.head()<categorify>
out_df = pd.DataFrame(np.c_[np.arange(1, len(test_set)+1)[:,None], test_pred.numpy() ], columns=['ImageId', 'Label']) out_df.head()
Digit Recognizer
11,978,455
<import_modules><EOS>
out_df.to_csv('submission.csv', index=False )
Digit Recognizer
13,819,614
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
import torch import torchvision import pandas as pd import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split import copy import time
Digit Recognizer
13,819,614
class CassvaImgClassifier(nn.Module): def __init__(self, model_arch, n_class, pretrained=False): super().__init__() self.model = timm.create_model(model_arch, pretrained=pretrained) n_features = self.model.classifier.in_features self.model.classifier = nn.Linear(n_features, n_class) def forward(self, x): x = self.model(x) return x<choose_model_class>
torch.backends.cudnn.enabled
Digit Recognizer
13,819,614
class IBNResnextCassava(nn.Module): def __init__(self, arch='resnext101_ibn_a', n_class=5, pre=False): super().__init__() m = resnext101_ibn_a() self.enc = nn.Sequential(*list(m.children())[:-2]) nc = list(m.children())[-1].in_features self.head = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Flatten() , nn.Linear(2048, n_class) ) def forward(self, x): x = self.enc(x) x = self.head(x) return x<feature_engineering>
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device )
Digit Recognizer
13,819,614
class MishFunction(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return x * torch.tanh(F.softplus(x)) @staticmethod def backward(ctx, grad_output): x = ctx.saved_variables[0] sigmoid = torch.sigmoid(x) tanh_sp = torch.tanh(F.softplus(x)) return grad_output *(tanh_sp + x * sigmoid *(1 - tanh_sp * tanh_sp)) class Mish(nn.Module): def forward(self, x): return MishFunction.apply(x) def to_Mish(model): for child_name, child in model.named_children() : if isinstance(child, nn.ReLU): setattr(model, child_name, Mish()) else: to_Mish(child) def _resnext(url, block, layers, pretrained, progress, **kwargs): model = ResNet(block, layers, **kwargs) return model <define_variables>
Num_CNN = 12 n_epochs = 25 batch_size_train = 64 batch_size_test = 1000 learning_rate = 0.01 momentum = 0.5 log_interval = 100 random_seed = 121 torch.manual_seed(random_seed) kaggle_input_data = "/kaggle/input/digit-recognizer/train.csv" kaggle_input_validation = "/kaggle/input/digit-recognizer/test.csv" kaggle_model_path = "/kaggle/working/model_id_%d.pth" kaggle_optimizer_path = "/kaggle/working/optimizer_id_%d.pth" prediction_path = "/kaggle/working/predictions_essemble.csv"
Digit Recognizer
13,819,614
semi_weakly_supervised_model_urls = { 'resnet18': 'https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth', 'resnet50': 'https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth', 'resnext50_32x4d': 'https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth', 'resnext101_32x4d': 'https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth', 'resnext101_32x8d': 'https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth', 'resnext101_32x16d':'https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth', } class ResnextCassava(nn.Module): def __init__(self, arch='resnext101_32x4d', n_class=5, pretrained=False): super().__init__() m = _resnext(semi_weakly_supervised_model_urls[arch], Bottleneck, [3, 4, 23, 3], False, progress=False,groups=32,width_per_group=4) self.enc = nn.Sequential(*list(m.children())[:-2]) self.avg_pool = nn.AdaptiveAvgPool2d(1) self.max_pool = nn.AdaptiveMaxPool2d(1) self.logit = nn.Linear(2048,n_class) def forward(self, x): x = self.enc(x) x =(self.avg_pool(x)+ self.max_pool(x)).squeeze(-1 ).squeeze(-1) x = self.logit(x) return x <choose_model_class>
class Dataset(torch.utils.data.Dataset): def __init__(self, dataframe): self.labels = dataframe["label"].to_numpy() self.dataframe = dataframe.loc[:,dataframe.columns != "label"] def __len__(self): return self.dataframe.shape[0] def __getitem__(self, index): X = torch.from_numpy(self.dataframe.iloc[index].values.reshape(1,28,28)).float() y = self.labels[index] return X,y
Digit Recognizer
13,819,614
class CassvaImgClassifier(nn.Module): def __init__(self, model_arch, n_class, pretrained=False): super().__init__() self.model = create_model(model_arch, pretrained=pretrained) n_features = self.model.classifier.in_features self.model.classifier = nn.Linear(n_features, n_class) def forward(self, x): x = self.model(x) return x class IBN_CUSTOM1(nn.Module): def __init__(self, arch='resnext101_ibn_a', n_class=5, pretrained=True): super().__init__() m = se_resnet101_ibn_a(pretrained=pretrained) self.enc = nn.Sequential(*list(m.children())[:-2]) self.head = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Flatten() , nn.Linear(2048, n_class) ) def forward(self, x): x = self.enc(x) x = self.head(x) return x def get_model(model_name, device='cuda'): if 'efficientnet' in model_name: model = CassvaImgClassifier(model_name, 5, pretrained=False ).to(device) elif model_name == 'ibn50a': model = resnet50_ibn_a(pretrained=False ).to(device) model.avgpool = nn.AdaptiveAvgPool2d(1) model.fc = nn.Linear(2048, train.label.nunique()) elif model_name == 'ibnse50a': model = se_resnet50_ibn_a(pretrained=False ).to(device) model.avgpool = nn.AdaptiveAvgPool2d(1) model.fc = nn.Linear(2048, train.label.nunique()) elif model_name == 'resnext101_ibn_a': model = IBNResnextCassava(arch=model_name) elif model_name == 'se50_softlabel': model = se_resnext50_32x4d(pretrained=None ).to(device) elif model_name == 'resnext101': model = ResnextCassava(pretrained=False ).to(device) elif model_name == 'b4': b4_model = timm.create_model('tf_efficientnet_b4_ns', pretrained=False) model=timm_class(b4_model,n=5 ).to(device) elif model_name == 'vit': CONFIGS = { 'ViT-B_16': configs.get_b16_config() , 'ViT-B_32': configs.get_b32_config() , 'ViT-L_16': configs.get_l16_config() , 'ViT-L_32': configs.get_l32_config() , 'ViT-H_14': configs.get_h14_config() , 'testing': configs.get_testing() , } config = CONFIGS["ViT-B_16"] model = VisionTransformer(config, CFG['img_size'], zero_head=True, num_classes=5 ).to(device) elif model_name == 'se_resnet101_ibn_a': model = IBN_CUSTOM1(pretrained=False ).to(device) model = model.to(device) return model <install_modules>
df_input = pd.read_csv(kaggle_input_data) df_validation = pd.read_csv(kaggle_input_validation) df_train, df_test = train_test_split(df_input, test_size = 0.01) training_set = Dataset(df_train) training_generator = torch.utils.data.DataLoader(training_set, batch_size = batch_size_train, shuffle = True) test_set = Dataset(df_test) test_generator = torch.utils.data.DataLoader(test_set, batch_size = batch_size_test, shuffle = True) kaggle_validation_set = torch.from_numpy(df_validation.to_numpy() ).view( df_validation.shape[0],-1,28,28 ).float()
Digit Recognizer
13,819,614
!pip install ../input/mlcollection/ml_collections-0.1.0-py3-none-any.whl<feature_engineering>
import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler
Digit Recognizer
13,819,614
class AdaptiveConcatPool2d(nn.Module): "Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`" def __init__(self, size=None): super().__init__() self.size = size or 1 self.ap = nn.AdaptiveAvgPool2d(self.size) self.mp = nn.AdaptiveMaxPool2d(self.size) def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1 )<init_hyperparams>
class Net(nn.Module): def __init__(self): super(Net, self ).__init__() self.conv1 = nn.Conv2d(1, 32, kernel_size=3) self.batchnorm1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 32, kernel_size=3) self.batchnorm2 = nn.BatchNorm2d(32) self.conv3 = nn.Conv2d(32, 32, kernel_size=2, stride = 2) self.batchnorm3 = nn.BatchNorm2d(32) self.conv4 = nn.Conv2d(32, 64, kernel_size=5) self.batchnorm4 = nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(64, 64, kernel_size=2, stride = 2) self.batchnorm5 = nn.BatchNorm2d(64) self.conv5_drop = nn.Dropout2d() self.fc1 = nn.Linear(1024, 128) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = self.batchnorm1(F.relu(self.conv1(x))) x = self.batchnorm2(F.relu(self.conv2(x))) x = self.batchnorm3(F.relu(self.conv3(x))) x = self.batchnorm4(F.relu(self.conv4(x))) x = self.batchnorm5(F.relu(self.conv5(x))) x = self.conv5_drop(x) x = x.view(-1, 1024) x = F.relu(self.fc1(x)) x = F.log_softmax(self.fc2(x), dim=1) return x
Digit Recognizer
13,819,614
if __name__ == '__main__': VALID = False test_num = len(os.listdir('.. /input/cassava-leaf-disease-classification/test_images')) print('test_num:', test_num) seed_everything(CFG['seed']) folds = StratifiedKFold(n_splits=CFG['fold_num'], shuffle=True, random_state=CFG['seed'] ).split(np.arange(train.shape[0]), train.label.values) for fold,(trn_idx, val_idx)in enumerate(folds): if fold > 0: break print('Inference fold {} started'.format(fold)) valid_ = train.loc[val_idx,:].reset_index(drop=True) valid_ds = CassavaDataset(valid_, '.. /input/cassava-leaf-disease-classification/train_images/', transforms=get_inference_transforms() , output_label=False) test = pd.DataFrame() test['image_id'] = list(os.listdir('.. /input/cassava-leaf-disease-classification/test_images/')) test_ds = CassavaDataset(test, '.. /input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms() , output_label=False) val_loader = torch.utils.data.DataLoader( valid_ds, batch_size=CFG['valid_bs'], num_workers=CFG['num_workers'], shuffle=False, pin_memory=False, ) tst_loader = torch.utils.data.DataLoader( test_ds, batch_size=CFG['valid_bs'], num_workers=CFG['num_workers'], shuffle=False, pin_memory=False, ) device = torch.device(CFG['device']) val_preds = [] tst_preds = [] for i in range(len(ckpt_paths)) : ckpt_path = ckpt_paths[i] model = get_model(model_names[i], device) if model_names[i] == 'se_resnet101_ibn_a': model.load_state_dict(torch.load(ckpt_path)['state_dict']) else: model.load_state_dict(torch.load(ckpt_path,map_location=device)) with torch.no_grad() : for _ in range(CFG['tta']): if VALID: if test_num == 1: val_preds += [ckpt_weights[i]/sum(ckpt_weights)/CFG['tta']*inference_one_epoch(model, val_loader, device)] tst_preds += [ inference_one_epoch(model, tst_loader, device)*model_wts[model_names[i]]] del model torch.cuda.empty_cache() if VALID: if test_num == 1: val_preds = np.mean(val_preds, axis=0) tst_preds = np.mean(tst_preds, axis=0)*3 if VALID: if test_num == 1: print('fold {} validation loss = {:.5f}'.format(fold, log_loss(valid_.label.values, val_preds))) print('fold {} validation accuracy = {:.5f}'.format(fold,(valid_.label.values==np.argmax(val_preds, axis=1)).mean())) cm = confusion_matrix(valid_.label.values, np.argmax(val_preds, axis=1), labels=[0, 1, 2, 3, 4]) print('cm') print(cm) sns.set() f, ax=plt.subplots() sns.heatmap(cm,annot=True,ax=ax) ax.set_title('confusion matrix') ax.set_xlabel('predict') ax.set_ylabel('true') <feature_engineering>
train_losses = [] train_counter = [] test_losses = [] test_counter = [i*len(training_generator.dataset)for i in range(n_epochs + 1)] log_interval = len(training_generator) network_list = [] prediction_tensor = torch.zeros(kaggle_validation_set.shape[0],10 )
Digit Recognizer
13,819,614
test['label'] = np.argmax(tst_preds, axis=1) test.head()<save_to_csv>
def train(epoch, network, scheduler, network_id, device): start_time = time.time() network.train() for batch_idx,(data, target)in enumerate(training_generator): data = data.to(device) target = target.to(device) optimizer.zero_grad() output = network(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() print('CNN : {},\t Train Epoch: {} \tLoss: {:.6f},\t runtime: {:.2f}'.format( network_id, epoch, loss.item() , time.time() -start_time)) train_losses.append(loss.item()) train_counter.append(epoch) torch.save(network.state_dict() , kaggle_model_path%network_id) torch.save(optimizer.state_dict() ,kaggle_optimizer_path%network_id) scheduler.step() def test(network): network.eval() test_loss = 0 correct = 0 with torch.no_grad() : for data, target in test_generator: data = data.to(device) target = target.to(device) output = network(data) test_loss += F.nll_loss(output, target, reduction="sum" ).item() pred = output.data.max(1, keepdim=True)[1] correct += pred.eq(target.data.view_as(pred)).sum() test_loss /= len(test_generator.dataset) test_losses.append(test_loss) print(' Test set: Avg.loss: {:.4f}, Accuracy: {}/{}({:.5f}%) '.format( test_loss, correct, len(test_generator.dataset), 100.* correct / len(test_generator.dataset))) def validation(network, validation_tensor, prediction_tensor): with torch.no_grad() : output = network(validation_tensor) prediction_tensor = prediction_tensor + output return prediction_tensor def initialise_network(device): network = Net().to(device) optimizer = optim.SGD(network.parameters() , lr=learning_rate, momentum = momentum) exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.95) return network, optimizer, exp_lr_scheduler
Digit Recognizer
13,819,614
test.to_csv('submission.csv', index=False )<install_modules>
validation_tensor = kaggle_validation_set.to(device) prediction_tensor = prediction_tensor.to(device) for id_net in range(1, Num_CNN+1): network, optimizer, scheduler = initialise_network(device) for epoch in range(1, n_epochs+1): train(epoch, network, scheduler, id_net, device) test(network) prediction_tensor = validation(network, validation_tensor, prediction_tensor) network_list.append(copy.deepcopy(network))
Digit Recognizer
13,819,614
!pip install -q '/kaggle/input/birdcall-identification-submission-custom/Keras_Applications-1.0.8-py3-none-any.whl' !pip install -q '/kaggle/input/birdcall-identification-submission-custom/efficientnet-1.1.0-py3-none-any.whl'<import_modules>
output_df = prediction_tensor.max(1)[1].to("cpu") output_df = pd.DataFrame(output_df.numpy() , columns = ["Label"]) output_df.index.name = "ImageId" output_df.index = output_df.index + 1
Digit Recognizer
13,819,614
import numpy as np import pandas as pd import tensorflow as tf import efficientnet.tfkeras as efn import matplotlib.pyplot as plt from tqdm.notebook import tqdm<define_search_space>
output_df.to_csv(prediction_path )
Digit Recognizer
11,437,677
IMG_HEIGHT = 600 IMG_WIDTH = 800 IMG_SIZE = 600 IMG_TARGET_SIZE = 512 N_CHANNELS = 3 N_LABELS = 5 N_FOLDS = 5 BATCH_SIZE = 16 AUTO = tf.data.experimental.AUTOTUNE IMAGENET_MEAN = tf.constant([0.485, 0.456, 0.406], dtype=tf.float32) IMAGENET_STD = tf.constant([0.229, 0.224, 0.225], dtype=tf.float32 )<choose_model_class>
df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') df2 = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') print(df.shape) print(df2.shape )
Digit Recognizer
11,437,677
def get_model(fold): tf.keras.backend.clear_session() net = efn.EfficientNetB4( include_top=False, weights=None, input_shape=(IMG_TARGET_SIZE, IMG_TARGET_SIZE, N_CHANNELS), ) for layer in reversed(net.layers): if isinstance(layer, tf.keras.layers.BatchNormalization): layer.trainable = False else: layer.trainable = True model = tf.keras.Sequential([ net, tf.keras.layers.Dropout(0.45), tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dropout(0.45), tf.keras.layers.Dense(N_LABELS, activation='softmax', dtype=tf.float32), ]) model.load_weights(f'/kaggle/input/cassava-leaf-disease-prediction/model_fold_{fold}_weights.h5') return model<normalization>
from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() x = scaler.fit_transform(x) x
Digit Recognizer
11,437,677
@tf.function def decode_tfrecord_test(file_path): image = tf.io.read_file(file_path) image = tf.io.decode_jpeg(image) image = tf.reshape(image, [IMG_HEIGHT, IMG_WIDTH, N_CHANNELS]) image = tf.cast(image, tf.float32) image_id = tf.strings.split(file_path, '/')[-1] return image, image_id<define_variables>
x1 = np.array(df2) x1 = scaler.fit_transform(x1) x1 = x1.reshape(( 28000,28,28,1)) x1.shape
Digit Recognizer
11,437,677
def get_test_dataset() : ignore_order = tf.data.Options() ignore_order.experimental_deterministic = False test_dataset = tf.data.Dataset.list_files('/kaggle/input/cassava-leaf-disease-classification/test_images/*.jpg') test_dataset = test_dataset.with_options(ignore_order) test_dataset = test_dataset.map(decode_tfrecord_test, num_parallel_calls=AUTO) test_dataset = test_dataset.batch(BATCH_SIZE) test_dataset = test_dataset.prefetch(AUTO) return test_dataset<categorify>
from sklearn.preprocessing import OneHotEncoder y = np.array(y) enc = OneHotEncoder(sparse=False) y = y.reshape(( -1,1)) y = enc.fit_transform(y) y.shape
Digit Recognizer
11,437,677
def show_first_test_batch() : imgs, imgs_ids = next(iter(get_test_dataset())) img = imgs[0].numpy().astype(np.float32) print(f'imgs.shape: {imgs.shape}, imgs.dtype: {imgs.dtype}, imgs_ids.shape: {imgs_ids.shape}, imgs_ids.dtype: {imgs_ids.dtype}') print('img mean: {:.3f}, img std {:.3f}, img min: {:.3f}, img max: {:.3f}'.format(img.mean() , img.std() , img.min() , img.max())) print(f'imgs_id: {imgs_ids[0]}') img += abs(img.min()) img /= img.max() plt.imshow(img) plt.show() show_first_test_batch()<predict_on_test>
from sklearn.model_selection import train_test_split as tts x_train,x_test,y_train,y_test = tts(x,y,test_size = 0.2, random_state=42) print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape )
Digit Recognizer
11,437,677
submission = pd.DataFrame(columns=['image_id', 'label']) preds_dict = dict() for fold in range(N_FOLDS): model = get_model(fold) for idx,(imgs, image_ids)in tqdm(enumerate(get_test_dataset())) : for img, image_id in zip(imgs, image_ids.numpy().astype(str)) : pred = predict_tta(model, img) if image_id in preds_dict: preds_dict[image_id] += pred else: preds_dict[image_id] = pred for idx,(image_id, preds)in enumerate(preds_dict.items()): if idx == 0: print(f'image {image_id} predictions:{preds}') label = np.argmax(preds) submission = submission.append({ 'image_id': image_id, 'label': label }, ignore_index=True) submission.to_csv('./submission.csv', index=False )<import_modules>
import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import matplotlib.pyplot as plt from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.optimizers import RMSprop from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ReduceLROnPlateau
Digit Recognizer
11,437,677
import numpy as np import pandas as pd import os <install_modules>
model = keras.Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax")) model.summary()
Digit Recognizer
11,437,677
!pip install timm --no-index --find-links=file:///kaggle/input/timm-package/<install_modules>
callbacks = [ keras.callbacks.EarlyStopping( monitor='val_loss', min_delta=1e-5, patience=25, verbose=1) ]
Digit Recognizer
11,437,677
!pip install albumentations --no-index --find-links=file:///kaggle/input/albumentationspackage/<import_modules>
predictions=model.predict(x1) pre=predictions.argmax(axis=-1 )
Digit Recognizer
11,437,677
import sys import torch import torch.nn.functional as F import torch.nn as nn from torch.nn import Parameter import os import cv2 import timm<import_modules>
submission = pd.Series(pre,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),submission],axis = 1) submission.to_csv("final_submission_v1.csv",index=False) submission.head()
Digit Recognizer
11,232,489
import albumentations as A<normalization>
from sklearn.model_selection import train_test_split import tensorflow as tf from tensorflow.keras import layers, Sequential, optimizers from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.utils import to_categorical import matplotlib.pyplot as plt
Digit Recognizer
11,232,489
def gem(x, p=3, eps=1e-5): return F.avg_pool2d(x.clamp(min=eps ).pow(p),(x.size(-2), x.size(-1)) ).pow(1./p) class GeM(nn.Module): def __init__(self, p=3, eps=1e-5): super(GeM, self ).__init__() self.p = Parameter(torch.ones(1)* p) self.eps = eps def forward(self, x): return gem(x, p=self.p, eps=self.eps) def __repr__(self): return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.p.data.tolist() [0])+ ', ' + 'eps=' + str( self.eps)+ ')'<choose_model_class>
print("GPUs Available: ", tf.config.experimental.list_physical_devices('GPU'))
Digit Recognizer
11,232,489
class Net(nn.Module): def __init__(self, num_classes=5): super().__init__() self.model = timm.create_model('seresnext50_32x4d', pretrained=False) self._avg_pooling = nn.AdaptiveAvgPool2d(1) self.dropout=nn.Dropout(0.5) self._fc = nn.Linear(2048 , num_classes, bias=True) def forward(self, inputs): input_iid = inputs input_iid=input_iid/255. bs = input_iid.size(0) x = self.model.forward_features(input_iid) fm = self._avg_pooling(x) fm = fm.view(bs, -1) feature=self.dropout(fm) x = self._fc(feature) return x<create_dataframe>
raw_csv = "/kaggle/input/digit-recognizer/train.csv" test_csv = "/kaggle/input/digit-recognizer/test.csv"
Digit Recognizer
11,232,489
class DatasetTest() : def __init__(self, test_data_dir): self.ds = self.get_list(test_data_dir) self.root_dir = test_data_dir self.val_trans=A.Compose([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5), A.ColorJitter(brightness=0.1, contrast=0.2, saturation=0.2, hue=0.00, always_apply=False, p=1.0), A.RandomCrop(height= 600, width = 600,always_apply=True, p=1.0)] ) def get_list(self, dir): pic_list = os.listdir(dir) return pic_list def __len__(self): return len(self.ds) def preprocess_func(self, image): image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image1=self.val_trans(image=image)['image'] image2=self.val_trans(image=image)['image'] image3=self.val_trans(image=image)['image'] image4=self.val_trans(image=image)['image'] image5=self.val_trans(image=image)['image'] image6=self.val_trans(image=image)['image'] image7=self.val_trans(image=image)['image'] image8=self.val_trans(image=image)['image'] image_batch = np.stack([image1, image2, image3, image4, image5, image6, image7, image8 ]) image_batch = np.transpose(image_batch, axes=[0, 3, 1, 2]) return image, image_batch def __getitem__(self, item): fname = self.ds[item] image_path = os.path.join(self.root_dir, fname) image = cv2.imread(image_path, -1) image,float_image = self.preprocess_func(image) return fname, image,float_image<predict_on_test>
raw_df = pd.read_csv(raw_csv) test_df = pd.read_csv(test_csv )
Digit Recognizer
11,232,489
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") kaggle_root = '/kaggle/input' model_dir = os.path.join(kaggle_root, 'cassva-models-se50-640') weights = [os.path.join(model_dir, f)for f in os.listdir(model_dir)] test_datadir= os.path.join(kaggle_root, 'cassava-leaf-disease-classification/test_images') dataiter=DatasetTest(test_datadir) def predict_with_model(model,weights): merge_res_dict = {} for j, weight in enumerate(weights): model.load_state_dict(torch.load(weight, map_location=device), strict=False) model.eval() cur_result=pd.DataFrame(columns=['image_id','label']) len_data = len(dataiter) image_ids=[] precictions=[] for i in range(len(dataiter)) : print('weight {}: data {}/{}'.format(j, i+1, len_data)) fname,_,float_image=dataiter.__getitem__(i) input=torch.from_numpy(float_image ).to(device ).float() with torch.no_grad() : output=model(input) output=torch.nn.functional.softmax(output,dim=-1) output=output.cpu().numpy() output=np.mean(output,axis=0) image_ids.append(fname) label=np.argmax(output) precictions.append(label) if fname not in merge_res_dict: merge_res_dict[fname] = output else: merge_res_dict[fname] += output merge_result = pd.DataFrame(columns=['image_id','label']) precictions = [] for fname in merge_res_dict: label = np.argmax(merge_res_dict[fname]) precictions.append(label) merge_result['image_id'] = image_ids merge_result['label'] = precictions merge_result.to_csv('submission.csv',index=False) model=Net().to(device) predict_with_model(model,weights )<define_variables>
def get_image_and_label(data_frame): IMGs = data_frame.drop(["label"], axis=1 ).values if 'label' in data_frame.columns else data_frame.values IMGs = np.array([image.reshape(( 28, 28)) for image in IMGs]) IMGs = np.expand_dims(IMGs, axis=3) labels = data_frame['label'].values if 'label' in data_frame.columns else None return IMGs, labels
Digit Recognizer
11,232,489
package_path = '../input/pytorch-image-models/pytorch-image-models-master' sys.path.append(package_path)<set_options>
raw_IMGs, raw_labels = get_image_and_label(raw_df) test_IMGs, _ = get_image_and_label(test_df )
Digit Recognizer
11,232,489
warnings.filterwarnings("ignore") <init_hyperparams>
classes = len(set(raw_labels)) classes
Digit Recognizer
11,232,489
CFG = { 'fold_num': 5, 'seed': 719, 'model_arch': 'tf_efficientnet_b4_ns', 'img_size': 512, 'epochs': 10, 'train_bs': 32, 'valid_bs': 32, 'lr': 1e-4, 'num_workers': 4, 'accum_iter': 1, 'verbose_step': 1, 'device': 'cuda:0', 'tta': 3, 'used_epochs': [6,7,8,9], 'weights': [1,1,1,1] }<load_from_csv>
raw_labels = to_categorical(raw_labels, num_classes=classes) raw_labels
Digit Recognizer
11,232,489
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv') train.head(10)<count_values>
train_IMGs, validation_IMGs, train_labels, validation_labels = train_test_split(raw_IMGs, raw_labels, test_size=0.1, random_state=42)
Digit Recognizer
11,232,489
train.label.value_counts()<load_from_csv>
model = Sequential([ layers.Conv2D(32,(3,3), activation="relu", input_shape=(28,28,1)) , layers.BatchNormalization() , layers.MaxPooling2D(( 2,2)) , layers.Conv2D(64,(3,3), activation="relu"), layers.BatchNormalization() , layers.MaxPooling2D(( 2,2)) , layers.Conv2D(128,(3,3), activation="relu"), layers.BatchNormalization() , layers.Flatten() , layers.Dense(1024, activation="relu"), layers.Dropout(0.2), layers.Dense(256, activation="relu"), layers.Dropout(0.2), layers.Dense(64, activation="relu"), layers.Dropout(0.2), layers.Dense(32, activation="relu"), layers.Dropout(0.2), layers.Dense(16, activation="relu"), layers.Dropout(0.2), layers.Dense(int(classes), activation="softmax") ] )
Digit Recognizer
11,232,489
submission = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv') submission.head()<set_options>
model.compile(loss="categorical_crossentropy", optimizer=optimizers.Adam(learning_rate=1e-4), metrics=['accuracy'] )
Digit Recognizer
11,232,489
def seeder(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = True def get_img(path): im_bgr = cv2.imread(path) im_rgb = im_bgr[:, :, ::-1] return im_rgb <load_pretrained>
train_datagen = ImageDataGenerator( rescale=1/255, rotation_range=20, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1, shear_range=0.1 ) validation_datagen = ImageDataGenerator(rescale=1/255) test_datagen = ImageDataGenerator(rescale=1/255 )
Digit Recognizer
11,232,489
img = get_img('../input/cassava-leaf-disease-classification/train_images/1000015157.jpg') plt.figure(figsize=(15,15)) plt.imshow(img) plt.show()<categorify>
train_generator = train_datagen.flow(train_IMGs, train_labels, batch_size=32) validation_generator = validation_datagen.flow(validation_IMGs, validation_labels, batch_size=32) test_generator = test_datagen.flow(test_IMGs, batch_size=32, shuffle=False)
Digit Recognizer
11,232,489
class CassavaDataset(Dataset): def __init__(self,df,data_root,transforms=None,output_label=True): super(CassavaDataset, self ).__init__() self.df=df.reset_index().copy() self.data_root=data_root self.transforms=transforms self.output_label=output_label def __len__(self): return self.df.shape[0] def __getitem__(self,index:int): if self.output_label == True: label=self.df.iloc[index]["label"] path = "{}/{}".format(self.data_root, self.df.iloc[index]['image_id']) img=get_img(path) if self.transforms: img=self.transforms(image=img)["image"] if self.output_label == True: return img,torch.tensor(label,dtype=torch.double) else: return img <categorify>
history = model.fit_generator(train_generator, epochs=100, validation_data=validation_generator, verbose=1 )
Digit Recognizer
11,232,489
HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90, Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop, IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, ShiftScaleRotate, CenterCrop, Resize ) def get_train_transforms() : return Compose([ RandomResizedCrop(CFG['img_size'], CFG['img_size']), Transpose(p=0.5), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), ShiftScaleRotate(p=0.5), HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5), RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0), CoarseDropout(p=0.5), Cutout(p=0.5), ToTensorV2(p=1.0), ], p=1.) def get_valid_transforms() : return Compose([ CenterCrop(CFG['img_size'], CFG['img_size'], p=1.) , Resize(CFG['img_size'], CFG['img_size']), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0), ToTensorV2(p=1.0), ], p=1.) def get_inference_transforms() : return Compose([ RandomResizedCrop(CFG['img_size'], CFG['img_size']), Transpose(p=0.5), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5), RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0), ToTensorV2(p=1.0), ], p=1.)<choose_model_class>
model.evaluate(validation_IMGs, validation_labels )
Digit Recognizer
11,232,489
class CassvaImgClassifier(nn.Module): def __init__(self, model_arch, n_class, pretrained=False): super().__init__() self.model = timm.create_model(model_arch, pretrained=pretrained) n_features = self.model.classifier.in_features self.model.classifier = nn.Linear(n_features, n_class) def forward(self, x): x = self.model(x) return x<feature_engineering>
accuracy = history.history["accuracy"] val_accuracy = history.history["val_accuracy"] loss = history.history["loss"] val_loss = history.history["val_loss"] epochs = range(len(accuracy))
Digit Recognizer
11,232,489
if __name__ == "__main__": seeder(CFG["seed"]) folds = StratifiedKFold(n_splits=CFG['fold_num'] ).split(np.arange(train.shape[0]), train.label.values) for fold,(trn_idx, val_idx)in enumerate(folds): if fold > 0: break print('Inference fold {} started'.format(fold)) test = pd.DataFrame() test['image_id'] = list(os.listdir('.. /input/cassava-leaf-disease-classification/test_images/')) test_ds = CassavaDataset(test, '.. /input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms() , output_label=False) valid_ = train.loc[val_idx,:].reset_index(drop=True) valid_ds = CassavaDataset(valid_, '.. /input/cassava-leaf-disease-classification/train_images/', transforms=get_inference_transforms() , output_label=False) val_loader = torch.utils.data.DataLoader( valid_ds, batch_size=CFG['valid_bs'], num_workers=CFG['num_workers'], shuffle=False, pin_memory=False, ) tst_loader = torch.utils.data.DataLoader( test_ds, batch_size=CFG['valid_bs'], num_workers=CFG['num_workers'], shuffle=False, pin_memory=False, ) device = torch.device(CFG['device']) model = CassvaImgClassifier(CFG['model_arch'], train.label.nunique() ).to(device) val_preds = [] tst_preds = [] for i, epoch in enumerate([1]): model.load_state_dict(torch.load(".. /input/aashika-cassava/tf_efficientnet_b4_ns_fold_0_9")) with torch.no_grad() : for _ in range(CFG["tta"]): val_preds += [CFG['weights'][i]/sum(CFG['weights'])/CFG['tta']*inference_one_epoch(model, val_loader, device)] tst_preds += [CFG['weights'][i]/sum(CFG['weights'])/CFG['tta']*inference_one_epoch(model, tst_loader, device)] val_preds = np.mean(val_preds, axis=0) tst_preds = np.mean(tst_preds, axis=0) print('fold {} validation loss = {:.5f}'.format(fold, log_loss(valid_.label.values, val_preds))) print('fold {} validation accuracy = {:.5f}'.format(fold,(valid_.label.values==np.argmax(val_preds, axis=1)).mean())) del model torch.cuda.empty_cache() <feature_engineering>
pred_labels = model.predict_generator(test_generator )
Digit Recognizer
11,232,489
test['label'] = np.argmax(tst_preds, axis=1) test.head()<save_to_csv>
pred_labels = np.argmax(pred_labels, axis=-1) pred_labels
Digit Recognizer
11,232,489
test.to_csv('submission.csv', index=False )<import_modules>
my_submission = pd.DataFrame({'ImageId': test_df.index + 1, 'Label': pred_labels}) my_submission.head()
Digit Recognizer
11,232,489
<install_modules><EOS>
my_submission.to_csv('submission.csv', index=False )
Digit Recognizer
11,265,589
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<import_modules>
%matplotlib inline
Digit Recognizer
11,265,589
HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90, Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop, IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, ShiftScaleRotate, CenterCrop, Resize ) HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90, Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop, IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, ShiftScaleRotate, CenterCrop, Resize ) def get_train_transforms() : return Compose([ RandomResizedCrop(CFG['img_size'], CFG['img_size']), Transpose(p=0.5), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), ShiftScaleRotate(p=0.5), HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5), RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1), p=0.5), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0), CoarseDropout(p=0.5), Cutout(p=0.5), ToTensorV2(p=1.0), ], p=1.) train = pd.read_csv('.. /input/cassava-leaf-disease-classification/train.csv') def get_valid_transforms() : return Compose([ CenterCrop(CFG['img_size'], CFG['img_size'], p=1.) , Resize(CFG['img_size'], CFG['img_size']), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0), ToTensorV2(p=1.0), ], p=1.) def rand_bbox(size, lam): W = size[0] H = size[1] cut_rat = np.sqrt(1.- lam) cut_w = np.int(W * cut_rat) cut_h = np.int(H * cut_rat) cx = np.random.randint(W) cy = np.random.randint(H) bbx1 = np.clip(cx - cut_w // 2, 0, W) bby1 = np.clip(cy - cut_h // 2, 0, H) bbx2 = np.clip(cx + cut_w // 2, 0, W) bby2 = np.clip(cy + cut_h // 2, 0, H) return bbx1, bby1, bbx2, bby2 def get_inference_transforms() : return Compose([ RandomResizedCrop(CFG['img_size'], CFG['img_size']), Transpose(p=0.5), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5), RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1), p=0.5), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0), ToTensorV2(p=1.0), ], p=1.) 
def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = True CFG = { 'fold_num': 5, 'seed': 719, 'model_arch': 'tf_efficientnet_b4_ns', 'img_size': 512, 'epochs': 10, 'train_bs': 32, 'valid_bs': 32, 'lr': 1e-4, 'num_workers': 4, 'accum_iter': 1, 'verbose_step': 1, 'device': 'cuda:0', 'tta': 3, 'used_epochs': [25,26,27,28,29], 'weights': [1,1,1,1] } class CassavaNet(nn.Module): def __init__(self): super().__init__() backbone = timm.create_model('tf_efficientnet_b4_ns', pretrained=False) n_features = backbone.classifier.in_features self.backbone = nn.Sequential(*backbone.children())[:-3] self.classifier = nn.Linear(n_features, 5) self.pool = nn.AdaptiveAvgPool2d(( 1, 1)) def forward_features(self, x): x = self.backbone(x) return x def forward(self, x): feats = self.forward_features(x) x = self.pool(feats ).view(x.size(0), -1) x = self.classifier(x) return x, feats class CassavaDataset(Dataset): def __init__(self, df, data_root, transforms=None, output_label=True, one_hot_label=False, do_fmix=False, fmix_params={ 'alpha': 1., 'decay_power': 3., 'shape':(CFG['img_size'], CFG['img_size']), 'max_soft': True, 'reformulate': False }, do_cutmix=False, cutmix_params={ 'alpha': 1, } ): super().__init__() self.df = df.reset_index(drop=True ).copy() self.transforms = transforms self.data_root = data_root self.do_fmix = do_fmix self.fmix_params = fmix_params self.do_cutmix = do_cutmix self.cutmix_params = cutmix_params self.output_label = output_label self.one_hot_label = one_hot_label if output_label == True: self.labels = self.df['label'].values if one_hot_label is True: self.labels = np.eye(self.df['label'].max() + 1)[self.labels] def __len__(self): return self.df.shape[0] def __getitem__(self, index: int): if self.output_label: target = self.labels[index] img = get_img("{}/{}".format(self.data_root, self.df.loc[index]['image_id'])) if self.transforms: img = self.transforms(image=img)['image'] if self.do_fmix and np.random.uniform(0., 1., size=1)[0] > 0.5: with torch.no_grad() : lam = np.clip(np.random.beta(self.fmix_params['alpha'], self.fmix_params['alpha']), 0.6, 0.7) mask = make_low_freq_image(self.fmix_params['decay_power'], self.fmix_params['shape']) mask = binarise_mask(mask, lam, self.fmix_params['shape'], self.fmix_params['max_soft']) fmix_ix = np.random.choice(self.df.index, size=1)[0] fmix_img = get_img("{}/{}".format(self.data_root, self.df.iloc[fmix_ix]['image_id'])) if self.transforms: fmix_img = self.transforms(image=fmix_img)['image'] mask_torch = torch.from_numpy(mask) img = mask_torch * img +(1.- mask_torch)* fmix_img rate = mask.sum() / CFG['img_size'] / CFG['img_size'] target = rate * target +(1.- rate)* self.labels[fmix_ix] if self.do_cutmix and np.random.uniform(0., 1., size=1)[0] > 0.5: with torch.no_grad() : cmix_ix = np.random.choice(self.df.index, size=1)[0] cmix_img = get_img("{}/{}".format(self.data_root, self.df.iloc[cmix_ix]['image_id'])) if self.transforms: cmix_img = self.transforms(image=cmix_img)['image'] lam = np.clip(np.random.beta(self.cutmix_params['alpha'], self.cutmix_params['alpha']), 0.3, 0.4) bbx1, bby1, bbx2, bby2 = rand_bbox(( CFG['img_size'], CFG['img_size']), lam) img[:, bbx1:bbx2, bby1:bby2] = cmix_img[:, bbx1:bbx2, bby1:bby2] rate = 1 -(( bbx2 - bbx1)*(bby2 - bby1)/(CFG['img_size'] * CFG['img_size'])) target = rate * target +(1.- rate)* self.labels[cmix_ix] if 
self.output_label == True: return img, target else: return img def inference_one_epoch(model, data_loader, device): model.eval() image_preds_all = [] pbar = tqdm(enumerate(data_loader), total=len(data_loader)) for step,(imgs)in pbar: imgs = imgs.to(device ).float() image_preds= model(imgs) image_preds_all += [torch.softmax(image_preds, 1 ).detach().cpu().numpy() ] image_preds_all = np.concatenate(image_preds_all, axis=0) return image_preds_all def get_img(path): im_bgr = cv2.imread(path) im_rgb = im_bgr[:, :, ::-1] return im_rgb class CassvaImgClassifier(nn.Module): def __init__(self, model_arch, n_class, pretrained=False): super().__init__() self.model = timm.create_model(model_arch, pretrained=pretrained) n_features = self.model.classifier.in_features self.model.classifier = nn.Linear(n_features, n_class) def forward(self, x): x = self.model(x) return x if __name__ == '__main__': seed_everything(CFG['seed']) folds = StratifiedKFold(n_splits=CFG['fold_num'] ).split(np.arange(train.shape[0]), train.label.values) for fold,(trn_idx, val_idx)in enumerate(folds): if fold > 0: break print('Inference fold {} started'.format(fold)) valid_ = train.loc[val_idx, :].reset_index(drop=True) valid_ds = CassavaDataset(valid_, '.. /input/cassava-leaf-disease-classification/train_images/', transforms=get_inference_transforms() , output_label=False) test = pd.DataFrame() test['image_id'] = list(os.listdir('.. /input/cassava-leaf-disease-classification/test_images/')) test_ds = CassavaDataset(test, '.. /input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms() , output_label=False) tst_loader = torch.utils.data.DataLoader( test_ds, batch_size=CFG['valid_bs'], num_workers=CFG['num_workers'], shuffle=False, pin_memory=False, ) device = torch.device(CFG['device']) model = CassvaImgClassifier(CFG['model_arch'], train.label.nunique() ).to(device) tst_preds = [] for i in [1]: for epoch in [1,2,3,4,5,6,7,8,9]: ckpt=torch.load('.. /input/model-comined/tf_efficientnet_b4_ns_fold_{}_{}.pth'.format(i,epoch),map_location={'cuda:1':'cuda:0'}) ckpt_model_dict = OrderedDict([(k[7:],v)if 'module.' in k else(k,v)for k, v in ckpt.items() ]) model.load_state_dict(ckpt_model_dict) with torch.no_grad() : tst_preds += [inference_one_epoch(model, tst_loader,device)] tst_preds = np.mean(tst_preds, axis=0) del model torch.cuda.empty_cache() test['label'] = np.argmax(tst_preds, axis=1) test.head() test.to_csv('submission.csv', index=False) print(test) <import_modules>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv')
Digit Recognizer
11,265,589
package_path = '../input/pytorch-image-models/pytorch-image-models-master' train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv') submission = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')<init_hyperparams>
Y_train=train['label'] X_train = train.drop(labels = ["label"],axis = 1) Y_train.value_counts()
Digit Recognizer
11,265,589
CFG = { 'normalize_mean':[0.42984136, 0.49624753, 0.3129598], 'normalize_std':[0.21417203, 0.21910103, 0.19542212], 'device': 'cuda:0', 'fold_num': 5, 'seed': 42, 'valid_bs': 32, 'num_workers': 4, 'model_arch': ['tf_efficientnet_b4_ns', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b4_ns', ], 'img_size': 512, 'tta': 3, 'fold_list':[0], 'used_epochs': ["../input/train3012/tf_efficientnet_b4_ns_3012_fold_0_14", "../input/train3012/tf_efficientnet_b4_ns_3012_fold_1_14", "../input/train3012/tf_efficientnet_b4_ns_3012_fold_2_14", "../input/train3012/tf_efficientnet_b4_ns_3012_fold_3_9", "../input/train3012/tf_efficientnet_b4_ns_3012_fold_4_12"], 'weights': [1,1,1,1,1], }<set_options>
X_train = X_train / 255 test = test / 255
Digit Recognizer
11,265,589
def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False torch.cuda.manual_seed_all(seed) def get_img(path): im_bgr = cv2.imread(path) im_rgb = im_bgr[:, :, ::-1] return im_rgb<categorify>
print("The shape of the labels before One Hot Encoding",Y_train.shape) Y_train = to_categorical(Y_train, num_classes = 10) print("The shape of the labels after One Hot Encoding",Y_train.shape )
Digit Recognizer
11,265,589
class CassavaDataset(Dataset): def __init__( self, df, data_root, transforms=None, output_label=True ): super().__init__() self.df = df.reset_index(drop=True ).copy() self.transforms = transforms self.data_root = data_root self.output_label = output_label def __len__(self): return self.df.shape[0] def __getitem__(self, index: int): if self.output_label: target = self.df.iloc[index]['label'] path = "{}/{}".format(self.data_root, self.df.iloc[index]['image_id']) img = get_img(path) if self.transforms: img = self.transforms(image=img)['image'] if self.output_label == True: return img, target else: return img<normalization>
random_seed = 2 X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.3, random_state=random_seed )
Digit Recognizer
11,265,589
HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90, Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop, IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, ShiftScaleRotate, CenterCrop, Resize, RandomCrop ) def get_inference_transforms() : return Compose([ RandomCrop(512, 512), Transpose(p=0.5), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.5), RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5), Normalize(mean=CFG["normalize_mean"], std=CFG["normalize_std"], max_pixel_value=255.0, p=1.0), ToTensorV2(p=1.0), ], p=1.) def get_inference_transforms_vit384() : return Compose([ RandomCrop(384, 384), Transpose(p=0.5), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.5), RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5), Normalize(mean=CFG["normalize_mean"], std=CFG["normalize_std"], max_pixel_value=255.0, p=1.0), ToTensorV2(p=1.0), ], p=1.) <choose_model_class>
datagen = ImageDataGenerator(zoom_range = 0.1, width_shift_range = 0.1, height_shift_range = 0.1, rotation_range = 10 )
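Editor's sketch for previewing the augmentation defined above; not part of the original code, and it assumes X_train has been reshaped to (n, 28, 28, 1), as the Conv2D input shape used later in this kernel requires.

import matplotlib.pyplot as plt

batch_x, batch_y = next(datagen.flow(X_train, Y_train, batch_size=9))
fig, axes = plt.subplots(3, 3, figsize=(4, 4))
for img, ax in zip(batch_x, axes.ravel()):
    ax.imshow(img.squeeze(), cmap='gray')  # drop the channel axis for display
    ax.axis('off')
plt.show()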
Digit Recognizer
11,265,589
class CassvaImgClassifier(nn.Module): def __init__(self, model_arch, n_class, pretrained=False): super().__init__() self.model = timm.create_model(model_arch, pretrained=pretrained, num_classes=5) def forward(self, x): x = self.model(x) return x<create_dataframe>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(3, 3), activation = 'relu', input_shape =(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(filters = 32, kernel_size =(3, 3), activation = 'relu')) model.add(BatchNormalization()) model.add(Conv2D(filters = 32, kernel_size =(5, 5), activation = 'relu')) model.add(BatchNormalization()) model.add(MaxPool2D(strides =(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3, 3), activation = 'relu')) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size =(3, 3), activation = 'relu')) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size =(5, 5), activation = 'relu')) model.add(BatchNormalization()) model.add(MaxPool2D(strides =(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(512, activation = 'relu')) model.add(Dropout(0.5)) model.add(Dense(1024, activation = 'relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation = 'softmax'))
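Editor's sketch of the spatial-size arithmetic behind the architecture above (28x28 input, default 'valid' padding): every 3x3 convolution trims 2 pixels, every 5x5 trims 4, and each max-pool halves the size. Added for clarity; not in the original notebook.

size = 28
for k in (3, 3, 5):          # first convolution block
    size -= k - 1            # 28 -> 26 -> 24 -> 20
size //= 2                   # max-pool -> 10
for k in (3, 3, 5):          # second convolution block
    size -= k - 1            # 10 -> 8 -> 6 -> 2
size //= 2                   # max-pool -> 1
print(size)                  # 1, so Flatten feeds 1 * 1 * 64 = 64 features to Dense(512)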
Digit Recognizer
11,265,589
seed_everything(CFG['seed']) tst_preds = [] device = torch.device(CFG['device']) test = pd.DataFrame() test['image_id'] = list(os.listdir('.. /input/cassava-leaf-disease-classification/test_images/')) for i, sub_model in enumerate(CFG['used_epochs']): if "vit" not in sub_model: test_ds = CassavaDataset(test, '.. /input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms() , output_label=False) else: test_ds = CassavaDataset(test, '.. /input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms_vit384() , output_label=False) tst_loader = torch.utils.data.DataLoader( test_ds, batch_size=CFG['valid_bs'], num_workers=CFG['num_workers'], shuffle=False, pin_memory=True, ) model = CassvaImgClassifier(CFG['model_arch'][i], train.label.nunique() ).to(device) model.load_state_dict(torch.load(sub_model, map_location=CFG['device'])) print(sub_model) with torch.no_grad() : for tta_ in range(CFG['tta']): tst_preds += [CFG['weights'][i]/sum(CFG['weights'])/CFG['tta']*inference_one_epoch(model, tst_loader, device)] tst_preds = np.sum(tst_preds, axis=0) del model torch.cuda.empty_cache()<save_to_csv>
model.compile(optimizer='adam',metrics=['accuracy'],loss='categorical_crossentropy' )
Digit Recognizer
11,265,589
test['label'] = np.argmax(tst_preds, axis=1) test.to_csv('submission.csv', index=False )<install_modules>
reduction_lr = ReduceLROnPlateau(monitor='val_accuracy',patience=2, verbose=1, factor=0.2, min_lr=0.00001 )
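Editor's sketch, not from the original kernel, of the arithmetic implied by the callback above: whenever val_accuracy stalls for patience=2 epochs the learning rate is multiplied by factor=0.2 and clamped at min_lr. The 1e-3 starting point assumes Keras's default Adam learning rate.

lr, factor, min_lr = 1e-3, 0.2, 1e-5
for plateau in range(1, 5):
    lr = max(lr * factor, min_lr)
    print(f"after plateau {plateau}: lr ~ {lr:.0e}")
# 1e-3 -> 2e-4 -> 4e-5 -> 1e-5 (clamped) -> 1e-5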
Digit Recognizer
11,265,589
!mkdir -p /tmp/pip/cache/ !cp.. /input/omegaconf/PyYAML-5.4b2-cp38-cp38-manylinux1_x86_64.whl /tmp/pip/cache/ !cp.. /input/omegaconf/omegaconf-2.0.5-py3-none-any.whl /tmp/pip/cache/ !cp.. /input/omegaconf/typing_extensions-3.7.4.3-py3-none-any.whl /tmp/pip/cache/ !pip install --no-index --find-links /tmp/pip/cache/ omegaconf > /dev/null<set_options>
hist = model.fit_generator(datagen.flow(X_train,Y_train,batch_size=32),epochs=20,validation_data =(X_val,Y_val),callbacks=[reduction_lr] )
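Editor's note: fit_generator is deprecated in recent TensorFlow 2.x releases, where model.fit accepts the generator directly. A sketch of the equivalent call, reusing the names defined above:

hist = model.fit(datagen.flow(X_train, Y_train, batch_size=32),
                 epochs=20,
                 validation_data=(X_val, Y_val),
                 callbacks=[reduction_lr])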
Digit Recognizer
11,265,589
sys.path.append('.. /input/timm-pytorch-image-models/pytorch-image-models-master') sys.path.append(".. /input/cleanlab/") warnings.filterwarnings('ignore') <set_options>
final_loss, final_acc = model.evaluate(X_val, Y_val, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc))
Digit Recognizer
11,265,589
mean, std =(0.485, 0.456, 0.406),(0.229, 0.224, 0.225) def get_transforms(img_size=(512, 512)) : transformations = Compose([ PadIfNeeded(min_height=img_size[0], min_width=img_size[1]), CenterCrop(img_size[0], img_size[1]), Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0), ToTensorV2(p=1.0), ], p=1.0) return transformations<choose_model_class>
y_pred = model.predict(X_val, batch_size = 64) y_pred = np.argmax(y_pred,axis = 1) y_pred = pd.Series(y_pred,name="Label") y_pred
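Editor's sketch of a natural follow-up to these validation predictions: a confusion matrix. Y_val is one-hot encoded here, so it is converted back to class indices first. Not part of the original notebook.

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.argmax(Y_val, axis=1)
print(confusion_matrix(y_true, y_pred))  # rows: true digits, columns: predicted digits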
Digit Recognizer
11,265,589
def create_model(model_name: str, pretrained: bool, num_classes: int, in_chans: int): model = timm.create_model(model_name=model_name, pretrained=pretrained, num_classes=num_classes, in_chans=in_chans) return model<load_pretrained>
y_pred1 = model.predict(test, batch_size = 64) y_pred1 = np.argmax(y_pred1,axis = 1) y_pred1 = pd.Series(y_pred1,name="Label") y_pred1
Digit Recognizer
11,265,589
<find_best_params><EOS>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),y_pred1],axis = 1) submission.to_csv("submission.csv",index=False )
Digit Recognizer
11,342,302
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<define_search_space>
from matplotlib import pyplot as plt import math, os, re, time, random import numpy as np, pandas as pd, seaborn as sns import tensorflow as tf from sklearn.model_selection import train_test_split
Digit Recognizer
11,342,302
eff_b0_cfg_s = eff_b0_cfg = OmegaConf.create(eff_b0_cfg_s )<define_variables>
rank_0_tensor = tf.constant(1) print(rank_0_tensor); print('') rank_1_tensor = tf.constant([1, 0, 0]) print(rank_1_tensor); print('') rank_2_tensor = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) print(rank_2_tensor )
Digit Recognizer
11,342,302
name = '14-10-36' cfg = eff_b0_cfg do_predict = True do_submit = True img_dir = '.. /input/cassava-leaf-disease-merged/train/' label_path = '.. /input/cassava-leaf-disease-merged/merged.csv' log_dir = os.path.join('.. /input/cassava-public-ckpt', name) n_folds = len(glob(os.path.join(log_dir, 'checkpoints/*.ckpt'))) num2class = ["Cassava Bacterial Blight(CBB)", "Cassava Brown Streak Disease(CBSD)", "Cassava Green Mottle(CGM)", "Cassava Mosaic Disease(CMD)", "Healthy"]<feature_engineering>
rank_0_tensor = tf.constant(1, dtype = tf.float16) print(rank_0_tensor); print('') rank_1_tensor = tf.constant([1, 0, 0], dtype = tf.float32) print(rank_1_tensor); print('') rank_2_tensor = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype = tf.int32) print(rank_2_tensor )
Digit Recognizer
11,342,302
seed_everything(42) label_df = pd.read_csv(label_path) if 'fold' not in label_df.columns: skf = StratifiedKFold(n_splits=5, shuffle=True) label_df.loc[:, 'fold'] = 0 for fold_num,(train_index, val_index)in enumerate(skf.split(X=label_df.index, y=label_df.label.values)) : label_df.loc[label_df.iloc[val_index].index, 'fold'] = fold_num if do_predict: infer = pl.Trainer(gpus=1) oof_dict = {'image_id': [], 'label': [], 'fold': []} for fold_num in range(n_folds): val_df = label_df[label_df.fold == fold_num] test_dataset = TestDataset(img_dir, val_df, img_size=cfg.img_size) test_dataloader = DataLoader(test_dataset, batch_size=cfg.batch_size, num_workers=4, shuffle=False) state_dict = get_state_dict_from_checkpoint(log_dir, fold_num) model = LitTester(cfg.network, state_dict) pred = infer.test(model, test_dataloaders=test_dataloader, verbose=False)[0] oof_dict['image_id'].extend(val_df.image_id.values) oof_dict['label'].extend(pred['prob'].tolist()) oof_dict['fold'].extend([fold_num] * len(pred['prob'])) pred_df = pd.DataFrame(oof_dict) pred_df.to_csv('oof.csv', index=False) else: pred_df = pd.read_csv(os.path.join(log_dir, 'oof.csv'))<load_from_csv>
print(type(rank_2_tensor.numpy())) print(rank_2_tensor.numpy()); print('') tensor_to_array = np.add(rank_2_tensor, 1) print(type(tensor_to_array)) print(tensor_to_array); print('') array_to_tensor = tf.add(rank_2_tensor.numpy() , 1) print(array_to_tensor )
Digit Recognizer
11,342,302
if do_submit: sub = pd.read_csv('.. /input/cassava-leaf-disease-classification/sample_submission.csv') infer = pl.Trainer(gpus=1) test_dataset = TestDataset('.. /input/cassava-leaf-disease-classification/test_images', sub, img_size=cfg.img_size) test_dataloader = DataLoader(test_dataset, batch_size=cfg.batch_size, num_workers=4, shuffle=False) preds = [] for fold_num in range(n_folds): state_dict = get_state_dict_from_checkpoint(log_dir, fold_num) model = LitTester(cfg.network, state_dict) pred = infer.test(model, test_dataloaders=test_dataloader, verbose=False)[0] preds.append(pred['prob']) sub['label'] = np.argmax(np.mean(preds, axis=0), axis=1) sub.to_csv(os.path.join(os.getcwd() , 'submission.csv'), index=False )<sort_values>
model = tf.keras.models.Sequential() model.add(tf.keras.layers.Dense(256, activation='relu', input_shape =(784,))) model.add(tf.keras.layers.Dense(128, activation='swish')) model.add(tf.keras.layers.Dense(10, activation='softmax')) model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["categorical_accuracy"]) model.summary()
Digit Recognizer
11,342,302
label_df = label_df.sort_values(by='image_id', ascending=1) pred_df = pred_df.sort_values(by='image_id', ascending=1) ids, labels = label_df.image_id.values, label_df.label.values preds = np.array([literal_eval(pred)if isinstance(pred, str)else pred for pred in pred_df.label.values]) print(f'total {len(ids)} images') print(f'prediction shape: {preds.shape}, label shape: {labels.shape}' )<define_variables>
model = tf.keras.models.Sequential([ tf.keras.layers.Dense(256, activation = 'relu', input_shape =(784,)) , tf.keras.layers.Dense(128, activation = 'swish'), tf.keras.layers.Dense(10, activation = 'softmax') ]) model.compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["categorical_accuracy"]) model.summary()
Digit Recognizer
11,342,302
s = labels psx = preds K = len(np.unique(s)) thresholds = [np.mean(psx[:,k][s == k])for k in range(K)] thresholds = np.asarray(thresholds) confident_joint = np.zeros(( K, K), dtype = int) for i, row in enumerate(psx): s_label = s[i] confident_bins = row >= thresholds - 1e-6 num_confident_bins = sum(confident_bins) if num_confident_bins == 1: confident_joint[s_label][np.argmax(confident_bins)] += 1 elif num_confident_bins > 1: confident_joint[s_label][np.argmax(row)] += 1 confident_joint = cleanlab.latent_estimation.calibrate_confident_joint( confident_joint, s) cleanlab.util.print_joint_matrix(confident_joint) MIN_NUM_PER_CLASS = 5 prune_count_matrix = cleanlab.pruning.keep_at_least_n_per_class( prune_count_matrix=confident_joint.T, n=MIN_NUM_PER_CLASS, ) s_counts = np.bincount(s) noise_masks_per_class = [] for k in range(K): noise_mask = np.zeros(len(psx), dtype=bool) psx_k = psx[:, k] if s_counts[k] > MIN_NUM_PER_CLASS: for j in range(K): if k != j: num2prune = prune_count_matrix[k][j] if num2prune > 0: margin = psx_k - psx[:, j] s_filter = s == j threshold = -np.partition( -margin[s_filter], num2prune - 1 )[num2prune - 1] noise_mask = noise_mask |(s_filter &(margin >= threshold)) noise_masks_per_class.append(noise_mask) else: noise_masks_per_class.append(np.zeros(len(s), dtype=bool)) label_errors_bool = np.stack(noise_masks_per_class ).any(axis=0) for i, pred_label in enumerate(psx.argmax(axis=1)) : if label_errors_bool[i] and np.all(pred_label == s[i]): label_errors_bool[i] = False label_errors_idx = np.arange(len(s)) [label_errors_bool] self_confidence = np.array( [np.mean(psx[i][s[i]])for i in label_errors_idx] ) margin = self_confidence - psx[label_errors_bool].max(axis=1) label_errors_idx = label_errors_idx[np.argsort(margin)]<feature_engineering>
inputs = tf.keras.Input(shape =(784,)) x = tf.keras.layers.Dense(256, activation = 'relu' )(inputs) x = tf.keras.layers.Dense(128, activation = 'swish' )(x) outputs = tf.keras.layers.Dense(10, activation = 'softmax' )(x) model = tf.keras.Model(inputs = inputs, outputs = outputs) model.compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["categorical_accuracy"]) model.summary()
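Editor's sketch, for completeness only: the same network written in the third Keras style, model subclassing, alongside the Sequential and functional versions above.

import tensorflow as tf

class MLP(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.hidden1 = tf.keras.layers.Dense(256, activation='relu')
        self.hidden2 = tf.keras.layers.Dense(128, activation='swish')
        self.classifier = tf.keras.layers.Dense(10, activation='softmax')

    def call(self, x):
        return self.classifier(self.hidden2(self.hidden1(x)))

model = MLP()
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['categorical_accuracy'])
model(tf.zeros((1, 784)))  # run one dummy batch so the weights are created
model.summary()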
Digit Recognizer
11,342,302
total_idx = np.arange(len(ids)) clean_idx = np.array([idx for idx in total_idx if idx not in label_errors_idx]) guesses = np.stack(noise_masks_per_class ).argmax(axis=0) guesses[clean_idx] = labels[clean_idx] clean_ids = ids[clean_idx] clean_labels = labels[clean_idx] clean_guesses = guesses[clean_idx] noisy_ids = ids[label_errors_idx] noisy_labels = labels[label_errors_idx] noisy_guesses = guesses[label_errors_idx] print(f'[clean ratio] \t {len(clean_idx)/ len(total_idx)* 100:.2f}%') print(f'[noise ratio] \t {len(noisy_ids)/ len(total_idx)* 100:.2f}%' )<prepare_output>
train = pd.read_csv('.. /input/digit-recognizer/train.csv') test = pd.read_csv('.. /input/digit-recognizer/test.csv') train.head()
Digit Recognizer
11,342,302
all_data = pd.DataFrame({'image_id': ids, 'given_label': labels, 'guess_label': guesses}) all_data['is_noisy'] =(all_data.given_label != all_data.guess_label) all_data['max_prob'] = preds.max(axis=1 )<define_variables>
labels = train['label'] train = train.drop('label', axis = 1) train = train / 255.0 test = test / 255.0
Digit Recognizer
11,342,302
class_colors = np.array([' num2class = [f'{idx}-{elem}' for idx, elem in enumerate(num2class)]<load_pretrained>
labels = tf.one_hot(labels, depth = 10 ).numpy()
Digit Recognizer
11,342,302
with open('.. /input/train-weights-optimization/best_weights.json', 'r')as f: weights_dict = json.load(f) weights_dict<load_pretrained>
datagen = tf.keras.preprocessing.image.ImageDataGenerator( rotation_range = 20, zoom_range = 0.1, width_shift_range = 0.1, height_shift_range = 0.1 )
Digit Recognizer
11,342,302
normal_configs = [] tta_configs = [] normal_model_dirs = [] tta_model_dirs = [] for model_dir in weights_dict.keys() : assert len(glob.glob(f'{model_dir}/*.yml')) ==1 config_path = glob.glob(f'{model_dir}/*.yml')[0] with open(config_path)as f: config = yaml.load(f) if 'valid_augmentation' in config['tag'].keys() : tta_model_dirs.append(model_dir) tta_configs.append(config) else: normal_model_dirs.append(model_dir) normal_configs.append(config) TRAIN_PATH = '.. /input/cassava-leaf-disease-classification/train_images' TEST_PATH = '.. /input/cassava-leaf-disease-classification/test_images' OUTPUT_DIR = './'<compute_test_metric>
EPOCHS = 45 BATCH_SIZE = 64 NUM_NETS = 25 VERBOSE = 0
Digit Recognizer
11,342,302
def get_score(y_true, y_pred): return accuracy_score(y_true, y_pred) @contextmanager def timer(name): t0 = time.time() LOGGER.info(f'[{name}] start') yield LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s.') def seed_torch(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True <load_from_csv>
model = [0] * NUM_NETS for j in range(NUM_NETS): model[j] = tf.keras.models.Sequential() model[j].add(tf.keras.layers.Conv2D(32, kernel_size = 3, activation = 'relu', input_shape =(28, 28, 1))) model[j].add(tf.keras.layers.BatchNormalization()) model[j].add(tf.keras.layers.Conv2D(32, kernel_size = 3, activation = 'relu')) model[j].add(tf.keras.layers.BatchNormalization()) model[j].add(tf.keras.layers.Conv2D(32, kernel_size = 5, strides = 2, padding = 'same', activation = 'relu')) model[j].add(tf.keras.layers.BatchNormalization()) model[j].add(tf.keras.layers.Dropout(0.4)) model[j].add(tf.keras.layers.Conv2D(64, kernel_size = 3, activation = 'relu')) model[j].add(tf.keras.layers.BatchNormalization()) model[j].add(tf.keras.layers.Conv2D(64, kernel_size = 3, activation = 'relu')) model[j].add(tf.keras.layers.BatchNormalization()) model[j].add(tf.keras.layers.Conv2D(64, kernel_size = 5, strides = 2, padding = 'same', activation = 'relu')) model[j].add(tf.keras.layers.BatchNormalization()) model[j].add(tf.keras.layers.Dropout(0.4)) model[j].add(tf.keras.layers.Conv2D(128, kernel_size = 4, activation = 'relu')) model[j].add(tf.keras.layers.BatchNormalization()) model[j].add(tf.keras.layers.Flatten()) model[j].add(tf.keras.layers.Dropout(0.4)) model[j].add(tf.keras.layers.Dense(10, activation = 'softmax')) model[j].compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["categorical_accuracy"] )
Digit Recognizer
11,342,302
test = pd.read_csv('.. /input/cassava-leaf-disease-classification/sample_submission.csv') test.head()<normalization>
lr_callback = tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x) history = [0] * NUM_NETS for j in range(NUM_NETS): X_train, X_val, y_train, y_val = train_test_split(train, labels, test_size = 0.1) STEPS_PER_EPOCH = X_train.shape[0] // 64 history[j] = model[j].fit_generator(datagen.flow(X_train, y_train, batch_size = BATCH_SIZE), epochs = EPOCHS, steps_per_epoch = STEPS_PER_EPOCH, validation_data =(X_val, y_val), callbacks = [lr_callback], verbose = VERBOSE) print(f"CNN {j + 1}: Epochs={EPOCHS}, Train accuracy={max(history[j].history['categorical_accuracy'])}, Validation accuracy={max(history[j].history['val_categorical_accuracy'])}" )
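Editor's sketch of the schedule defined above, lr(epoch) = 1e-3 * 0.95**epoch; added only to make the decay concrete, not part of the original code.

for epoch in range(5):
    print(epoch, round(1e-3 * 0.95 ** epoch, 6))
# roughly 1.0e-3, 9.5e-4, 9.0e-4, 8.6e-4, 8.1e-4; by epoch 44 it has decayed to about 1e-4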
Digit Recognizer
11,342,302
class TestDataset(Dataset): def __init__(self, df, transform=None): self.df = df self.file_names = df['image_id'].values self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, idx): file_name = self.file_names[idx] file_path = f'{TEST_PATH}/{file_name}' image = cv2.imread(file_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if self.transform: augmented = self.transform(image=image) image = augmented['image'] return image class TTADataset(Dataset): def __init__(self, df, image_path, ttas): self.df = df self.file_names = df['image_id'].values self.labels = df['label'].values self.image_path = image_path self.ttas = ttas def __len__(self)-> int: return len(self.df) def __getitem__(self, idx): file_name = self.file_names[idx] file_path = f'{self.image_path}/{file_name}' image = cv2.imread(file_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) imglist=[tta(image=image)['image'] for tta in self.ttas] image=torch.stack(imglist) label = torch.tensor(self.labels[idx] ).long() return image, label<normalization>
preds = np.zeros(( test.shape[0],10)) for j in range(NUM_NETS): preds += model[j].predict(test)/ NUM_NETS probs = pd.DataFrame(preds) probs.to_csv('ensemble_probs') probs.columns = probs.columns.astype(str) print(probs.columns) probs.head()
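Editor's sketch: the loop above is soft voting (averaging predicted probabilities). A hard-voting alternative, shown only for comparison and assuming test is already shaped as the networks expect, takes the majority class across the individual models.

import numpy as np

votes = np.stack([model[j].predict(test).argmax(axis=1) for j in range(NUM_NETS)])
hard_labels = np.array([np.bincount(col, minlength=10).argmax() for col in votes.T])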
Digit Recognizer
11,342,302
def _get_augmentations(aug_list, cfg): process = [] for aug in aug_list: if aug == 'Resize': process.append(Resize(cfg['size'], cfg['size'])) elif aug == 'RandomResizedCrop': process.append(RandomResizedCrop(cfg['size'], cfg['size'])) elif aug == 'CenterCrop': process.append(CenterCrop(CFG['size'], CFG['size'])) elif aug == 'Transpose': process.append(Transpose(p=0.5)) elif aug == 'HorizontalFlip': process.append(HorizontalFlip(p=0.5)) elif aug == 'VerticalFlip': process.append(VerticalFlip(p=0.5)) elif aug == 'ShiftScaleRotate': process.append(ShiftScaleRotate(p=0.5)) elif aug == 'Normalize': process.append(Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], )) else: raise ValueError(f'{aug} is not suitable') process.append(ToTensorV2()) return process<categorify>
submission = pd.read_csv('.. /input/digit-recognizer/sample_submission.csv') submission['Label'] = preds.argmax(axis = 1) submission.to_csv("ensemble.csv", index = False) submission.head(10 )
Digit Recognizer
11,342,302
def get_transforms(*, aug_list, cfg): return Compose( _get_augmentations(aug_list, cfg) )<choose_model_class>
prev_cnn_probs = pd.read_csv('.. /input/mnistsavedprobs/ensemble_probs') prev_cnn_probs = prev_cnn_probs.drop('Unnamed: 0', axis = 1) print(prev_cnn_probs.columns) prev_cnn_probs.head()
Digit Recognizer
11,342,302
class CustomModel(nn.Module): def __init__(self, model_name, target_size, pretrained=False): super().__init__() self.model = timm.create_model(model_name, pretrained=pretrained) if hasattr(self.model, 'classifier'): n_features = self.model.classifier.in_features self.model.classifier = nn.Linear(n_features, target_size) elif hasattr(self.model, 'fc'): n_features = self.model.fc.in_features self.model.fc = nn.Linear(n_features, target_size) elif hasattr(self.model, 'head'): n_features = self.model.head.in_features self.model.head = nn.Linear(n_features, target_size) def forward(self, x): x = self.model(x) return x<categorify>
new_probs = probs.add(prev_cnn_probs ).divide(2) new_probs.head()
Digit Recognizer
11,342,302
<load_pretrained><EOS>
submission2 = pd.read_csv('.. /input/digit-recognizer/sample_submission.csv') submission2['Label'] = new_probs.values.argmax(axis = 1) submission2.to_csv("ensemble2.csv", index = False) submission2.head(10 )
Digit Recognizer
11,333,538
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<save_to_csv>
%matplotlib inline
Digit Recognizer
11,333,538
predictions_list = [] model_dir_list = [] for config, model_dir in zip(normal_configs, normal_model_dirs): predictions_list.append(main(config, model_dir)) model_dir_list.append(model_dir) for config, model_dir in zip(tta_configs, tta_model_dirs): predictions_list.append(main_tta(config, model_dir)) model_dir_list.append(model_dir) <save_to_csv>
print(tf.version.VERSION )
Digit Recognizer
11,333,538
predictions = np.zeros(predictions_list[0].shape, dtype=predictions_list[0].dtype) for i, key in zip(range(len(predictions_list)) , model_dir_list): predictions += predictions_list[i] * weights_dict[key] test['label'] = predictions.argmax(1) test[['image_id', 'label']].to_csv(OUTPUT_DIR+'submission.csv', index=False) test.head()<install_modules>
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
11,333,538
!mkdir -p /tmp/pip/cache/ !cp.. /input/omegaconf/PyYAML-5.4b2-cp38-cp38-manylinux1_x86_64.whl /tmp/pip/cache/ !cp.. /input/omegaconf/omegaconf-2.0.5-py3-none-any.whl /tmp/pip/cache/ !cp.. /input/omegaconf/typing_extensions-3.7.4.3-py3-none-any.whl /tmp/pip/cache/ !pip install --no-index --find-links /tmp/pip/cache/ omegaconf > /dev/null<set_options>
train_X = train.loc[:, "pixel0":"pixel783"] train_y = train.loc[:, "label"]
Digit Recognizer
11,333,538
sys.path.append('.. /input/timm-pytorch-image-models/pytorch-image-models-master') sys.path.append(".. /input/cleanlab/") warnings.filterwarnings('ignore') <set_options>
threshold=0.1 train_X[train_X<threshold]=0 test[test<threshold]=0
Digit Recognizer
11,333,538
mean, std =(0.485, 0.456, 0.406),(0.229, 0.224, 0.225) def get_transforms(img_size=(512, 512)) : transformations = Compose([ PadIfNeeded(min_height=img_size[0], min_width=img_size[1]), CenterCrop(img_size[0], img_size[1]), Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0), ToTensorV2(p=1.0), ], p=1.0) return transformations<choose_model_class>
train_X = train_X / 255.0 test_X = test / 255.0 train_X = train_X.values.reshape(-1,28,28,1) test_X = test_X.values.reshape(-1,28,28,1) train_y = to_categorical(train_y, num_classes = 10 )
Digit Recognizer
11,333,538
def create_model(model_name: str, pretrained: bool, num_classes: int, in_chans: int): model = timm.create_model(model_name=model_name, pretrained=pretrained, num_classes=num_classes, in_chans=in_chans) return model<load_pretrained>
def build_model(input_shape=(28, 28, 1)) : model = Sequential() model.add(Conv2D(32, kernel_size = 3, activation='swish', input_shape =(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size = 3, activation='swish')) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='swish')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(64, kernel_size = 3, activation='swish')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size = 3, activation='swish')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='swish')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(128, kernel_size = 4, activation='swish')) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dropout(0.4)) model.add(Dense(10, activation='softmax')) return model
Digit Recognizer
11,333,538
def get_state_dict_from_checkpoint(log_dir, fold_num): ckpt_path = glob(os.path.join(log_dir, f'checkpoints/*fold{fold_num}*.ckpt')) [0] state_dict = pl_load(ckpt_path, map_location='cpu') if 'state_dict' in state_dict: state_dict = state_dict['state_dict'] did_distillation = False state_dict = OrderedDict(( k.replace('model.', '') if 'model.' in k else k, v)for k, v in state_dict.items()) return state_dict<find_best_params>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False )
Digit Recognizer
11,333,538
class LitTester(pl.LightningModule): def __init__(self, network_cfg, state_dict): super(LitTester, self ).__init__() self.model = create_model(**network_cfg) self.model.load_state_dict(state_dict) self.model.eval() def forward(self, x): x = self.model(x) return x def test_step(self, batch, batch_idx): score = torch.nn.functional.softmax(self(batch), dim=1) score2 = torch.nn.functional.softmax(self(torch.flip(batch, [-1])) , dim=1) score3 = torch.nn.functional.softmax(self(torch.flip(batch, [-2])) , dim=1) out =(score + score2 + score3)/ 3.0 return {"pred": out} def test_epoch_end(self, output_results): all_outputs = torch.cat([out["pred"] for out in output_results], dim=0) all_outputs = all_outputs.cpu().numpy() return {'prob': all_outputs}<define_search_space>
learning_rate_reduction = ReduceLROnPlateau(monitor='accuracy', patience=3, verbose=1, factor=0.8, min_lr=0.001) annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x) early_stop=EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='auto', baseline=None, restore_best_weights=True )
Digit Recognizer
11,333,538
eff_b0_cfg_s = eff_b0_cfg = OmegaConf.create(eff_b0_cfg_s )<define_variables>
%%time nets=10 model = [0] *nets history = [0] * nets skf = StratifiedKFold(n_splits=nets, shuffle = True, random_state=1) skf.get_n_splits(train_X, train['label']) print(skf) number=0 for train_index, test_index in skf.split(train_X, train['label']): print("SPLIT ",number," TRAIN index:", train_index, "TEST index:", test_index) X_train, X_val = train_X[train_index], train_X[test_index] y_train, y_val = train_y[train_index], train_y[test_index] model[number]=build_model() model[number].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) history[number] =model[number].fit(datagen.flow(X_train,y_train), epochs=100 ,validation_data =(X_val,y_val), batch_size=100, verbose = 0,callbacks = [annealer,early_stop]) metrics=pd.DataFrame(history[number].history) display(metrics) number+=1
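Editor's sketch of one way to use the fold models trained above: average their softmax outputs on the test set and submit the argmax. Not part of the original notebook; the file name is illustrative, and test_X comes from the earlier preprocessing cell.

import numpy as np
import pandas as pd

test_probs = np.zeros((test_X.shape[0], 10))
for number in range(nets):
    test_probs += model[number].predict(test_X) / nets

submission = pd.DataFrame({"ImageId": np.arange(1, test_X.shape[0] + 1),
                           "Label": test_probs.argmax(axis=1)})
submission.to_csv("kfold_ensemble_submission.csv", index=False)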
Digit Recognizer
11,333,538
name = '14-10-36' cfg = eff_b0_cfg do_predict = True do_submit = False img_dir = '.. /input/cassava-leaf-disease-merged/train/' label_path = '.. /input/cassava-leaf-disease-merged/merged.csv' log_dir = os.path.join('.. /input/cassava-public-ckpt', name) n_folds = len(glob(os.path.join(log_dir, 'checkpoints/*.ckpt'))) num2class = ["Cassava Bacterial Blight(CBB)", "Cassava Brown Streak Disease(CBSD)", "Cassava Green Mottle(CGM)", "Cassava Mosaic Disease(CMD)", "Healthy"]<feature_engineering>
for number in range(0,nets): model[number].save("StratifiedKFold_10_batch100_double_val_loss_"+str(number)+".h5" )
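Editor's sketch of reloading one of the models saved above in a later session; the path mirrors the naming pattern used in the loop, and it assumes the same TensorFlow version is available.

from tensorflow.keras.models import load_model

restored = load_model("StratifiedKFold_10_batch100_double_val_loss_0.h5")
restored.summary()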
Digit Recognizer