kernel_id: int64, 24.2k to 23.3M
prompt: stringlengths 8 to 1.85M
completetion: stringlengths 1 to 182k
comp_name: stringlengths 5 to 57
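Each record below appears to span four consecutive lines, in the column order above (kernel_id, prompt, completetion, comp_name). As a minimal sketch of how such a flat dump could be loaded back into a table, assuming exactly that four-line layout and that the column header above has already been stripped; the file name code_cells_dump.txt and the helper parse_flat_dump are hypothetical, not part of the dataset:

import pandas as pd

COLUMNS = ["kernel_id", "prompt", "completetion", "comp_name"]

def parse_flat_dump(path):
    """Group every four consecutive lines of the dump into one record."""
    with open(path, encoding="utf-8") as f:
        lines = [ln.rstrip("\n") for ln in f]
    usable = len(lines) - len(lines) % 4  # drop a trailing partial record, if any
    records = [lines[i:i + 4] for i in range(0, usable, 4)]
    df = pd.DataFrame(records, columns=COLUMNS)
    # kernel_id values are printed with thousands separators, e.g. "20,482,624"
    df["kernel_id"] = df["kernel_id"].str.replace(",", "", regex=False).astype("int64")
    return df

# Hypothetical usage:
# df = parse_flat_dump("code_cells_dump.txt")
# print(df[["kernel_id", "comp_name"]].head())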
20,482,624
test = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv') test['file_path'] = test.StudyInstanceUID.apply(lambda x: os.path.join('../input/ranzcr-clip-catheter-line-classification/test', f'{x}.jpg')) target_cols = test.iloc[:, 1:12].columns.tolist() test_dataset = RANZCRDataset(test, 'test', transform=transforms_test) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=24 )<choose_model_class>
history = model.fit(datagen.flow(Xtrain, ytrain, batch_size=100), validation_data=(Xvalid, yvalid), epochs = 100, callbacks=[learning_rate_reduction] )
Digit Recognizer
20,482,624
submit = False if submit: test_preds = [] for i in range(len(enet_type)) : if enet_type[i] == 'resnet200d': print('resnet200d loaded') model = RANZCRResNet200D(enet_type[i], out_dim=len(target_cols)) model = model.to(device) model.load_state_dict(torch.load(model_path[i], map_location='cuda:0')) if tta: test_preds += [tta_inference_func(test_loader)] else: test_preds += [inference_func(test_loader)] submission = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv') submission[target_cols] = np.mean(test_preds, axis=0) submission.to_csv('submission.csv', index=False) else: pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv' ).to_csv('submission.csv', index=False )<load_from_csv>
ypred = model.predict(test_data) for i in range(ypred.shape[0]): pred = ypred[i, :] ypred[i, 0] = list(pred ).index(pred.max() )
Digit Recognizer
20,482,624
<load_from_csv><EOS>
submission = pd.read_csv('../input/digit-recognizer/sample_submission.csv') submission.iloc[:, 1] = ypred[:, 0].astype(int) submission.to_csv('submission.csv', index=False )
Digit Recognizer
20,343,341
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
!pip install efficientnet tensorflow_addons > /dev/null
Digit Recognizer
20,343,341
if torch.cuda.is_available() : map_location=lambda storage, loc: storage.cuda() else: map_location='cpu'<set_options>
%matplotlib inline
Digit Recognizer
20,343,341
if torch.cuda.is_available() : device= 'cuda' else: device='cpu' print(device )<categorify>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
Digit Recognizer
20,343,341
def get_transforms() : return Compose([ Resize(IMAGE_SIZE, IMAGE_SIZE), Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], ), ToTensorV2() ] )<choose_model_class>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
20,343,341
class ResNet200D(nn.Module): def __init__(self, model_name='resnet200d'): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, 11) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output<categorify>
X_train = X_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1 )
Digit Recognizer
20,343,341
def inference(models, test_loader, device): tk0 = tqdm(enumerate(test_loader), total=len(test_loader)) probs = [] for i,(images)in tk0: images = images.to(device) avg_preds = [] for model in models: with torch.no_grad() : y_preds1 = model(images) y_preds2 = model(images.flip(-1)) y_preds =(y_preds1.sigmoid().to('cpu' ).numpy() + y_preds2.sigmoid().to('cpu' ).numpy())/ 2 avg_preds.append(y_preds) avg_preds = np.mean(avg_preds, axis=0) probs.append(avg_preds) probs = np.concatenate(probs) return probs<define_variables>
Y_train = tf.keras.utils.to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
20,343,341
MODEL_PATH = '../input/resnet200d-baseline-benchmark-public/resnet200d_fold4_cv954.pth' MODEL_PATH957 = '../input/resnet200d-baseline-benchmark-public/resnet200d_fold3_cv957.pth'<choose_model_class>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=0 )
Digit Recognizer
20,343,341
model = ResNet200D() model.load_state_dict(torch.load(MODEL_PATH,map_location=map_location),strict=False) model.eval() models = [model.to(device)] model957 = ResNet200D() model957.load_state_dict(torch.load(MODEL_PATH957,map_location=map_location),strict=False) model957.eval() models957 = [model957.to(device)]<load_from_csv>
efficientnet_size = 7 weights = "imagenet" size = 56
Digit Recognizer
20,343,341
test = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv') test_dataset = TestDataset(test, transform=get_transforms()) test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4 , pin_memory=True) predictions = inference(models, test_loader, device) predictions957 = inference(models957, test_loader, device) <save_to_csv>
model = tf.keras.models.Sequential() model.add(tf.keras.layers.Conv2D(filters = 3, kernel_size = 1, padding = 'Same', activation ='relu', input_shape =(size,size,1))) model.add(getattr(efn, f"EfficientNetB{efficientnet_size}" )( weights=weights, include_top=False, input_shape=(size, size, 3))) model.add(tf.keras.layers.GlobalAveragePooling2D()) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(10, activation = "softmax"))
Digit Recognizer
20,343,341
target_cols = test.iloc[:, 1:12].columns.tolist() test[target_cols] =(predictions + predictions957)/2 test[['StudyInstanceUID'] + target_cols].to_csv('submission.csv', index=False) test.head()<import_modules>
optimizer = tf.keras.optimizers.Adam(lr=0.0001 )
Digit Recognizer
20,343,341
<define_variables>
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
20,343,341
AUTO = tf.data.experimental.AUTOTUNE DIM = 600 IMAGE_SIZE=[DIM,DIM] BATCH_SIZE = 8 DATA_PATH = ".. /input/ranzcr-clip-catheter-line-classification/" OUTPUT_PATH = "./"<install_modules>
tf.random.set_seed(0) history = model.fit(X_train, Y_train, epochs=10, batch_size=128, validation_data=(X_val, Y_val), )
Digit Recognizer
20,343,341
!pip install ../input/kerasapplications/keras-applications-master/ package_path = '../input/efficientnetmaster/efficientnet-master/' sys.path.append(package_path) <define_variables>
pred = model.predict_classes(test )
Digit Recognizer
20,343,341
TEST_FILENAMES = tf.io.gfile.glob('../input/ranzcr-clip-catheter-line-classification/test_tfrecords/*.tfrec') print(TEST_FILENAMES )<categorify>
submission = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv" )
Digit Recognizer
20,343,341
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.image.resize(image, [DIM, DIM]) image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { 'image': tf.io.FixedLenFeature([], tf.string), 'StudyInstanceUID': tf.io.FixedLenFeature([], tf.string) } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['StudyInstanceUID'] return image, idnum def load_dataset(filenames, labeled = True, ordered = False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads = AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_unlabeled_tfrecord, num_parallel_calls = AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES )<load_pretrained>
submission["Label"] = pred
Digit Recognizer
20,343,341
models1=[] for filename in glob.glob('../input/eff7trained/*best.h5'): model = tf.keras.models.load_model(filename, custom_objects = None) models1.append(model) <predict_on_test>
submission.to_csv("submission.csv",index=False )
Digit Recognizer
18,487,180
test_ds = get_test_dataset(ordered=True) test_images_ds = test_ds.map(lambda image, idnum: image) labels = ['ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present'] mean =(models1[0].predict(test_images_ds) +models1[1].predict(test_images_ds) +models1[2].predict(test_images_ds) +models1[3].predict(test_images_ds) +models1[4].predict(test_images_ds)) /5.0<categorify>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv' )
Digit Recognizer
18,487,180
test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U' )<save_to_csv>
x_train = train.drop('label', axis=1)/255.0 y_label = train['label'].values x_test = test/255.0
Digit Recognizer
18,487,180
submission = pd.DataFrame(mean, columns = labels) submission.insert(0, "StudyInstanceUID", test_ids, False) submission['StudyInstanceUID'] = submission['StudyInstanceUID'].apply(lambda x: x.rstrip(".jpg")) submission.to_csv('submission.csv', index=False )<train_model>
datagen = ImageDataGenerator( rotation_range=15, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, )
Digit Recognizer
18,487,180
print("Done" )<define_variables>
import tensorflow as tf
Digit Recognizer
18,487,180
batch_size = 1 image_size = 512 tta = True submit = True enet_type = ['resnet200d'] * 5 model_path = ['../input/resnet200d-baseline-benchmark-public/resnet200d_fold0_cv953.pth', '../input/resnet200d-baseline-benchmark-public/resnet200d_fold1_cv955.pth', '../input/resnet200d-baseline-benchmark-public/resnet200d_fold2_cv955.pth', '../input/resnet200d-baseline-benchmark-public/resnet200d_fold3_cv957.pth', '../input/resnet200d-baseline-benchmark-public/resnet200d_fold4_cv954.pth']<set_options>
class ResidualUnit(tf.keras.layers.Layer): def __init__(self, filters, strides=1, activation='relu', **kwargs): super().__init__(**kwargs) self.activation = tf.keras.activations.get(activation) self.main_layers = [ tf.keras.layers.Conv2D(filters, 3, strides=strides, padding='SAME', use_bias=False), tf.keras.layers.BatchNormalization() , self.activation, tf.keras.layers.Conv2D(filters, 3, strides=1, padding='SAME', use_bias=False), tf.keras.layers.BatchNormalization() , self.activation, tf.keras.layers.Conv2D(filters, 3, strides=1, padding='SAME', use_bias=False), tf.keras.layers.BatchNormalization() , ] self.skip_layers = [] if strides > 1: self.skip_layers = [ tf.keras.layers.Conv2D(filters, 1, strides=strides, padding='SAME', use_bias=False), tf.keras.layers.BatchNormalization() , ] def call(self, inputs): Z = inputs for layer in self.main_layers: Z=layer(Z) skip_Z = inputs for layer in self.skip_layers: skip_Z = layer(skip_Z) return self.activation(Z +skip_Z )
Digit Recognizer
18,487,180
sys.path.append('../input/pytorch-image-models/pytorch-image-models-master') sys.path.append('../input/timm-pytorch-image-models/pytorch-image-models-master') DEBUG = False %matplotlib inline device = torch.device('cuda')if not DEBUG else torch.device('cpu' )<choose_model_class>
model = tf.keras.models.Sequential() model.add(tf.keras.layers.Conv2D(64,(7,7), input_shape=(28, 28, 1), padding='SAME')) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.MaxPooling2D(2, 2)) prev_filters = 64 for filters in [64]*2 + [128]*2 + [256]*2: print(filters) strides = 1 if filters == prev_filters else 2 model.add(ResidualUnit(filters, strides=strides)) prev_filters = filters model.add(tf.keras.layers.GlobalAvgPool2D()) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dropout(0.5*0.5)) model.add(tf.keras.layers.Dense(int(filters/2), activation='relu')) model.add(tf.keras.layers.Dense(10, activation='softmax'))
Digit Recognizer
18,487,180
class RANZCRResNet200D(nn.Module): def __init__(self, model_name='resnet200d', out_dim=11, pretrained=False): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, out_dim) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output<concatenate>
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['acc']) checkpoint = ModelCheckpoint( filepath=f'resnet-{int(time.time())}.dhf5', monitor='loss', save_best_only=True ) annealer = LearningRateScheduler(lambda x: 1e-3 * 0.8**x) callbacks = [checkpoint, annealer]
Digit Recognizer
18,487,180
transforms_test = albumentations.Compose([ Resize(image_size, image_size), Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], ), ToTensorV2() ] )<load_from_csv>
batch_size = 64 history = model.fit(datagen.flow(X_train, y_label, batch_size=batch_size), epochs = 30, verbose = 1, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=callbacks, )
Digit Recognizer
18,487,180
test = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv') test['file_path'] = test.StudyInstanceUID.apply(lambda x: os.path.join('../input/ranzcr-clip-catheter-line-classification/test', f'{x}.jpg')) target_cols = test.iloc[:, 1:12].columns.tolist() test_dataset = RANZCRDataset(test, 'test', transform=transforms_test) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=24 )<choose_model_class>
model.evaluate(X_train, y_label )
Digit Recognizer
18,487,180
if submit: test_preds_1 = [] for i in range(len(enet_type)) : if enet_type[i] == 'resnet200d': print('resnet200d loaded') model = RANZCRResNet200D(enet_type[i], out_dim=len(target_cols)) model = model.to(device) model.load_state_dict(torch.load(model_path[i], map_location='cuda:0')) if tta: test_preds_1 += [tta_inference_func(test_loader)] else: test_preds_1 += [inference_func(test_loader)]<load_from_csv>
def predict_proba(X, model, num_samples): preds = [model(X, training=True)for _ in range(num_samples)] return np.stack(preds ).mean(axis=0) def predict_class(X, model, num_samples): proba_preds = predict_proba(X, model, num_samples) return np.argmax(proba_preds, axis=1 )
Digit Recognizer
18,487,180
submission = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv') submission[target_cols] = np.mean(test_preds_1, axis=0 )<import_modules>
y_pred = predict_class(X_test, model, 10 )
Digit Recognizer
18,487,180
from pathlib import Path import random from scipy.sparse import coo_matrix import gc from joblib import Parallel, delayed import typing as tp from torch.utils import data<define_variables>
res = pd.DataFrame(y_pred, columns=['Label']) res.index = res.index + 1 res.index.rename('ImageId', inplace=True) res.to_csv('res.csv' )
Digit Recognizer
18,487,180
ROOT = Path.cwd().parent INPUT = ROOT / "input" OUTPUT = ROOT / "output" DATA = INPUT / "ranzcr-clip-catheter-line-classification" TRAIN = DATA / "train" TEST = DATA / "test" TRAINED_MODEL = INPUT / "ranzcr-clip-weights-for-multi-head-model-v2" TMP = ROOT / "tmp" TMP.mkdir(exist_ok=True) RANDAM_SEED = 1086 N_CLASSES = 11 FOLDS = [0, 1, 2, 3, 4] N_FOLD = len(FOLDS) IMAGE_SIZE =(512, 512) CONVERT_TO_RANK = False FAST_COMMIT = False CLASSES = [ 'ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present' ]<load_from_csv>
successive_outputs = [layer.output for layer in model.layers[0:]] visualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs) img = random.choice(X_train) plt.imshow(img, cmap=plt.cm.binary) plt.show()
Digit Recognizer
18,487,180
for p in DATA.iterdir() : print(p.name) train = pd.read_csv(DATA / "train.csv") smpl_sub = pd.read_csv(DATA / "sample_submission.csv" )<split>
successive_feature_maps = visualization_model.predict(img) layer_names = [layer.name for layer in model.layers] for layer_name, feature_map in zip(layer_names, successive_feature_maps): if len(feature_map.shape)== 4: n_features = feature_map.shape[-1] size = feature_map.shape[1] pic_num_per_row = n_features // 8 + 1 display_grid_arr = [] display_grid = np.zeros(( size, size * pic_num_per_row)) for i in range(n_features): if i>0 and(i % pic_num_per_row == 0 or i == n_features): display_grid_arr.append(display_grid) display_grid = np.zeros(( size, size * pic_num_per_row)) index = i % pic_num_per_row x = feature_map[0, :, :, i] x -= x.mean() x /= x.std() x *= 64 x += 128 x = np.clip(x, 0, 255 ).astype('uint8') display_grid[:, index * size :(index + 1)* size] = x row_num = len(display_grid_arr) scale = 200./ n_features fig, axes = plt.subplots(row_num, 1, figsize=(scale * pic_num_per_row, scale * row_num)) fig.suptitle(layer_name, fontsize=40) for i in range(row_num): axes[i].imshow(display_grid_arr[i], aspect='auto', cmap='viridis' )
Digit Recognizer
18,487,180
if FAST_COMMIT and len(smpl_sub)== 3582: smpl_sub = smpl_sub.iloc[:64 * 3].reset_index(drop=True )<categorify>
y_pred = model.predict_classes(X_train )
Digit Recognizer
18,487,180
def multi_label_stratified_group_k_fold(label_arr: np.array, gid_arr: np.array, n_fold: int, seed: int=42): np.random.seed(seed) random.seed(seed) start_time = time.time() n_train, n_class = label_arr.shape gid_unique = sorted(set(gid_arr)) n_group = len(gid_unique) gid2aid = dict(zip(gid_unique, range(n_group))) aid_arr = np.vectorize(lambda x: gid2aid[x] )(gid_arr) cnts_by_class = label_arr.sum(axis=0) col, row = np.array(sorted(enumerate(aid_arr), key=lambda x: x[1])).T cnts_by_group = coo_matrix( (np.ones(len(label_arr)) ,(row, col)) ).dot(coo_matrix(label_arr)).toarray().astype(int) del col del row cnts_by_fold = np.zeros(( n_fold, n_class), int) groups_by_fold = [[] for fid in range(n_fold)] group_and_cnts = list(enumerate(cnts_by_group)) np.random.shuffle(group_and_cnts) print("finished preparation", time.time() - start_time) for aid, cnt_by_g in sorted(group_and_cnts, key=lambda x: -np.std(x[1])) : best_fold = None min_eval = None for fid in range(n_fold): cnts_by_fold[fid] += cnt_by_g fold_eval =(cnts_by_fold / cnts_by_class ).std(axis=0 ).mean() cnts_by_fold[fid] -= cnt_by_g if min_eval is None or fold_eval < min_eval: min_eval = fold_eval best_fold = fid cnts_by_fold[best_fold] += cnt_by_g groups_by_fold[best_fold].append(aid) print("finished assignment.", time.time() - start_time) gc.collect() idx_arr = np.arange(n_train) for fid in range(n_fold): val_groups = groups_by_fold[fid] val_indexs_bool = np.isin(aid_arr, val_groups) train_indexs = idx_arr[~val_indexs_bool] val_indexs = idx_arr[val_indexs_bool] print("[fold {}]".format(fid), end=" ") print("n_group:(train, val)=({}, {})".format(n_group - len(val_groups), len(val_groups)) , end=" ") print("n_sample:(train, val)=({}, {})".format(len(train_indexs), len(val_indexs))) yield train_indexs, val_indexs<concatenate>
from sklearn.model_selection import cross_val_score from sklearn.model_selection import cross_val_predict from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
Digit Recognizer
18,487,180
label_arr = train[CLASSES].values group_id = train.PatientID.values train_val_indexs = list( multi_label_stratified_group_k_fold(label_arr, group_id, N_FOLD, RANDAM_SEED))<feature_engineering>
conf_max = confusion_matrix(y_label, y_pred) conf_max
Digit Recognizer
18,487,180
train["fold"] = -1 for fold_id,(trn_idx, val_idx)in enumerate(train_val_indexs): train.loc[val_idx, "fold"] = fold_id train.groupby("fold")[CLASSES].sum()<train_model>
diff_num = 5
Digit Recognizer
18,487,180
<choose_model_class><EOS>
a_1d = norm_conf_max.flatten() idx_1d = a_1d.argsort() [-diff_num:] x_idx, y_idx = np.unravel_index(idx_1d, norm_conf_max.shape) print(x_idx, y_idx )
Digit Recognizer
18,555,598
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
import numpy as np import pandas as pd import tensorflow as tf from tensorflow.keras import layers, models from tensorflow.keras.layers import Conv2D,MaxPooling2D,Flatten,Dense,Dropout import matplotlib.pyplot as plt import keras from keras.utils.np_utils import to_categorical
Digit Recognizer
18,555,598
class MultiHeadResNet200D(nn.Module): def __init__( self, out_dims_head: tp.List[int]=[3, 4, 3, 1], pretrained=False ): self.base_name = "resnet200d_320" self.n_heads = len(out_dims_head) super(MultiHeadResNet200D, self ).__init__() base_model = timm.create_model( self.base_name, num_classes=sum(out_dims_head), pretrained=False) in_features = base_model.num_features if pretrained: pretrained_model_path = '.. /input/startingpointschestx/resnet200d_320_chestx.pth' state_dict = dict() for k, v in torch.load(pretrained_model_path, map_location='cpu')["model"].items() : if k[:6] == "model.": k = k.replace("model.", "") state_dict[k] = v base_model.load_state_dict(state_dict) base_model.reset_classifier(0, '') self.backbone = base_model for i, out_dim in enumerate(out_dims_head): layer_name = f"head_{i}" layer = nn.Sequential( SpatialAttentionBlock(in_features, [64, 32, 16, 1]), nn.AdaptiveAvgPool2d(output_size=1), nn.Flatten(start_dim=1), nn.Linear(in_features, in_features), nn.ReLU(inplace=True), nn.Dropout(0.5), nn.Linear(in_features, out_dim)) setattr(self, layer_name, layer) def forward(self, x): h = self.backbone(x) hs = [ getattr(self, f"head_{i}" )(h)for i in range(self.n_heads)] y = torch.cat(hs, axis=1) return y m = MultiHeadResNet200D([3, 4, 3, 1], False) m = m.eval() x = torch.rand(1, 3, 256, 256) with torch.no_grad() : y = m(x) print("[forward test]") print("input:\t{} output:\t{}".format(x.shape, y.shape)) del m; del x; del y gc.collect()<categorify>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') test1 = test.copy()
Digit Recognizer
18,555,598
class LabeledImageDataset(data.Dataset): def __init__( self, file_list: tp.List[ tp.Tuple[tp.Union[str, Path], tp.Union[int, float, np.ndarray]]], transform_list: tp.List[tp.Dict], ): self.file_list = file_list self.transform = ImageTransformForCls(transform_list) def __len__(self): return len(self.file_list) def __getitem__(self, index): img_path, label = self.file_list[index] img = self._read_image_as_array(img_path) img, label = self.transform(( img, label)) return img, label def _read_image_as_array(self, path: str): img_arr = cv2.imread(str(path)) img_arr = cv2.cvtColor(img_arr, cv2.COLOR_BGR2RGB) return img_arr<create_dataframe>
x_train=train.drop(['label'],1) y_train=train['label'] x_train=x_train.values.reshape(-1,28,28,1) test=test.values.reshape(-1,28,28,1) x_train=x_train/255 test=test/255
Digit Recognizer
18,555,598
def get_dataloaders_for_inference( file_list: tp.List[tp.List], batch_size=64, ): dataset = LabeledImageDataset( file_list, transform_list=[ ["Normalize", { "always_apply": True, "max_pixel_value": 255.0, "mean": ["0.4887381077884414"], "std": ["0.23064819430546407"]}], ["ToTensorV2", {"always_apply": True}], ]) loader = data.DataLoader( dataset, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True, drop_last=False) return loader<categorify>
model=models.Sequential([ Conv2D(32,(5,5), activation='relu' , input_shape=(28,28,1)) , MaxPooling2D(pool_size=(2,2)) , Conv2D(64,(5,5), activation ='relu'), MaxPooling2D(pool_size=(2,2)) , Dropout(0.25), Conv2D(64,(3,3), activation ='relu'), MaxPooling2D(pool_size=(2,2)) , Dropout(0.25), Flatten() , Dense(64, activation='relu'), Dense(10, activation='softmax') ] )
Digit Recognizer
18,555,598
class ImageTransformBase: def __init__(self, data_augmentations: tp.List[tp.Tuple[str, tp.Dict]]): augmentations_list = [ self._get_augmentation(aug_name )(**params) for aug_name, params in data_augmentations] self.data_aug = albumentations.Compose(augmentations_list) def __call__(self, pair: tp.Tuple[np.ndarray])-> tp.Tuple[np.ndarray]: raise NotImplementedError def _get_augmentation(self, aug_name: str)-> tp.Tuple[ImageOnlyTransform, DualTransform]: if hasattr(albumentations, aug_name): return getattr(albumentations, aug_name) else: return eval(aug_name) class ImageTransformForCls(ImageTransformBase): def __init__(self, data_augmentations: tp.List[tp.Tuple[str, tp.Dict]]): super(ImageTransformForCls, self ).__init__(data_augmentations) def __call__(self, in_arrs: tp.Tuple[np.ndarray])-> tp.Tuple[np.ndarray]: img, label = in_arrs augmented = self.data_aug(image=img) img = augmented["image"] return img, label<load_pretrained>
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy']) model.fit(x_train, y_train, epochs=60, batch_size=64 )
Digit Recognizer
18,555,598
def load_setting_file(path: str): with open(path)as f: settings = yaml.safe_load(f) return settings def set_random_seed(seed: int = 42, deterministic: bool = False): random.seed(seed) np.random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = deterministic def run_inference_loop(stgs, model, loader, device): model.to(device) model.eval() pred_list = [] with torch.no_grad() : for x, t in tqdm(loader): y = model(x.to(device)) pred_list.append(y.sigmoid().detach().cpu().numpy()) pred_arr = np.concatenate(pred_list) del pred_list return pred_arr<set_options>
y_test = model.predict(test) y_test = np.argmax(y_test, axis = 1) index_list = [] for i in list(test1.index): index_list.append(i+1) submission_df = pd.DataFrame({ "ImageId": index_list, "Label": y_test }) submission_df.to_csv("submission_cnn.csv", index = False )
Digit Recognizer
18,243,332
if not torch.cuda.is_available() : device = torch.device("cpu") else: device = torch.device("cuda") print(device )<load_pretrained>
import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np from keras.utils.np_utils import to_categorical
Digit Recognizer
18,243,332
model_dir = TRAINED_MODEL test_dir = TEST_RESIZED test_file_list = [ (test_dir / f"{img_id}.png", [-1] * 11) for img_id in smpl_sub["StudyInstanceUID"].values] test_loader = get_dataloaders_for_inference(test_file_list, batch_size=64) test_preds_arr = np.zeros(( N_FOLD, len(smpl_sub), N_CLASSES)) for fold_id in FOLDS: print(f"[fold {fold_id}]") stgs = load_setting_file(model_dir / f"fold{fold_id}" / "settings.yml") stgs["model"]["params"]["pretrained"] = False model = MultiHeadResNet200D(**stgs["model"]["params"]) model_path = model_dir / f"best_model_fold{fold_id}.pth" model.load_state_dict(torch.load(model_path, map_location=device)) test_pred = run_inference_loop(stgs, model, test_loader, device) test_preds_arr[fold_id] = test_pred del model torch.cuda.empty_cache() gc.collect()<prepare_output>
train=pd.read_csv('../input/digit-recognizer/train.csv') test=pd.read_csv('../input/digit-recognizer/test.csv' )
Digit Recognizer
18,243,332
sub = smpl_sub.copy() sub[CLASSES] = test_preds_arr.mean(axis=0 )<prepare_output>
x_train=train.drop(['label'],1) y_train=train['label']
Digit Recognizer
18,243,332
Final_Submission = smpl_sub.copy() Final_Submission[CLASSES] =.50 * sub[CLASSES] +.50 * submission[CLASSES]<save_to_csv>
x_train=np.array(x_train) test=np.array(test )
Digit Recognizer
18,243,332
Final_Submission.to_csv("submission.csv", index=False )<set_options>
x_train=x_train/255 test=test/255
Digit Recognizer
18,243,332
sys.path.append('../input/pytorch-images-seresnet') warnings.filterwarnings('ignore') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )<define_variables>
target=x_train.reshape(-1,28,28,1) test=test.reshape(-1,28,28,1) y_train=np.array(y_train) label=to_categorical(y_train) label.shape
Digit Recognizer
18,243,332
IMAGE_SIZE = 640 BATCH_SIZE = 128 TEST_PATH = '../input/ranzcr-clip-catheter-line-classification/test' MODEL_PATH = '../input/resnet200d-public/resnet200d_320_CV9632.pth'<load_from_csv>
from keras.models import Sequential from keras.layers import Conv2D,MaxPooling2D,Flatten,Dense,Dropout
Digit Recognizer
18,243,332
test = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv' )<categorify>
model=Sequential([ Conv2D(32,(5,5), activation='relu' , input_shape=(28,28,1)) , MaxPooling2D(pool_size=(2,2)) , Conv2D(64,(5,5), activation ='relu'), MaxPooling2D(pool_size=(2,2)) , Dropout(0.25), Conv2D(64,(3,3), activation ='relu'), MaxPooling2D(pool_size=(2,2)) , Dropout(0.25), Flatten() , Dense(64, activation='relu'), Dense(10, activation='softmax') ] )
Digit Recognizer
18,243,332
def get_transforms() : return Compose([ Resize(IMAGE_SIZE, IMAGE_SIZE), Normalize( ), ToTensorV2() , ] )<choose_model_class>
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'] )
Digit Recognizer
18,243,332
class ResNet200D(nn.Module): def __init__(self, model_name='resnet200d_320'): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, 11) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output<categorify>
model.fit(target,label,epochs=40,batch_size=64 )
Digit Recognizer
18,243,332
def inference(models, test_loader, device): tk0 = tqdm(enumerate(test_loader), total=len(test_loader)) probs = [] for i,(images)in tk0: images = images.to(device) avg_preds = [] for model in models: with torch.no_grad() : y_preds1 = model(images) y_preds2 = model(images.flip(-1)) y_preds =(y_preds1.sigmoid().to('cpu' ).numpy() + y_preds2.sigmoid().to('cpu' ).numpy())/ 2 avg_preds.append(y_preds) avg_preds = np.mean(avg_preds, axis=0) probs.append(avg_preds) probs = np.concatenate(probs) return probs<choose_model_class>
Y_pred = model.predict(test) Y_pred_classes = np.argmax(Y_pred,axis = 1 )
Digit Recognizer
18,243,332
model = ResNet200D() model.load_state_dict(torch.load(MODEL_PATH)['model']) model.eval() models = [model.to(device)]<load_pretrained>
submission_data = pd.read_csv('../input/digit-recognizer/sample_submission.csv' )
Digit Recognizer
18,243,332
test_dataset = TestDataset(test, transform=get_transforms()) test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4 , pin_memory=True) predictions = inference(models, test_loader, device )<save_to_csv>
submission_data['Label']=Y_pred_classes
Digit Recognizer
18,243,332
target_cols = test.iloc[:, 1:12].columns.tolist() test[target_cols] = predictions test[['StudyInstanceUID'] + target_cols].to_csv('submission.csv', index=False) test.head()<define_variables>
submission_data.to_csv('submit.csv' ,index=False )
Digit Recognizer
18,243,332
batch_size = 1 image_size = 512 tta = True submit = True enet_type = ['resnet200d'] * 5 model_path = ['../input/resnet200d-baseline-benchmark-public/resnet200d_fold0_cv953.pth', '../input/resnet200d-baseline-benchmark-public/resnet200d_fold1_cv955.pth', '../input/resnet200d-baseline-benchmark-public/resnet200d_fold2_cv955.pth', '../input/resnet200d-baseline-benchmark-public/resnet200d_fold3_cv957.pth', '../input/resnet200d-baseline-benchmark-public/resnet200d_fold4_cv954.pth'] fast_sub = False fast_sub_path = '../input/xxxxxx/your_submission.csv'<set_options>
def test_output(i): plt.imshow(x_train[i],cmap='gray') predicted=np.argmax(model.predict(target[i].reshape(-1,28,28,1))) actual=np.argmax(label[i]) plt.xlabel(f'predicted= {predicted} Actual= {actual}' )
Digit Recognizer
18,243,332
sys.path.append('../input/pytorch-image-models/pytorch-image-models-master') sys.path.append('../input/timm-pytorch-image-models/pytorch-image-models-master') DEBUG = False %matplotlib inline device = torch.device('cuda')if not DEBUG else torch.device('cpu' )<choose_model_class>
from PIL import Image, ImageGrab
Digit Recognizer
18,243,332
class RANZCRResNet200D(nn.Module): def __init__(self, model_name='resnet200d', out_dim=11, pretrained=False): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, out_dim) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output<concatenate>
def predict_digit1(img): img = Image.open(img) plt.imshow(img) img = img.convert('L', dither=Image.NONE) img = img.resize(( 28,28)) img = np.array(img) img=np.invert(img) predicted=np.argmax(model.predict(img.reshape(-1,28,28,1))) plt.xlabel(f'Predicted= {predicted}' )
Digit Recognizer
18,243,332
transforms_test = albumentations.Compose([ Resize(image_size, image_size), Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], ), ToTensorV2() ] )<load_from_csv>
predict_digit1('../input/temporary/Images/images.jfif' )
Digit Recognizer
18,243,332
test = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv') test['file_path'] = test.StudyInstanceUID.apply(lambda x: os.path.join('../input/ranzcr-clip-catheter-line-classification/test', f'{x}.jpg')) target_cols = test.iloc[:, 1:12].columns.tolist() test_dataset = RANZCRDataset(test, 'test', transform=transforms_test) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=24 )<choose_model_class>
predict_digit1('../input/temporary/Images/download.png' )
Digit Recognizer
18,243,332
if submit: test_preds = [] for i in range(len(enet_type)) : if enet_type[i] == 'resnet200d': print('resnet200d loaded') model = RANZCRResNet200D(enet_type[i], out_dim=len(target_cols)) model = model.to(device) model.load_state_dict(torch.load(model_path[i], map_location='cuda:0')) if tta: test_preds += [tta_inference_func(test_loader)] else: test_preds += [inference_func(test_loader)] submission = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv') submission[target_cols] = np.mean(test_preds, axis=0) submission.to_csv('submission.csv', index=False) else: pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv' ).to_csv('submission.csv', index=False )<load_from_csv>
predict_digit1('../input/temporary/Images/531-5314816_handwritten-1-number-9-hand-written-png-transparent.png' )
Digit Recognizer
18,243,332
<categorify><EOS>
predict_digit1('../input/temporary/Images/1.jpg' )
Digit Recognizer
20,287,499
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_from_csv>
import numpy as np import pandas as pd import os import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout import pandas as pd import matplotlib.pyplot as plt
Digit Recognizer
20,287,499
dfx = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv') df_train, df_valid = model_selection.train_test_split(dfx, test_size=0.1, random_state=42, stratify=dfx.label.values) df_train = df_train.reset_index(drop=True) df_valid = df_valid.reset_index(drop=True) image_path = "../input/cassava-leaf-disease-classification/train_images/" train_image_paths = [os.path.join(image_path, x)for x in df_train.image_id.values] valid_image_paths = [os.path.join(image_path, x)for x in df_valid.image_id.values] train_targets = df_train.label.values valid_targets = df_valid.label.values<create_dataframe>
train = np.loadtxt(open('/kaggle/input/digit-recognizer/train.csv', 'r'), delimiter=',', skiprows=1, dtype='float32') test = np.loadtxt(open('/kaggle/input/digit-recognizer/test.csv', 'r'), delimiter=',', skiprows=1, dtype='float32') train_images = train[:, 1:].reshape(( train.shape[0], 28, 28, 1)) / 255.0 train_labels = train[:, 0].astype(np.uint8) test_images = test.reshape(( test.shape[0], 28, 28, 1)) / 255.0
Digit Recognizer
20,287,499
cassava_train = CassavaDataset(train_image_paths, train_targets, 'train') cassava_test = CassavaDataset(valid_image_paths, valid_targets, 'test') batch_size = 16 train_loader = DataLoader(cassava_train, batch_size=batch_size, shuffle=False, num_workers=2) test_loader = DataLoader(cassava_test, batch_size=batch_size, shuffle=False, num_workers=2 )<compute_train_metric>
augmentation_layer = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomRotation(0.1, input_shape=(28, 28, 1)) , tf.keras.layers.experimental.preprocessing.RandomZoom(( 0.2, 0.2)) , ] )
Digit Recognizer
20,287,499
class AverageMeter: def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def accuracy(output, target, topk=(1,)) : maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.reshape(1, -1 ).expand_as(pred)) return [correct[:k].reshape(-1 ).float().sum(0)* 100./ batch_size for k in topk]<train_model>
for i in range(5): new_img = augmentation_layer(train_images[np.random.randint(train_images.shape[0])] ).numpy() plt.imshow(new_img.reshape(( 28, 28))) plt.show()
Digit Recognizer
20,287,499
def train_epoch(model, loader, device, loss_func, optimizer, scheduler): model.train() summary_loss = AverageMeter() summary_acc = AverageMeter() start = time.time() n = len(loader) for batch in tqdm(loader): images, labels = batch images = images.to(device) labels = labels.to(device) out = model(images) loss = loss_func(out, labels) loss.backward() optimizer.step() optimizer.zero_grad() with torch.no_grad() : acc = accuracy(out, labels)[0] summary_loss.update(loss.detach().item() , batch_size) summary_acc.update(acc.detach().item() , batch_size) train_time = str(datetime.timedelta(seconds=time.time() - start)) print('Train loss: {:.5f} - Train acc: {:.2f}% - time: {}'.format(summary_loss.avg, summary_acc.avg, train_time)) return summary_loss, summary_acc<set_options>
model = Sequential([ tf.keras.layers.Input(( 28, 28, 1)) , augmentation_layer, Conv2D(32, 3, activation='relu', padding="same"), MaxPooling2D(2), Conv2D(64, 3, activation='relu', padding="same"), MaxPooling2D(2), Conv2D(64, 3, activation='relu', padding="same"), MaxPooling2D(2), Flatten() , Dropout(0.3), Dense(128, activation='relu'), Dropout(0.3), Dense(10, activation='softmax') ] )
Digit Recognizer
20,287,499
resnet = timm.create_model('resnext50_32x4d', pretrained=True) num_ftrs = resnet.fc.in_features resnet.fc = nn.Linear(num_ftrs, 5) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") resnet.to(device )<choose_model_class>
model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
20,287,499
num_epochs = 1 best_acc = 0 best_epoch = 0 criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(resnet.parameters() , lr=0.01, momentum=0.9) scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=2, verbose=True, eps=1e-6) for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch + 1, num_epochs)) train_loss, train_acc = train_epoch(resnet, train_loader, device, criterion, optimizer, scheduler) val_loss, val_acc = validate_epoch(resnet, test_loader, device, criterion) scheduler.step(val_loss.avg) if val_acc.avg > best_acc: best_acc = val_acc.avg best_epoch = epoch if epoch == 9: print('Saving model...') PATH = './timm_resnext_epoch{}_384.pth'.format(epoch + 1) torch.save(resnet.state_dict() ,PATH )<choose_model_class>
history = model.fit(train_images, train_labels, epochs=100 )
Digit Recognizer
20,287,499
num_epochs = 10 best_acc = 0 best_epoch = 0 criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(xception.parameters() , lr=0.01, momentum=0.9) scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=2, verbose=True, eps=1e-6) for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch + 1, num_epochs)) train_loss, train_acc = train_epoch(xception, train_loader, device, criterion, optimizer, scheduler) val_loss, val_acc = validate_epoch(xception, test_loader, device, criterion) scheduler.step(val_loss.avg) if val_acc.avg > best_acc: best_acc = val_acc.avg best_epoch = epoch if epoch == 9: print('Saving model...') PATH = './timm_xception_epoch{}_384.pth'.format(epoch + 1) torch.save(xception.state_dict() ,PATH )<find_best_params>
predition_model = tf.keras.Sequential() for layer in model.layers: if layer != augmentation_layer: predition_model.add(layer) predition_model.compile(loss="sparse_categorical_crossentropy", optimizer="adam" )
Digit Recognizer
20,287,499
PATH = './timm_resnext_epoch10_384.pth' resnet = timm.create_model('resnext50_32x4d', pretrained=False) num_ftrs = resnet.fc.in_features resnet.fc = nn.Linear(num_ftrs, 5) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") resnet.to(device) resnet.load_state_dict(torch.load(PATH)) resnet.eval()<load_from_csv>
test_labels = np.argmax(predition_model.predict(test_images), axis=-1) print(test_labels.shape )
Digit Recognizer
20,287,499
submission_df = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv') submission_df.head()<normalization>
image_ids = np.arange(1, test_labels.shape[0]+1) result = np.concatenate(( image_ids.reshape(image_ids.shape[0], 1), test_labels.reshape(test_labels.shape[0], 1)) , axis=1) df = pd.DataFrame(result, columns=["ImageId", "Label"], dtype='int') df.to_csv("submission.csv", index=False )
Digit Recognizer
20,119,647
input_size = 384 stats =([0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261]) trans1 = transforms.Compose([transforms.Resize(( input_size, input_size)) , transforms.Pad(8, padding_mode='reflect'), transforms.ToTensor() , transforms.Normalize(*stats)]) trans2 = transforms.Compose([transforms.Resize(( input_size, input_size)) , transforms.RandomHorizontalFlip(p=0.3), transforms.RandomResizedCrop(input_size), transforms.ToTensor() , transforms.Normalize(*stats)]) trans3 = transforms.Compose([transforms.Resize(( input_size, input_size)) , transforms.RandomVerticalFlip(p=0.3), transforms.RandomResizedCrop(input_size), transforms.ToTensor() , transforms.Normalize(*stats)]) trans4 = transforms.Compose([transforms.Resize(( input_size, input_size)) , transforms.RandomHorizontalFlip(p=0.5), transforms.RandomVerticalFlip(p=0.5), transforms.RandomResizedCrop(input_size), transforms.ToTensor() , transforms.Normalize(*stats)]) transs = [trans1, trans2, trans3, trans4]<prepare_x_and_y>
%matplotlib inline np.random.seed(2) sns.set(style='white', context='notebook', palette='deep' )
Digit Recognizer
20,119,647
test_path = '/kaggle/input/cassava-leaf-disease-classification/test_images/' test_images = os.listdir(test_path) train_image_paths = [os.path.join(test_path, x)for x in test_images] y_preds = [] y2_preds = [] p = 0 for i in test_images: res = [] image = Image.open(f'/kaggle/input/cassava-leaf-disease-classification/test_images/{i}') input_size = 384 outs = torch.Tensor(np.zeros(( len(transs), 5))) outs2 = torch.Tensor(np.zeros(( len(transs), 5))) k = 0 for trans in transs: img = trans(image) img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2]) img = Variable(img.to(device)) out = resnet(img) out2= xception(img) outs[k,:] = 4*out outs2[k,:] = 5*out2 k += 1 out = outs.mean(axis=0) out2 = outs2.mean(axis=0) res.append(out2) mean = torch.mean(torch.stack(res), dim = 0) _, predicted = torch.max(out2.data, 0) y_preds.append(predicted.item() )<create_dataframe>
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
20,119,647
df_sub = pd.DataFrame({'image_id': test_images, 'label': y_preds}) display(df_sub )<save_to_csv>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
20,119,647
df_sub.to_csv('submission.csv', index=False )<install_modules>
Y_train=to_categorical(Y_train, num_classes=10 )
Digit Recognizer
20,119,647
pip install cleantext<install_modules>
random_seed=2
Digit Recognizer
20,119,647
pip install ktrain<set_options>
X_train,X_val,Y_train,Y_val = train_test_split(X_train,Y_train,test_size=0.1,random_state=random_seed )
Digit Recognizer
20,119,647
warnings.filterwarnings("ignore" )<load_from_csv>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax"))
Digit Recognizer
20,119,647
df = pd.read_csv(".. /input/nlp-getting-started/train.csv") display(df.head()) display(df.shape )<categorify>
optimizer = RMSprop(lr=0.001,rho=0.9,epsilon=1e-08,decay=0.0 )
Digit Recognizer
20,119,647
l=len(df) display(l) cleanlist=[] textlength=[] for i in range(l): ct=cleantext.clean(df.iloc[i,3], clean_all= True) cleanlist.append(ct) lct=len(ct) textlength.append(lct) <create_dataframe>
model.compile(optimizer=optimizer,loss="categorical_crossentropy",metrics=["accuracy"] )
Digit Recognizer
20,119,647
df_clean=pd.DataFrame(cleanlist) df_clean.columns=['cleantext'] frames=[df,df_clean] newdf=pd.concat(frames, axis=1) display(newdf )<train_model>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss',patience=3,verbose=1,factor=0.5,min_lr=0.00001 )
Digit Recognizer
20,119,647
( x_train, y_train),(x_test, y_test), preproc=text.texts_from_df(newdf, 'cleantext',label_columns=['target'], maxlen=127,max_features=100000, preprocess_mode='bert', val_pct=.1 )<train_model>
epochs=15 batch_size=86
Digit Recognizer
20,119,647
model=text.text_classifier('bert',(x_train, y_train), preproc=preproc) learner=ktrain.get_learner(model, train_data=(x_train, y_train), val_data=(x_test, y_test), batch_size=32 )<train_model>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
20,119,647
learner.fit_onecycle(2e-5, 3) predictor=ktrain.get_predictor(learner.model, preproc )<predict_on_test>
history = model.fit(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,Y_val), verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction] )
Digit Recognizer
20,119,647
predictor.predict(['calm','earthquake'] )<load_from_csv>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
20,119,647
<predict_on_test><EOS>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("cnn_mnist_datagen.csv",index=False )
Digit Recognizer
21,936,867
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<prepare_output>
import tensorflow as tf import numpy as np import pandas as pd
Digit Recognizer
21,936,867
df_pred=pd.DataFrame(predlist) df_pred.columns=['target'] frames=[df1,df_pred] df2=pd.concat(frames, axis=1) display(df2.head() )<feature_engineering>
training = pd.read_csv('../input/digit-recognizer/train.csv' )
Digit Recognizer
21,936,867
df2.loc[df2['target']=='target','target']=1 df2.loc[df2['target']=='not_target','target']=0 display(df2['target'].mean()) df2=df2[['id','target']] display(df2.shape) display(df2.head() )<save_to_csv>
x_train, y_train = training.iloc[:, 1:], training.iloc[:, 0:1]
Digit Recognizer
21,936,867
df2.to_csv("submission.csv", index=False )<load_from_url>
x_train = x_train / 255
Digit Recognizer
21,936,867
!wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py<import_modules>
model = tf.keras.models.Sequential() model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=5, padding="same", activation="relu", input_shape=[28, 28, 1])) model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=5, padding="same", activation="relu")) model.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid')) model.add(tf.keras.layers.Dropout(0.2)) model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding="same", activation="relu")) model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding="same", activation="relu")) model.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid')) model.add(tf.keras.layers.Dropout(0.2)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(units=256, activation='relu')) model.add(tf.keras.layers.Dropout(0.4)) model.add(tf.keras.layers.Dense(units=10, activation='softmax')) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) model.summary()
Digit Recognizer
21,936,867
import numpy as np import pandas as pd import tensorflow as tf from tensorflow.keras import layers, models, optimizers from tensorflow.keras.callbacks import ModelCheckpoint<load_from_csv>
model.fit(x_train, y_train, epochs=15 )
Digit Recognizer
21,936,867
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv") test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv") submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv" )<categorify>
test = pd.read_csv('../input/digit-recognizer/test.csv') test = test / 255 test = test.values.reshape(-1, 28, 28, 1 )
Digit Recognizer
21,936,867
def bert_encode(texts, tokenizer, max_len): all_tokens = [] all_masks = [] all_segments = [] for text in texts: text = tokenizer.tokenize(text) text = text[:max_len-2] input_sequence = ["[CLS]"] + text + ["[SEP]"] pad_len = max_len - len(input_sequence) tokens = tokenizer.convert_tokens_to_ids(input_sequence) tokens += [0] * pad_len pad_masks = [1] * len(input_sequence)+ [0] * pad_len segment_ids = [0] * max_len all_tokens.append(tokens) all_masks.append(pad_masks) all_segments.append(segment_ids) return np.array(all_tokens), np.array(all_masks), np.array(all_segments )<data_type_conversions>
predictions = model.predict(test )
Digit Recognizer
21,936,867
<categorify><EOS>
export = pd.DataFrame([np.argmax(prediction)for prediction in predictions]) export.index += 1 export = export.reset_index() export.columns = ['ImageId', 'Label'] export.to_csv('submission.csv', index=False )
Digit Recognizer
21,648,756
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
from tensorflow.keras.preprocessing.image \
Digit Recognizer