Dataset schema (column: dtype, value/length range):
kernel_id: int64, 24.2k – 23.3M
prompt: string, lengths 8 – 1.85M
completion: string, lengths 1 – 182k
comp_name: string, lengths 5 – 57
11,684,407
data_df_sample_submission = pd.read_csv('../input/hpa-single-cell-image-classification/sample_submission.csv') test_files = os.listdir("../input/hpa-single-cell-image-classification/test") color_list = ["_red.png", "_green.png", "_yellow.png", "_blue.png"] test_files_names = [re.sub(r'|'.join(map(re.escape, color_list)), '', elem) for elem in test_files] test_files_names = list(set(test_files_names)) d = [] for i, file in enumerate(test_files_names): img = cv2.imread(f"../input/hpa-single-cell-image-classification/test/{file}_red.png") height, width, channels = img.shape d.append({ "ID": file, "ImageWidth": width, "ImageHeight": height, "PredictionString": "0 1 eNoLCAgIMAEABJkBdQ==" }) if i % 50 == 0: print(i) df_from_files = pd.DataFrame(d).sort_values("ID").reset_index(drop=True) assert (df_from_files == data_df_sample_submission).all().all(), "Dataframes do not match"<define_variables>
cnn.add(layer = tf.keras.layers.Dense(units = 108, activation='relu'))
Digit Recognizer
11,684,407
PATH_TEST = "../input/hpa-single-cell-image-classification/test" MODEL_PATH = "../input/models-hpa/" MODELS_LIST_EFFNET = [ "efficientnet-b4_rgby_lr_0.001_ADAM_steplr_g085_focal1_g1.0_resize640_mediumaug_3.pth", "efficientnet-b4_rgby_lr_0.0015_ADAM_focal1_g1.0_resize640_10pcTest_HEAVY_AUG_E3_F0.pth", "efficientnet-b4_rgby_lr_0.001_ADAM_steplr_g085_focal1_g1.0_resize640_mediumaug_E4_F4.pth", ] MODELS_LIST_RESNEST = [ "resnest101_rgby_lr_0.002_SGD_polyoptim_focal1_g1.0_scheduler_largedataset_resize640_mediumaug_3.pth", "resnest101_rgby_lr_0.002_SGD_focal1_g1.0_resize640_5pcTest_BS8_E3_F0.pth", ] MODEL_LABELS_EFF = "../input/hpa-tensorflow-models/model_green.06-0.07.h5" MODEL_LABELS_RN = "../input/hpa-tensorflow-models/model_rgb_resnext101.09-0.10.h5" MODEL_LABELS_VIT = "../input/hpa-tensorflow-models/ggg_ViTB16_RedPlat_ADAMW_BCE_EPOCH12-VAL0.0957.h5" <init_hyperparams>
cnn.add(layer = tf.keras.layers.Dropout(0.5))
Digit Recognizer
11,684,407
class CFG:
    debug = False
    verbose = False
    num_workers = 8
    model_name_effnet = 'efficientnet-b4'
    model_name_resnest = 'resnest101'
    size = 640
    seed = 2002
    classes = 19
    color_mode = "rgby"
    resnest = True
    effnet = True
    extra_model_for_labels = True
    extra_model_is_tf = True
    only_green_extra_model = True
    color_mode_image_level = "rgb"
    split = [0.6, 0.4, 0]
    size_seg = None
    split_image_level = [0.33, 0.33, 0.34, 0]
    split_cam_level = [0.5, 0.5]
    split_sigmoid_graboost = [0.5, 0.5]
    sigmoid_factor = 2.0
    sigmoid_move = 0.2
    is_demo = len(data_df) == 559

if CFG.is_demo:
    data_df = data_df[:10]
batch_size_ = 4<normalization>
cnn.add(layer = tf.keras.layers.Dense(units = 108, activation='relu'))
Digit Recognizer
11,684,407
from albumentations import ( Compose, OneOf, Normalize, Resize, RandomResizedCrop, RandomCrop, HorizontalFlip, VerticalFlip, RandomBrightness, RandomContrast, RandomBrightnessContrast, Rotate, ShiftScaleRotate, Cutout, IAAAdditiveGaussianNoise, Transpose, HueSaturationValue, CoarseDropout ) from albumentations.pytorch import ToTensorV2 dataset_mean = [0.0994, 0.0466, 0.0606, 0.0879] dataset_std = [0.1406, 0.0724, 0.1541, 0.1264] def get_transforms(*, data_type): if data_type == 'valid': return Compose([ Resize(CFG.size, CFG.size), ToTensorV2(), ]) elif data_type == 'test_green_model': return Compose([ Resize(600, 600), ]) elif data_type == 'test_green_model_torch': return Compose([ Resize(CFG.size, CFG.size), ToTensorV2(), ])<load_pretrained>
cnn.add(layer = tf.keras.layers.Dense(units = 10, activation = 'softmax'))
Digit Recognizer
11,684,407
def load_RGBY_image(image_id, path, mode="cam", image_size=None): if mode == "green_model": green = read_img_scale255(image_id, "green",path, image_size) stacked_images = np.transpose(np.array([green, green, green]),(1,2,0)) return stacked_images if mode=="cam": red = read_img(image_id, "red", path, image_size) green = read_img(image_id, "green",path, image_size) blue = read_img(image_id, "blue",path, image_size) yellow = read_img(image_id, "yellow",path, image_size) if CFG.color_mode == "rgby": stacked_images = np.transpose(np.array([red, green, blue,yellow]),(1,2,0)) else: stacked_images = np.transpose(np.array([red, green, blue]),(1,2,0)) return stacked_images def read_img(image_id, color, path, image_size=None): filename = f'{path}/{image_id}_{color}.png' assert os.path.exists(filename), f'not found {filename}' img = cv2.imread(filename, cv2.IMREAD_UNCHANGED) if image_size is not None: img = cv2.resize(img,(image_size, image_size)) if img.max() > 255: img_max = img.max() img =(img/255 ).astype('uint8') return img def read_img_scale255(image_id, color, path, image_size=None): filename = f'{path}/{image_id}_{color}.png' assert os.path.exists(filename), f'not found {filename}' img = cv2.imread(filename, cv2.IMREAD_UNCHANGED) if image_size is not None: img = cv2.resize(img,(image_size, image_size)) if img.max() > 255: img_max = img.max() img =(img/255 ).astype('uint8')/255 return img def one_hot_embedding(label, classes): vector = np.zeros(( classes), dtype = np.float32) if len(label)> 0: vector[label] = 1. return vector class HPADataset_Test(Dataset): def __init__(self, ids, path=None, transforms=None, mode="cam"): self.ids = ids self.transforms = transforms self.mode = mode self.path = path def __len__(self): return len(self.ids) def __getitem__(self, idx): _ids = self.ids.iloc[idx] image = load_RGBY_image(_ids, self.path, self.mode) if self.transforms: augmented = self.transforms(image=image) image = augmented['image'] return image, _ids<categorify>
cnn.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'] )
Digit Recognizer
11,684,407
def build_decoder(with_labels=True, target_size=(300, 300), ext='jpg'): def decode(path): if CFG.color_mode_image_level == "ggg": file_bytes = tf.io.read_file(path + "_green.png") if ext == 'png': img = tf.image.decode_png(file_bytes, channels=3) elif ext in ['jpg', 'jpeg']: img = tf.image.decode_jpeg(file_bytes, channels=3) else: raise ValueError("Image extension not supported") img = tf.cast(img, tf.float32) / 255.0 img = tf.image.resize(img, target_size) return img if CFG.color_mode_image_level == "rgb": r = tf.io.read_file(path + "_red.png") g = tf.io.read_file(path + "_green.png") b = tf.io.read_file(path + "_blue.png") red = tf.io.decode_png(r, channels=1) green = tf.io.decode_png(g, channels=1) blue = tf.io.decode_png(b, channels=1) red = tf.image.resize(red, target_size) green = tf.image.resize(green, target_size) blue = tf.image.resize(blue, target_size) img = tf.stack([red, green, blue], axis=-1) img = tf.squeeze(img) img = tf.image.convert_image_dtype(img, tf.float32) / 255 return img def decode_with_labels(path, label): return decode(path), label return decode_with_labels if with_labels else decode def build_augmenter(with_labels=True): def augment(img): img = tf.image.random_flip_left_right(img) img = tf.image.random_flip_up_down(img) return img def augment_with_labels(img, label): return augment(img), label return augment_with_labels if with_labels else augment def build_dataset_tf(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, img_size=300, cache_dir=""): if cache_dir != "" and cache is True: os.makedirs(cache_dir, exist_ok=True) if decode_fn is None: decode_fn = build_decoder(labels is not None, target_size=(img_size, img_size)) if augment_fn is None: augment_fn = build_augmenter(labels is not None) AUTO = tf.data.experimental.AUTOTUNE slices = paths if labels is None else (paths, labels) dset = tf.data.Dataset.from_tensor_slices(slices) dset = dset.map(decode_fn, num_parallel_calls=AUTO) dset = dset.cache(cache_dir) if cache else dset dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset dset = dset.repeat() if repeat else dset dset = dset.shuffle(shuffle) if shuffle else dset dset = dset.batch(bsize).prefetch(AUTO) return dset test_paths = PATH_TEST + "/" + data_df['ID'] test_decoder_600 = build_decoder(with_labels=False, target_size=(600, 600)) test_decoder_384 = build_decoder(with_labels=False, target_size=(384, 384)) CFG.color_mode_image_level = "ggg" dtest_tf_green_600 = build_dataset_tf( test_paths, bsize=batch_size_, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder_600 ) CFG.color_mode_image_level = "rgb" dtest_tf_rgb_600 = build_dataset_tf( test_paths, bsize=batch_size_, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder_600 ) CFG.color_mode_image_level = "ggg" dtest_tf_ggg_384 = build_dataset_tf( test_paths, bsize=batch_size_, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder_384 )<prepare_x_and_y>
model_train = cnn.fit(x = X_train, y = y_train, validation_data =(X_val, y_val), epochs = 25 )
Digit Recognizer
11,684,407
class Yield_Images_Dataset(Dataset): def __init__(self, csv_file, root=PATH_TEST, transform=None): self.images_df = csv_file self.transform = transform self.root = root def __len__(self): return len(self.images_df) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() _id = self.images_df["ID"].iloc[idx] r = os.path.join(self.root, f'{_id}_red.png') y = os.path.join(self.root, f'{_id}_yellow.png') b = os.path.join(self.root, f'{_id}_blue.png') r = cv2.imread(r, 0) y = cv2.imread(y, 0) b = cv2.imread(b, 0) size = r.shape[0] if CFG.size_seg == None: ryb_image = np.stack(( r, y, b), axis=2)/255. blue_image = b/255. return blue_image, ryb_image, size, _id if size != CFG.size_seg: blue_image = cv2.resize(b,(CFG.size_seg, CFG.size_seg)) /255. ryb_image = np.stack(( r, y, b), axis=2) ryb_image = cv2.resize(ryb_image,(CFG.size_seg, CFG.size_seg)) /255. else: ryb_image = np.stack(( r, y, b), axis=2)/255. blue_image = b/255. return blue_image, ryb_image, size, _id <set_options>
y_pred = np.argmax(cnn.predict(testing), axis = -1 )
Digit Recognizer
11,684,407
from hpacellseg.constants import (MULTI_CHANNEL_CELL_MODEL_URL, NUCLEI_MODEL_URL, TWO_CHANNEL_CELL_MODEL_URL) NORMALIZE = {"mean": [124 / 255, 117 / 255, 104 / 255], "std": [1 / (0.0167 * 255)] * 3} class CellSegmentator(object): def __init__( self, nuclei_model="./nuclei_model.pth", cell_model="./cell_model.pth", model_width_height=None, device="cuda", multi_channel_model=True, return_without_scale_restore=False, scale_factor=0.25, padding=False ): if device != "cuda" and device != "cpu" and "cuda" not in device: raise ValueError(f"{device} is not a valid device (cuda/cpu)") if device != "cpu": try: assert torch.cuda.is_available() except AssertionError: print("No GPU found, using CPU.", file=sys.stderr) device = "cpu" self.device = device if isinstance(nuclei_model, str): if not os.path.exists(nuclei_model): print( f"Could not find {nuclei_model}. Downloading it now", file=sys.stderr, ) download_with_url(NUCLEI_MODEL_URL, nuclei_model) nuclei_model = torch.load( nuclei_model, map_location=torch.device(self.device) ) if isinstance(nuclei_model, torch.nn.DataParallel) and device == "cpu": nuclei_model = nuclei_model.module self.nuclei_model = nuclei_model.to(self.device) self.multi_channel_model = multi_channel_model if isinstance(cell_model, str): if not os.path.exists(cell_model): print( f"Could not find {cell_model}. Downloading it now", file=sys.stderr ) if self.multi_channel_model: download_with_url(MULTI_CHANNEL_CELL_MODEL_URL, cell_model) else: download_with_url(TWO_CHANNEL_CELL_MODEL_URL, cell_model) cell_model = torch.load(cell_model, map_location=torch.device(self.device)) self.cell_model = cell_model.to(self.device) self.model_width_height = model_width_height self.return_without_scale_restore = return_without_scale_restore self.scale_factor = scale_factor self.padding = padding def _image_conversion(self, images): microtubule_imgs, er_imgs, nuclei_imgs = images if self.multi_channel_model: if not isinstance(er_imgs, list): raise ValueError("Please specify the image path(s) for er channels!") else: if er_imgs is not None: raise ValueError( "second channel should be None for two channel model prediction!" 
) if not isinstance(microtubule_imgs, list): raise ValueError("The microtubule images should be a list") if not isinstance(nuclei_imgs, list): raise ValueError("The nuclei images should be a list") if er_imgs: if not len(microtubule_imgs) == len(er_imgs) == len(nuclei_imgs): raise ValueError("The lists of images need to be the same length") else: if not len(microtubule_imgs) == len(nuclei_imgs): raise ValueError("The lists of images need to be the same length") if not all(isinstance(item, np.ndarray) for item in microtubule_imgs): microtubule_imgs = [ os.path.expanduser(item) for _, item in enumerate(microtubule_imgs) ] nuclei_imgs = [ os.path.expanduser(item) for _, item in enumerate(nuclei_imgs) ] microtubule_imgs = list( map(lambda item: imageio.imread(item), microtubule_imgs) ) nuclei_imgs = list(map(lambda item: imageio.imread(item), nuclei_imgs)) if er_imgs: er_imgs = [os.path.expanduser(item) for _, item in enumerate(er_imgs)] er_imgs = list(map(lambda item: imageio.imread(item), er_imgs)) if not er_imgs: er_imgs = [ np.zeros(item.shape, dtype=item.dtype) for _, item in enumerate(microtubule_imgs) ] cell_imgs = list( map( lambda item: np.dstack((item[0], item[1], item[2])), list(zip(microtubule_imgs, er_imgs, nuclei_imgs)), ) ) return cell_imgs def _pad(self, image): rows, cols = image.shape[:2] self.scaled_shape = rows, cols img_pad = cv2.copyMakeBorder( image, 32, (32 - rows % 32), 32, (32 - cols % 32), cv2.BORDER_REFLECT, ) return img_pad def pred_nuclei(self, images): def _preprocess(images): if isinstance(images[0], str): raise NotImplementedError('Currently the model requires images as numpy arrays, not paths.') self.target_shapes = [image.shape for image in images] if self.model_width_height: images = np.array([transform.resize(image, (self.model_width_height, self.model_width_height)) for image in images]) else: images = [transform.rescale(image, self.scale_factor) for image in images] if self.padding: images = [self._pad(image) for image in images] nuc_images = np.array([np.dstack((image[..., 2], image[..., 2], image[..., 2])) if len(image.shape) >= 3 else np.dstack((image, image, image)) for image in images]) nuc_images = nuc_images.transpose([0, 3, 1, 2]) return nuc_images def _segment_helper(imgs): with torch.no_grad(): mean = torch.as_tensor(NORMALIZE["mean"], device=self.device) std = torch.as_tensor(NORMALIZE["std"], device=self.device) imgs = torch.tensor(imgs).float() imgs = imgs.to(self.device) imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None]) imgs = self.nuclei_model(imgs) imgs = F.softmax(imgs, dim=1) return imgs preprocessed_imgs = _preprocess(images) predictions = _segment_helper(preprocessed_imgs) predictions = predictions.to("cpu").numpy() predictions = [self._restore_scaling(util.img_as_ubyte(pred), target_shape) for pred, target_shape in zip(predictions, self.target_shapes)] return predictions def _restore_scaling(self, n_prediction, target_shape): n_prediction = n_prediction.transpose([1, 2, 0]) if self.padding: n_prediction = n_prediction[ 32 : 32 + self.scaled_shape[0], 32 : 32 + self.scaled_shape[1], ... 
] n_prediction[..., 0] = 0 if not self.return_without_scale_restore: n_prediction = cv2.resize( n_prediction, (target_shape[0], target_shape[1]), interpolation=cv2.INTER_AREA, ) return n_prediction def pred_cells(self, images, precombined=False): def _preprocess(images): self.target_shapes = [image.shape for image in images] for image in images: if not len(image.shape) == 3: raise ValueError("image should have 3 channels") if self.model_width_height: images = np.array([transform.resize(image, (self.model_width_height, self.model_width_height)) for image in images]) else: images = np.array([transform.rescale(image, self.scale_factor, multichannel=True) for image in images]) if self.padding: images = np.array([self._pad(image) for image in images]) cell_images = images.transpose([0, 3, 1, 2]) return cell_images def _segment_helper(imgs): with torch.no_grad(): mean = torch.as_tensor(NORMALIZE["mean"], device=self.device) std = torch.as_tensor(NORMALIZE["std"], device=self.device) imgs = torch.tensor(imgs).float() imgs = imgs.to(self.device) imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None]) imgs = self.cell_model(imgs) imgs = F.softmax(imgs, dim=1) return imgs if not precombined: images = self._image_conversion(images) preprocessed_imgs = _preprocess(images) predictions = _segment_helper(preprocessed_imgs) predictions = predictions.to("cpu").numpy() predictions = [self._restore_scaling(util.img_as_ubyte(pred), target_shape) for pred, target_shape in zip(predictions, self.target_shapes)] return predictions<define_variables>
predictions = pd.DataFrame({"ImageId" : list(range(1, len(y_pred)+1)) , "Label" : y_pred} )
Digit Recognizer
11,684,407
<define_variables><EOS>
predictions.to_csv('predictions.csv', index = False )
Digit Recognizer
11,590,103
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
import pandas as pd import numpy as np import tensorflow as tf
Digit Recognizer
11,590,103
NUC_MODEL = "../input/hpacellsegmentatormodelweights/dpn_unet_nuclei_v1.pth" CELL_MODEL = "../input/hpacellsegmentatormodelweights/dpn_unet_cell_3ch_v1.pth" segmentator_even_faster = CellSegmentator( NUC_MODEL, CELL_MODEL, device="cuda", multi_channel_model=True, padding=True, return_without_scale_restore=True )<create_dataframe>
train=pd.read_csv('/kaggle/input/digit-recognizer/train.csv') print("Training Dataset Shape:",train.shape) train.head()
Digit Recognizer
11,590,103
yield_ims_1728 = Yield_Images_Dataset(predict_df_1728) yield_ims_2048 = Yield_Images_Dataset(predict_df_2048) yield_ims_3072 = Yield_Images_Dataset(predict_df_3072) yield_ims_4096 = Yield_Images_Dataset(predict_df_4096) dataloader_ims_seg_1728 = DataLoader(yield_ims_1728, batch_size=24, shuffle=False, num_workers=0) dataloader_ims_seg_2048 = DataLoader(yield_ims_2048, batch_size=12, shuffle=False, num_workers=0) dataloader_ims_seg_3072 = DataLoader(yield_ims_3072, batch_size=3, shuffle=False, num_workers=0) dataloader_ims_seg_4096 = DataLoader(yield_ims_4096, batch_size=3, shuffle=False, num_workers=0) dataloaders_all_sizes = [dataloader_ims_seg_1728, dataloader_ims_seg_2048, dataloader_ims_seg_3072, dataloader_ims_seg_4096] start_time = time.time() even_faster_outputs = [] output_ids = [] batch_size = 24 sizes_list = [] im_proc = 0 for i, dataloader_ims_seg in enumerate(dataloaders_all_sizes): print(f"GETTING IMAGE SIZES: {IMAGE_SIZES[i]}, BATCHES: {len(dataloader_ims_seg)}") print for blue_images, ryb_images, sizes, _ids in dataloader_ims_seg: print(f"SEGMENT COUNT: {im_proc}") blue_batch = blue_images.numpy() ryb_batch = ryb_images.numpy() nuc_segmentations = segmentator_even_faster.pred_nuclei(blue_batch) cell_segmentations = segmentator_even_faster.pred_cells(ryb_batch, precombined=True) for data_id, nuc_seg, cell_seg, size in zip(_ids, nuc_segmentations, cell_segmentations, sizes): _, cell = utils.label_cell(nuc_seg, cell_seg) even_faster_outputs.append(np.ubyte(cell)) output_ids.append(data_id) sizes_list.append(size.numpy()) im_proc += len(_ids) del dataloader_ims_seg print(time.time() - start_time) cell_masks_df = pd.DataFrame(list(zip(output_ids, even_faster_outputs,sizes_list)) , columns=["ID", "mask", "ori_size"] )<drop_column>
test=pd.read_csv('/kaggle/input/digit-recognizer/test.csv') print('Test Dataset Shape:',test.shape) test.head()
Digit Recognizer
11,590,103
cell_masks_df = cell_masks_df.set_index('ID') cell_masks_df = cell_masks_df.reindex(index=data_df['ID']) cell_masks_df = cell_masks_df.reset_index()<set_options>
x=x.reshape(-1,28,28,1) test=test.values.reshape(-1,28,28,1) x=x/255 test=test/255 y=to_categorical(y )
Digit Recognizer
11,590,103
del sizes_list del even_faster_outputs del output_ids del segmentator_even_faster del yield_ims_1728 del yield_ims_2048 del yield_ims_3072 del yield_ims_4096 del dataloader_ims_seg_1728 del dataloader_ims_seg_2048 del dataloader_ims_seg_3072 del dataloader_ims_seg_4096 del dataloaders_all_sizes libc = ctypes.CDLL("libc.so.6") libc.malloc_trim(0) gc.collect() torch.cuda.empty_cache() torch.cuda.empty_cache()<set_options>
train_datagen=ImageDataGenerator( rotation_range=10, width_shift_range=0.2, height_shift_range=0.2, zoom_range=0.2, fill_mode='nearest' )
Digit Recognizer
11,590,103
libc = ctypes.CDLL("libc.so.6") libc.malloc_trim(0) gc.collect() torch.cuda.empty_cache() torch.cuda.empty_cache() torch.cuda.empty_cache() torch.cuda.empty_cache()<prepare_x_and_y>
ensem=10 model=[0]*ensem for i in range(ensem): model[i]=Sequential() model[i].add(Conv2D(filters=32,kernel_size=(3,3),padding='same',activation='relu',input_shape=(28,28,1))) model[i].add(BatchNormalization()) model[i].add(Conv2D(filters=32,kernel_size=(3,3),padding='same',activation='relu')) model[i].add(BatchNormalization()) model[i].add(Conv2D(filters=32,kernel_size=(3,3),padding='same',activation='relu')) model[i].add(BatchNormalization()) model[i].add(MaxPool2D(2,2)) model[i].add(Dropout(0.2)) model[i].add(Conv2D(filters=64,kernel_size=(3,3),padding='same',activation='relu')) model[i].add(BatchNormalization()) model[i].add(Conv2D(filters=64,kernel_size=(3,3),padding='same',activation='relu')) model[i].add(BatchNormalization()) model[i].add(Conv2D(filters=64,kernel_size=(3,3),padding='same',activation='relu')) model[i].add(BatchNormalization()) model[i].add(MaxPool2D(2,2)) model[i].add(Dropout(0.2)) model[i].add(GlobalAveragePooling2D()) model[i].add(Dense(128,activation='relu')) model[i].add(Dropout(0.2)) model[i].add(Dense(10,activation='softmax')) model[i].compile(loss='categorical_crossentropy',metrics=['accuracy'],optimizer='adam' )
Digit Recognizer
11,590,103
X_test = data_df["ID"]<train_model>
callback=tf.keras.callbacks.EarlyStopping(monitor='accuracy',min_delta=0,patience=5,mode='auto',restore_best_weights=True,verbose=0) lrs=tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x, verbose=0) history=[0]*ensem for i in range(ensem): train_x,valid_x,train_y,valid_y=train_test_split(x,y,test_size=0.2) history[i]=model[i].fit_generator(train_datagen.flow(train_x,train_y,batch_size=128), epochs=100, steps_per_epoch=train_x.shape[0]//128, verbose=0, validation_data=(valid_x,valid_y), validation_steps=valid_x.shape[0]//128, callbacks=[callback,lrs]) print('Model {}: Epochs=100, Train_Accuracy:{}, Val_Accuracy:{}'.format(i+1,max(history[i].history['accuracy']),max(history[i].history['val_accuracy'])) )
Digit Recognizer
11,590,103
<create_dataframe><EOS>
prediction=np.zeros(( test.shape[0],10)) for i in range(ensem): prediction=prediction+model[i].predict(test) prediction=np.argmax(prediction,axis = 1) prediction=pd.Series(prediction,name="Label") submission = pd.concat([pd.Series(range(1,28001),name="ImageId"),prediction],axis=1) submission.to_csv("digit_recognizer.csv",index=False )
Digit Recognizer
11,517,111
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split import tensorflow as tf from kerastuner import HyperModel from kerastuner.tuners import RandomSearch from tensorflow.python.keras.callbacks import ModelCheckpoint import seaborn as sns from pylab import rcParams from sklearn.metrics import confusion_matrix
Digit Recognizer
11,517,111
def swish(x, beta=1.0): return x * torch.sigmoid(beta*x) def get_all_cams(batch_cam_scaled, model, scales, ims_per_batch): bs = ims_per_batch with torch.no_grad() : ori_w, ori_h = CFG.size, CFG.size strided_up_size =(CFG.size, CFG.size) all_scale_cams = torch.from_numpy(np.zeros(( bs, len(scales), 19, CFG.size, CFG.size)) ).cuda() all_scale_preds = torch.from_numpy(np.zeros(( bs, len(scales), 19)) ).cuda() num_channels = 4 for i, images in enumerate(batch_cam_scaled): with torch.cuda.amp.autocast() : logits, features = model(images, with_cam=True) features = swish(features) logits = logits.reshape(bs*4//bs//4, 4, bs, 19 ).mean(1 ).view(bs*4//4, 19) all_scale_preds[:, i, :] = logits features = torch.cat([features[0:bs], features[bs:bs*2].flip(2), features[bs*2:bs*3].flip(3), torch.flip(features[bs*3:bs*4],(3,2)) ]) size_feats = features.shape[-1] features = features.reshape(bs*4//bs//4, 4, bs, 19, size_feats, size_feats ).sum(1 ).view(bs*4//4, 19, size_feats, size_feats) cams = F.interpolate(features,(CFG.size, CFG.size), mode='bicubic', align_corners=False) all_scale_cams[:, i, :, :, :] = cams all_logits = np.sum(all_scale_preds.detach().cpu().numpy() , axis=1) all_cams = np.sum(all_scale_cams.detach().cpu().numpy() , axis=1) print("CAMS DONE") return {"hr_cams": all_cams, "logits" : all_logits} <define_variables>
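The reshape in get_all_cams averages logits over the four flip-TTA copies that are stacked along the batch axis in blocks of bs. A simplified sketch of the same reduction, assuming the stacking order [original, flip(2), flip(3), flip(3, 2)] used above (illustrative, not the author's exact code):

import torch

bs = 2
logits = torch.randn(bs * 4, 19)       # four TTA blocks of bs images each
avg = logits.view(4, bs, 19).mean(0)   # average the four augmented views per image
assert avg.shape == (bs, 19)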
rcParams['figure.figsize'] = 15, 15 NUM_CLASSES = 10 INPUT_SHAPE =(28, 28, 1 )
Digit Recognizer
11,517,111
label_names = [ '0-Nucleoplasm', '1-Nuclear membrane', '2-Nucleoli', '3-Nucleoli fibrillar center', '4-Nuclear speckles', '5-Nuclear bodies', '6-Endoplasmic reticulum', '7-Golgi apparatus', '8-Intermediate filaments', '9-Actin filaments', '10-Microtubules', '11-Mitotic spindle', '12-Centrosome', '13-Plasma membrane', '14-Mitochondria', '15-Aggresome', '16-Cytosol', '17-Vesicles + cytosolic patterns', '18-Negative' ]<categorify>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') X = train.iloc[:, 1:] y = train.iloc[:, 0]
Digit Recognizer
11,517,111
def encode_binary_mask(mask: np.ndarray) -> t.Text: if mask.dtype != np.bool_: raise ValueError( "encode_binary_mask expects a binary mask, received dtype == %s" % mask.dtype) mask = np.squeeze(mask) if len(mask.shape) != 2: raise ValueError( "encode_binary_mask expects a 2d mask, received shape == %s" % mask.shape) mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1) mask_to_encode = mask_to_encode.astype(np.uint8) mask_to_encode = np.asfortranarray(mask_to_encode) encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"] binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION) base64_str = base64.b64encode(binary_str) return base64_str def get_all_encoded_cells(mask): print(mask.shape) cell_masks = [] for i in range(1, np.max(mask)+1): enc_mask = encode_binary_mask((mask == i)) cell_masks.append(enc_mask) return cell_masks<normalization>
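encode_binary_mask produces a base64-wrapped, zlib-compressed COCO RLE. A hypothetical inverse for local round-trip checks (assumes pycocotools is installed; not part of the original kernel):

import base64, zlib
import numpy as np
from pycocotools import _mask as coco_mask

def decode_binary_mask(base64_str, height, width):
    # base64 -> zlib -> COCO RLE counts -> HxW boolean mask
    counts = zlib.decompress(base64.b64decode(base64_str))
    return coco_mask.decode([{"size": [height, width], "counts": counts}])[:, :, 0].astype(bool)

toy = np.zeros((4, 4), dtype=bool)
toy[1:3, 1:3] = True
assert np.array_equal(decode_binary_mask(encode_binary_mask(toy), 4, 4), toy)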
X /= 256 test /= 256 X = X.values.reshape(( -1,)+ INPUT_SHAPE) test = test.values.reshape(( -1,)+ INPUT_SHAPE )
Digit Recognizer
11,517,111
def resize_mask(mask): resized_mask = resize_full_mask(mask, CFG.size) cell_masks = [] for i in range(1, np.max(mask)+1): cell_masks.append(( resized_mask == i)) return cell_masks def resize_full_mask(mask, size): resized_mask = cv2.resize(mask,(size,size),interpolation=cv2.INTER_NEAREST_EXACT) return resized_mask <categorify>
class CNNHyperModel(HyperModel): def __init__(self, input_shape, num_classes): self.input_shape = input_shape self.num_classes = num_classes def build(self, hp): model = tf.keras.models.Sequential() model.add(tf.keras.layers.Conv2D(8, 3, padding='same', dilation_rate=(3, 3), \ activation='relu', input_shape=self.input_shape)) model.add(tf.keras.layers.Conv2D(16, 3, padding='same', \ activation='relu')) max_or_avg =hp.Int( 'max_or_avg', min_value=1, max_value=2, step=1, default=1) three = hp.Fixed('three', 3) model.add(tf.keras.layers.AvgPool2D(pool_size=max_or_avg, name=f'first_avg_pool_{max_or_avg}')) model.add(tf.keras.layers.MaxPool2D(pool_size=three - max_or_avg)) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.Conv2D(32, 3, padding='same', dilation_rate=(3, 3), \ activation='relu')) model.add(tf.keras.layers.Conv2D(64, 3, padding='same', \ activation='relu')) model.add(tf.keras.layers.AvgPool2D(pool_size=max_or_avg, name=f'second_avg_pool_{max_or_avg}')) model.add(tf.keras.layers.MaxPool2D(pool_size=three - max_or_avg)) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.Flatten()) drop_rate = hp.Float('dropout', min_value=0.3, max_value=0.7, default=0.3, step=0.2, ) model.add(tf.keras.layers.Dropout(rate=drop_rate, name=f'drop{drop_rate}')) units=hp.Int('units', min_value=28, max_value=4*28, step=28, default=28 ) model.add(tf.keras.layers.Dense( units=units, name=f'Dense{units}', activation='relu' )) model.add(tf.keras.layers.Dense(self.num_classes)) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.Adam() model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy']) return model
Digit Recognizer
11,517,111
def get_pred_string(mask_probas, cell_masks_fullsize_enc): assert len(mask_probas)== len(cell_masks_fullsize_enc), "Probas have different length than masks" string = "" for enc_mask, mask_proba in zip(cell_masks_fullsize_enc, mask_probas): for cls, proba in enumerate(mask_proba): string += str(cls)+ " " + str(proba)+ " " + enc_mask.decode("utf-8")+ " " return string<load_pretrained>
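To make the PredictionString layout concrete, a toy call with illustrative values (the base64 blob is the dummy mask reused from the sample-submission sanity check earlier):

probas = np.array([[0.9, 0.05]])  # one cell, two class probabilities
enc = [b"eNoLCAgIMAEABJkBdQ=="]   # one pre-encoded cell mask
print(get_pred_string(probas, enc))
# -> "0 0.9 eNoLCAgIMAEABJkBdQ== 1 0.05 eNoLCAgIMAEABJkBdQ== "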
hypermodel = CNNHyperModel(input_shape=INPUT_SHAPE, num_classes=NUM_CLASSES) tuner = RandomSearch( hypermodel, objective='val_accuracy', max_trials=24, executions_per_trial=1, directory='./random_search', project_name='MNIST' ) tuner.search_space_summary()
Digit Recognizer
11,517,111
PATH_SCALER_GRADBOOST = "../input/scaler-and-gradboost" scaler_resnest0 = pickle.load(open(f"{PATH_SCALER_GRADBOOST}/scaler_resnest0.pkl", 'rb')) scaler_resnest1 = pickle.load(open(f"{PATH_SCALER_GRADBOOST}/scaler_resnest1.pkl", 'rb')) scaler_effnet0 = pickle.load(open(f"{PATH_SCALER_GRADBOOST}/scaler_effnet0.pkl", 'rb')) scaler_effnet1 = pickle.load(open(f"{PATH_SCALER_GRADBOOST}/scaler_effnet1.pkl", 'rb')) scaler_effnet2 = pickle.load(open(f"{PATH_SCALER_GRADBOOST}/scaler_effnet2.pkl", 'rb')) model_gradboost_resnest0 = pickle.load(open(f"{PATH_SCALER_GRADBOOST}/GradientBoostingRegressor_resnest0.pkl", 'rb')) model_gradboost_resnest1 = pickle.load(open(f"{PATH_SCALER_GRADBOOST}/GradientBoostingRegressor_resnest1.pkl", 'rb')) model_gradboost_effnet0 = pickle.load(open(f"{PATH_SCALER_GRADBOOST}/GradientBoostingRegressor_effnet0.pkl", 'rb')) model_gradboost_effnet1 = pickle.load(open(f"{PATH_SCALER_GRADBOOST}/GradientBoostingRegressor_effnet1.pkl", 'rb')) model_gradboost_effnet2 = pickle.load(open(f"{PATH_SCALER_GRADBOOST}/GradientBoostingRegressor_effnet2.pkl", 'rb')) def get_prob_from_cams_masks(cams, masks, labels, labels_from_labelmodel, verbose=True, typ=None): print(f"GETTING MODEL {typ}") if typ == "resnest0": scaler = scaler_resnest0 model_gradboost = model_gradboost_resnest0 if typ == "resnest1": scaler = scaler_resnest1 model_gradboost = model_gradboost_resnest1 if typ == "resnest2": scaler = scaler_resnest1 model_gradboost = model_gradboost_resnest1 if typ == "effnet0": scaler = scaler_effnet0 model_gradboost = model_gradboost_effnet0 if typ == "effnet1": scaler = scaler_effnet1 model_gradboost = model_gradboost_effnet1 if typ == "effnet2": scaler = scaler_effnet2 model_gradboost = model_gradboost_effnet2 if typ == "effnet3": scaler = scaler_effnet2 model_gradboost = model_gradboost_effnet2 masks_probas = np.zeros((len(masks), 19)) for i, mask in enumerate(masks): for label, cam in enumerate(cams): cam_by_mask = np.multiply(mask, cam) cam_mask_product = np.multiply(cam_by_mask, labels[label]) masks_probas[i, label] = np.sum(cam_mask_product) if verbose: print(f"MASK: {i} PROB-RAW: {masks_probas[i, :]}") print("--------------------------------------") std_scaler = preprocessing.RobustScaler().fit(masks_probas.reshape(-1, 1)) for i, mask in enumerate(masks): std_scaled = std_scaler.transform(masks_probas[i, :].reshape(-1, 1))[:, 0] model_scaled = scaler.transform(masks_probas[i, :].reshape(-1, 1))[:, 0] if verbose: print(f"MASK: {i} STD-Scaled: {std_scaled}") print(f"MASK: {i} Model-Scaled: {model_scaled}") print("--------------------------------------") sigmoid_probas = sigmoid_factor(std_scaled, factor=CFG.sigmoid_factor, move=CFG.sigmoid_move) gradboost_probas = model_gradboost.predict(model_scaled.reshape(-1, 1)) if verbose: print(f"MASK: {i} PROB-SIGMOID: {sigmoid_probas}") print(f"MASK: {i} PROB-GradBoost: {gradboost_probas}") print("--------------------------------------") if CFG.extra_model_for_labels: masks_probas[i, :] = sigmoid_probas*CFG.split_sigmoid_graboost[0] + gradboost_probas*CFG.split_sigmoid_graboost[1] masks_probas[i, :] = CFG.split[0] * masks_probas[i, :] + CFG.split[1] * labels_from_labelmodel + CFG.split[2] * labels if verbose: print(f"MASK: {i} PROB-WITH-LABELMODEL: {masks_probas[i, :]}") return masks_probas<import_modules>
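sigmoid_factor is called above but defined elsewhere in the kernel; a plausible stand-in consistent with CFG.sigmoid_factor and CFG.sigmoid_move (an assumption, not the author's verified implementation):

import numpy as np

def sigmoid_factor(x, factor=2.0, move=0.2):
    # steepened sigmoid shifted by `move`, squashing scaled CAM scores into (0, 1)
    return 1.0 / (1.0 + np.exp(-factor * (x - move)))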
X_train_val, X_test, y_train_val, y_test = train_test_split(X, y, test_size=1/7 )
Digit Recognizer
11,517,111
class Classifier_EffNet(nn.Module, ABC_Model): def __init__(self, backbone, num_classes=19): super(Classifier_EffNet, self ).__init__() self.enet = EfficientNet.from_name(backbone, num_classes=num_classes, in_channels=3, include_top=False) dict_sizes = { 'efficientnet-b0' : 1280, 'efficientnet-b1' : 1280, 'efficientnet-b2' : 1408, 'efficientnet-b3' : 1536, 'efficientnet-b4' : 1792, 'efficientnet-b5' : 2048 } size_conv2d = dict_sizes[backbone] self.classifier = nn.Conv2d(size_conv2d, num_classes, 1, bias=False) self.num_classes = num_classes self.initialize([self.classifier]) def forward(self, x, with_cam=False): x = self.enet.extract_features(x) if with_cam: features = self.classifier(x) logits = self.global_average_pooling_2d(features) return logits, features else: x = self.global_average_pooling_2d(x, keepdims=True) logits = self.classifier(x ).view(-1, self.num_classes) return logits class Classifier_EffNet_GREEN(nn.Module, ABC_Model): def __init__(self, backbone, num_classes=19): super(Classifier_EffNet_GREEN, self ).__init__() self.enet = EfficientNet.from_name(backbone, num_classes=num_classes, in_channels=3, include_top=False) dict_sizes = { 'efficientnet-b0' : 1280, 'efficientnet-b1' : 1280, 'efficientnet-b2' : 1408, 'efficientnet-b3' : 1536, 'efficientnet-b4' : 1792, 'efficientnet-b5' : 2048, 'efficientnet-b7' : 2560, } self.dense = nn.Linear(dict_sizes[backbone],19) self.initialize([self.dense]) def forward(self, x, with_cam=False): x = self.enet.extract_features(x) x = self.global_average_pooling_2d(x) logits = self.dense(x) return logits class FixedBatchNorm(nn.BatchNorm2d): def forward(self, x): return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, training=False, eps=self.eps) def group_norm(features): return nn.GroupNorm(4, features) class Backbone(nn.Module, ABC_Model): def __init__(self, model_name, num_classes=20, mode='fix', segmentation=False): super().__init__() self.mode = mode if self.mode == 'fix': self.norm_fn = FixedBatchNorm else: self.norm_fn = nn.BatchNorm2d if 'resnet' in model_name: self.model = resnet.ResNet(resnet.Bottleneck, resnet.layers_dic[model_name], strides=(2, 2, 2, 1), batch_norm_fn=self.norm_fn) state_dict = model_zoo.load_url(resnet.urls_dic[model_name]) state_dict.pop('fc.weight') state_dict.pop('fc.bias') self.model.load_state_dict(state_dict) else: if segmentation: dilation, dilated = 4, True else: dilation, dilated = 2, False self.model = eval("resnest." + model_name )(pretrained=False, dilated=dilated, dilation=dilation, norm_layer=self.norm_fn) del self.model.avgpool del self.model.fc self.stage1 = nn.Sequential(self.model.conv1, self.model.bn1, self.model.relu, self.model.maxpool) self.stage2 = nn.Sequential(self.model.layer1) self.stage3 = nn.Sequential(self.model.layer2) self.stage4 = nn.Sequential(self.model.layer3) self.stage5 = nn.Sequential(self.model.layer4) class Classifier(Backbone): def __init__(self, model_name, num_classes=20, mode='fix'): super().__init__(model_name, num_classes, mode) self.classifier = nn.Conv2d(2048, num_classes, 1, bias=False) self.num_classes = num_classes self.initialize([self.classifier]) def forward(self, x, with_cam=False): x = self.stage1(x) x = self.stage2(x) x = self.stage3(x) x = self.stage4(x) x = self.stage5(x) if with_cam: features = self.classifier(x) logits = self.global_average_pooling_2d(features) return logits, features else: x = self.global_average_pooling_2d(x, keepdims=True) logits = self.classifier(x ).view(-1, self.num_classes) return logits<split>
search_results = tuner.search(X_train_val, y_train_val, epochs=10, validation_split=1/6, batch_size=100 )
Digit Recognizer
11,517,111
def get_separate_labels(ims, model, model_for_labels_state, ims_per_batch, verbose=False): bs = ims_per_batch model.load_state_dict(model_for_labels_state["model_state_dict"]) with torch.no_grad() : image_batch = copy.deepcopy(ims) image_batch = image_batch.float() image_batch_augs_fl2 = image_batch.flip(2) image_batch_augs_fl3 = image_batch.flip(3) image_batch_augs_fl32 = torch.flip(image_batch,(3,2)) images = torch.cat([image_batch, image_batch_augs_fl2, image_batch_augs_fl3, image_batch_augs_fl32], dim=0) images = images.cuda() print(images.shape) with torch.cuda.amp.autocast() : logits = model(images, with_cam=True) logits = logits.reshape(bs*4//bs//4, 4, bs, 19 ).mean(1 ).view(bs*4//4, 19) labels = torch.sigmoid(logits) if verbose: print("LABELS FROM LABELS MODEL TORCH") print(labels) return labels.cpu().numpy() def get_separate_labels_tf(ims, model, verbose=False, name=None): bs = ims_per_batch image_batch = copy.deepcopy(ims) image_batch_fl_lr = tf.image.flip_left_right(image_batch) image_batch_fl_up = tf.image.flip_up_down(image_batch) image_batch_fl_up_lr = tf.image.flip_up_down(image_batch_fl_lr) ims = tf.concat([image_batch, image_batch_fl_lr, image_batch_fl_up, image_batch_fl_up_lr], axis=0) labels = model.predict(ims, verbose=1) labels = labels.reshape(bs*4//bs//4, 4, bs, 19 ).mean(1) if verbose: print(f"LABELS FROM LABELS MODEL {name}") print(labels) return labels[0] <set_options>
best_model = tuner.get_best_models(num_models=1)[0] best_model.summary() print(best_model.evaluate(X_test, y_test))
Digit Recognizer
11,517,111
sys.path.append("../input/faustomorales-vitkeras/vit_keras/") CONFIG_B = { "dropout": 0.1, "mlp_dim": 3072, "num_heads": 12, "num_layers": 12, "hidden_size": 768, } class TransformerBlock(tf.keras.layers.Layer): def __init__(self, *args, num_heads=12, mlp_dim=3072, dropout=0.1, **kwargs): super().__init__(*args, **kwargs) self.num_heads = num_heads self.mlp_dim = mlp_dim self.dropout = dropout def build(self, input_shape): self.att = MultiHeadSelfAttention( num_heads=self.num_heads, name="MultiHeadDotProductAttention_1", ) self.mlpblock = tf.keras.Sequential( [ tf.keras.layers.Dense( self.mlp_dim, activation="linear", name=f"{self.name}/Dense_0", ), tf.keras.layers.Lambda( lambda x: tf.keras.activations.gelu(x, approximate=False) ) if hasattr(tf.keras.activations, "gelu") else tf.keras.layers.Lambda( lambda x: tfa.activations.gelu(x, approximate=False) ), tf.keras.layers.Dropout(self.dropout), tf.keras.layers.Dense(input_shape[-1], name=f"{self.name}/Dense_1"), tf.keras.layers.Dropout(self.dropout), ], name="MlpBlock_3", ) self.layernorm1 = tf.keras.layers.LayerNormalization( epsilon=1e-6, name="LayerNorm_0" ) self.layernorm2 = tf.keras.layers.LayerNormalization( epsilon=1e-6, name="LayerNorm_2" ) self.dropout = tf.keras.layers.Dropout(self.dropout) def call(self, inputs, training): x = self.layernorm1(inputs) x, weights = self.att(x) x = self.dropout(x, training=training) x = x + inputs y = self.layernorm2(x) y = self.mlpblock(y) return x + y, weights class MultiHeadSelfAttention(tf.keras.layers.Layer): def __init__(self, *args, num_heads=12, **kwargs): super().__init__(*args, **kwargs) self.num_heads = num_heads def build(self, input_shape): hidden_size = input_shape[-1] num_heads = self.num_heads if hidden_size % num_heads != 0: raise ValueError( f"embedding dimension = {hidden_size} should be divisible by number of heads = {num_heads}" ) self.hidden_size = hidden_size self.projection_dim = hidden_size // num_heads self.query_dense = tf.keras.layers.Dense(hidden_size, name="query") self.key_dense = tf.keras.layers.Dense(hidden_size, name="key") self.value_dense = tf.keras.layers.Dense(hidden_size, name="value") self.combine_heads = tf.keras.layers.Dense(hidden_size, name="out") def attention(self, query, key, value): score = tf.matmul(query, key, transpose_b=True) dim_key = tf.cast(tf.shape(key)[-1], score.dtype) scaled_score = score / tf.math.sqrt(dim_key) weights = tf.nn.softmax(scaled_score, axis=-1) output = tf.matmul(weights, value) return output, weights def separate_heads(self, x, batch_size): x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, inputs): batch_size = tf.shape(inputs)[0] query = self.query_dense(inputs) key = self.key_dense(inputs) value = self.value_dense(inputs) query = self.separate_heads(query, batch_size) key = self.separate_heads(key, batch_size) value = self.separate_heads(value, batch_size) attention, weights = self.attention(query, key, value) attention = tf.transpose(attention, perm=[0, 2, 1, 3]) concat_attention = tf.reshape(attention, (batch_size, -1, self.hidden_size)) output = self.combine_heads(concat_attention) return output, weights <train_on_grid>
def schedule(epoch, lr): return 10**(-(epoch//10)-3) lr_schedule = tf.keras.callbacks.LearningRateScheduler(schedule, verbose=1) cb_checkpointer_val = ModelCheckpoint(filepath = '.. /working/best_val.hdf5', monitor = 'val_accuracy', save_best_only = True, mode = 'auto' )
Digit Recognizer
11,517,111
if CFG.resnest: model_resnest = Classifier(CFG.model_name_resnest, CFG.classes, mode="normal") if CFG.color_mode == "rgby": weight = model_resnest.model.conv1[0].weight.clone() model_resnest.model.conv1[0] = nn.Conv2d(4, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) with torch.no_grad() : model_resnest.model.conv1[0].weight[:, :3] = weight model_resnest.model.conv1[0].weight[:, 3] = model_resnest.model.conv1[0].weight[:, 0] model_resnest.to(device) model_resnest.eval() model_states_resnet = [torch.load(MODEL_PATH + f"{model}")for model in MODELS_LIST_RESNEST] if CFG.effnet: model_effnet = Classifier_EffNet(CFG.model_name_effnet) if CFG.color_mode == "rgby": model_effnet.enet._conv_stem.in_channels = 4 model_effnet.enet._conv_stem.weight = torch.nn.Parameter(torch.cat([model_effnet.enet._conv_stem.weight, model_effnet.enet._conv_stem.weight[:, 0:1, :, :]], axis=1)) model_effnet.to(device) model_effnet.eval() model_states_effnet = [torch.load(MODEL_PATH + f"{model}")for model in MODELS_LIST_EFFNET] if CFG.extra_model_for_labels: if CFG.extra_model_is_tf: model_green_eff = tf.keras.models.load_model(MODEL_LABELS_EFF) model_green_rn = tf.keras.models.load_model(MODEL_LABELS_RN) model_green_vit = tf.keras.models.load_model(MODEL_LABELS_VIT, custom_objects={ 'ClassToken': layers.ClassToken, 'AddPositionEmbs' : layers.AddPositionEmbs, 'TransformerBlock' : TransformerBlock, 'MultiHeadSelfAttention' : MultiHeadSelfAttention}) else: model_for_labels_state = torch.load(MODEL_LABELS) model_green = Classifier_EffNet_GREEN("efficientnet-b7") model_green.to(device) model_green.eval() dl_test = DataLoader(test_data, batch_size=batch_size_, shuffle=False, num_workers=0) <define_variables>
X_train, X_val, y_train, y_val = train_test_split(X_train_val, y_train_val, test_size=1/6 )
Digit Recognizer
11,517,111
def inference_one_batch(batch_cam, batch_ids_cam, batch_seg, ims_per_batch, batch_eff_tf=None, batch_rn_tf_600=None, batch_vit_tf_384=None, show_image=True, show_seg=True, verbose=True): batch_ids_seg = tuple(batch_seg["ID"]) print(batch_ids_cam) print(batch_ids_seg) assert batch_ids_cam == batch_ids_seg, "IDS OF SEGMENTATION AND CAMS DONT MATCH" print(f"GETTING {ims_per_batch} CELL MASKS") cell_mask_list = batch_seg["mask"] cell_mask_sizes = batch_seg["ori_size"] cell_masks_full_size = [] for e,(mask, size)in enumerate(zip(cell_mask_list, cell_mask_sizes)) : cell_masks_full_size.append(resize_full_mask(mask,size)) res_cell_masks = [resize_mask(cell_mask)for cell_mask in cell_mask_list] if show_seg: show_seg_cells(batch_cam, res_cell_masks, batch_ids_seg, batch_cam) del cell_mask_list cell_masks_fullsize_enc_list = [] for cell_mask in cell_masks_full_size: cell_masks_fullsize_enc = get_all_encoded_cells(cell_mask) cell_masks_fullsize_enc_list.append(cell_masks_fullsize_enc) del cell_masks_full_size print(f"ENCODED {len(cell_masks_fullsize_enc_list)} CELL MASKS") labels_model_eff = get_separate_labels_tf(batch_eff_tf, model_green_eff, verbose=verbose, name=MODEL_LABELS_EFF) labels_model_rn = get_separate_labels_tf(batch_rn_tf_600, model_green_rn, verbose=verbose, name=MODEL_LABELS_RN) labels_model_vit = get_separate_labels_tf(batch_vit_tf_384, model_green_vit, verbose=verbose, name=MODEL_LABELS_VIT) labels_model = labels_model_eff*CFG.split_image_level[0] + labels_model_rn*CFG.split_image_level[1] + labels_model_vit*CFG.split_image_level[2] print("LABELS FROM LABEL MODEL") print(labels_model) print("RESIZE IMAGES") batch_cam_scaled = [] scales = [1.0, 1.3, 1.6] st = time.time() for i, scale in enumerate(scales): image_batch_pil = torch.from_numpy(np.zeros(( ims_per_batch, 4, round(CFG.size*scale), round(CFG.size*scale)))) image_batch = copy.deepcopy(batch_cam) for j, im in enumerate(image_batch): im = ToPILImage()(im) im = im.resize(( round(CFG.size*scale), round(CFG.size*scale)) , resample=PIL.Image.BICUBIC) im = torchvision.transforms.functional.to_tensor(im)*255 image_batch_pil[j] = im image_batch_resized = image_batch_pil.float() image_batch_augs_fl2 = image_batch_resized.flip(2) image_batch_augs_fl3 = image_batch_resized.flip(3) image_batch_augs_fl32 = torch.flip(image_batch_resized,(3,2)) images = torch.cat([image_batch_resized, image_batch_augs_fl2, image_batch_augs_fl3, image_batch_augs_fl32], dim=0) images = images.cuda() batch_cam_scaled.append(images) print(f"TIME FOR RESIZING WITH PIL {time.time() - st}") if CFG.resnest: print("GETTING CAMS AND PREDS RESNEST") mask_probas_resnest_folds = [] folds_resnest = len(MODELS_LIST_RESNEST) all_ims_resnest = [] time_spent_mask_probas = [] for f, model_state_rn in enumerate(model_states_resnet): all_hr_cams, sig_labels = get_hrcams_vis(batch_cam_scaled, show_image, model_resnest, model_state_rn, scales, ims_per_batch) mask_probas_resnest_batches = [] print(f"GETTING MASKS PROBAS FOLD {f}") for b,(cams, mask, sig_label, label_model)in enumerate(zip(all_hr_cams, res_cell_masks, sig_labels, labels_model)) : print(f"GETTING MASKS PROBAS BATCH {b}") mask_probas_resnest = get_prob_from_cams_masks(cams, mask, sig_label, label_model, verbose, typ=f"resnest{f}") if verbose: print(mask_probas_resnest) mask_probas_resnest_batches.append(mask_probas_resnest) mask_probas_resnest_folds.append(mask_probas_resnest_batches) for b in range(ims_per_batch): all_ims_resnest.append(np.mean(np.array([mask_probas_resnest_folds[i][b] for i in 
range(folds_resnest)]), axis=0)) if CFG.effnet: print("GETTING CAMS AND PREDS EFFICIENTNET") mask_probas_effnet_folds = [] folds_effnet = len(MODELS_LIST_EFFNET) all_ims_effnet = [] time_spent_mask_probas = [] for f, model_state_eff in enumerate(model_states_effnet): all_hr_cams, sig_labels = get_hrcams_vis(batch_cam_scaled, show_image, model_effnet, model_state_eff, scales, ims_per_batch) mask_probas_effnet_batches = [] print(f"GETTING MASKS PROBAS FOLD {f}") for b,(cams, mask, sig_label, label_model)in enumerate(zip(all_hr_cams, res_cell_masks, sig_labels, labels_model)) : print(f"GETTING MASKS PROBAS BATCH {b}") s_t = time.time() mask_probas_effnet = get_prob_from_cams_masks(cams, mask, sig_label, label_model, verbose, typ=f"effnet{f}") if verbose: print(mask_probas_effnet) mask_probas_effnet_batches.append(mask_probas_effnet) mask_probas_effnet_folds.append(mask_probas_effnet_batches) for b in range(ims_per_batch): all_ims_effnet.append(np.mean(np.array([mask_probas_effnet_folds[i][b] for i in range(folds_effnet)]), axis=0)) if CFG.resnest and CFG.effnet: mask_probas = CFG.split_cam_level[0]*np.array(all_ims_effnet)+ CFG.split_cam_level[1]*np.array(all_ims_resnest) elif CFG.resnest and not CFG.effnet: mask_probas = all_ims_resnest elif CFG.effnet and not CFG.resnest: mask_probas = all_ims_effnet if verbose: print(mask_probas) return batch_ids_cam, sizes, cell_masks_fullsize_enc_list, mask_probas<set_options>
fit_history = best_model.fit(X_train, y_train, epochs=40, batch_size=100, validation_data=(X_val, y_val), callbacks = [lr_schedule, cb_checkpointer_val] )
Digit Recognizer
11,517,111
libc = ctypes.CDLL("libc.so.6") libc.malloc_trim(0) gc.collect() gc.collect() gc.collect() torch.cuda.empty_cache() torch.cuda.empty_cache() torch.cuda.empty_cache() torch.cuda.empty_cache()<define_variables>
best_model.load_weights('./best_val.hdf5') best_model.evaluate(X_test, y_test )
Digit Recognizer
11,517,111
if CFG.is_demo: index = 0 batch0, ids0 = next(itertools.islice(dl_test, index, None)) if CFG.extra_model_is_tf: batch_tf_green = next(itertools.islice(dtest_tf_green_600, index, None)) batch_tf_rgb_600 = next(itertools.islice(dtest_tf_rgb_600, index, None)) batch_tf_rgb_384 = next(itertools.islice(dtest_tf_ggg_384, index, None)) print(batch_tf_green.shape) print(batch_tf_rgb_600.shape) print(batch_tf_rgb_384.shape) <split>
cb_checkpointer = ModelCheckpoint(filepath = '.. /working/best.hdf5', monitor = 'accuracy', save_best_only = True, mode = 'auto' )
Digit Recognizer
11,517,111
df = pd.DataFrame(columns=["image_id", "pred"]) i = 0 start_time = time.time() ims_done = 0 for i,(( batch_cam, batch_ids_cam),(batch_tf_green_600),(batch_tf_rgb_600),(batch_tf_ggg_384)) in enumerate(zip(dl_test, dtest_tf_green_600, dtest_tf_rgb_600, dtest_tf_ggg_384)) : ims_per_batch = len(batch_ids_cam) batch_seg = cell_masks_df[i*batch_size_ : i*batch_size_ + batch_size_] ids, sizes, cell_masks_fullsize_enc_list, probas = inference_one_batch(batch_cam, batch_ids_cam, batch_seg, ims_per_batch, batch_eff_tf=batch_tf_green_600, batch_rn_tf_600=batch_tf_rgb_600, batch_vit_tf_384=batch_tf_ggg_384, show_image=False, show_seg=False, verbose=False) for id, proba, cell_mask_enc in zip(ids, probas, cell_masks_fullsize_enc_list): pred_string = get_pred_string(proba, cell_mask_enc) d = {"image_id":id, "pred": pred_string} df = df.append(d, ignore_index=True) ims_done += ims_per_batch print(f"---{ims_done} IMAGES DONE---") torch.cuda.empty_cache() gc.collect() print("--- %s seconds ---" %(time.time() - start_time)) <merge>
final_fit_history = best_model.fit(X, y, epochs=40, batch_size=100, callbacks = [lr_schedule, cb_checkpointer] )
Digit Recognizer
11,517,111
sub = pd.merge( data_df, df, how="left", left_on='ID', right_on='image_id', ) def isNaN(num): return num != num for i, row in sub.iterrows(): if isNaN(row['pred']): continue sub.loc[i, 'PredictionString'] = row['pred'] <save_to_csv>
best_model.load_weights('./best.hdf5' )
Digit Recognizer
11,517,111
if (df_from_files == data_df_sample_submission).all().all(): sub.to_csv("submission.csv", index=False) <install_modules>
best_model.evaluate(X, y )
Digit Recognizer
11,517,111
!pip install ../input/kerasapplications/keras-team-keras-applications-3b180cb -f ./ --no-index -q !pip install ../input/efficientnet/efficientnet-1.1.0/ -f ./ --no-index -q<feature_engineering>
pred = pd.DataFrame({'ImageId' : np.arange(test.shape[0])+ 1, 'Label': best_model.predict(test ).argmax(axis=-1)}) pred.to_csv('pred.csv', index=False) pred
Digit Recognizer
11,226,330
os.environ['SM_FRAMEWORK'] = 'tf.keras' <load_pretrained>
pip install livelossplot
Digit Recognizer
11,226,330
MODEL_PATH = '../input/train-fpn-segmentation-model-no-43/' with open(MODEL_PATH+'hparams.json') as json_file: hparams = json.load(json_file) hparams<normalization>
%matplotlib inline
Digit Recognizer
11,226,330
IMG_SIZE = hparams['IMG_SIZE'] SCALE_FACTOR = hparams['SCALE_FACTOR'] K_SPLITS = hparams['K_SPLITS'] def read_tif_file(fname): img = io.imread(fname) img = np.squeeze(img) if img.shape[0] == 3: img = img.swapaxes(0,1) img = img.swapaxes(1,2) return img def map_img2file(fname): img = read_tif_file(fname) dims = np.array(img.shape) ch = 1 if len(dims)== 2 else dims[2] for i in range(ch): f = np.memmap('img{}.dat'.format(i), dtype=np.uint8, mode='w+', shape=(dims[0], dims[1])) f[:] = img[:,:,i] if ch > 1 else img[:,:] del f return dims def get_patch_from_file(dims, pos, psize): ch = 1 if len(dims)== 2 else dims[2] patch = np.zeros([psize[0], psize[1]], dtype=np.uint8)if ch == 1 else np.zeros([psize[0], psize[1], ch], dtype=np.uint8) for i in range(ch): f = np.memmap('img{}.dat'.format(i), dtype=np.uint8, mode='r', shape=(dims[0], dims[1])) p = f[pos[0]:pos[0]+psize[0], pos[1]:pos[1]+psize[1]] crop = p.shape if ch == 1: patch[0:p.shape[0], 0:p.shape[1]] = p else: patch[0:p.shape[0], 0:p.shape[1],i] = p del f return patch, crop<categorify>
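A hypothetical mini-demo of the memmap tiling helpers above on a synthetic image (file names are illustrative; assumes scikit-image with TIFF support):

import numpy as np
from skimage import io

io.imsave('toy.tif', np.random.randint(0, 255, (100, 120, 3), dtype=np.uint8))
dims = map_img2file('toy.tif')  # spills the channels to img0.dat .. img2.dat
patch, crop = get_patch_from_file(dims, [90, 110], [32, 32])
print(patch.shape, crop)        # (32, 32, 3) (10, 10): an edge tile, zero-padded to full size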
train_set = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test_set = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') img_col = 28 img_row = 28
Digit Recognizer
11,226,330
def rle_encode_less_memory(img): pixels = img.T.flatten() pixels[0] = 0 pixels[-1] = 0 runs = np.where(pixels[1:] != pixels[:-1])[0] + 2 runs[1::2] -= runs[::2] return ' '.join(str(x) for x in runs)<feature_engineering>
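A matching decoder sketch for the run-length string above (column-major pixel order, 1-indexed start/length pairs), handy for verifying submissions locally; not part of the original kernel:

import numpy as np

def rle_decode(rle_string, shape):
    s = rle_string.split()
    starts = np.asarray(s[0::2], dtype=np.int64) - 1  # back to 0-indexed offsets
    lengths = np.asarray(s[1::2], dtype=np.int64)
    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for start, length in zip(starts, lengths):
        flat[start:start + length] = 1
    return flat.reshape((shape[1], shape[0])).T       # undo the transpose in the encoder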
X_train_df = train_set.drop(['label'],axis = 1) y_train_df = train_set['label'] X_test_df = test_set X_tr = np.asarray(X_train_df)/255 y_tr = np.asarray(y_train_df) X_te = np.asarray(X_test_df)/255 print(type(X_tr)) print(X_tr.shape) print(X_te.shape)
Digit Recognizer
11,226,330
def create_TTA_batch(img): if len(img.shape)< 4: img = np.expand_dims(img, 0) batch=np.zeros(( img.shape[0]*8,img.shape[1],img.shape[2],img.shape[3]), dtype=np.float32) for i in range(img.shape[0]): orig = tf.keras.preprocessing.image.img_to_array(img[i,:,:,:])/255. batch[i*8,:,:,:] = orig batch[i*8+1,:,:,:] = np.rot90(orig, axes=(0, 1), k=1) batch[i*8+2,:,:,:] = np.rot90(orig, axes=(0, 1), k=2) batch[i*8+3,:,:,:] = np.rot90(orig, axes=(0, 1), k=3) orig = orig[:, ::-1] batch[i*8+4,:,:,:] = orig batch[i*8+5,:,:,:] = np.rot90(orig, axes=(0, 1), k=1) batch[i*8+6,:,:,:] = np.rot90(orig, axes=(0, 1), k=2) batch[i*8+7,:,:,:] = np.rot90(orig, axes=(0, 1), k=3) return batch def mask_TTA(masks): batch=np.zeros(( masks.shape[0],masks.shape[1],masks.shape[2],masks.shape[3]), dtype=np.float32) for i in range(masks.shape[0]//8): batch[i*8,:,:,:] = masks[i*8] batch[i*8+1,:,:,:] = np.rot90(masks[i*8+1], axes=(0, 1), k=3) batch[i*8+2,:,:,:] = np.rot90(masks[i*8+2], axes=(0, 1), k=2) batch[i*8+3,:,:,:] = np.rot90(masks[i*8+3], axes=(0, 1), k=1) batch[i*8+4,:,:,:] = masks[i*8+4][:, ::-1] batch[i*8+5,:,:,:] = np.rot90(masks[i*8+5], axes=(0, 1), k=3)[:, ::-1] batch[i*8+6,:,:,:] = np.rot90(masks[i*8+6], axes=(0, 1), k=2)[:, ::-1] batch[i*8+7,:,:,:] = np.rot90(masks[i*8+7], axes=(0, 1), k=1)[:, ::-1] return(batch )<save_to_csv>
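A quick self-consistency check for the pair above: mask_TTA should exactly undo the eight geometric augmentations applied by create_TTA_batch (a local sanity test on a square input, not part of the original kernel):

import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype(np.float32)
restored = mask_TTA(create_TTA_batch(img))
# every de-augmented view should match the un-augmented first one
assert all(np.allclose(restored[0], restored[k]) for k in range(8))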
train_set['label'].value_counts().sort_index()
Digit Recognizer
11,226,330
MEANING_OF_LIFE = 42 MEANING_OF_LIFE_REV = int(str(MEANING_OF_LIFE)[::-1]) PATH = '../input/hubmap-kidney-segmentation/test/' filelist = glob.glob(PATH+'*.tiff') if len(filelist) == 5: filelist = filelist[:1] SUB_FILE = './submission.csv' with open(SUB_FILE, 'w') as f: f.write("id,predicted\n") MODELS = [MODEL_PATH+'FPN-model-2'] s_th = MEANING_OF_LIFE+K_SPLITS p_th = IMG_SIZE*IMG_SIZE//32 size = int(IMG_SIZE * SCALE_FACTOR) OVERLAP = size//2 STEP = size-OVERLAP for file in filelist: fid = file.replace('\\', '.').replace('/', '.').split('.')[-2] print(fid) dims = map_img2file(file) pmask = np.zeros(dims[:2], dtype=np.uint8) x_shft, y_shft = 0, 0 if fid == 'afa5e8098': x_shft, y_shft = MEANING_OF_LIFE, MEANING_OF_LIFE_REV for modl in range(len(MODELS)): print(MODELS[modl]) mname = MODELS[modl] with open(mname+'.json', 'r') as m: lm = m.read() model = model_from_json(lm) model.load_weights(mname+'.h5') for x in range((dims[0]-OVERLAP-x_shft)//STEP + min(1, (dims[0]-OVERLAP-x_shft) % STEP)): for y in range((dims[1]-OVERLAP-y_shft)//STEP + min(1, (dims[1]-OVERLAP-y_shft) % STEP)): tile, crop = get_patch_from_file(dims, [x*STEP+x_shft, y*STEP+y_shft], [size, size]) patch = cv2.resize(tile, dsize=(IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_AREA) _, s, _ = cv2.split(cv2.cvtColor(patch, cv2.COLOR_BGR2HSV)) if (s > s_th).sum() > p_th: batch = create_TTA_batch(patch) preds = model.predict(batch) pred = mask_TTA(preds) mask = np.rint(np.sum(pred, axis=0)) pint = cv2.resize(mask.astype(int), dsize=(size, size), interpolation=cv2.INTER_NEAREST) pmask[x*STEP:x*STEP+crop[0], y*STEP:y*STEP+crop[1]] += pint[0:crop[0], 0:crop[1]].astype(np.uint8) pmask = pmask >= MEANING_OF_LIFE_REV - K_SPLITS with open(SUB_FILE, 'a') as f: f.write("{},".format(fid)) f.write(rle_encode_less_memory(pmask)) f.write("\n")<set_options>
X_train_f = X_tr.reshape(42000,img_col,img_row,1) X_test_f= X_te.reshape(28000,img_col,img_row,1) y_train_f = to_categorical(y_tr) y_train_f.shape[1]
Digit Recognizer
11,226,330
%rm -f *.dat<import_modules>
model = Sequential() model.add(Conv2D(64,(4,4),padding = 'valid', input_shape=(28,28,1))) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128,(2,2),padding = 'same')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128,(2,2),padding = 'same')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(512)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dropout(0.1)) model.add(Dense(256)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dropout(0.1)) model.add(Dense(128)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dense(64)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dense(64)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dense(10 , activation = 'softmax'))
Digit Recognizer
11,226,330
<save_to_csv>
init_lr = 1e-2
decay_steps = 150
alpha = 1e-5
beta = 1e-8
num_periods = 4
lin_cos_dec1 = tf.keras.experimental.LinearCosineDecay(init_lr, decay_steps, num_periods=num_periods, alpha=alpha, beta=beta, name='LinCosDec 1')
Digit Recognizer
11,226,330
submit_file = '../input/hubmaplocal/FrogUnetR34_ASPP_AttDecode_final_thr0.4.csv'
submission = pd.read_csv(submit_file, index_col='id')
sample_sub = pd.read_csv('../input/hubmap-kidney-segmentation/sample_submission.csv', index_col='id')
pub_ids = submission.index.values
predictions = submission.values
sample_sub.loc[pub_ids] = predictions
sample_sub.to_csv('submission.csv')
print('done!')
print(sample_sub)
<install_modules>
opt = Adam(lr=0.00005)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
Digit Recognizer
11,226,330
!pip install ../input/packages/pretrainedmodels-0.7.4-py3-none-any.whl
!pip install ../input/segmentationmodelspytorch/segmentation_models/timm-0.1.20-py3-none-any.whl
!pip install ../input/packages/efficientnet_pytorch-0.6.3-py2.py3-none-any.whl
!pip install ../input/segmentationmodelspytorch/segmentation_models/segmentation_models_pytorch-0.1.2-py3-none-any.whl
clear_output()
<install_modules>
epochs = 150
batch_size = 64
X_train, X_val, y_train, y_val = train_test_split(X_train_f, y_train_f, test_size=0.3, random_state=5)
image_gen = ImageDataGenerator(rotation_range=25, shear_range=0.25, zoom_range=[1.25, 0.75], width_shift_range=0.1, height_shift_range=0.1)
image_gen2 = ImageDataGenerator()
train_batches = image_gen.flow(X_train, y_train, batch_size=batch_size)
val_batches = image_gen2.flow(X_val, y_val, batch_size=batch_size)
Digit Recognizer
11,226,330
!pip install git+https://github.com/qubvel/segmentation_models.pytorch<load_from_csv>
steps_per_epoch = train_batches.n // train_batches.batch_size
validation_steps = val_batches.n // val_batches.batch_size
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.75, patience=3, min_lr=0.00001, mode='auto', verbose=1)
callbacks = [PlotLossesKerasTF(), reduce_lr]
Digit Recognizer
11,226,330
sample_submission = pd.read_csv('../input/hubmap-kidney-segmentation/sample_submission.csv')
sample_submission = sample_submission.set_index('id')
seed = 1015
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def rle_encode_less_memory(img):
    # Column-major (note img.T) run-length encoding with 1-based start indices,
    # as expected by the competition's submission format.
    pixels = img.T.flatten()
    pixels[0] = 0
    pixels[-1] = 0
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 2
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)

test_files = sample_submission.index.tolist()
<import_modules>
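# Small sanity check (illustrative addition, not from the original kernel):
# a 2x2 blob in a 4x4 mask encodes to two runs of length 2 in column-major,
# 1-indexed "start length" pairs.
toy = np.zeros((4, 4), dtype=np.uint8)
toy[1:3, 1:3] = 1
print(rle_encode_less_memory(toy))       # -> "6 2 10 2"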
history = model.fit_generator(generator=train_batches, steps_per_epoch=steps_per_epoch, epochs=epochs, validation_data=val_batches, validation_steps=validation_steps, callbacks=callbacks)
Digit Recognizer
11,226,330
<choose_model_class><EOS>
predictions = model.predict_classes(X_test_f, verbose=0)
submissions = pd.DataFrame({"ImageId": list(range(1, len(predictions) + 1)), "Label": predictions})
submissions.to_csv("mysub5.csv", index=False, header=True)
Digit Recognizer
11,492,746
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_pretrained>
from tensorflow.keras import (activations, regularizers, Sequential, utils, callbacks, optimizers)
from tensorflow.keras.layers import (Flatten, Dense, BatchNormalization, Activation, Dropout, Conv2D, MaxPool2D)
Digit Recognizer
11,492,746
PATH = ".. /input/hubmap-models2/" model_names = [ "1_unet-timm-effb7_0.9509_epoch_28.pth", "2_unet-timm-effb7_0.9488_epoch_28.pth", "3_unet-timm-effb7_0.9503_epoch_29.pth", "4_unet-timm-effb7_0.9500_epoch_28.pth", "5_unet-timm-effb7_0.9518_epoch_27.pth", ] models = [] for model_name in model_names: models.append(torch.load(PATH + model_name, map_location= 'cpu'))<define_variables>
BASEPATH = '../input/digit-recognizer/'

def reload(df='train.csv', msg=True, path=BASEPATH):
    o = pd.read_csv(path + df)  # use the `path` argument rather than the global
    print(f'{df} loaded!')
    return o

train = reload()
y_train = train.label.copy()
y_train = utils.to_categorical(y_train, 10)
X_train = train.drop(columns='label')

def preprocess(df):
    df = df / 256.0
    df = df.values.reshape(-1, 28, 28, 1)
    return df

X_train = preprocess(X_train)
test = reload('test.csv')
X_test = preprocess(test)
Digit Recognizer
11,492,746
sz = 512
test_path = '../input/hubmap-kidney-segmentation/test/'
for step, person_idx in enumerate(test_files):
    print(f'load {step+1}/{len(test_files)} data...')
    img = tiff.imread(test_path + person_idx + '.tiff').squeeze()
    if img.shape[0] == 3:
        img = img.transpose(1, 2, 0)
    predict_mask_l1 = np.zeros((img.shape[0], img.shape[1]), dtype=bool)
    landscape = img.shape[0] // 512
    portrait = img.shape[1] // 512
    sz = 512
    print('predict mask...')
    for x in tqdm(range(landscape)):
        for y in range(portrait):
            start_x = 512 * x
            end_x = 1024 + start_x
            start_y = 512 * y
            end_y = 1024 + start_y
            if x == landscape - 1:
                start_x = img.shape[0] - 1024
                end_x = img.shape[0]
            if y == portrait - 1:
                start_y = img.shape[1] - 1024
                end_y = img.shape[1]
            sample_img = img[start_x:end_x, start_y:end_y, :]
            sample_img = cv2.resize(sample_img, (sz, sz), interpolation=cv2.INTER_AREA) / 256
            sample_img = torch.cuda.FloatTensor(sample_img.transpose([2, 0, 1])[np.newaxis, ...])
            sample_pred = np.zeros([512, 512])
            for model in models:
                sample_pred += model.predict(sample_img).cpu().numpy()[0, 0, :, :]
            sample_pred = sample_pred / len(models)
            sample_pred = cv2.resize(sample_pred, (1024, 1024), interpolation=cv2.INTER_NEAREST)
            sample_pred = np.where(sample_pred > 0.3, True, False).astype(bool)
            predict_mask_l1[start_x + 256:end_x - 256, start_y + 256:end_y - 256] = sample_pred[256:256 + 512, 256:256 + 512]
            del sample_img
            del sample_pred
            gc.collect()
    predict_mask_l1 = predict_mask_l1.astype(np.uint8)
    contours, hierarchy = cv2.findContours(predict_mask_l1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    predict_mask_l2 = np.zeros((img.shape[0], img.shape[1]), dtype=bool)
    for cont in tqdm(contours):
        center_y, center_x = cont.mean(axis=0).round(0).astype(int)[0]
        left_x = int(center_x - 512)
        top_y = int(center_y - 512)
        if left_x < 0:
            left_x = 0
        elif left_x + 1024 > img.shape[0]:
            left_x = img.shape[0] - 1024
        if top_y < 0:
            top_y = 0
        elif top_y + 1024 > img.shape[1]:
            top_y = img.shape[1] - 1024
        sample_img_l2 = img[left_x:left_x + 1024, top_y:top_y + 1024, :]
        sample_img_l2 = cv2.resize(sample_img_l2, (sz, sz), interpolation=cv2.INTER_AREA) / 256
        sample_img_l2 = torch.cuda.FloatTensor(sample_img_l2.transpose([2, 0, 1])[np.newaxis, ...])
        sample_pred_l2 = np.zeros([512, 512])
        for model in models:
            sample_pred_l2 += model.predict(sample_img_l2).cpu().numpy()[0, 0, :, :]
        sample_pred_l2 = sample_pred_l2 / len(models)
        sample_pred_l2 = cv2.resize(sample_pred_l2, (1024, 1024), interpolation=cv2.INTER_NEAREST)
        sample_pred_l2 = np.where(sample_pred_l2 > 0.5, True, False).astype(np.uint8)
        contours_l2, hierarchy = cv2.findContours(sample_pred_l2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        if len(contours_l2) < 1:
            print('no contour')
            continue
        for cont_l2 in contours_l2:
            min_y, min_x = cont_l2.min(axis=0).round(0).astype(int)[0]
            max_y, max_x = cont_l2.max(axis=0).round(0).astype(int)[0]
            if (min_x < 512) and (max_x > 512):
                if (min_y < 512) and (max_y > 512):
                    sample_mask_l2 = np.zeros(sample_pred_l2.shape, dtype=np.uint8)
                    sample_center = cv2.drawContours(sample_mask_l2, [cont_l2], 0, (255, 255, 255), -1)
                    predict_mask_l2[left_x:left_x + 1024, top_y:top_y + 1024] = np.logical_or(predict_mask_l2[left_x:left_x + 1024, top_y:top_y + 1024], sample_center)
    del predict_mask_l1
    del img
    gc.collect()
    print('convert mask to rle')
    predict_rle = rle_encode_less_memory(predict_mask_l2)
    sample_submission.loc[person_idx, 'predicted'] = predict_rle
    del predict_rle
    del predict_mask_l2
    gc.collect()
sample_submission = sample_submission.reset_index()
sample_submission.to_csv('/kaggle/working/submission.csv', index=False)
<install_modules>
l1 = 0
l2 = 0.01
ini_lr = 0.001
val_size = 0.3
batch_size = 30
activation = 'relu'
if activation == 'selu':
    initializer = 'lecun_normal'
elif activation in ['relu', 'elu']:
    initializer = 'he_normal'
else:
    initializer = 'glorot_normal'
sched_lr_val_acc = True
decay_rate = 0.97
X_tr, X_val, y_tr, y_val = train_test_split(X_train, y_train, test_size=val_size, random_state=713)
n_train_val_samples = X_tr.shape[0]
n_train_samples = X_tr.shape[0] + X_val.shape[0]

def generate_tfdata(X, y=None, batch_size=batch_size, shuffle_repeat=True):
    if y is None:
        data = Dataset.from_tensor_slices((X,))
    else:
        data = Dataset.from_tensor_slices((X, y))
    if shuffle_repeat:
        data = data.shuffle(10000, 713, reshuffle_each_iteration=True).repeat()
    if batch_size:
        data = data.batch(batch_size)
    return data.prefetch(1)

tr_set = generate_tfdata(X_tr, y_tr)
val_set = generate_tfdata(X_val, y_val, shuffle_repeat=False)
train_set = generate_tfdata(X_train, y_train)
test_set = generate_tfdata(X_test, shuffle_repeat=False)
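# Quick shape check (illustrative addition, not from the original kernel):
# pulling one batch confirms the (batch, 28, 28, 1) images and one-hot
# (batch, 10) labels that the model built below expects.
xb, yb = next(iter(tr_set))
print(xb.shape, yb.shape)   # (30, 28, 28, 1) (30, 10) with batch_size = 30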
Digit Recognizer
11,492,746
!pip install ../input/keras-applications/Keras_Applications-1.0.8/ -f ./ --no-index
!pip install ../input/image-classifiers/image_classifiers-1.0.0/ -f ./ --no-index
!pip install ../input/efficientnet-1-0-0/efficientnet-1.0.0/ -f ./ --no-index
!pip install ../input/segmentation-models/segmentation_models-1.0.1/ -f ./ --no-index
<define_variables>
from functools import partial  # assumption: needed for the Default* layer factories below

def get_regularizer(l1=l1, l2=l2):
    return regularizers.l1_l2(l1=l1, l2=l2)

def get_optimizer(lr=ini_lr, beta_1=0.9, beta_2=0.999, decay_rate=decay_rate, n_samples=n_train_samples, batch_size=batch_size):
    if sched_lr_val_acc == False:
        lr = optimizers.schedules.ExponentialDecay(lr, decay_steps=n_samples // batch_size // 2, decay_rate=decay_rate)
    return optimizers.Adam(lr, beta_1=beta_1, beta_2=beta_2)

def get_callbacks(es_params=['val_loss', 1e-3, 15], lr_params=['val_accuracy', 0.001, 5]):
    cb = []
    cb.append(callbacks.EarlyStopping(monitor=es_params[0], min_delta=es_params[1], patience=es_params[2], verbose=1, restore_best_weights=True))
    if sched_lr_val_acc:
        cb.append(callbacks.ReduceLROnPlateau(monitor=lr_params[0], factor=0.5, patience=lr_params[2], min_delta=lr_params[1], min_lr=ini_lr * 0.1**5))
    cb.append(callbacks.TerminateOnNaN())
    return cb

DefaultConv2D = partial(Conv2D, kernel_size=3, strides=1, activation=activation, kernel_initializer=initializer, padding='SAME')
DefaultMaxPool2D = partial(MaxPool2D, pool_size=2, strides=2)
DefaultDense = partial(Dense, activation=activation, kernel_initializer=initializer, kernel_regularizer=get_regularizer())

def get_model(compiling=True, n_samples=n_train_samples, batch_size=batch_size, activation=activation, initializer=initializer, p=0.5):
    model = Sequential(
        [DefaultConv2D(filters=32, kernel_size=5, input_shape=(28, 28, 1)),
         BatchNormalization(),
         DefaultMaxPool2D(),
         DefaultConv2D(filters=64),
         BatchNormalization(),
         DefaultConv2D(filters=64),
         BatchNormalization(),
         DefaultMaxPool2D(),
         DefaultConv2D(filters=128),
         BatchNormalization(),
         DefaultConv2D(filters=128),
         BatchNormalization(),
         DefaultMaxPool2D(),
         Flatten(),
         DefaultDense(units=64),
         Dropout(p),
         DefaultDense(units=32),
         Dropout(p),
         Dense(units=10, activation='softmax')]
    )
    optimizer = get_optimizer(n_samples=n_samples, batch_size=batch_size)
    if compiling:
        opt = get_optimizer()
        model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
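# Illustrative note (added; not from the original kernel): functools.partial
# pre-binds keyword defaults, so DefaultConv2D(filters=32) expands to
# Conv2D(filters=32, kernel_size=3, strides=1, activation='relu',
# kernel_initializer='he_normal', padding='SAME'), and any preset can still be
# overridden per call:
layer = DefaultConv2D(filters=32, kernel_size=5)  # kernel_size overrides the preset 3
print(layer.kernel_size)                          # (5, 5)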
Digit Recognizer
11,492,746
%env SM_FRAMEWORK=tf.keras<set_options>
verbose = 1
model = get_model(n_samples=n_train_val_samples)
print(model.summary())
epochs = 100
cb = get_callbacks()
history = model.fit(tr_set, epochs=epochs, steps_per_epoch=n_train_val_samples // batch_size, validation_data=val_set, callbacks=cb, verbose=verbose)
Digit Recognizer
11,492,746
warnings.filterwarnings('ignore')
print('tensorflow version:', tf.__version__)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if gpu_devices:
    for gpu_device in gpu_devices:
        print('device available:', gpu_device)
pd.set_option('display.max_columns', None)
<define_variables>
y_pred = model.predict(test_set)
y_pred = np.argmax(y_pred, axis=1)
submission = reload('sample_submission.csv')
submission['Label'] = y_pred
submission.to_csv('submission.csv', index=False)
Digit Recognizer
11,499,669
TEST = True
KAGGLE = True
MDLS_FOLDS = {'v39': [0, 2, 3, 4]}
if KAGGLE:
    DATA_PATH = '../input/hubmap-kidney-segmentation'
    MDLS_PATHS = {ver: f'../input/kidney-models-{ver}' for ver, _ in MDLS_FOLDS.items()}
else:
    DATA_PATH = './data2'
    MDLS_PATHS = {ver: f'./models_{ver}' for ver, _ in MDLS_FOLDS.items()}
THRESHOLD = .35
VOTERS = 1
TTAS = [0, 1]
EXPAND = 4
MIN_OVERLAP = 1 / 16
IDNT = rasterio.Affine(1, 0, 0, 0, 1, 0)
STRATEGY = tf.distribute.get_strategy()
SUB_PATH = f'{DATA_PATH}/test' if TEST else f'{DATA_PATH}/train'
start_time = time.time()
<load_pretrained>
train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')
train.head()
Digit Recognizer
11,499,669
params_dict = {}
for ver, _ in MDLS_FOLDS.items():
    with open(f'{MDLS_PATHS[ver]}/params.json') as file:
        params_dict[ver] = json.load(file)
for ver, params in params_dict.items():
    print('version:', ver, '| loaded params:', params, '\n')
<categorify>
y_train = train['label']
x_train, x_test = train.iloc[:, 1:], test
Digit Recognizer
11,499,669
def enc2mask(encs, shape):
    # Decode one or more run-length strings into a label mask (column-major,
    # 1-indexed starts; class m is written as pixel value m + 1).
    img = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for m, enc in enumerate(encs):
        if isinstance(enc, float) and np.isnan(enc):  # np.float is a deprecated alias for float
            continue
        s = enc.split()
        for i in range(len(s) // 2):
            start = int(s[2 * i]) - 1
            length = int(s[2 * i + 1])
            img[start:start + length] = 1 + m
    return img.reshape(shape).T

def rle_encode_less_memory(img):
    pixels = img.T.flatten()
    pixels[0] = 0
    pixels[-1] = 0
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 2
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)
<compute_test_metric>
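# Hedged round-trip check (illustrative addition, not from the original kernel):
# encoding a mask and decoding it again reproduces the mask, since both helpers
# use the same column-major, 1-indexed RLE convention.
mask = np.zeros((6, 6), dtype=np.uint8)
mask[2:4, 2:5] = 1
rle = rle_encode_less_memory(mask)
assert (enc2mask([rle], (6, 6)) == mask).all()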
x_train, x_test = x_train / 255., x_test / 255.
x_train, x_test = x_train.values.reshape(-1, 28, 28, 1), x_test.values.reshape(-1, 28, 28, 1)
Digit Recognizer
11,499,669
def dice_coef(y_true, y_pred, smooth=1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_loss(y_true, y_pred, smooth=1):
    return 1 - dice_coef(y_true, y_pred, smooth)

def bce_dice_loss(y_true, y_pred):
    return params['bce_weight'] * binary_crossentropy(y_true, y_pred) + (1 - params['bce_weight']) * dice_loss(y_true, y_pred)

def get_model(backbone, input_shape, path, loss_type='bce_dice', umodel='unet', classes=1, lr=.001):
    if backbone == 'efficientnetb0':
        weights = f'{path}/efficientnet-b0_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'
    elif backbone == 'efficientnetb1':
        weights = f'{path}/efficientnet-b1_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'
    elif backbone == 'efficientnetb2':
        weights = f'{path}/efficientnet-b2_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'
    elif backbone == 'efficientnetb3':
        weights = f'{path}/efficientnet-b3_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'
    elif backbone == 'resnet34':
        weights = f'{path}/resnet34_imagenet_1000_no_top.h5'
    else:
        raise AttributeError('mode parameter error')
    with STRATEGY.scope():
        if loss_type == 'bce_dice':
            loss = bce_dice_loss
        elif loss_type == 'bce_jaccard_loss':
            loss = bce_jaccard_loss
        else:
            raise AttributeError('loss mode parameter error')
        if umodel == 'unet':
            model = Unet(backbone_name=backbone, encoder_weights=weights, input_shape=input_shape, classes=classes, activation='sigmoid')
        elif umodel == 'fpn':
            model = FPN(backbone_name=backbone, encoder_weights=weights, input_shape=input_shape, classes=classes, activation='sigmoid')
        elif umodel == 'link':
            model = Linknet(backbone_name=backbone, encoder_weights=weights, input_shape=input_shape, classes=classes, activation='sigmoid')
        else:
            raise AttributeError('umodel mode parameter error')
        model.compile(
            optimizer=tfa.optimizers.Lookahead(tf.keras.optimizers.Adam(learning_rate=lr)),
            loss=loss,
            metrics=[dice_coef]
        )
    return model
<categorify>
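# Tiny numeric check (illustrative addition, not from the original kernel;
# assumes K is the keras backend imported elsewhere in the kernel): with one of
# two positive pixels matched, dice_coef is (2*1 + 1) / (2 + 1 + 1) = 0.75.
y_true = tf.constant([[1., 1., 0., 0.]])
y_pred = tf.constant([[1., 0., 0., 0.]])
print(float(dice_coef(y_true, y_pred)))  # 0.75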
y_train = to_categorical(y_train, num_classes=10)
Digit Recognizer
11,499,669
def make_grid(shape, window=256, min_overlap=32):
    x, y = shape
    nx = x // (window - min_overlap) + 1
    x1 = np.linspace(0, x, num=nx, endpoint=False, dtype=np.int64)
    x1[-1] = x - window
    x2 = (x1 + window).clip(0, x)
    ny = y // (window - min_overlap) + 1
    y1 = np.linspace(0, y, num=ny, endpoint=False, dtype=np.int64)
    y1[-1] = y - window
    y2 = (y1 + window).clip(0, y)
    slices = np.zeros((nx, ny, 4), dtype=np.int64)
    for i in range(nx):
        for j in range(ny):
            slices[i, j] = x1[i], x2[i], y1[j], y2[j]
    return slices.reshape(nx * ny, 4)

def flip(img, axis=0):
    if axis == 1:
        return img[::-1, :, ]
    elif axis == 2:
        return img[:, ::-1, ]
    elif axis == 3:
        return img[::-1, ::-1, ]
    else:
        return img
<define_variables>
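# Illustrative check (added; not from the original kernel): make_grid() covers
# the whole image with fixed-size windows, overlapping by at least min_overlap
# and clamping the last window of each axis to the image border.
grid = make_grid((600, 500), window=256, min_overlap=32)
for x1, x2, y1, y2 in grid:
    assert x2 - x1 == 256 and y2 - y1 == 256
print(grid.shape)   # (9, 4): 3 x 3 windows, each stored as x1, x2, y1, y2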
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(filters=32, kernel_size=(5, 5), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu'),
    tf.keras.layers.Dropout(.4),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dropout(.4),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
model.summary()
Digit Recognizer
11,499,669
img_files = [x for x in os.listdir(SUB_PATH) if '.tiff' in x]
print('images idxs:', img_files)
<create_dataframe>
num_epochs = 50
history = model.fit(
    x_train, y_train,
    epochs=num_epochs,
    callbacks=[
        tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6),
        tf.keras.callbacks.ReduceLROnPlateau(monitor='loss', patience=4)
    ],
    validation_split=0.2,
    verbose=0
)
Digit Recognizer
11,499,669
df_sub = pd.DataFrame(subm).T
df_sub
<save_to_csv>
scores = model.evaluate(x_train, y_train, batch_size=32)
print(f'Loss: {scores[0]} Accuracy: {scores[1]}')
Digit Recognizer
11,499,669
df_sub.to_csv('submission.csv', index=False)<set_options>
y_predicted = model.predict(x_test)
y_test_labels = np.argmax(y_predicted, axis=1)
Digit Recognizer
11,499,669
color = sns.color_palette()
%matplotlib inline
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 50)
<load_from_csv>
x_test_ids = [(i + 1) for i in range(x_test.shape[0])]
Digit Recognizer
11,499,669
<feature_engineering><EOS>
results = pd.DataFrame({
    'ImageId': x_test_ids,
    'Label': y_test_labels
})
results.to_csv('submission.csv', index=False)
Digit Recognizer
11,088,069
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<filter>
DEVICE = "TPU" SEED = 8080 FOLDS = 5 FOLD_WEIGHTS = [1./FOLDS]*FOLDS BATCH_SIZE = 256 EPOCHS = 5000 MONITOR = "val_loss" MONITOR_MODE = "min" ES_PATIENCE = 5 LR_PATIENCE = 0 LR_FACTOR = 0.5 EFF_NET = 3 EFF_NET_WEIGHTS = 'noisy-student' LABEL_SMOOTHING = 0.1 VERBOSE = 1
Digit Recognizer
11,088,069
df_train[df_train['floor'] == 33]<drop_column>
!pip install -q efficientnet >> /dev/null
Digit Recognizer
11,088,069
df_train.drop(df_train.index[7457], inplace=True)<data_type_conversions>
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras.backend as K
import efficientnet.tfkeras as efn
from sklearn.model_selection import KFold
from keras.preprocessing.image import ImageDataGenerator
Digit Recognizer
11,088,069
df_train['year'] = df_train['timestamp'].apply(lambda x: x[:4]).astype(int)
df_train['month'] = df_train['timestamp'].apply(lambda x: x[5:7]).astype(int)
df_test['year'] = df_test['timestamp'].apply(lambda x: x[:4]).astype(int)
df_test['month'] = df_test['timestamp'].apply(lambda x: x[5:7]).astype(int)
<sort_values>
if DEVICE == "TPU": print("connecting to TPU...") try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Running on TPU ', tpu.master()) except ValueError: print("Could not connect to TPU") tpu = None if tpu: try: print("initializing TPU...") tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) print("TPU initialized") except _: print("failed to initialize TPU") else: DEVICE = "GPU" if DEVICE != "TPU": print("Using default strategy for CPU and single GPU") strategy = tf.distribute.get_strategy() if DEVICE == "GPU": print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU'))) AUTO = tf.data.experimental.AUTOTUNE REPLICAS = strategy.num_replicas_in_sync print(f'REPLICAS: {REPLICAS}' )
Digit Recognizer
11,088,069
missingValues = df_train.columns[df_train.isnull().any()].tolist()
pd.isnull(df_train[missingValues]).sum().sort_values(ascending=False)
<define_variables>
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") train.describe()
Digit Recognizer
11,088,069
cols_fillna_mode = ['floor', 'product_type', 'num_room', 'state', 'hospital_beds_raion', 'build_count_brick', 'build_count_monolith', 'green_part_2000']
cols_fillna_mean = ['life_sq', 'metro_min_walk', 'metro_km_walk', 'railroad_station_walk_km', 'railroad_station_walk_min', 'cafe_sum_1500_min_price_avg', 'cafe_sum_1500_max_price_avg', 'cafe_avg_price_1500', 'cafe_sum_2000_max_price_avg', 'cafe_avg_price_2000']
<categorify>
X = train.drop(labels=['label'], axis=1)
X = X.astype('float32')
X = X / 255
X = X.values.reshape(X.shape[0], 28, 28, 1)
X = np.pad(X, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='constant')
X = np.squeeze(X, axis=-1)
X = stacked_img = np.stack((X,) * 3, axis=-1)
X.shape
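# Shape walk-through (illustrative addition, not from the original kernel):
# EfficientNet expects at least 32x32 RGB input, hence the 2-pixel zero pad on
# each side of the 28x28 digits and the 3-way channel stack.
demo = np.zeros((5, 28, 28, 1), dtype=np.float32)
demo = np.pad(demo, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='constant')  # (5, 32, 32, 1)
demo = np.stack((np.squeeze(demo, axis=-1),) * 3, axis=-1)              # (5, 32, 32, 3)
assert demo.shape == (5, 32, 32, 3)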
Digit Recognizer
11,088,069
for col in cols_fillna_mode:
    df_train[col].fillna(df_train[col].mode().iloc[0], inplace=True)
    df_test[col].fillna(df_train[col].mode().iloc[0], inplace=True)
for col in cols_fillna_mean:
    df_train[col].fillna(df_train[col].mean(), inplace=True)
    df_test[col].fillna(df_train[col].mean(), inplace=True)
<define_variables>
y = train['label'].values.astype('float32')
y = tf.keras.utils.to_categorical(y, 10)
y
Digit Recognizer
11,088,069
numerical_features = df_train.dtypes[df_train.dtypes != "object"].index
categorical_features = df_train.dtypes[df_train.dtypes == "object"].index
print("Number of numerical features: ", len(numerical_features))
print("Number of categorical features: ", len(categorical_features))
<sort_values>
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv") test.describe()
Digit Recognizer
11,088,069
df_train.isna().sum().sort_values(ascending=False)<drop_column>
X_test = test.astype('float32')
X_test = X_test / 255
X_test = X_test.values.reshape(X_test.shape[0], 28, 28, 1)
X_test = np.pad(X_test, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='constant')
X_test = np.squeeze(X_test, axis=-1)
X_test = stacked_img = np.stack((X_test,) * 3, axis=-1)
X_test.shape
Digit Recognizer
11,088,069
df_train.drop(['id', 'price_doc', 'timestamp'], axis=1, inplace=True)
id_test = df_test['id']
df_test.drop(['id', 'timestamp'], axis=1, inplace=True)
<define_variables>
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=10,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    vertical_flip=False
)
Digit Recognizer
11,088,069
numerical_features = df_train.dtypes[df_train.dtypes != "object"].index
categorical_features = df_train.dtypes[df_train.dtypes == "object"].index
print("Number of numerical features: ", len(numerical_features))
print("Number of categorical features: ", len(categorical_features))
<categorify>
eff_nets = [
    efn.EfficientNetB0,
    efn.EfficientNetB1,
    efn.EfficientNetB2,
    efn.EfficientNetB3,
    efn.EfficientNetB4,
    efn.EfficientNetB5,
    efn.EfficientNetB6,
    efn.EfficientNetB7,
    efn.EfficientNetL2,
]

def build_model():
    inp = tf.keras.layers.Input(shape=(X.shape[1], X.shape[2], X.shape[3]))
    oup = eff_nets[EFF_NET](
        input_shape=(X.shape[1], X.shape[2], X.shape[3]),
        weights=EFF_NET_WEIGHTS,
        include_top=False,
    )(inp)
    oup = tf.keras.layers.GlobalAveragePooling2D()(oup)
    oup = tf.keras.layers.Dense(512, activation='linear')(oup)
    oup = tf.keras.layers.Activation('relu')(oup)
    oup = tf.keras.layers.Dropout(0.5)(oup)
    oup = tf.keras.layers.Dense(10, activation='linear')(oup)
    oup = tf.keras.layers.Activation('softmax')(oup)
    model = tf.keras.Model(inputs=[inp], outputs=[oup])
    loss = tf.keras.losses.CategoricalCrossentropy(
        from_logits=False,
        label_smoothing=LABEL_SMOOTHING,
    )
    opt = tf.keras.optimizers.Nadam(learning_rate=3e-4)
    model.compile(optimizer=opt, loss=loss, metrics=['acc'])
    return model

build_model().summary()
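# Small demo (illustrative addition, not from the original kernel): with 10
# classes, label_smoothing=0.1 softens the one-hot target to 0.9 for the true
# class and 0.01 elsewhere, so even a near-perfect prediction keeps a non-zero
# loss, discouraging over-confident outputs.
loss_plain = tf.keras.losses.CategoricalCrossentropy()
loss_smooth = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1)
y_true_demo = tf.one_hot([3], depth=10)
y_pred_demo = y_true_demo * 0.99 + 0.001          # valid distribution, argmax correct
print(float(loss_plain(y_true_demo, y_pred_demo)),
      float(loss_smooth(y_true_demo, y_pred_demo)))  # smoothed loss is larger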
Digit Recognizer
11,088,069
encoder = OneHotEncoder(handle_unknown='error')
encoder_cols_train = pd.DataFrame(encoder.fit_transform(df_train[categorical_features]).toarray())
encoder_cols_test = pd.DataFrame(encoder.transform(df_test[categorical_features]).toarray())
<categorify>
%%time
oof = np.zeros((X.shape[0], y.shape[1]))
preds = np.zeros((X_test.shape[0], y.shape[1]))
skf = KFold(n_splits=FOLDS, shuffle=True, random_state=SEED)
for fold, (idxT, idxV) in enumerate(skf.split(X)):
    if DEVICE == 'TPU':
        if tpu:
            tf.tpu.experimental.initialize_tpu_system(tpu)
    # The fold banner prints were garbled in extraction; a plain separator is assumed here.
    print('#' * 25)
    print(f'#### FOLD {fold + 1}')
    print('#' * 25)
    K.clear_session()
    with strategy.scope():
        model = build_model()
    weights_filename = 'fold-%i.h5' % fold
    sv = tf.keras.callbacks.ModelCheckpoint(
        weights_filename, monitor=MONITOR, verbose=VERBOSE, save_best_only=True,
        save_weights_only=True, mode=MONITOR_MODE, save_freq='epoch')
    lrr = tf.keras.callbacks.ReduceLROnPlateau(
        monitor=MONITOR, factor=LR_FACTOR, patience=LR_PATIENCE, verbose=VERBOSE, mode=MONITOR_MODE)
    es = tf.keras.callbacks.EarlyStopping(
        monitor=MONITOR, patience=ES_PATIENCE, verbose=VERBOSE, mode=MONITOR_MODE)
    print('Generating train data...')
    i = 0
    datagen.fit(X[idxT])
    steps = 2 * (X[idxT].shape[0] // BATCH_SIZE)
    X_train = None
    y_train = None
    with tqdm(total=steps) as pbar:
        for arr in datagen.flow(X[idxT], y[idxT], batch_size=BATCH_SIZE):
            if X_train is None:
                X_train = arr[0]
                y_train = arr[1]
            else:
                X_train = np.concatenate((X_train, arr[0]))
                y_train = np.concatenate((y_train, arr[1]))
            i += 1
            pbar.update(1)
            if i >= steps:
                break
    print('Training...')
    history = model.fit(
        X_train, y_train,
        batch_size=BATCH_SIZE,
        epochs=EPOCHS,
        callbacks=[sv, lrr, es],
        validation_data=(X[idxV], y[idxV]),
        verbose=VERBOSE,
    )
    print('Loading best model...')
    model.load_weights('fold-%i.h5' % fold)
    print('Predicting OOF...')
    oof[idxV, :] = model.predict([X[idxV]], verbose=VERBOSE)
    print('Predicting Test...')
    preds += (model.predict([X_test], verbose=VERBOSE) * FOLD_WEIGHTS[fold])
    acc_sum = 0
    for k in idxV:
        if np.argmax(oof[k]) == np.argmax(y[k]):
            acc_sum += 1
    print('>>>> FOLD {} Accuracy = {:.4f}%'.format(fold + 1, acc_sum / len(idxV) * 100))
    print()
Digit Recognizer
11,088,069
encoder_cols_train.columns = encoder.get_feature_names(categorical_features)
encoder_cols_test.columns = encoder.get_feature_names(categorical_features)
encoder_cols_train.index = df_train.index
encoder_cols_test.index = df_test.index
<drop_column>
final_predictions = pd.Series(np.argmax(preds, axis=1), name="Label")
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), final_predictions], axis=1)
submission.to_csv("submission.csv", index=False)
submission.head()
Digit Recognizer
11,040,563
num_df_train = df_train.drop(categorical_features, axis=1)
num_df_test = df_test.drop(categorical_features, axis=1)
<categorify>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
Digit Recognizer
11,040,563
df_train_encoded = pd.concat([num_df_train, encoder_cols_train], axis=1)
df_test_encoded = pd.concat([num_df_test, encoder_cols_test], axis=1)
print("Train dataset shape:", df_train_encoded.shape)
print("Test dataset shape:", df_test_encoded.shape)
<sort_values>
(x_train1, y_train1), (x_test1, y_test1) = mnist.load_data()
train1 = np.concatenate([x_train1, x_test1], axis=0)
y_train1 = np.concatenate([y_train1, y_test1], axis=0)
Y_train1 = y_train1
X_train1 = train1.reshape(-1, 28 * 28)
Digit Recognizer
11,040,563
df_train_encoded.median().sort_values(ascending=False)<train_model>
X_train = X_train / 255.0
test = test / 255.0
X_train1 = X_train1 / 255.0
Digit Recognizer
11,040,563
X = df_train_encoded.drop(['price_doc_log'], axis=1)
y = df_train_encoded['price_doc_log']
print("X shape:", X.shape)
print("y shape:", y.shape)
<split>
X_train = np.concatenate((X_train.values, X_train1))
Y_train = np.concatenate((Y_train, Y_train1))
Digit Recognizer
11,040,563
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=2022)
X_test = df_test_encoded
<predict_on_test>
Y_train = to_categorical(Y_train, num_classes=10)
Digit Recognizer
11,040,563
tree = DecisionTreeRegressor(random_state=2022, max_depth=5, min_samples_split=20)
tree.fit(X_train, y_train)
tree_predictions_log = tree.predict(X_val)
tree_predictions = np.exp(tree_predictions_log)
<compute_test_metric>
random_seed = 2
Digit Recognizer
11,040,563
print('RMSLE:', np.sqrt(mean_squared_log_error(np.exp(y_val), tree_predictions)))<predict_on_test>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state=random_seed)
Digit Recognizer
11,040,563
predict = np.exp(tree.predict(X_test))
submission = pd.DataFrame({'id': id_test, 'price_doc': predict})
submission.head()
<save_to_csv>
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='Same', activation='relu', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(10, activation="softmax"))
model.summary()
Digit Recognizer
11,040,563
submission.to_csv('DecisionTree.csv', index=False)<prepare_x_and_y>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
Digit Recognizer
11,040,563
dmatrix_train = xgb.DMatrix(X_train, y_train)
dmatrix_val = xgb.DMatrix(X_val, y_val)
dmatrix_test = xgb.DMatrix(X_test)
<train_model>
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
11,040,563
xgb_params = {
    'eta': 0.05,
    'max_depth': 5,
    'subsample': 1.0,
    'colsample_bytree': 0.7,
    'objective': 'reg:squarederror',
    'eval_metric': 'rmse',
    'verbosity': 0
}
partial_model = xgb.train(xgb_params, dmatrix_train, num_boost_round=1000, evals=[(dmatrix_val, 'val')], early_stopping_rounds=20, verbose_eval=20)
num_boost_round = partial_model.best_iteration
<train_model>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
11,040,563
model = xgb.train(dict(xgb_params, verbose=1), dmatrix_train, num_boost_round=num_boost_round)<predict_on_test>
epochs = 50
batch_size = 32
Digit Recognizer
11,040,563
predict = np.exp(model.predict(dmatrix_val))
print('RMSLE:', np.sqrt(mean_squared_log_error(np.exp(y_val), predict)))
<predict_on_test>
Digit Recognizer
11,040,563
ylog_pred = model.predict(dmatrix_test)
y_pred = np.exp(ylog_pred)
submission = pd.DataFrame({'id': id_test, 'price_doc': y_pred})
submission.head()
<save_to_csv>
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=10,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    vertical_flip=False)
datagen.fit(X_train)
Digit Recognizer
11,040,563
submission.to_csv("XGB_new_clear_submission.csv", index=False )<prepare_x_and_y>
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,Y_val), verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction] )
Digit Recognizer
11,040,563
dmatrix_train = xgb.DMatrix(X_train, y_train)
dmatrix_test = xgb.DMatrix(X_test)
<init_hyperparams>
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
Digit Recognizer
11,040,563
xgb_params = {
    'eta': 0.05,
    'max_depth': 5,
    'subsample': 0.7,
    'colsample_bytree': 0.7,
    'objective': 'reg:squarederror',
    'eval_metric': 'rmse',
    'verbosity': 0
}
<compute_train_metric>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_submission.csv", index=False)
Digit Recognizer
9,740,221
cv_output = xgb.cv(xgb_params, dmatrix_train, num_boost_round=1000, early_stopping_rounds=20, verbose_eval=50, show_stdv=False)
num_boost_rounds = len(cv_output)
<train_model>
import matplotlib.pyplot as plt
Digit Recognizer
9,740,221
model = xgb.train(dict(xgb_params, verbose=1), dmatrix_train, num_boost_round=num_boost_rounds)<predict_on_test>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
Digit Recognizer
9,740,221
predict = np.exp(model.predict(dmatrix_test))
submission = pd.DataFrame({'id': id_test, 'price_doc': predict})
submission.head()
<save_to_csv>
sample_submission = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
Digit Recognizer
9,740,221
submission.to_csv('XGB_CV.csv', index=False)<split>
X_train = train.drop(['label'], axis=1)
y_train = train['label']
X_test = test
X_train = X_train / 255.0
X_test = X_test / 255.0
Digit Recognizer
9,740,221
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=2022)
X_test = df_test_encoded
<categorify>
X_train = X_train.values
X_test = X_test.values
test = test.values
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
test = test.reshape(test.shape[0], 28, 28, 1)
Digit Recognizer
9,740,221
pca = PCA(n_components=20).fit(X_train)
X_train_pca = pca.transform(X_train)
X_val_pca = pca.transform(X_val)
<prepare_x_and_y>
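# Quick check (illustrative addition, not from the original kernel): with only
# 20 components it is worth inspecting how much variance the projection keeps
# before trusting the reduced features downstream.
print(pca.explained_variance_ratio_.sum())  # fraction of variance retained
print(X_train_pca.shape)                    # (n_samples, 20)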
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
Digit Recognizer