kernel_id: int64 (24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
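The rows that follow flatten one record of this table per group of lines: kernel_id, then the prompt cell, the completion cell, and the competition name. A minimal sketch of inspecting such a table, assuming it has been exported to a pandas-readable file (kernel_cells.csv is a hypothetical name):

import pandas as pd

# Hypothetical export of the table described above; the file name is an assumption.
df = pd.read_csv("kernel_cells.csv")

# Expected columns: kernel_id (int64), prompt (str), completion (str), comp_name (str).
print(df.dtypes)
print(df["comp_name"].value_counts().head())

# Each prompt ends with a cell-type tag such as <load_from_csv> that labels the
# kind of cell the paired completion is expected to be.
row = df.iloc[0]
print(row["kernel_id"], row["comp_name"])
print(row["prompt"][-80:])        # tail of the prompt, including its tag
print(row["completion"][:200])    # head of the paired completion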
11,333,538
seed_everything(42) label_df = pd.read_csv(label_path) if 'fold' not in label_df.columns: skf = StratifiedKFold(n_splits=5, shuffle=True) label_df.loc[:, 'fold'] = 0 for fold_num,(train_index, val_index)in enumerate(skf.split(X=label_df.index, y=label_df.label.values)) : label_df.loc[label_df.iloc[val_index].index, 'fold'] = fold_num if do_predict: infer = pl.Trainer(gpus=1) oof_dict = {'image_id': [], 'label': [], 'fold': []} for fold_num in range(n_folds): val_df = label_df[label_df.fold == fold_num] test_dataset = TestDataset(img_dir, val_df, img_size=cfg.img_size) test_dataloader = DataLoader(test_dataset, batch_size=cfg.batch_size, num_workers=4, shuffle=False) state_dict = get_state_dict_from_checkpoint(log_dir, fold_num) model = LitTester(cfg.network, state_dict) pred = infer.test(model, test_dataloaders=test_dataloader, verbose=False)[0] oof_dict['image_id'].extend(val_df.image_id.values) oof_dict['label'].extend(pred['prob'].tolist()) oof_dict['fold'].extend([fold_num] * len(pred['prob'])) pred_df = pd.DataFrame(oof_dict) pred_df.to_csv('oof.csv', index=False) else: pred_df = pd.read_csv(os.path.join(log_dir, 'oof.csv'))<load_from_csv>
results = np.zeros(( test_X.shape[0],10)) for j in range(nets): results = results + model[j].predict(test_X) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("StratifiedKFold_10_batch100_double_val_loss.csv",index=False )
Digit Recognizer
11,333,538
if do_submit: sub = pd.read_csv('.. /input/cassava-leaf-disease-classification/sample_submission.csv') infer = pl.Trainer(gpus=1) test_dataset = TestDataset('.. /input/cassava-leaf-disease-classification/test_images', sub, img_size=cfg.img_size) test_dataloader = DataLoader(test_dataset, batch_size=cfg.batch_size, num_workers=4, shuffle=False) preds = [] for fold_num in range(n_folds): state_dict = get_state_dict_from_checkpoint(log_dir, fold_num) model = LitTester(cfg.network, state_dict) pred = infer.test(model, test_dataloaders=test_dataloader, verbose=False)[0] preds.append(pred['prob']) sub['label'] = np.argmax(np.mean(preds, axis=0), axis=1) sub.to_csv(os.path.join(os.getcwd() , 'submission.csv'), index=False )<sort_values>
show_test_digits(range(500,530))
Digit Recognizer
11,298,742
label_df = label_df.sort_values(by='image_id', ascending=1) pred_df = pred_df.sort_values(by='image_id', ascending=1) ids, labels = label_df.image_id.values, label_df.label.values preds = np.array([literal_eval(pred)if isinstance(pred, str)else pred for pred in pred_df.label.values]) print(f'total {len(ids)} images') print(f'prediction shape: {preds.shape}, label shape: {labels.shape}' )<define_variables>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv')
Digit Recognizer
11,298,742
s = labels psx = preds K = len(np.unique(s)) thresholds = [np.mean(psx[:,k][s == k])for k in range(K)] thresholds = np.asarray(thresholds) confident_joint = np.zeros(( K, K), dtype = int) for i, row in enumerate(psx): s_label = s[i] confident_bins = row >= thresholds - 1e-6 num_confident_bins = sum(confident_bins) if num_confident_bins == 1: confident_joint[s_label][np.argmax(confident_bins)] += 1 elif num_confident_bins > 1: confident_joint[s_label][np.argmax(row)] += 1 confident_joint = cleanlab.latent_estimation.calibrate_confident_joint( confident_joint, s) cleanlab.util.print_joint_matrix(confident_joint) MIN_NUM_PER_CLASS = 5 prune_count_matrix = cleanlab.pruning.keep_at_least_n_per_class( prune_count_matrix=confident_joint.T, n=MIN_NUM_PER_CLASS, ) s_counts = np.bincount(s) noise_masks_per_class = [] for k in range(K): noise_mask = np.zeros(len(psx), dtype=bool) psx_k = psx[:, k] if s_counts[k] > MIN_NUM_PER_CLASS: for j in range(K): if k != j: num2prune = prune_count_matrix[k][j] if num2prune > 0: margin = psx_k - psx[:, j] s_filter = s == j threshold = -np.partition( -margin[s_filter], num2prune - 1 )[num2prune - 1] noise_mask = noise_mask |(s_filter &(margin >= threshold)) noise_masks_per_class.append(noise_mask) else: noise_masks_per_class.append(np.zeros(len(s), dtype=bool)) label_errors_bool = np.stack(noise_masks_per_class ).any(axis=0) for i, pred_label in enumerate(psx.argmax(axis=1)) : if label_errors_bool[i] and np.all(pred_label == s[i]): label_errors_bool[i] = False label_errors_idx = np.arange(len(s)) [label_errors_bool] self_confidence = np.array( [np.mean(psx[i][s[i]])for i in label_errors_idx] ) margin = self_confidence - psx[label_errors_bool].max(axis=1) label_errors_idx = label_errors_idx[np.argsort(margin)]<feature_engineering>
X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test /= 255
Digit Recognizer
11,298,742
total_idx = np.arange(len(ids)) clean_idx = np.array([idx for idx in total_idx if idx not in label_errors_idx]) guesses = np.stack(noise_masks_per_class ).argmax(axis=0) guesses[clean_idx] = labels[clean_idx] clean_ids = ids[clean_idx] clean_labels = labels[clean_idx] clean_guesses = guesses[clean_idx] noisy_ids = ids[label_errors_idx] noisy_labels = labels[label_errors_idx] noisy_guesses = guesses[label_errors_idx] print(f'[clean ratio] \t {len(clean_idx)/ len(total_idx)* 100:.2f}%') print(f'[noise ratio] \t {len(noisy_ids)/ len(total_idx)* 100:.2f}%' )<prepare_output>
y_train = train.pop("label") y_train = y_train.values print(y_train.shape) y_train = to_categorical(y_train, num_classes=10) print(y_train.shape) print(y_train[0] )
Digit Recognizer
11,298,742
all_data = pd.DataFrame({'image_id': ids, 'given_label': labels, 'guess_label': guesses}) all_data['is_noisy'] =(all_data.given_label != all_data.guess_label) all_data['max_prob'] = preds.max(axis=1 )<define_variables>
import tensorflow as tf
Digit Recognizer
11,298,742
class_colors = np.array([' num2class = [f'{idx}-{elem}' for idx, elem in enumerate(num2class)]<install_modules>
def squeeze_excite_block(filters,input): se = tf.keras.layers.GlobalAveragePooling2D()(input) se = tf.keras.layers.Reshape(( 1, filters))(se) se = tf.keras.layers.Dense(filters//16, activation='relu' )(se) se = tf.keras.layers.Dense(filters, activation='sigmoid' )(se) se = tf.keras.layers.multiply([input, se]) return se
Digit Recognizer
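A minimal usage sketch for the squeeze_excite_block helper above, mirroring how the next cell applies it to a 32-channel feature map; the 28x28x1 input shape is an assumption for illustration and the helper must already be defined in scope.

import tensorflow as tf

# Illustrative input shape; any spatial size works as long as `filters` matches
# the channel count of the tensor passed into squeeze_excite_block.
inputs = tf.keras.Input(shape=(28, 28, 1))
x = tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu')(inputs)
x = squeeze_excite_block(32, x)   # re-weights the 32 channels of x
x = tf.keras.layers.GlobalAveragePooling2D()(x)
outputs = tf.keras.layers.Dense(10, activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.summary()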
11,298,742
!pip install -U -q ../input/resnest/resnest-0.0.5-py3-none-any.whl<import_modules>
def make_model() : s = tf.keras.Input(shape=X_train.shape[1:]) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(s) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.BatchNormalization()(x) x = squeeze_excite_block(32,x) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.BatchNormalization()(x) x = squeeze_excite_block(32,x) x = tf.keras.layers.AveragePooling2D(2 )(x) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.BatchNormalization()(x) x = squeeze_excite_block(32,x) x = tf.keras.layers.AveragePooling2D(2 )(x) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same' )(x) x = tf.keras.layers.BatchNormalization()(x) x = squeeze_excite_block(32,x) x = tf.keras.layers.AveragePooling2D(2 )(x) x = tf.keras.layers.concatenate([tf.keras.layers.GlobalMaxPooling2D()(x), tf.keras.layers.GlobalAveragePooling2D()(x)]) x = tf.keras.layers.Dense(10,activation='softmax',use_bias=False, kernel_regularizer=tf.keras.regularizers.l1(0.00025))(x) return tf.keras.Model(inputs=s, outputs=x )
Digit Recognizer
11,298,742
from albumentations import (HorizontalFlip, VerticalFlip, Transpose, RandomResizedCrop, Compose, Normalize, ShiftScaleRotate, CenterCrop, Resize) <import_modules>
model=make_model() model.compile(optimizer=optimizers.Adam(lr=0.0001), loss='categorical_crossentropy',metrics=['accuracy']) model.summary()
Digit Recognizer
11,298,742
import timm<init_hyperparams>
model.fit(x=X_train, y=y_train, batch_size=32, epochs=15 )
Digit Recognizer
11,298,742
CFG = { 'seed': 1337, 'img_size': 512, 'bs': 32, 'num_workers': 4, 'tta': 4, }<load_from_csv>
pred = model.predict(X_test,verbose=1) predictions = pred.argmax(axis=-1 )
Digit Recognizer
11,298,742
submission = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv') submission.head()<set_options>
sub = pd.read_csv('../input/digit-recognizer/sample_submission.csv') submission = sub['ImageId']
Digit Recognizer
11,298,742
def seed_everything(seed: int): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False<normalization>
pred = pd.DataFrame(data=predictions ,columns=["Label"]) DT = pd.merge(submission , pred, on=None, left_index= True, right_index=True) DT.head()
Digit Recognizer
11,298,742
def get_img(path): return cv2.imread(path)[:, :, ::-1]<define_variables>
DT.to_csv('submission.csv',index = False )
Digit Recognizer
11,869,267
seed_everything(CFG['seed'] )<categorify>
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D, BatchNormalization, AvgPool2D from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from tensorflow.keras.models import Sequential, load_model from tensorflow.keras.utils import to_categorical from sklearn.metrics import confusion_matrix from tensorflow.random import set_seed from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import seaborn as sns
Digit Recognizer
11,869,267
class CassavaDataset(Dataset): def __init__(self, df, data_root, transforms=None): super().__init__() self.df = df.reset_index(drop=True ).copy() self.transforms = transforms self.data_root = data_root def __len__(self): return self.df.shape[0] def __getitem__(self, index: int): img = get_img(f"{self.data_root}/{self.df.iloc[index]['image_id']}") if self.transforms: img = self.transforms(image=img)['image'] return img<feature_engineering>
df_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") df_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
11,869,267
class CassvaClassifierV1(nn.Module): def __init__(self, n_classes: int = 5, dropout: float =.5): super().__init__() self.backbone = resnest50_fast_4s2x40d(pretrained=False) self.pool = nn.AdaptiveAvgPool2d(1) self.dropout = nn.Dropout(p=dropout) self.emb_size: int = 2048 self.classifier = nn.Linear(self.emb_size, n_classes) def cnn_feature_extractor(self, x): x = self.backbone.conv1(x) x = self.backbone.bn1(x) x = self.backbone.relu(x) x = self.backbone.maxpool(x) x1 = self.backbone.layer1(x) x2 = self.backbone.layer2(x1) x3 = self.backbone.layer3(x2) x4 = self.backbone.layer4(x3) return x4 def forward(self, x): x = self.cnn_feature_extractor(x) x = self.pool(x) x = self.dropout(x) x = x.view(-1, self.emb_size) x = self.classifier(x) return x<choose_model_class>
np.random.seed(42) set_seed(42 )
Digit Recognizer
11,869,267
class CassvaClassifierV2(nn.Module): def __init__(self, n_classes: int = 5): super().__init__() self.model = timm.create_model('tf_efficientnet_b4_ns', pretrained=False) self.model.classifier = nn.Linear(self.model.classifier.in_features, n_classes) def forward(self, x): return self.model(x )<choose_model_class>
def doSubmission(y_pred): test_Id = np.arange(1, y_pred.size+1, dtype=int) pred_dict = {"ImageId": test_Id, "Label": y_pred} df = pd.DataFrame(pred_dict) df.to_csv("sample_submission.csv", index=False, index_label=False)
Digit Recognizer
11,869,267
class CassvaClassifierV3(nn.Module): def __init__(self, n_classes: int = 5): super().__init__() self.model = timm.create_model('tf_efficientnet_b3_ns', pretrained=False) self.model.classifier = nn.Linear(self.model.classifier.in_features, n_classes) def forward(self, x): return self.model(x )<predict_on_test>
y = df_train.label.to_numpy() X = df_train.drop(columns=["label"] ).to_numpy() X = X / 255.0
Digit Recognizer
11,869,267
def inference_one_epoch(model, data_loader, device): model.eval() image_preds_all = [] for imgs in data_loader: image_preds = model(imgs.to(device ).float()) image_preds_all += [torch.softmax(image_preds, 1 ).cpu().numpy() ] return np.concatenate(image_preds_all, axis=0 )<set_options>
X_totrain = X.reshape(X.shape[0], 28, 28, 1 )
Digit Recognizer
11,869,267
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )<define_variables>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y) X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) X_test = X_test.reshape(X_test.shape[0], 28, 28, 1 )
Digit Recognizer
11,869,267
def get_models(model_paths, recipe: str): models = [] for model_path in model_paths: model_name: str = model_path.split('/')[-1] n_folds: int = int([x[-1] for x in model_name.split('-')if x.startswith('fold')][0]) n_epochs: int = int([x[6:] for x in model_name.split('-')if x.startswith('epochs')][0]) if not model_name.startswith(recipe): continue if model_name.startswith('resnest50_fast_4s2x40d-fmix-cutmix-'): model = CassvaClassifierV1().to(device) elif model_name.startswith('resnest50_fast_4s2x40d-cutmix-fmix-'): model = CassvaClassifierV1().to(device) elif model_name.startswith('resnest50_fast_4s2x40d-fcl-'): model = CassvaClassifierV1().to(device) elif model_name.startswith('effnetb4-fcl-'): model = CassvaClassifierV2().to(device) elif model_name.startswith('effnetb3-cutmix-scce-'): model = CassvaClassifierV3().to(device) elif model_name.startswith('effnetb4-cutmix-fmix-'): model = CassvaClassifierV2().to(device) elif model_name.startswith('effnetb4-pseudo-cutmix-fmix-'): model = CassvaClassifierV2().to(device) else: continue print(f'[+] load {model_name}') model.load_state_dict(torch.load(model_path, map_location=device)) model.eval() models.append(model) return models<create_dataframe>
y_cat = to_categorical(y, 10) y_train_cat = to_categorical(y_train, 10) y_test_cat = to_categorical(y_test, 10 )
Digit Recognizer
11,869,267
test = pd.DataFrame() test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/')) test_ds = CassavaDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms()) tst_loader = torch.utils.data.DataLoader(test_ds, batch_size=CFG['bs'], num_workers=CFG['num_workers'], shuffle=False, pin_memory=True)<define_variables>
test = df_test.to_numpy(np.float64) test = test.reshape(test.shape[0], 28, 28, 1) test /= 255.0
Digit Recognizer
11,869,267
recipe_list = [ 'effnetb4-cutmix-fmix-', 'effnetb4-fcl-', 'resnest50_fast_4s2x40d-cutmix-fmix-', 'resnest50_fast_4s2x40d-fcl-', 'resnest50_fast_4s2x40d-fmix-cutmix-', ]<feature_engineering>
def convNeuralNetwork() : cnn = Sequential() cnn.add(Conv2D(filters=6, kernel_size=(5, 5), strides=(1, 1), input_shape=(28, 28, 1), activation="tanh", padding="same")) cnn.add(BatchNormalization()) cnn.add(AvgPool2D(pool_size=(2, 2), strides=(1, 1), padding="same")) cnn.add(Dropout(0.2)) cnn.add(Conv2D(16, kernel_size=(5, 5), strides=(1, 1), activation="tanh", padding="same")) cnn.add(BatchNormalization()) cnn.add(AvgPool2D(pool_size=(2, 2), strides=(2, 2), padding="same")) cnn.add(Dropout(0.2)) cnn.add(Conv2D(120, kernel_size=(5, 5), strides=(1, 1), activation="tanh", padding="same")) cnn.add(AvgPool2D(pool_size=(2, 2), strides=(2, 2), padding="same")) cnn.add(Dropout(0.2)) cnn.add(Flatten()) cnn.add(Dense(units=84, activation="tanh")) cnn.add(Dropout(0.2)) cnn.add(Dense(units=10, activation="softmax")) cnn.compile(optimizer="SGD", loss="categorical_crossentropy", metrics=["accuracy"]) return cnn
Digit Recognizer
11,869,267
preds = [] with torch.no_grad() : for recipe in recipe_list: models = get_models(model_paths, recipe) preds_per_recipe = np.mean( [ np.mean([inference_one_epoch(model, tst_loader, device)for _ in range(CFG['tta'])], axis=0) for model in models ], axis=0 ) preds.append(preds_per_recipe) del models<define_variables>
early_stopping = EarlyStopping(monitor="val_loss", patience=4, verbose=1, restore_best_weights=True) lr_reducer = ReduceLROnPlateau(monitor="val_loss", factor=0.1, patience=3, verbose=1) cnn = convNeuralNetwork() cnn_hist = cnn.fit(X_train, y_train_cat, validation_data=(X_test, y_test_cat), epochs=100, batch_size=16, callbacks=[early_stopping, lr_reducer] )
Digit Recognizer
11,869,267
tst_preds = weights[0] * preds[0] + weights[1] * preds[1] + weights[2] * preds[2] + weights[3] * preds[3] + weights[4] * preds[4] <feature_engineering>
early_stopping = EarlyStopping(monitor="loss", patience=4, restore_best_weights=True, verbose=1) model_checkpoint = ModelCheckpoint(filepath="./", monitor="loss", verbose=1, save_best_only=True, save_weights_only=True) lr_reducer = ReduceLROnPlateau(monitor="loss", factor=0.1, patience=3, verbose=1) model= convNeuralNetwork() model_hist = model.fit(X_totrain, y_cat, epochs=100, batch_size=16, callbacks=[early_stopping, lr_reducer, model_checkpoint] )
Digit Recognizer
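The weighted blend above relies on a weights vector that is not defined in the cells shown; a minimal sketch of an equal-weight fallback over the five recipe predictions (the values are an assumption, not the kernel's tuned weights):

import numpy as np

# Assumed fallback: uniform weights over the recipe-level predictions collected in `preds`.
weights = [1.0 / len(preds)] * len(preds)
tst_preds = np.average(np.stack(preds, axis=0), axis=0, weights=weights)
final_labels = tst_preds.argmax(axis=1)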
11,869,267
<save_to_csv><EOS>
y_pred = model.predict(test ).argmax(1) doSubmission(y_pred )
Digit Recognizer
11,792,160
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<install_modules>
mnist_test = pd.read_csv(".. /input/mnist-in-csv/mnist_test.csv") mnist_train = pd.read_csv(".. /input/mnist-in-csv/mnist_train.csv" )
Digit Recognizer
11,792,160
!pip install ../input/validators !cp -R ../input/vit-keras/vit_keras ./<import_modules>
sample_submission = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv") train = pd.read_csv(".. /input/digit-recognizer/train.csv" )
Digit Recognizer
11,792,160
from tensorflow.keras.layers import Flatten, GlobalAveragePooling2D, BatchNormalization, Activation print(tf.__version__)<find_best_params>
test['dataset'] = 'test'
Digit Recognizer
11,792,160
try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print(f'Running on TPU {tpu.master() }') except ValueError: tpu = None if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) else: strategy = tf.distribute.get_strategy() AUTO = tf.data.experimental.AUTOTUNE REPLICAS = strategy.num_replicas_in_sync print(f'REPLICAS: {REPLICAS}' )<define_variables>
train['dataset'] = 'train'
Digit Recognizer
11,792,160
SEED = 100 DEBUG = False WANDB = False VALIDATION_SIZE = 0.2 BATCH_SIZE = 8 *REPLICAS LEARNING_RATE = 3e-5 * REPLICAS EPOCHS=40 MODEL_NAME = "VitL16" N_FOLDS = 5 TTA = False N_TTA = 3 T_1 = 0.2 T_2 = 1.2 SMOOTH_FRACTION = 0.01 N_ITER = 5 HEIGHT = 512 WIDTH = 512 HEIGHT_RS = 512 WIDTH_RS = 512 CHANNELS = 3 N_CLASSES = 5 MODEL_SAVE_PATH = "" if DEBUG: EPOCHS = 2 <normalization>
dataset = pd.concat([train.drop('label', axis=1), test] ).reset_index()
Digit Recognizer
11,792,160
def transform_rotation(image, height, rotation): DIM = height XDIM = DIM%2 rotation = rotation * tf.random.uniform([1],dtype='float32') rotation = math.pi * rotation / 180. c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = tf.reshape(tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3]) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(rotation_matrix,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image, tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]) def transform_shear(image, height, shear): DIM = height XDIM = DIM%2 shear = shear * tf.random.uniform([1],dtype='float32') shear = math.pi * shear / 180. one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = tf.reshape(tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3]) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(shear_matrix,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image, tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]) def data_augment_cutout(image, min_mask_size=(int(HEIGHT *.1), int(HEIGHT *.1)) , max_mask_size=(int(HEIGHT *.125), int(HEIGHT *.125))): p_cutout = tf.random.uniform([], 0, 1.0, dtype=tf.float32) if p_cutout >.85: n_cutout = tf.random.uniform([], 10, 15, dtype=tf.int32) image = random_cutout(image, HEIGHT, WIDTH, min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout) elif p_cutout >.6: n_cutout = tf.random.uniform([], 5, 10, dtype=tf.int32) image = random_cutout(image, HEIGHT, WIDTH, min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout) elif p_cutout >.25: n_cutout = tf.random.uniform([], 2, 5, dtype=tf.int32) image = random_cutout(image, HEIGHT, WIDTH, min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout) else: image = random_cutout(image, HEIGHT, WIDTH, min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=1) return image def random_cutout(image, height, width, channels=3, min_mask_size=(10, 10), max_mask_size=(80, 80), k=1): assert height > min_mask_size[0] assert width > min_mask_size[1] assert height > max_mask_size[0] assert width > max_mask_size[1] for i in range(k): mask_height = tf.random.uniform(shape=[], minval=min_mask_size[0], maxval=max_mask_size[0], dtype=tf.int32) mask_width = tf.random.uniform(shape=[], minval=min_mask_size[1], maxval=max_mask_size[1], dtype=tf.int32) pad_h = height - mask_height pad_top = tf.random.uniform(shape=[], minval=0, maxval=pad_h, dtype=tf.int32) pad_bottom = pad_h - pad_top pad_w = width - mask_width pad_left = tf.random.uniform(shape=[], minval=0, maxval=pad_w, dtype=tf.int32) pad_right = pad_w - pad_left cutout_area = tf.zeros(shape=[mask_height, mask_width, channels], dtype=tf.uint8) cutout_mask = tf.pad([cutout_area], [[0,0],[pad_top, pad_bottom], [pad_left, pad_right], [0,0]], constant_values=1) cutout_mask = tf.squeeze(cutout_mask, axis=0) image = tf.multiply(tf.cast(image, tf.float32), tf.cast(cutout_mask, 
tf.float32)) return image<define_search_space>
mnist = pd.concat([mnist_train, mnist_test] ).reset_index(drop=True) labels = mnist['label'].values mnist.drop('label', axis=1, inplace=True) mnist.columns = cols
Digit Recognizer
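The concatenation above assigns mnist.columns = cols without showing where cols comes from; a minimal sketch under the assumption that it holds the 784 pixel column names used by the digit-recognizer files, so the external MNIST dump lines up with the competition data:

# Assumed definition: the digit-recognizer files name their pixel columns pixel0 ... pixel783.
cols = [f"pixel{i}" for i in range(784)]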
11,792,160
def data_augment(image, label): p_rotation = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_pixel_1 = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_pixel_2 = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_pixel_3 = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_shear = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32) p_cutout = tf.random.uniform([], 0, 1.0, dtype=tf.float32) flip_left = tf.random.uniform([], 0, 1.0, dtype=tf.float32) flip_right = tf.random.uniform([], 0, 1.0, dtype=tf.float32) if p_rotation >.2: if p_rotation >.6: image = transform_rotation(image, HEIGHT, rotation=10.) else: image = transform_rotation(image, HEIGHT, rotation=-10.) if flip_left>0.5: image = tf.image.random_flip_left_right(image) if flip_right>0.5 : image = tf.image.random_flip_up_down(image) image = tf.image.resize(image, size=[HEIGHT, WIDTH]) return image, label<define_variables>
idx_mnist = mnist.sort_values(by=list(mnist.columns)).index dataset_from = dataset.sort_values(by=list(mnist.columns)) ['dataset'].values original_idx = dataset.sort_values(by=list(mnist.columns)) ['index'].values
Digit Recognizer
11,792,160
copyfile(src = ".. /input/bitempered-logistic-loss-tensorflow-v2/bi_tempered_loss.py", dst = ".. /working/loss.py") <compute_train_metric>
for i in range(len(idx_mnist)) : if dataset_from[i] == 'test': sample_submission.loc[original_idx[i], 'Label'] = labels[idx_mnist[i]]
Digit Recognizer
11,792,160
<choose_model_class><EOS>
sample_submission.to_csv('submission.csv', index=False )
Digit Recognizer
11,762,664
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<define_variables>
%matplotlib inline np.random.seed(2) sns.set(style='white', context='notebook', palette='deep' )
Digit Recognizer
11,762,664
files_path = '.. /input/cassava-leaf-disease-classification/test_images' TEST_FILENAMES = tf.io.gfile.glob('.. /input/cassava-leaf-disease-classification/test_tfrecords/*') model_path_list = [ '.. /input/cassava-leaf-vit-models/ViTB16_best_fold_0__v3_.h5', '.. /input/cassava-leaf-vit-models/ViTB16_best_fold_0_v4_.h5', '.. /input/cassava-leaf-vit-models/ViTB16_best_fold_1_v4_.h5', '.. /input/cassava-leaf-vit-models/ViTB16_best_fold_2_v4_.h5', '.. /input/cassava-leaf-vit-models/ViTB16_best_fold_3_v4_.h5' ] test_preds = np.zeros(( len(os.listdir(files_path)) , N_CLASSES)) for idx, model_path in enumerate(model_path_list): print("Model: ", model_path) tf.keras.backend.clear_session() model = get_vit_model('imagenet21k') model.load_weights(model_path) if TTA: for step in range(N_TTA): print(f"TTA step: {step+1}/{N_TTA}") test_ds = get_dataset(TEST_FILENAMES, labeled=False, ordered=True, augment = True) x_test = test_ds.map(lambda image, image_name: image) test_preds += model.predict(x_test) else: test_ds = get_dataset(TEST_FILENAMES, labeled=False, ordered=True) x_test = test_ds.map(lambda image, image_name: image) test_preds += model.predict(x_test )<save_to_csv>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
11,762,664
test_preds = np.argmax(test_preds, axis=-1) image_names = [img_name.numpy().decode('utf-8')for img, img_name in iter(test_ds.unbatch())] submission = pd.DataFrame({'image_id': image_names, 'label': test_preds}) submission.to_csv('submission.csv', index=False) display(submission.head() )<train_model>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
11,762,664
print(f'Training path:{__training_path} Test path:{__test_path}' )<install_modules>
Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
11,762,664
!{sys.executable} -m pip install --upgrade scikit-learn=="0.24.2"<import_modules>
random_seed = 2
Digit Recognizer
11,762,664
import sklearn; sklearn.show_versions()<load_from_csv>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed )
Digit Recognizer
11,762,664
def __load__data(__training_path, __test_path, concat=False): __train_dataset = pd.read_csv(__training_path, delimiter=',') __test_dataset = pd.read_csv(__test_path, delimiter=',') return __train_dataset, __test_dataset __train_dataset, __test_dataset = __load__data(__training_path, __test_path, concat=True) __train_dataset.head()<define_variables>
nets = 5 model = [0] *nets for j in range(nets): model[j] = Sequential() model[j].add(Conv2D(32, kernel_size = 3, padding='same', activation='relu', input_shape =(28, 28, 1))) model[j].add(BatchNormalization()) model[j].add(Conv2D(32, kernel_size = 3, padding='same', activation='relu')) model[j].add(BatchNormalization()) model[j].add(MaxPooling2D(pool_size=(2, 2))) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64, kernel_size = 3, padding='same', activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64, kernel_size = 5, padding='same', activation='relu')) model[j].add(BatchNormalization()) model[j].add(MaxPooling2D(pool_size=(2, 2))) model[j].add(Dropout(0.25)) model[j].add(Conv2D(128, kernel_size = 4, padding='same', activation='relu')) model[j].add(BatchNormalization()) model[j].add(MaxPooling2D(pool_size=(2, 2))) model[j].add(Flatten()) model[j].add(Dense(128, activation='relu')) model[j].add(Dropout(0.7)) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
11,762,664
__test_dataset_submission_columns = __test_dataset['Id']<drop_column>
Digit Recognizer
11,762,664
__train_dataset.drop(['Descript', 'Resolution'], axis=1, inplace=True )<feature_engineering>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.3, shear_range = 0.2, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
11,762,664
_DATE_COLUMNS = ['Dates'] for _col in _DATE_COLUMNS: train_date_col = pd.to_datetime(__train_dataset[_col], errors='coerce') __train_dataset["year"] = train_date_col.dt.year __train_dataset["month"] = train_date_col.dt.month __train_dataset["day"] = train_date_col.dt.day __train_dataset.drop(_col, axis=1, inplace=True) test_date_col = pd.to_datetime(__test_dataset[_col], errors='coerce') __test_dataset["year"] = test_date_col.dt.year __test_dataset["month"] = test_date_col.dt.month __test_dataset["day"] = test_date_col.dt.day __test_dataset.drop(_col, axis=1, inplace=True )<categorify>
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x) history = [0] * nets epochs = 25 for j in range(nets): X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size = 0.1) history[j] = model[j].fit_generator(datagen.flow(X_train2,Y_train2, batch_size=32), epochs = epochs, steps_per_epoch = X_train2.shape[0]//32, verbose = 2, validation_data =(X_val2,Y_val2), callbacks=[annealer]) print("CNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format( j+1,epochs,max(history[j].history['accuracy']),max(history[j].history['val_accuracy'])) )
Digit Recognizer
11,762,664
__feature_train = __train_dataset.drop(['Category'], axis=1) __target_train =__train_dataset['Category'] __feature_test = __test_dataset.drop(['Id'], axis=1) _CATEGORICAL_COLS = ['Address', 'PdDistrict', 'DayOfWeek'] __col_indices = [__feature_train.columns.get_loc(col)for col in _CATEGORICAL_COLS] _ohe = OneHotEncoder(handle_unknown='ignore') __ct = ColumnTransformer([("ohe", _ohe, __col_indices)], remainder = 'passthrough') __feature_train = __ct.fit_transform(__feature_train) __feature_test = __ct.transform(__feature_test )<train_on_grid>
results = np.zeros(( test.shape[0],10)) for j in range(nets): results = results + model[j].predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("MNIST-CNN-ENSEMBLE.csv",index=False )
Digit Recognizer
11,541,596
__model = CatBoostClassifier() __model.fit(__feature_train, __target_train) __y_pred = __model.predict_proba(__feature_test )<create_dataframe>
df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') df2 = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') print(df.shape) print(df2.shape )
Digit Recognizer
11,541,596
submission = pd.DataFrame(columns=['Id'], data=__test_dataset_submission_columns) submission = pd.concat([submission, pd.DataFrame(__y_pred, columns=["ARSON", "ASSAULT", "BAD CHECKS", "BRIBERY", "BURGLARY", "DISORDERLY CONDUCT", "DRIVING UNDER THE INFLUENCE", "DRUG/NARCOTIC", "DRUNKENNESS", "EMBEZZLEMENT", "EXTORTION", "FAMILY OFFENSES", "FORGERY/COUNTERFEITING", "FRAUD", "GAMBLING", "KIDNAPPING", "LARCENY/THEFT", "LIQUOR LAWS", "LOITERING", "MISSING PERSON", "NON-CRIMINAL", "OTHER OFFENSES", "PORNOGRAPHY/OBSCENE MAT", "PROSTITUTION", "RECOVERED VEHICLE", "ROBBERY", "RUNAWAY", "SECONDARY CODES", "SEX OFFENSES FORCIBLE", "SEX OFFENSES NON FORCIBLE", "STOLEN PROPERTY", "SUICIDE", "SUSPICIOUS OCC", "TREA", "TRESPASS", "VANDALISM", "VEHICLE THEFT", "WARRANTS", "WEAPON LAWS"])], axis=1) submission.head()<save_to_csv>
x = np.array(df.drop('label',axis=1)) /255 y = np.array(df.label) test = np.array(df2)/255 enc = OneHotEncoder(sparse=False) y= y.reshape(( -1,1)) y = enc.fit_transform(y) print(x.shape) print(y.shape) print(test.shape )
Digit Recognizer
11,541,596
submission.to_csv("kaggle_submission.csv", index=False )<set_options>
x_train,x_test,y_train,y_test = tts(x_2d,y,test_size = 0.15, random_state=42) print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape )
Digit Recognizer
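The split above uses x_2d, and later cells use test_2d, neither of which is defined in the cells shown; a minimal sketch of the assumed reshape from flat 784-pixel rows into the (28, 28, 1) images the Conv2D stack below expects:

# Assumed definitions: reshape the flat pixel arrays built from df and df2 earlier.
x_2d = x.reshape(-1, 28, 28, 1)
test_2d = test.reshape(-1, 28, 28, 1)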
11,541,596
%matplotlib inline<load_from_csv>
import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from keras import applications from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import LearningRateScheduler from keras.callbacks import ReduceLROnPlateau
Digit Recognizer
11,541,596
train = pd.read_csv('/kaggle/input/sf-crime/train.csv.zip', parse_dates= ['Dates']) test = pd.read_csv('/kaggle/input/sf-crime/test.csv.zip', parse_dates= ['Dates'], index_col = 'Id' )<concatenate>
model = Sequential() model.add(Conv2D(32, kernel_size = 3, activation='relu', input_shape =(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size = 3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Conv2D(64, kernel_size = 3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size = 3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Conv2D(128, kernel_size = 4, activation='relu')) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.summary()
Digit Recognizer
11,541,596
x = list(test) t_data= train[x] join = pd.concat([t_data, test]) merge = join.copy() <count_missing_values>
callbacks = [ keras.callbacks.EarlyStopping( monitor='val_loss', min_delta=1e-5, patience=15, verbose=1) ]
Digit Recognizer
11,541,596
t= train['Category'] merge.isnull().sum()<categorify>
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) history = model.fit(x_2d,y,batch_size=64,epochs=600,validation_data=(x_test,y_test))
Digit Recognizer
11,541,596
LB = LabelEncoder() tar = LB.fit_transform(t) print(LB.classes_ )<feature_engineering>
scores = model.evaluate(x_test, y_test, verbose = 10) print(scores )
Digit Recognizer
11,541,596
date = pd.to_datetime(join['Dates']) merge['Date'] = date.dt.date merge['Year'] = date.dt.year merge['Month'] = date.dt.month merge['Day'] = date.dt.day merge['Hour'] = date.dt.hour merge.drop('Dates', axis = 1, inplace = True )<groupby>
predictions=model.predict(test_2d) pre=predictions.argmax(axis=-1 )
Digit Recognizer
11,541,596
<categorify><EOS>
submission = pd.Series(pre,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),submission],axis = 1) submission.to_csv("final_submission_lenet5.csv",index=False) submission.head()
Digit Recognizer
11,054,002
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<drop_column>
np.random.seed(0) sns.set(style='white', context='notebook', palette='deep') for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
Digit Recognizer
11,054,002
merge.drop('PdDistrict', axis = 1, inplace = True )<categorify>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
Digit Recognizer
11,054,002
merge['DayWeek'] = lb.fit_transform(merge['DayOfWeek']) merge[['DayOfWeek', 'DayWeek']].head(10 )<drop_column>
tmp = train["label"] X_train, X_test, y_train, y_test = train_test_split(train.drop(labels=["label"],axis=1), tmp, test_size = 0.01, random_state = 0 )
Digit Recognizer
11,054,002
merge.drop('DayOfWeek', axis = 1, inplace = True )<feature_engineering>
y_train = to_categorical(y_train, num_classes = 10) y_test = to_categorical(y_test, num_classes = 10) plt.imshow(X_train[0][:,:,0],cmap='gray' )
Digit Recognizer
11,054,002
merge['Block'] = merge['Address'].str.contains('block', case = False) merge['ST'] = merge['Address'].str.contains('ST', case = False) merge.drop('Address', axis = 1, inplace = True )<feature_engineering>
epochs = 30 batchsize = 16 steps_per_epoch = X_train.shape[0] / batchsize learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience=3, factor=0.5, min_lr=0.00001) dataset = ImageDataGenerator() dataset.fit(X_train )
Digit Recognizer
11,054,002
medX= merge[merge['X'] < -120.5]['X'].median() medY = merge[merge['Y'] < 90]['Y'].median() merge.loc[merge['X'] >= -120.5, 'X'] = medX merge.loc[merge['Y'] >= 90, 'Y'] = medY<feature_engineering>
Digit Recognizer
11,054,002
merge['X+Y'] = merge['X'] + merge['Y'] merge['X-Y'] = merge['X'] - merge['Y']<drop_column>
Digit Recognizer
11,054,002
merge.drop('Date', axis = 1, inplace = True )<split>
Digit Recognizer
11,054,002
data_train = merge[:train.shape[0]] data_test = merge[train.shape[0]:]<train_model>
Digit Recognizer
11,054,002
trains = lg.Dataset(data_train, label = tar, categorical_feature=['PdDis', 'DayWeek']) params = { 'boosting':'gbdt', 'objective':'multiclass', 'num_class':39, 'max_delta_step':0.9, 'min_data_in_leaf': 20, 'learning_rate': 0.4, 'max_bin': 480, 'num_leaves': 45, 'verbose' : 1 } bst = lg.train(params, trains, 120 )<predict_on_test>
Digit Recognizer
11,054,002
prediction = bst.predict(data_test )<save_to_csv>
Digit Recognizer
11,054,002
result1 = pd.DataFrame(prediction, columns = LB.inverse_transform(np.linspace(0, 38, 39, dtype='int16')) ,index=data_test.index) result1.head() result1.to_csv('final_submission', index_label = 'Id' )<set_options>
Digit Recognizer
11,054,002
pd.set_option('max_colwidth', 40 )<feature_engineering>
model = Sequential() model.add(Conv2D(32,kernel_size=3,padding='same',activation='relu', input_shape=(28,28,1))) model.add(Conv2D(32,kernel_size=3,padding='same',activation='relu', input_shape=(28,28,1))) model.add(MaxPool2D()) model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Conv2D(64,kernel_size=3,padding='same',activation='relu', input_shape=(28,28,1))) model.add(Conv2D(64,kernel_size=3,padding='same',activation='relu', input_shape=(28,28,1))) model.add(MaxPool2D()) model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Dense(10, activation='softmax')) model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
11,054,002
MAX_LEN = 96 PATH = '.. /input/tf-roberta/' tokenizer = tokenizers.ByteLevelBPETokenizer( vocab_file=PATH+'vocab-roberta-base.json', merges_file=PATH+'merges-roberta-base.txt', lowercase=True, add_prefix_space=True ) EPOCHS = 3 BATCH_SIZE = 32 PAD_ID = 1 SEED = 88888 LABEL_SMOOTHING = 0.1 tf.random.set_seed(SEED) np.random.seed(SEED) sentiment_id = {'positive': 1313, 'negative': 2430, 'neutral': 7974} train = pd.read_csv('.. /input/tweet-sentiment-extraction/train.csv' ).fillna('') for k in range(train.shape[0]): train.loc[k,'text'] = re.sub(r"(http|https|ftp|ftps)\:\/\/[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(\/\S*)?", " ", train.loc[k,'text']) train.loc[k,'text'] = re.sub(r'https?://\S+', ' ', train.loc[k,'text']) train.loc[k,'text'] = re.sub(r"(www.[a-z.\/0-9]*)", " ", train.loc[k,'text']) train.loc[k,'text'] = re.sub(' +', ' ', train.loc[k,'text']) train.loc[k,'selected_text'] = re.sub(r"(http|https|ftp|ftps)\:\/\/[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(\/\S*)?", " ", train.loc[k,'selected_text']) train.loc[k,'selected_text'] = re.sub(r'https?://\S+', ' ', train.loc[k,'selected_text']) train.loc[k,'selected_text'] = re.sub(r"(www.[a-z.\/0-9]*)", " ", train.loc[k,'selected_text']) train.loc[k,'selected_text'] = re.sub(' +', ' ', train.loc[k,'selected_text']) train.head()<feature_engineering>
print("-------------------------------------------------------------------") print('my model') history = model.fit_generator(augmented_dataset.flow(X_train,y_train,batch_size=batchsize), epochs=epochs, validation_data=(X_test,y_test), steps_per_epoch=X_train.shape[0]//batchsize, callbacks=[learning_rate_reduction]) print("OuO") fig, ax = plt.subplots(2,1) ax[0].plot(history.history['loss'], color='b', label="Training loss") legend = ax[0].legend(loc='best', shadow=True) ax[1].plot(history.history['accuracy'], color='b', label="Training accuracy") legend = ax[1].legend(loc='best', shadow=True )
Digit Recognizer
11,054,002
test = pd.read_csv('.. /input/tweet-sentiment-extraction/test.csv' ).fillna('') for k in range(test.shape[0]): test.loc[k,'text'] = re.sub(r"(http|https|ftp|ftps)\:\/\/[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(\/\S*)?", " ", test.loc[k,'text']) test.loc[k,'text'] = re.sub(r'https?://\S+', ' ', test.loc[k,'text']) test.loc[k,'text'] = re.sub(r"(www.[a-z.\/0-9]*)", " ", test.loc[k,'text']) test.loc[k,'text'] = re.sub(' +', ' ', test.loc[k,'text']) ct = test.shape[0] input_ids_t = np.ones(( ct,MAX_LEN),dtype='int32') attention_mask_t = np.zeros(( ct,MAX_LEN),dtype='int32') token_type_ids_t = np.zeros(( ct,MAX_LEN),dtype='int32') for k in range(test.shape[0]): text1 = " "+" ".join(test.loc[k,'text'].split()) enc = tokenizer.encode(text1) s_tok = sentiment_id[test.loc[k,'sentiment']] input_ids_t[k,:len(enc.ids)+3] = [0, s_tok] + enc.ids + [2] attention_mask_t[k,:len(enc.ids)+3] = 1<init_hyperparams>
predictions = model.predict(test) pre_res = np.argmax(predictions,axis = 1) res = pd.Series(pre_res,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),res],axis = 1) submission.to_csv("mnist_submission.csv",index=False )
Digit Recognizer
10,881,722
Dropout_new = 0.15 n_split = 5 lr = 3e-5<train_model>
train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv") train_data.head()
Digit Recognizer
10,881,722
def save_weights(model, dst_fn): weights = model.get_weights() with open(dst_fn, 'wb')as f: pickle.dump(weights, f) def load_weights(model, weight_fn): with open(weight_fn, 'rb')as f: weights = pickle.load(f) model.set_weights(weights) return model def loss_fn(y_true, y_pred): ll = tf.shape(y_pred)[1] y_true = y_true[:, :ll] loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=LABEL_SMOOTHING) loss = tf.reduce_mean(loss) return loss def build_model() : ids = tf.keras.layers.Input(( MAX_LEN,), dtype=tf.int32) att = tf.keras.layers.Input(( MAX_LEN,), dtype=tf.int32) tok = tf.keras.layers.Input(( MAX_LEN,), dtype=tf.int32) padding = tf.cast(tf.equal(ids, PAD_ID), tf.int32) lens = MAX_LEN - tf.reduce_sum(padding, -1) max_len = tf.reduce_max(lens) ids_ = ids[:, :max_len] att_ = att[:, :max_len] tok_ = tok[:, :max_len] config = RobertaConfig.from_pretrained(PATH+'config-roberta-base.json') bert_model = TFRobertaModel.from_pretrained(PATH+'pretrained-roberta-base.h5',config=config) x = bert_model(ids_,attention_mask=att_,token_type_ids=tok_) x1 = tf.keras.layers.Dropout(Dropout_new )(x[0]) x1 = tf.keras.layers.Conv1D(768, 2,padding='same' )(x1) x1 = tf.keras.layers.LeakyReLU()(x1) x1 = tf.keras.layers.Conv1D(64, 2,padding='same' )(x1) x1 = tf.keras.layers.Dense(1 )(x1) x1 = tf.keras.layers.Flatten()(x1) x1 = tf.keras.layers.Activation('softmax' )(x1) x2 = tf.keras.layers.Dropout(Dropout_new )(x[0]) x2 = tf.keras.layers.Conv1D(768, 2,padding='same' )(x2) x2 = tf.keras.layers.LeakyReLU()(x2) x2 = tf.keras.layers.Conv1D(64, 2, padding='same' )(x2) x2 = tf.keras.layers.Dense(1 )(x2) x2 = tf.keras.layers.Flatten()(x2) x2 = tf.keras.layers.Activation('softmax' )(x2) model = tf.keras.models.Model(inputs=[ids, att, tok], outputs=[x1,x2]) optimizer = tf.keras.optimizers.Adam(learning_rate=lr) model.compile(loss=loss_fn, optimizer=optimizer) x1_padded = tf.pad(x1, [[0, 0], [0, MAX_LEN - max_len]], constant_values=0.) x2_padded = tf.pad(x2, [[0, 0], [0, MAX_LEN - max_len]], constant_values=0.) padded_model = tf.keras.models.Model(inputs=[ids, att, tok], outputs=[x1_padded,x2_padded]) return model, padded_model<string_transform>
pd.get_dummies(train_data["label"] ).values
Digit Recognizer
10,881,722
def jaccard(str1, str2): a = set(str1.lower().split()) b = set(str2.lower().split()) if(len(a)==0)&(len(b)==0): return 0.5 c = a.intersection(b) return float(len(c)) /(len(a)+ len(b)- len(c))<define_variables>
train_data_labels = pd.get_dummies(train_data["label"] ).values train_data_pixels =(train_data.iloc[:,1:].values)/255.0 test_data_pixels =(test_data.values)/255.0
Digit Recognizer
10,881,722
ct = train.shape[0] input_ids = np.ones(( ct,MAX_LEN),dtype='int32') attention_mask = np.zeros(( ct,MAX_LEN),dtype='int32') token_type_ids = np.zeros(( ct,MAX_LEN),dtype='int32') start_tokens = np.zeros(( ct,MAX_LEN),dtype='int32') end_tokens = np.zeros(( ct,MAX_LEN),dtype='int32') for k in range(train.shape[0]): text1 = " "+" ".join(train.loc[k,'text'].split()) text2 = " ".join(train.loc[k,'selected_text'].split()) idx = text1.find(text2) chars = np.zeros(( len(text1))) chars[idx:idx+len(text2)]=1 if text1[idx-1]==' ': chars[idx-1] = 1 enc = tokenizer.encode(text1) offsets = []; idx=0 for t in enc.ids: w = tokenizer.decode([t]) offsets.append(( idx,idx+len(w))) idx += len(w) toks = [] for i,(a,b)in enumerate(offsets): sm = np.sum(chars[a:b]) if sm>0: toks.append(i) s_tok = sentiment_id[train.loc[k,'sentiment']] input_ids[k,:len(enc.ids)+3] = [0, s_tok] + enc.ids + [2] attention_mask[k,:len(enc.ids)+3] = 1 if len(toks)>0: start_tokens[k,toks[0]+2] = 1 end_tokens[k,toks[-1]+2] = 1<define_variables>
x_train,x_test,y_train,y_test = train_test_split(train_data_pixels,train_data_labels,test_size=0.2,random_state=42 )
Digit Recognizer
10,881,722
%%time jac = []; VER='v0'; DISPLAY=1 oof_start = np.zeros(( input_ids.shape[0],MAX_LEN)) oof_end = np.zeros(( input_ids.shape[0],MAX_LEN)) preds_start_train = np.zeros(( input_ids.shape[0],MAX_LEN)) preds_end_train = np.zeros(( input_ids.shape[0],MAX_LEN)) preds_start = np.zeros(( input_ids_t.shape[0],MAX_LEN)) preds_end = np.zeros(( input_ids_t.shape[0],MAX_LEN)) skf = StratifiedKFold(n_splits=n_split,shuffle=True,random_state=SEED) for fold,(idxT,idxV)in enumerate(skf.split(input_ids,train.sentiment.values)) : print(' print(' print(' K.clear_session() model, padded_model = build_model() inpT = [input_ids[idxT,], attention_mask[idxT,], token_type_ids[idxT,]] targetT = [start_tokens[idxT,], end_tokens[idxT,]] inpV = [input_ids[idxV,],attention_mask[idxV,],token_type_ids[idxV,]] targetV = [start_tokens[idxV,], end_tokens[idxV,]] shuffleV = np.int32(sorted(range(len(inpV[0])) , key=lambda k:(inpV[0][k] == PAD_ID ).sum() , reverse=True)) inpV = [arr[shuffleV] for arr in inpV] targetV = [arr[shuffleV] for arr in targetV] weight_fn = '%s-roberta-%i.h5'%(VER,fold) for epoch in range(1, EPOCHS + 1): shuffleT = np.int32(sorted(range(len(inpT[0])) , key=lambda k:(inpT[0][k] == PAD_ID ).sum() + np.random.randint(-3, 3), reverse=True)) num_batches = math.ceil(len(shuffleT)/ BATCH_SIZE) batch_inds = np.random.permutation(num_batches) shuffleT_ = [] for batch_ind in batch_inds: shuffleT_.append(shuffleT[batch_ind * BATCH_SIZE:(batch_ind + 1)* BATCH_SIZE]) shuffleT = np.concatenate(shuffleT_) inpT = [arr[shuffleT] for arr in inpT] targetT = [arr[shuffleT] for arr in targetT] model.fit(inpT, targetT, epochs=epoch, initial_epoch=epoch - 1, batch_size=BATCH_SIZE, verbose=DISPLAY, callbacks=[], validation_data=(inpV, targetV), shuffle=False) save_weights(model, weight_fn) print('Loading model...') load_weights(model, weight_fn) print('Predicting OOF...') oof_start[idxV,],oof_end[idxV,] = padded_model.predict([input_ids[idxV,],attention_mask[idxV,],token_type_ids[idxV,]],verbose=DISPLAY) print('Predicting all Train for Outlier analysis...') preds_train = padded_model.predict([input_ids,attention_mask,token_type_ids],verbose=DISPLAY) preds_start_train += preds_train[0]/skf.n_splits preds_end_train += preds_train[1]/skf.n_splits print('Predicting Test...') preds = padded_model.predict([input_ids_t,attention_mask_t,token_type_ids_t],verbose=DISPLAY) preds_start += preds[0]/skf.n_splits preds_end += preds[1]/skf.n_splits all = [] for k in idxV: a = np.argmax(oof_start[k,]) b = np.argmax(oof_end[k,]) if a>b: st = train.loc[k,'text'] else: text1 = " "+" ".join(train.loc[k,'text'].split()) enc = tokenizer.encode(text1) st = tokenizer.decode(enc.ids[a-2:b-1]) all.append(jaccard(st,train.loc[k,'selected_text'])) jac.append(np.mean(all)) print('>>>> FOLD %i Jaccard ='%(fold+1),np.mean(all)) print()<compute_test_metric>
def build_model() : model = Sequential() model.add(Input(shape=(28,28,1,))) model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(SpatialDropout2D(0.25)) model.add(Conv2D(filters=64,kernel_size=(6,6),padding="same")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(SpatialDropout2D(0.25)) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters=128,kernel_size=(3,3),padding="same")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(SpatialDropout2D(0.25)) model.add(Conv2D(filters=128,kernel_size=(6,6),padding="same")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Conv2D(filters=128,kernel_size=(3,3),padding="same")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(SpatialDropout2D(0.25)) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters=256,kernel_size=(3,3),padding="same")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(SpatialDropout2D(0.25)) model.add(Conv2D(filters=256,kernel_size=(6,6),padding="same")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Conv2D(filters=256,kernel_size=(3,3),padding="same")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(SpatialDropout2D(0.25)) model.add(MaxPool2D(pool_size=(2,2),strides=(2,2),padding="same")) model.add(Dropout(0.25)) model.add(Conv2D(filters=32,kernel_size=(2,2),padding="same")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(SpatialDropout2D(0.25)) model.add(Flatten()) model.add(Dense(256)) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Dense(10)) model.add(Activation("softmax")) optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(optimizer = optimizer ,metrics=["accuracy"], loss = "categorical_crossentropy") return model
Digit Recognizer
10,881,722
print('>>>> OVERALL 5Fold CV Jaccard =',np.mean(jac)) print(jac )<string_transform>
train_image_generator = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, brightness_range=None, shear_range=0.1, zoom_range=0.1, channel_shift_range=0.0, fill_mode="nearest", cval=0.0, horizontal_flip=False, vertical_flip=False, rescale=None, preprocessing_function=None, data_format=None, validation_split=0.0, dtype=None, ) train_image_generator.fit(x_train )
Digit Recognizer
10,881,722
all = [] for k in range(input_ids_t.shape[0]): a = np.argmax(preds_start[k,]) b = np.argmax(preds_end[k,]) if a>b: st = test.loc[k,'text'] else: text1 = " "+" ".join(test.loc[k,'text'].split()) enc = tokenizer.encode(text1) st = tokenizer.decode(enc.ids[a-2:b-1]) all.append(st )<save_to_csv>
callback_lr = ReduceLROnPlateau(monitor='val_acc',patience=2,factor=0.5,min_lr=0.00001,verbose=1) epochs = 50 batch_size = 256 history = model.fit_generator(train_image_generator.flow(x_train,y_train, batch_size=batch_size), epochs = epochs, validation_data =(x_test,y_test), verbose = 1, steps_per_epoch=x_train.shape[0] // batch_size , callbacks=[callback_lr] )
Digit Recognizer
10,881,722
test['selected_text'] = all test[['textID','selected_text']].to_csv('submission.csv',index=False) test.sample(10 )<import_modules>
model.fit(x_test,y_test,epochs=10,batch_size=512,validation_data=(x_train,y_train),callbacks=[callback_lr] )
Digit Recognizer
10,881,722
<load_from_csv><EOS>
ypred = model.predict(test_data_pixels) ypred = ypred.argmax(-1) subimageid = [x for x in range(1,len(test_data_pixels)+1)] submission = pd.DataFrame({"ImageId":subimageid,"Label":ypred}) submission.to_csv("submission.csv",index=False )
Digit Recognizer
10,953,593
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<init_hyperparams>
%reload_ext autoreload %autoreload 2
Digit Recognizer
10,953,593
lgb_params = { "objective" : "binary", "metric" : "auc", "boosting": 'gbdt', "max_depth" : 1, "num_leaves" : 13, "learning_rate" : 0.03, "bagging_freq": 5, "bagging_fraction" : 0.4, "feature_fraction" : 0.05, "min_data_in_leaf": 80, "min_sum_hessian_in_leaf": 10, "tree_learner": "serial", "boost_from_average": "false", "bagging_seed" : 42, "verbosity" : 1, "seed": 42 }<train_model>
train=pd.read_csv(inputs/"train.csv") train.head(3 )
Digit Recognizer
10,953,593
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=44000) oof = np.zeros(len(train)) predictions = np.zeros(len(test)) for fold_,(trn_idx, val_idx)in enumerate(folds.split(train.values, y.values)) : print("Fold {}".format(fold_)) trn_data = lgb.Dataset(train.iloc[trn_idx], label=y.iloc[trn_idx]) val_data = lgb.Dataset(train.iloc[val_idx], label=y.iloc[val_idx]) num_round = 15000 clf = lgb.train(lgb_params, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval=1000, early_stopping_rounds = 200) oof[val_idx] = clf.predict(train.iloc[val_idx], num_iteration=clf.best_iteration) predictions += clf.predict(test, num_iteration=clf.best_iteration)/ folds.n_splits print("CV score: {:<8.5f}".format(roc_auc_score(y, oof)) )<load_from_csv>
test=pd.read_csv(inputs/"test.csv") test.head(3 )
Digit Recognizer
10,953,593
sub_file = pd.read_csv('/kaggle/input/santander-customer-transaction-prediction/sample_submission.csv' )<merge>
tfms = get_transforms(do_flip=False) tr=Path("../train") te=Path("../test")
Digit Recognizer
10,953,593
sub = pd.DataFrame() sub['ID_code'] = test_index sub['target'] = predictions final_sub = pd.merge(sub_file, sub, on='ID_code', how='left')[['ID_code', 'target_y']] final_sub = final_sub.fillna(0 ).rename(columns={'target_y': 'target'}) final_sub<save_to_csv>
sorted(os.listdir(tr))
Digit Recognizer
10,953,593
final_sub.to_csv('Finalsub.csv', index=False )<load_from_csv>
for index, row in train.iterrows() : label,digit = row[0], row[1:] filepath = tr/str(label) filename = f"{index}.jpg" digit = digit.values digit = digit.reshape(28,28) digit = digit.astype(np.uint8) img = Image.fromarray(digit) img.save(filepath/filename)
Digit Recognizer
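The loop above writes each training digit to tr/<label>/<index>.jpg, but the class folders are not created in the cells shown; a minimal sketch of the assumed directory setup run beforehand:

# Assumed setup: one sub-folder per digit class under tr, plus the flat test folder te.
for label in range(10):
    (tr / str(label)).mkdir(parents=True, exist_ok=True)
te.mkdir(parents=True, exist_ok=True)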
10,953,593
train_data = pd.read_csv('/kaggle/input/titanic/train.csv') test_data = pd.read_csv('/kaggle/input/titanic/test.csv') train_data['Age'].fillna(train_data['Age'].mean() , inplace=True) test_data['Age'].fillna(test_data['Age'].mean() , inplace=True) train_data['Fare'].fillna(train_data['Fare'].mean() , inplace=True) test_data['Fare'].fillna(test_data['Fare'].mean() , inplace=True )<prepare_x_and_y>
for index, digit in test.iterrows() : filepath = te filename = f"{index}.jpg" digit = digit.values digit = digit.reshape(28,28) digit = digit.astype(np.uint8) img = Image.fromarray(digit) img.save(filepath/filename )
Digit Recognizer
10,953,593
features = ["Pclass", "Sex", "SibSp", "Parch", "Age", "Fare"] train_y = train_data["Survived"] train_x = pd.get_dummies(train_data[features]) test_x = pd.get_dummies(test_data[features] )<train_model>
data = ImageDataBunch.from_folder(path="../train", test="../test", ds_tfms=tfms, valid_pct=0.2, bs=32, size=24).normalize(imagenet_stats)
Digit Recognizer
10,953,593
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(train_x, train_y) test_y = model.predict(test_x )<save_to_csv>
data.show_batch(rows=3 ,figsize=(5,5))
Digit Recognizer
10,953,593
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': test_y}) output.to_csv('submission.csv', index=False )<import_modules>
learn = cnn_learner(data, models.resnet50, metrics=accuracy, model_dir = Path('../kaggle/input/ResNet-50'), callback_fns=ShowGraph)
Digit Recognizer
10,953,593
import numpy as np import pandas as pd from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score from sklearn.ensemble import RandomForestClassifier from xgboost import XGBClassifier from lightgbm import LGBMClassifier from sklearn.ensemble import VotingClassifier<load_from_csv>
learn.fit_one_cycle(5 )
Digit Recognizer
10,953,593
train_data = pd.read_csv('../input/titanic/train.csv') test_data = pd.read_csv('../input/titanic/test.csv')
learn.save("501" )
Digit Recognizer
10,953,593
missing_values = train_data.isna().any() print('Columns which have missing values: {0}'.format(missing_values[missing_values == True].index.tolist()))<count_missing_values>
learn.unfreeze() learn.fit_one_cycle(5,max_lr=slice(1e-3,1e-1))
Digit Recognizer
10,953,593
print("Percentage of missing values in `Age` column: {0:.2f}".format(100.*(train_data.Age.isna().sum() /len(train_data)))) print("Percentage of missing values in `Cabin` column: {0:.2f}".format(100.*(train_data.Cabin.isna().sum() /len(train_data)))) print("Percentage of missing values in `Embarked` column: {0:.2f}".format(100.*(train_data.Embarked.isna().sum() /len(train_data))))<count_duplicates>
learn.save("502" )
Digit Recognizer
10,953,593
duplicates = train_data.duplicated().sum() print('Duplicates in train data: {0}'.format(duplicates))<count_unique_values>
class_score,y=learn.get_preds(DatasetType.Test )
Digit Recognizer
10,953,593
categorical = train_data.nunique().sort_values(ascending=True) print('Categorical variables in train data: {0}'.format(categorical))<drop_column>
probs= class_score[0].tolist() [f"{index}: {probs[index]}" for index in range(len(probs)) ]
Digit Recognizer
10,953,593
for data in [train_data, test_data]: data.drop(['Cabin'], axis=1, inplace=True) data.drop(['Ticket', 'Fare'], axis=1, inplace=True )<drop_column>
class_score=np.argmax(class_score,axis=1 )
Digit Recognizer
10,953,593
for data in [train_data, test_data]: data['Title'] = data.Name.str.split(',' ).str[1].str.split('.' ).str[0].str.strip() data['Woman_Or_Boy'] =(data.Title == 'Master')|(data.Sex == 'female') data.drop('Title', axis=1, inplace=True) data.drop('Name', axis=1, inplace=True )<categorify>
samplesub=pd.read_csv(inputs/"sample_submission.csv") samplesub.head()
Digit Recognizer
10,953,593
label_encoder = LabelEncoder() for data in [train_data, test_data]: data['Sex'] = label_encoder.fit_transform(data['Sex']) data['Woman_Or_Boy'] = label_encoder.fit_transform(data['Woman_Or_Boy'] )<categorify>
ImageId = [os.path.splitext(path)[0] for path in os.listdir(te)] ImageId = [int(path)for path in ImageId] ImageId = [ID+1 for ID in ImageId] ImageId[:5]
Digit Recognizer