kernel_id: int64 (values 24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
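Each record below pairs a prompt (the notebook code seen so far, ending in a task tag such as <train_model>) with the completion cell that fulfils that tag, keyed by kernel_id and the competition name in comp_name. A minimal sketch of how a table with these four columns might be loaded and inspected, assuming a pandas-readable export exists; the file name records.parquet is a hypothetical placeholder, not something named in this dump:

import pandas as pd

# Hypothetical export of the table summarised above (placeholder file name).
df = pd.read_parquet("records.parquet")

# kernel_id should come back as int64; prompt, completion and comp_name as strings.
print(df.dtypes)

# Distinct kernels per competition, largest first.
print(df.groupby("comp_name")["kernel_id"].nunique().sort_values(ascending=False).head())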
11,757,999
clf_mlp = MLPClassifier(activation = "logistic", hidden_layer_sizes=(200,), random_state=0) param_grid = { 'batch_size' : [batchsize] , 'max_iter': [600], 'alpha': [1e-4], 'solver': ['sgd'], 'learning_rate_init': [0.05,0.06],'tol': [1e-4] } GridCV_MLP = GridSearchCV(clf_mlp, param_grid, verbose=1, cv=3) GridCV_MLP.fit(X_train,y_train) score_grid_MLP = get_best_score(GridCV_MLP )<predict_on_test>
x_train = train_dataset.map(lambda image, label : image) train_preds = model.predict(x_train) train_preds = np.argmax(train_preds, axis=-1) print(classification_report(y_train, train_preds, target_names=CLASSES))
Petals to the Metal - Flower Classification on TPU
11,757,999
pred_val_mlp = GridCV_MLP.predict(X_val )<compute_test_metric>
x_valid = valid_dataset.map(lambda image, label : image) valid_preds = model.predict(x_valid) valid_preds = np.argmax(valid_preds, axis=-1) print(classification_report(y_valid, valid_preds, target_names=CLASSES))
Petals to the Metal - Flower Classification on TPU
11,757,999
acc_mlp = print_validation_report(y_val, pred_val_mlp )<categorify>
x_train_samp, y_train_samp = dataset_to_numpy_util(train_dataset, 9) train_samp_preds = model.predict(x_train_samp, batch_size=9) display_9_images_with_predictions(x_train_samp, train_samp_preds, y_train_samp )
Petals to the Metal - Flower Classification on TPU
11,757,999
y_train = to_categorical(y_train, 10) y_val_10 = to_categorical(y_val, 10 )<choose_model_class>
x_valid_samp, y_valid_samp = dataset_to_numpy_util(valid_dataset, 9) valid_samp_preds = model.predict(x_valid_samp, batch_size=9) display_9_images_with_predictions(x_valid_samp, valid_samp_preds, y_valid_samp )
Petals to the Metal - Flower Classification on TPU
11,757,999
def dense_model_0() : model = Sequential() model.add(Dense(10, input_dim=784, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model<train_model>
x_test = test_dataset.map(lambda image, idnum: image) test_preds = model.predict(x_test) test_preds = np.argmax(test_preds, axis=-1 )
Petals to the Metal - Flower Classification on TPU
11,757,999
model_dense_0.fit(X_train, y_train, epochs=50, batch_size=batchsize )<predict_on_test>
test_ids_ds = test_dataset.map(lambda image, idnum:idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') submission = pd.DataFrame(test_ids, columns=['id']) submission['label'] = test_preds submission.to_csv('submission.csv', index=False) display(submission.head(10))
Petals to the Metal - Flower Classification on TPU
11,704,220
pred_val_dense0 = model_dense_0.predict_classes(X_val )<compute_test_metric>
import re import math import numpy as np import seaborn as sns from kaggle_datasets import KaggleDatasets from matplotlib import pyplot as plt from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.callbacks import LearningRateScheduler from tensorflow.keras.metrics import TruePositives, FalsePositives, FalseNegatives
Petals to the Metal - Flower Classification on TPU
11,704,220
acc_fc0 = print_validation_report(y_val, pred_val_dense0 )<choose_model_class>
!pip install efficientnet
Petals to the Metal - Flower Classification on TPU
11,704,220
def dense_model_1() : model = Sequential() model.add(Dense(100, input_dim=784, activation='relu')) model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model<train_model>
AUTO = tf.data.experimental.AUTOTUNE AUTO
Petals to the Metal - Flower Classification on TPU
11,704,220
history_dense_1 = model_dense_1.fit(X_train, y_train, validation_data=(X_val,y_val_10), epochs=50, batch_size=batchsize )<compute_train_metric>
tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu )
Petals to the Metal - Flower Classification on TPU
11,704,220
pred_val_dense1 = model_dense_1.predict_classes(X_val) plot_confusion_matrix(y_val, pred_val_dense1) print(classification_report(y_val, pred_val_dense1)) acc_fc1 = accuracy_score(y_val, pred_val_dense1) print(acc_fc1 )<choose_model_class>
IMAGE_SIZE = [512, 512] EPOCHS = 30 LEARNING_RATE = 1e-3 TTA_NUM = 5 BATCH_SIZE = 32 * strategy.num_replicas_in_sync print("Batch size used: ", BATCH_SIZE)
Petals to the Metal - Flower Classification on TPU
11,704,220
def dense_model_2() : model = Sequential() model.add(Dense(100, input_dim=784, activation='relu')) model.add(Dense(200, activation='relu')) model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model<train_model>
GCS_PATH = KaggleDatasets().get_gcs_path('tpu-getting-started') GCS_PATH_SELECT = { 192: GCS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_PATH + '/tfrecords-jpeg-512x512' } GCS_FPATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]
Petals to the Metal - Flower Classification on TPU
11,704,220
history_dense_2 = model_dense_2.fit(X_train, y_train, validation_data=(X_val,y_val_10), epochs=50, batch_size=batchsize )<compute_train_metric>
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_FPATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_FPATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_FPATH + '/test/*.tfrec' )
Petals to the Metal - Flower Classification on TPU
11,704,220
pred_val_dense2 = model_dense_2.predict_classes(X_val) plot_confusion_matrix(y_val, pred_val_dense2) print(classification_report(y_val, pred_val_dense2)) acc_fc2 = accuracy_score(y_val, pred_val_dense2) print(acc_fc2 )<choose_model_class>
SKIP_VALIDATION = True if SKIP_VALIDATION: TRAINING_FILENAMES = TRAINING_FILENAMES + VALIDATION_FILENAMES
Petals to the Metal - Flower Classification on TPU
11,704,220
def dense_model_3() : model = Sequential() model.add(Dense(100, activation='relu', input_dim=784)) model.add(Dense(200, activation='relu')) model.add(Dense(100, activation='relu')) model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model<train_model>
CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose'] print(f"No of Flower classes in dataset: {len(CLASSES)}" )
Petals to the Metal - Flower Classification on TPU
11,704,220
history_dense_3 = model_dense_3.fit(X_train, y_train, validation_data=(X_val,y_val_10), epochs=50, batch_size=batchsize )<compute_train_metric>
LR_START = 0.00001 LR_MAX = 0.00005 * strategy.num_replicas_in_sync LR_MIN = 0.00001 LR_RAMPUP_EPOCHS = 5 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY =.7 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) rng = [i for i in range(EPOCHS)] y = [lrfn(x)for x in rng] plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
11,704,220
pred_val_dense3 = model_dense_3.predict_classes(X_val) plot_confusion_matrix(y_val, pred_val_dense3) print(classification_report(y_val, pred_val_dense3)) acc_fc3 = accuracy_score(y_val, pred_val_dense3) print(acc_fc3 )<prepare_x_and_y>
def display_batch_of_images(databatch, predictions=None): images, labels = batch_to_numpy_images_and_labels(databatch) if labels is None: labels = [None for _ in enumerate(images)] rows = int(math.sqrt(len(images))) cols = len(images)//rows FIGSIZE = 13.0 SPACING = 0.1 subplot=(rows,cols,1) if rows < cols: plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows)) else: plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE)) for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) : title = '' if label is None else CLASSES[label] correct = True if predictions is not None: title, correct = title_from_label_and_target(predictions[i], label) dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3 subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize) plt.tight_layout() if label is None and predictions is None: plt.subplots_adjust(wspace=0, hspace=0) else: plt.subplots_adjust(wspace=SPACING, hspace=SPACING) plt.show()
Petals to the Metal - Flower Classification on TPU
11,704,220
X_train = X_train.values.reshape(X_train.shape[0], img_rows, img_cols, 1) X_val = X_val.values.reshape(X_val.shape[0], img_rows, img_cols, 1) input_shape =(img_rows, img_cols, 1 )<define_variables>
def dataset_to_numpy_util(dataset, N): dataset = dataset.unbatch().batch(N) for images, labels in dataset: numpy_images = images.numpy() numpy_labels = labels.numpy() break; return numpy_images, numpy_labels
Petals to the Metal - Flower Classification on TPU
11,704,220
batchsize = 128 epochs = 12<feature_engineering>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def data_augment(image, label, seed=2020): image = tf.image.random_flip_left_right(image, seed=seed) return image, label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled=True) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset(ordered=False): dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_train_valid_datasets() : dataset = load_dataset(TRAINING_FILENAMES + VALIDATION_FILENAMES, labeled=True) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n )
Petals to the Metal - Flower Classification on TPU
11,704,220
activation = 'relu' adadelta = Adadelta() loss = categorical_crossentropy<choose_model_class>
models = [] histories = []
Petals to the Metal - Flower Classification on TPU
11,704,220
def cnn_model_1(activation): model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation=activation, input_shape=input_shape)) model.add(Conv2D(64,(3, 3), activation=activation)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation=activation)) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.compile(loss=loss, optimizer=adadelta, metrics=['accuracy']) return model<find_best_params>
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH =(NUM_TRAINING_IMAGES + NUM_VALIDATION_IMAGES)// BATCH_SIZE print('Dataset: {} training images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES+NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
11,704,220
model_cnn_1 = cnn_model_1(activation) model_cnn_1.summary()<train_model>
def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def get_training_dataset_preview(ordered=True): dataset = load_dataset(TRAINING_FILENAMES, labeled=True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset np.set_printoptions(threshold=15, linewidth=80) def batch_to_numpy_images_and_labels(data): images, labels = data numpy_images = images.numpy() numpy_labels = labels.numpy() if numpy_labels.dtype == object: numpy_labels = [None for _ in enumerate(numpy_images)] return numpy_images, numpy_labels def title_from_label_and_target(label, correct_label): if correct_label is None: return CLASSES[label], True correct =(label == correct_label) return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '', CLASSES[correct_label] if not correct else ''), correct def dataset_to_numpy_util(dataset, N): dataset = dataset.unbatch().batch(N) for images, labels in dataset: numpy_images = images.numpy() numpy_labels = labels.numpy() break; return numpy_images, numpy_labels
Petals to the Metal - Flower Classification on TPU
11,704,220
history_cnn_1 = model_cnn_1.fit(X_train, y_train, validation_data=(X_val,y_val_10), epochs=epochs, batch_size=batchsize, verbose=1 )<compute_train_metric>
train_dataset = get_training_dataset_preview(ordered=True) y_train = next(iter(train_dataset.unbatch().map(lambda image, label: label ).batch(NUM_TRAINING_IMAGES)) ).numpy() print('Number of training images %d' % NUM_TRAINING_IMAGES )
Petals to the Metal - Flower Classification on TPU
11,704,220
pred_val_cnn1 = model_cnn_1.predict_classes(X_val) plot_confusion_matrix(y_val, pred_val_cnn1) print(classification_report(y_val, pred_val_cnn1)) acc_cnn1 = accuracy_score(y_val, pred_val_cnn1) print(acc_cnn1) <define_variables>
display_batch_of_images(next(iter(train_dataset.unbatch().batch(20))))
Petals to the Metal - Flower Classification on TPU
11,704,220
batch_size=90 epochs=30 <choose_model_class>
test_dataset = get_test_dataset() test_dataset = test_dataset.unbatch().batch(20) test_batch = iter(test_dataset )
Petals to the Metal - Flower Classification on TPU
11,704,220
def cnn_model_2(optimizer,loss): model = Sequential() model.add(Conv2D(32,(3, 3), padding = 'Same', activation="relu", input_shape=input_shape)) model.add(MaxPooling2D(pool_size =(2, 2))) model.add(Conv2D(32,(3, 3), activation="relu")) model.add(MaxPooling2D(pool_size =(2, 2))) model.add(Flatten()) model.add(Dense(256, activation=activation)) model.add(Dense(10, activation='softmax')) model.compile(optimizer = optimizer, loss = loss, metrics = ['accuracy']) return model<compute_train_metric>
display_batch_of_images(next(test_batch))
Petals to the Metal - Flower Classification on TPU
11,704,220
model_cnn_2 = cnn_model_2(adadelta, categorical_crossentropy) model_cnn_2.summary()<train_model>
def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift): rotation = math.pi * rotation / 180. shear = math.pi * shear / 180. c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1], dtype='float32') zero = tf.constant([0], dtype='float32') rotation_mat = tf.reshape(tf.concat([c1, s1, zero, -s1, c1, zero, zero, zero, one], axis=0), [3,3]) c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_mat = tf.reshape(tf.concat([one, s2, zero, zero, c2, zero, zero, zero, one], axis=0), [3,3]) zoom_mat = tf.reshape(tf.concat([one/height_zoom, zero, zero, zero, one/width_zoom, zero, zero, zero, one], axis=0), [3,3]) shift_mat = tf.reshape(tf.concat([one, zero, height_shift, zero, one, width_shift, zero, zero, one], axis=0), [3,3]) return K.dot(K.dot(rotation_mat, shear_mat), K.dot(zoom_mat, shift_mat))
Petals to the Metal - Flower Classification on TPU
11,704,220
history_cnn_2 = model_cnn_2.fit(X_train, y_train, validation_data=(X_val,y_val_10), epochs=epochs, batch_size=batchsize, verbose=1 )<compute_train_metric>
def transform(image, label): DIM = 512 XDIM = DIM%2 rot = 15.* tf.random.normal([1], dtype='float32') shr = 5.*tf.random.normal([1], dtype='float32') h_zoom = 1.0 + tf.random.normal([1], dtype='float32')/1.0 w_zoom = 1.0 + tf.random.normal([1], dtype='float32')/1.0 h_shift = 16.* tf.random.normal([1],dtype='float32') w_shift = 16.* tf.random.normal([1],dtype='float32') m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(m,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image,tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]), label
Petals to the Metal - Flower Classification on TPU
11,704,220
pred_val_cnn2 = model_cnn_2.predict_classes(X_val) plot_confusion_matrix(y_val, pred_val_cnn2) print(classification_report(y_val, pred_val_cnn2)) acc_cnn2 = accuracy_score(y_val, pred_val_cnn2) print(acc_cnn2 )<load_from_csv>
import efficientnet.tfkeras as efn with strategy.scope() : enet = efn.EfficientNetB3( input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3), weights='imagenet', include_top=False ) enet.trainable = True model1 = tf.keras.Sequential([ enet, tf.keras.layers.GlobalMaxPooling2D(name="Layer1"), tf.keras.layers.Dropout(0.) , tf.keras.layers.Dense(len(CLASSES), activation='softmax') ]) model1.compile( optimizer=tf.keras.optimizers.Adam(lr=0.0001), loss = 'sparse_categorical_crossentropy', metrics = "sparse_categorical_accuracy" ) model1.summary() models.append(model1 )
Petals to the Metal - Flower Classification on TPU
11,704,220
sample_submission = pd.read_csv('../input/sample_submission.csv') if mode == "edit" : X = X[:nr_samples//2] y = y[:nr_samples//2] X_test = X_test[:nr_samples//2] sample_submission = sample_submission[:nr_samples//2]<train_on_grid>
tf.keras.utils.plot_model( model1, to_file='model.png', show_shapes=True, show_layer_names=True, )
Petals to the Metal - Flower Classification on TPU
11,704,220
print(GridCV_Perceptron.best_params_) GridCV_Perceptron.best_estimator_.fit(X,y )<save_to_csv>
Checkpoint=tf.keras.callbacks.ModelCheckpoint(f"Enet_model.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True,mode='max') train_history1 = model1.fit( get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, callbacks=[lr_callback, Checkpoint, keras.callbacks.EarlyStopping( monitor="val_loss", min_delta=1e-2, patience=2, verbose=1, )], ) histories.append(train_history1)
Petals to the Metal - Flower Classification on TPU
11,704,220
pred_test_perc = GridCV_Perceptron.best_estimator_.predict(X_test) result_perc = pd.DataFrame({'ImageId':sample_submission.ImageId, 'Label':pred_test_perc}) result_perc.to_csv("subm_perc.csv",index=False )<train_on_grid>
with strategy.scope() : densenet = tf.keras.applications.DenseNet201(input_shape=[*IMAGE_SIZE, 3], weights='imagenet', include_top=False) densenet.trainable = True model2 = tf.keras.Sequential([ densenet, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation='softmax') ]) model2.compile( optimizer=tf.keras.optimizers.Adam() , loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) model2.summary()
Petals to the Metal - Flower Classification on TPU
11,704,220
print(GridCV_LR.best_params_) GridCV_LR.best_estimator_.fit(X,y )<save_to_csv>
tf.keras.utils.plot_model( model2, to_file='model.png', show_shapes=True, show_layer_names=True, )
Petals to the Metal - Flower Classification on TPU
11,704,220
pred_test_lr = GridCV_LR.best_estimator_.predict(X_test) result_lr = pd.DataFrame({'ImageId':sample_submission.ImageId, 'Label':pred_test_lr}) result_lr.to_csv("subm_lr.csv",index=False )<train_model>
Checkpoint=tf.keras.callbacks.ModelCheckpoint(f"Dnet_model.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True,mode='max') train_history2 = model2.fit(get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, epochs=30, callbacks = [lr_callback, Checkpoint, keras.callbacks.EarlyStopping( monitor="val_loss", min_delta=1e-2, patience=2, verbose=1, )]) histories.append(train_history2 )
Petals to the Metal - Flower Classification on TPU
11,704,220
clf_knn.fit(X,y )<save_to_csv>
if not SKIP_VALIDATION: cmdataset = get_validation_dataset(ordered=True) images_ds = cmdataset.map(lambda image, label: image) labels_ds = cmdataset.map(lambda image, label: label ).unbatch() cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy() m1 = model1.predict(images_ds) m2 = model2.predict(images_ds) scores = [] alphas = np.linspace(0,1,100) for alpha in alphas: cm_probabilities = alpha*m1+(1-alpha)*m2 cm_predictions = np.argmax(cm_probabilities, axis=-1) scores.append(f1_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)) , average='macro')) best_alpha = alphas[np.argmax(scores)] print('Best alpha: ' + str(best_alpha))
Petals to the Metal - Flower Classification on TPU
11,704,220
pred_test_knn = clf_knn.predict(X_test) result_knn = pd.DataFrame({'ImageId':sample_submission.ImageId, 'Label':pred_test_knn}) result_knn.to_csv("subm_knn.csv",index=False )<train_on_grid>
if not SKIP_VALIDATION: cmat = confusion_matrix(cm_correct_labels, cm_predictions, labels=range(len(CLASSES))) score = f1_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)) , average='macro') precision = precision_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)) , average='macro') recall = recall_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)) , average='macro') display_confusion_matrix(cmat, score, precision, recall) print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(score, precision, recall))
Petals to the Metal - Flower Classification on TPU
11,704,220
print(GridCV_RF.best_params_) GridCV_RF.best_estimator_.fit(X,y )<save_to_csv>
def predict_tta(model, n_iter): probs = [] for i in range(n_iter): test_ds = get_test_dataset(ordered=True) test_images_ds = test_ds.map(lambda image, idnum: image) probs.append(model.predict(test_images_ds,verbose=0)) return probs
Petals to the Metal - Flower Classification on TPU
11,704,220
pred_test_rf = GridCV_RF.best_estimator_.predict(X_test) result_rf = pd.DataFrame({'ImageId':sample_submission.ImageId, 'Label':pred_test_rf}) result_rf.to_csv("subm_rf.csv",index=False )<train_model>
test_ds = get_test_dataset(ordered=True) print('Calculating predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probs1 = np.mean(predict_tta(model1, TTA_NUM), axis=0) probs2 = np.mean(predict_tta(model2, TTA_NUM), axis=0) probabilities = best_alpha*probs1 +(1-best_alpha)*probs2 predictions = np.argmax(probabilities, axis=-1 )
Petals to the Metal - Flower Classification on TPU
11,704,220
<save_to_csv><EOS>
print('Generating submission file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
11,043,234
<SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<normalization>
warnings.filterwarnings('ignore') os.environ["WANDB_API_KEY"] = "0"
Contradictory, My Dear Watson
11,043,234
testforfilna =preprocessing.scale(testforfilna )<feature_engineering>
def Init_TPU() : try: resolver = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.experimental.TPUStrategy(resolver) REPLICAS = strategy.num_replicas_in_sync print("Connected to TPU Successfully: TPUs Initialised with Replicas:",REPLICAS) return strategy except ValueError: print("Connection to TPU Failed") print("Using default strategy for CPU and single GPU") strategy = tf.distribute.get_strategy() return strategy strategy=Init_TPU()
Contradictory, My Dear Watson
11,043,234
data["FamSize"] = data.SibSp +data.Parch data['Alone'] = data.FamSize == 0 data.Alone data_test["FamSize"] = data_test.SibSp +data_test.Parch data_test['Alone'] = data_test.FamSize == 0<drop_column>
path = '../input/contradictory-my-dear-watson/'
Contradictory, My Dear Watson
11,043,234
data.drop("AgeGroup", axis = 1, inplace =True) data_test.drop("AgeGroup", axis = 1, inplace =True )<count_missing_values>
train_url = os.path.join(path,'train.csv') train_data = pd.read_csv(train_url, header='infer') sample_sub_url = os.path.join(path,'sample_submission.csv') sample_sub = pd.read_csv(sample_sub_url, header='infer') test_url = os.path.join(path,'test.csv') test_data = pd.read_csv(test_url, header='infer' )
Contradictory, My Dear Watson
11,043,234
data.isna().sum()<count_missing_values>
print("Records per Label: ", train_data.groupby('label' ).size() )
Contradictory, My Dear Watson
11,043,234
data_test.isna().sum()<find_best_model_class>
print("Records per Language: ", train_data.groupby('language' ).size() )
Contradictory, My Dear Watson
11,043,234
lr = LogisticRegression(C=2) lr.fit(data.drop('Survived', axis=1), data.Survived) kf = KFold(n_splits=10,shuffle=True,random_state=42) scores = cross_val_score(lr,data.drop(['Survived'], axis=1), data.Survived, cv = kf) scores.mean() <drop_column>
gc.collect()
Contradictory, My Dear Watson
11,043,234
data_test.drop(['Status_Col', 'Status_Dona', 'Status_Dr'], axis= 1, inplace=True )<train_on_grid>
transformer_model = 'jplu/tf-xlm-roberta-large' tokenizer = XLMRobertaTokenizer.from_pretrained(transformer_model )
Contradictory, My Dear Watson
11,043,234
KNN = KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=7, p=2, weights='uniform') KNN.fit(data.drop('Survived', axis=1), data.Survived) scoresKNN = cross_val_score(KNN,data.drop(['Survived'], axis=1), data.Survived, cv = kf) scoresKNN.mean() <train_on_grid>
tokenizer.convert_tokens_to_ids(list(tokenizer.tokenize("Elementary, My Dear Watson!")) )
Contradictory, My Dear Watson
11,043,234
tr = DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=5, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, presort=False, random_state=None, splitter='best') tr.fit(data.drop('Survived', axis=1), data.Survived) scorestr = cross_val_score(tr,data.drop(['Survived'], axis=1), data.Survived, cv = kf) scorestr.mean()<train_on_grid>
train = train_data[['premise','hypothesis']].values.tolist() test = test_data[['premise','hypothesis']].values.tolist()
Contradictory, My Dear Watson
11,043,234
rf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=5, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=20, n_jobs=None, oob_score=False, random_state=None, verbose=0, warm_start=False) rf.fit(data.drop('Survived', axis=1), data.Survived) scoresrf = cross_val_score(rf,data.drop(['Survived'], axis=1), data.Survived, cv = kf) scoresrf.mean()<train_on_grid>
max_len = 80 train_encode = tokenizer.batch_encode_plus(train, pad_to_max_length=True, max_length=max_len) test_encode = tokenizer.batch_encode_plus(test, pad_to_max_length=True, max_length=max_len )
Contradictory, My Dear Watson
11,043,234
gr = GradientBoostingClassifier(learning_rate=0.1, n_estimators=250,max_depth=10) gr.fit(data.drop('Survived', axis=1), data.Survived) scores_gr = cross_val_score(tr,data.drop(['Survived'], axis=1), data.Survived, cv = kf) scores_gr.mean()<train_on_grid>
test_size = 0.1 x_train, x_val, y_train, y_val = train_test_split(train_encode['input_ids'], train_data.label.values, test_size=test_size) x_test = test_encode['input_ids']
Contradictory, My Dear Watson
11,043,234
mlpc = MLPClassifier(max_iter=5000) mlpc.fit(data.drop('Survived', axis=1), data.Survived) scores_mlpc = cross_val_score(mlpc,data.drop(['Survived'], axis=1), data.Survived, cv = kf) scores_mlpc.mean()<drop_column>
gc.collect()
Contradictory, My Dear Watson
11,043,234
data.drop(['Status_OTHER'], inplace=True, axis=1) data_test.drop(['Status_Rev'], inplace=True, axis=1 )<train_on_grid>
AUTO = tf.data.experimental.AUTOTUNE batch_size = 16 * strategy.num_replicas_in_sync train_ds =(tf.data.Dataset.from_tensor_slices(( x_train, y_train)).repeat().shuffle(2048 ).batch(batch_size ).prefetch(AUTO)) val_ds =(tf.data.Dataset.from_tensor_slices(( x_val, y_val)).batch(batch_size ).prefetch(AUTO)) test_ds =(tf.data.Dataset.from_tensor_slices(x_test ).batch(batch_size))
Contradictory, My Dear Watson
11,043,234
XGBC = XGBClassifier(n_estimators=1000, learning_rate=0.05, max_depth=3) XGBC.fit(data.drop('Survived', axis=1), data.Survived); XGBC_scores = cross_val_score(XGBC,data.drop(['Survived'], axis=1), data.Survived, cv = kf); XGBC_scores.mean()<save_to_csv>
gc.collect()
Contradictory, My Dear Watson
11,043,234
preds = XGBC.predict(data_test) submission = pd.DataFrame({ "PassengerId": ID, "Survived": preds }) submission.to_csv('titanic.csv', index=False )<import_modules>
def build_model(strategy,transformer): with strategy.scope() : transformer_encoder = TFXLMRobertaModel.from_pretrained(transformer) input_layer = Input(shape=(max_len,), dtype=tf.int32, name="input_layer") sequence_output = transformer_encoder(input_layer)[0] cls_token = sequence_output[:, 0, :] output_layer = Dense(3, activation='softmax' )(cls_token) model = Model(inputs=input_layer, outputs=output_layer) model.compile( Adam(lr=1e-5), loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) return model model = build_model(strategy,transformer_model )
Contradictory, My Dear Watson
11,043,234
from sklearn.model_selection import GridSearchCV<import_modules>
epochs = 30 n_steps = len(train_data)// batch_size model.fit(train_ds, steps_per_epoch = n_steps, validation_data = val_ds, epochs = epochs )
Contradictory, My Dear Watson
11,043,234
from sklearn.model_selection import GridSearchCV<train_on_grid>
gc.collect()
Contradictory, My Dear Watson
11,043,234
lgbm = LGBMClassifier(n_estimators=1000, random_state=5) lgbm.fit(data.drop('Survived', axis=1), data.Survived) scores_lgbm = cross_val_score(lgbm,data.drop(['Survived'], axis=1), data.Survived, cv = kf) scores_lgbm.mean()<train_on_grid>
prediction = model.predict(test_ds, verbose=0) sample_sub['prediction'] = prediction.argmax(axis=1 )
Contradictory, My Dear Watson
11,043,234
<find_best_params><EOS>
sample_sub.to_csv("submission.csv", index=False )
Contradictory, My Dear Watson
11,016,766
<SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<train_on_grid>
df_submit = pd.read_csv("../input/mberttrain10epochs/submission(1).csv")
Contradictory, My Dear Watson
11,016,766
<train_on_grid><EOS>
df_submit.to_csv("submission.csv", index=False) df_submit.head()
Contradictory, My Dear Watson
10,997,559
<SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<import_modules>
pip install pyspellchecker
Contradictory, My Dear Watson
10,997,559
from sklearn.linear_model import Lasso<import_modules>
import pandas as pd from spellchecker import SpellChecker import regex as re
Contradictory, My Dear Watson
10,997,559
from sklearn.linear_model import Lasso<train_on_grid>
warnings.filterwarnings("ignore" )
Contradictory, My Dear Watson
10,997,559
ls = LogisticRegression(C=0.000002) ls.fit(data.drop('Survived', axis=1), data.Survived) ls.coef_<import_modules>
train = pd.read_csv('../input/contradictory-my-dear-watson/train.csv') test = pd.read_csv('../input/contradictory-my-dear-watson/test.csv')
Contradictory, My Dear Watson
10,997,559
import numpy as np import pandas as pd import tensorflow as tf from skimage import io, transform from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.metrics import accuracy_score<import_modules>
eng = train.loc[train.language == 'English']
Contradictory, My Dear Watson
10,997,559
from keras.models import Sequential from keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense, BatchNormalization from keras.utils import to_categorical from keras.preprocessing.image import ImageDataGenerator from keras.optimizers import RMSprop from keras.callbacks import ReduceLROnPlateau from keras.datasets import mnist<load_from_csv>
spell = SpellChecker() spell.word_frequency.load_text_file('../input/english-words/words_alpha.txt') spell.word_frequency.load_words([''] )
Contradictory, My Dear Watson
10,997,559
train_data = pd.read_csv("../input/train.csv") test_data = pd.read_csv("../input/test.csv") print(train_data.shape, test_data.shape )<concatenate>
eng['premise_misspelled'] = eng.premise.apply(lambda sentence: tuple(spell.unknown(re.split('[!\:;,.\-\% \b\s() "/$0-9]', re.sub("['\[\]]", '', sentence)))))
Contradictory, My Dear Watson
10,997,559
( x_train1, y_train1),(x_test1, y_test1)= mnist.load_data() x_train1 = np.concatenate(( x_test1, x_train1)) y_train1 = np.concatenate(( y_test1, y_train1)) x_train1 = x_train1.reshape(( x_train1.shape[0], 28, 28, 1)) print(x_train1.shape, y_train1.shape )<prepare_x_and_y>
misspelled_df = eng.loc[eng.premise_misspelled !=() ] list(misspelled_df.premise_misspelled )
Contradictory, My Dear Watson
10,997,559
x = np.array(train_data.drop(['label'], axis = 1)) y = np.array(train_data['label']) test_data = np.array(test_data) x = x.reshape(( x.shape[0], 28, 28, 1)) test_data = test_data.reshape(test_data.shape[0], 28, 28, 1) x = np.concatenate(( x, x_train1)) y = np.concatenate(( y, y_train1)) x = x/255 test_data = test_data/255 y = to_categorical(y, num_classes = 10) print(x.shape, y.shape )<split>
pd.set_option('display.max_colwidth', -1 )
Contradictory, My Dear Watson
10,997,559
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.10, shuffle = True) print(x_train.shape, y_train.shape, x_test.shape, y_test.shape )<choose_model_class>
err1 = misspelled_df.loc[eng.premise.str.contains('\?\?', case = False)] err2 = misspelled_df.loc[eng.premise.str.contains("\xad", case = False)] print(len(err1), len(err2))
Contradictory, My Dear Watson
10,997,559
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(3,3), activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(3,3), activation ='relu')) model.add(MaxPool2D(( 2,2))) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size =(3,3), activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3), activation ='relu')) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size =(3,3), activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3), activation ='relu')) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dense(128, activation = "relu")) model.add(Dropout(0.30)) model.add(Dense(10, activation = "softmax")) optimizer = RMSprop(lr = 0.01, rho = 0.9, epsilon = 1e-08, decay = 0.0) model.compile(optimizer = optimizer, loss = 'categorical_crossentropy', metrics = ['accuracy']) model.summary()<define_variables>
reduced_1 = pd.concat([misspelled_df, err1, err1] ).drop_duplicates(keep=False) reduced_2 = pd.concat([reduced_1, err2, err2] ).drop_duplicates(keep=False )
Contradictory, My Dear Watson
10,997,559
datagen = ImageDataGenerator( rotation_range = 10, zoom_range = 0.1, width_shift_range = 0.1, height_shift_range = 0.1,) train_batch = datagen.flow(x, y, batch_size = 64) val_batch = datagen.flow(x_test, y_test, batch_size = 64 )<choose_model_class>
correction = err1.premise.apply(lambda sentence: re.sub('\?\?', 'e', sentence)) correct_rhone = correction.apply(lambda sentence: re.sub('Rhene', 'Rhone', sentence)) correct_ataturk = correct_rhone.apply(lambda sentence: re.sub('Ataterk', 'Ataturk', sentence)) correct_madrileno = correct_ataturk.apply(lambda sentence: re.sub('Madrileeo', 'Madrileno', sentence)) correct_alacahoyuk = correct_madrileno.apply(lambda sentence: re.sub('Alacaheyek', 'Alacahoyuk', sentence)) correct_alcudia = correct_alacahoyuk.apply(lambda sentence: re.sub('Alcedia', 'Alcudia', sentence)) correction1 = correct_alcudia
Contradictory, My Dear Watson
10,997,559
learning_rate_reduction = ReduceLROnPlateau(monitor = 'val_loss', patience = 3, verbose = 1, factor = 0.1, min_lr = 0.00001 )<train_model>
for i in correction1.index: eng.loc[eng.index == i, 'premise'] = correction1.loc[correction1.index == i]
Contradictory, My Dear Watson
10,997,559
history = model.fit_generator(generator = train_batch, epochs = 100, steps_per_epoch = len(train_batch), validation_data = val_batch, validation_steps = len(val_batch), verbose = 1, callbacks = [learning_rate_reduction] )<save_to_csv>
err2 = eng.loc[eng.premise.str.contains("\?\xad", case = False)] err2
Contradictory, My Dear Watson
10,997,559
res = model.predict_classes(test_data, batch_size = 64) result = pd.Series(res, name = 'Label') submission = pd.concat([pd.Series(range(1, 28001), name = 'ImageId'), result], axis = 1) submission.to_csv('Submission.csv', index = False )<compute_test_metric>
correction2 = err2.premise.apply(lambda sentence: re.sub('\?\xad', '', sentence)) for i in correction2.index: eng.loc[eng.index == i, 'premise'] = correction2.loc[correction2.index == i]
Contradictory, My Dear Watson
10,997,559
res = model.evaluate(x, y, batch_size = 1024) print(res[1]*100 )<set_options>
eng['hypothesis_misspelled'] = eng.hypothesis.apply(lambda sentence: tuple(spell.unknown(re.split('[\?!\:;,.\-\% \b\s() "/$0-9]', re.sub("['\[\]]", '', sentence))))) misspelled_hyp_df = eng.loc[eng.hypothesis_misspelled !=() ] list(misspelled_hyp_df.hypothesis_misspelled )
Contradictory, My Dear Watson
10,997,559
%matplotlib inline<import_modules>
eng.loc[eng.hypothesis.str.contains('Ile de R', case = False)]
Contradictory, My Dear Watson
10,997,559
from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.preprocessing.image import ImageDataGenerator from keras.optimizers import RMSprop from keras.callbacks import ReduceLROnPlateau from sklearn.model_selection import train_test_split<load_from_csv>
eng.loc[eng.hypothesis == 'Ile de R is no longer part of the attraction.', 'hypothesis'] = 'Ile de Re is no longer part of the attraction.' eng.loc[eng.hypothesis == 'Ile de R.', 'hypothesis'] = 'Ile de Re.'
Contradictory, My Dear Watson
10,997,559
train = pd.read_csv("../input/train.csv") test = pd.read_csv("../input/test.csv" )<prepare_x_and_y>
square_brackets = eng.loc[eng.premise.str.contains('[\[\]]', case = False)] for i in square_brackets.index: eng.loc[eng.index == i, 'premise'] = re.sub('[\[\]]', '', str(eng.loc[eng.index == i].premise.values[0])) ampersands = eng.loc[eng.premise.str.contains('\&amp', case = False)] for i in ampersands.index: eng.loc[eng.index == i, 'premise'] = re.sub('\&amp', ' and ', str(eng.loc[eng.index == i].premise.values[0]))
Contradictory, My Dear Watson
10,997,559
X_train=train.drop('label',axis=1) y_train=train['label']<count_values>
eng = eng.drop(columns=['premise_misspelled', 'hypothesis_misspelled']) for i in eng.index: train.loc[train.index == i] = eng.loc[eng.index == i]
Contradictory, My Dear Watson
10,997,559
sns.countplot(x=y_train) y_train.value_counts()<count_missing_values>
eng_test = test.loc[test.language == 'English']
Contradictory, My Dear Watson
10,997,559
X_train.isnull().any().value_counts()<count_missing_values>
err1 = eng_test.loc[eng_test.premise.str.contains('\?\?', case = False)] correction = err1.premise.apply(lambda sentence: re.sub('\?\?', 'e', sentence)) correct_alacahoyuk = correction.apply(lambda sentence: re.sub('Alacaheyek', 'Alacahoyuk', sentence)) correct_madrileno = correct_alacahoyuk.apply(lambda sentence: re.sub('Madrileeo', 'Madrileno', sentence)) correct_alcudia = correct_madrileno.apply(lambda sentence: re.sub('Alcedia', 'Alcudia', sentence)) correct_ataturk = correct_alcudia.apply(lambda sentence: re.sub('Ataterk', 'Ataturk', sentence)) for i in correct_ataturk.index: eng_test.loc[eng_test.index == i, 'premise'] = correct_ataturk.loc[correct_ataturk.index == i]
Contradictory, My Dear Watson
10,997,559
test.isnull().any().value_counts()<normalization>
err2 = eng_test.loc[eng_test.premise.str.contains('\?\xad', case = False)] correction = err2.premise.apply(lambda sentence: re.sub('\?\xad', '', sentence)) for i in correction.index: eng_test.loc[eng_test.index == i, 'premise'] = correction.loc[correction.index == i]
Contradictory, My Dear Watson
10,997,559
X_train = X_train / 255.0 test = test / 255.0<categorify>
square_brackets = eng_test.loc[eng_test.premise.str.contains('[\[\]]', case = False)] for i in square_brackets.index: eng_test.loc[eng_test.index == i, 'premise'] = re.sub('[\[\]]', '', str(eng_test.loc[eng_test.index == i].premise.values[0]))
Contradictory, My Dear Watson
10,997,559
X_train = X_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1 )<categorify>
ampersands = eng_test.loc[eng_test.premise.str.contains('\&amp', case = False)] for i in ampersands.index: eng_test.loc[eng_test.index == i, 'premise'] = re.sub('\&amp', ' and ', str(eng_test.loc[eng_test.index == i].premise.values[0]))
Contradictory, My Dear Watson
10,997,559
y_train=to_categorical(y_train,num_classes=10 )<split>
for i in eng_test.index: test.loc[test.index == i] = eng_test.loc[eng_test.index == i]
Contradictory, My Dear Watson
10,997,559
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1, random_state=42 )<choose_model_class>
train.to_csv('train_cleaned.csv',index=False) test.to_csv('test_cleaned.csv',index=False )
Contradictory, My Dear Watson
10,997,559
model=Sequential() model.add(Conv2D(filters=32,kernel_size=(5,5),padding='Same',activation='relu',input_shape=(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax"))<choose_model_class>
!pip install git+https://github.com/ssut/py-googletrans.git
Contradictory, My Dear Watson
10,997,559
optimizer = RMSprop() model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"]) learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )<init_hyperparams>
from googletrans import Translator from dask import bag, diagnostics import numpy as np
Contradictory, My Dear Watson
10,997,559
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )<define_variables>
def translate(words, dest): dest_choices = ['zh-cn', 'ar', 'fr', 'sw', 'ur', 'vi', 'ru', 'hi', 'el', 'th', 'es', 'de', 'tr', 'bg' ] if not dest: dest = np.random.choice(dest_choices) translator = Translator() decoded = translator.translate(words, dest=dest ).text return decoded def trans_parallel(df, dest): premise_bag = bag.from_sequence(df.premise.tolist() ).map(translate, dest) hypo_bag = bag.from_sequence(df.hypothesis.tolist() ).map(translate, dest) with diagnostics.ProgressBar() : premises = premise_bag.compute() hypos = hypo_bag.compute() df[['premise', 'hypothesis']] = list(zip(premises, hypos)) return df eng_trans = train.loc[train.lang_abv == "en"].copy() \ .pipe(trans_parallel, dest=None) non_eng_trans = train.loc[train.lang_abv != "en"].copy() \ .pipe(trans_parallel, dest='en') eng_trans[['lang_abv', 'language']] = [['mx', 'Mixed']]*len(eng_trans) non_eng_trans[['lang_abv', 'language']] = [['en', 'English']]*len(non_eng_trans) train = train.append([eng_trans, non_eng_trans]) train = train.reset_index(drop=True) train.shape
Contradictory, My Dear Watson
10,997,559
batch_size=30 nb_train=len(X_train) nb_val=len(X_val )<train_model>
!pip install git+https://github.com/makcedward/nlpaug
Contradictory, My Dear Watson
10,997,559
history = model.fit_generator(datagen.flow(X_train,y_train, batch_size=batch_size), epochs = 20,verbose = 1, steps_per_epoch=nb_train // batch_size, validation_data =(X_val,y_val), validation_steps=nb_val//batch_size ,callbacks=[learning_rate_reduction]) model.save('Digit_recognizer.h5') model.save_weights('Weight_file_for_Digit_recognizer.h5' )<predict_on_test>
import nlpaug.augmenter.word as naw import nlpaug.flow as nafc from nlpaug.util import Action
Contradictory, My Dear Watson
10,997,559
prob_pred= model.predict(test) results = np.argmax(prob_pred,axis = 1 )<save_to_csv>
text = train.premise.values[0] model = 'distilbert-base-uncased' ins_aug = naw.ContextualWordEmbsAug( model_path=model, action="insert") print("Original:") print(text) print("Augmented Text:") for i in range(10): augmented_text = ins_aug.augment(text) print(augmented_text )
Contradictory, My Dear Watson
10,997,559
results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("my_submission.csv",index=False )<import_modules>
syn_aug = naw.SynonymAug(aug_src='wordnet') augmented_text = syn_aug.augment(text) print("Original:") print(text) print("Augmented Text:") for i in range(10): augmented_text = syn_aug.augment(text) print(augmented_text )
Contradictory, My Dear Watson
10,997,559
import numpy as np import pandas as pd from scipy.stats import skew from sklearn.preprocessing import LabelEncoder from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn.neural_network import MLPRegressor from sklearn.linear_model import Ridge from sklearn.linear_model import LinearRegression from mlxtend.regressor import StackingRegressor from sklearn.metrics import mean_squared_error from math import sqrt<load_from_csv>
def insert_augment(words, model): ins_aug = naw.ContextualWordEmbsAug( model_path=model, action="insert") augmented_text = ins_aug.augment(words) return augmented_text def ins_aug_parallel(df, model): premise_bag = bag.from_sequence(df.premise.tolist() ).map(insert_augment, model) hypo_bag = bag.from_sequence(df.hypothesis.tolist() ).map(insert_augment, model) with diagnostics.ProgressBar() : premises = premise_bag.compute() hypos = hypo_bag.compute() df[['premise', 'hypothesis']] = list(zip(premises, hypos)) return df eng_ins_aug = train.loc[train.lang_abv == "en"].copy() \ .pipe(ins_aug_parallel, model='distilbert-base-uncased') train = train.append([eng_ins_aug]) train.to_csv('train_cleaned_ins.csv',index=False)
Contradictory, My Dear Watson
10,997,559
df_train = pd.read_csv('../input/train.csv') df_test = pd.read_csv('../input/test.csv')<drop_column>
def synonym_augment(words): syn_aug = naw.SynonymAug( aug_src = 'wordnet') augmented_text = syn_aug.augment(words) return augmented_text def syn_aug_parallel(df): premise_bag = bag.from_sequence(df.premise.tolist() ).map(synonym_augment) hypo_bag = bag.from_sequence(df.hypothesis.tolist() ).map(synonym_augment) with diagnostics.ProgressBar() : premises = premise_bag.compute() hypos = hypo_bag.compute() df[['premise', 'hypothesis']] = list(zip(premises, hypos)) return df eng_syn_aug = train.loc[train.lang_abv == "en"].copy() \ .pipe(syn_aug_parallel) train = train.append([eng_syn_aug]) train.to_csv('train_cleaned_ins_syn.csv',index=False)
Contradictory, My Dear Watson
10,997,559
df_train = df_train.drop( df_train[(df_train['GrLivArea']>4000)&(df_train['SalePrice']<300000)].index )<concatenate>
plt.style.use('fivethirtyeight') warnings.filterwarnings('ignore')
Contradictory, My Dear Watson
10,997,559
all_data = pd.concat(( df_train.loc[:,'MSSubClass':'SaleCondition'], df_test.loc[:,'MSSubClass':'SaleCondition']))<drop_column>
try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) print('Running on TPU ', tpu.master()) except ValueError: strategy = tf.distribute.get_strategy() print("REPLICAS: ", strategy.num_replicas_in_sync )
Contradictory, My Dear Watson
10,997,559
all_data = all_data.drop(['Utilities'], axis=1 )<data_type_conversions>
MODEL = 'jplu/tf-xlm-roberta-large' EPOCHS = 8 MAX_LEN = 96 BATCH_SIZE= 16 * strategy.num_replicas_in_sync AUTO = tf.data.experimental.AUTOTUNE
Contradictory, My Dear Watson