Dataset schema (column, dtype, observed value range):

kernel_id     int64          24.2k – 23.3M
prompt        stringlengths  8 – 1.85M
completion    stringlengths  1 – 182k
comp_name     stringlengths  5 – 57
13,377,373
<groupby><EOS>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities1 = model1.predict(test_images_ds) probabilities2 = model2.predict(test_images_ds) probabilities = best_alpha * probabilities1 +(1 - best_alpha)* probabilities2 predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' )
Petals to the Metal - Flower Classification on TPU
13,357,670
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<groupby>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
13,357,670
all_data.iloc[891:,:].groupby('honorific' ).size()<define_variables>
%matplotlib inline print("Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
13,357,670
name_map={ "Capt": "Officer", "Col": "Officer", "Major": "Officer", "Jonkheer": "Royalty", "Don": "Royalty", "Sir" : "Royalty", "Dr": "Officer", "Rev": "Officer", "the": "Royalty", "Dona": "Royalty", "Mme": "Mrs", "Mlle": "Miss", "Ms": "Mrs", "Mr" : "Mr", "Mrs" : "Mrs", "Miss" : "Miss", "Master" : "Master", "Lady" : "Royalty"} all_data.honorific.replace(name_map,inplace=True )<groupby>
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started" )
Petals to the Metal - Flower Classification on TPU
13,357,670
all_data.groupby(['honorific','Sex'] ).Survived.agg(['mean','size'] )<groupby>
IMAGE_SIZE = [512, 512] BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') SEED = 2020
Petals to the Metal - Flower Classification on TPU
13,357,670
all_data.groupby(['honorific','Sex'] ).Age.mean()<categorify>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
13,357,670
def newage(cols): title=cols[0] Sex=cols[1] Age=cols[2] if pd.isnull(Age): if title=='Master' and Sex=="male": return 5.48 elif title=='Miss' and Sex=='female': return 21.80 elif title=='Mr' and Sex=='male': return 32.25 elif title=='Mrs' and Sex=='female': return 36.87 elif title=='Officer' and Sex=='female': return 49 elif title=='Officer' and Sex=='male': return 46.14 elif title=='Royalty' and Sex=='female': return 40 else: return 42.33 else: return Age<feature_engineering>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) return Model(inputs=base_model.input, outputs=predictions) with strategy.scope() : model1 = get_model(MobileNet) model1.load_weights("/kaggle/input/rashirdan24-11-start-with-pre-train-mobilenet/my_ef_net_b7.h5" )
Petals to the Metal - Flower Classification on TPU
13,357,670
all_data.Age = all_data[['honorific','Sex','Age']].apply(newage,axis=1 )<count_values>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) return Model(inputs=base_model.input, outputs=predictions) with strategy.scope() : model2 = get_model(EfficientNetB1) model2.load_weights("/kaggle/input/rashirdan-2-start-with-pre-train-efficientnetb1/my_ef_net_b7.h5" )
Petals to the Metal - Flower Classification on TPU
13,357,670
all_data.family_name.value_counts().head(10 )<concatenate>
val_dataset = get_validation_dataset() images_ds = val_dataset.map(lambda image, label: image) labels_ds = val_dataset.map(lambda image, label: label ).unbatch() val_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy() m1 = model1.predict(images_ds) m2 = model2.predict(images_ds) alphas = np.linspace(0,1,100) scores = [] for alpha in alphas: val_probabilities = alpha*m1+(1-alpha)*m2 val_predictions = np.argmax(val_probabilities, axis=-1) scores.append(f1_score(val_labels, val_predictions, labels=range(104), average='macro')) best_alpha = alphas[np.argmax(scores)] print('Best alpha: ' + str(best_alpha))
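For reference, a self-contained sketch of the same weight search on synthetic inputs — the Dirichlet draws below are hypothetical stand-ins for the model1/model2 outputs, and the point is that the winning alpha is read back out of the alphas grid rather than reconstructed as argmax/100:

import numpy as np
from sklearn.metrics import f1_score

rng = np.random.default_rng(0)
labels = rng.integers(0, 104, size=256)        # fake ground-truth classes
p1 = rng.dirichlet(np.ones(104), size=256)     # stand-in for model1.predict(...)
p2 = rng.dirichlet(np.ones(104), size=256)     # stand-in for model2.predict(...)
alphas = np.linspace(0, 1, 100)
scores = [f1_score(labels, np.argmax(a * p1 + (1 - a) * p2, axis=-1), labels=range(104), average='macro') for a in alphas]
print('Best alpha:', alphas[np.argmax(scores)])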
Petals to the Metal - Flower Classification on TPU
13,357,670
<categorify><EOS>
test_ds = get_test_dataset(ordered=True) best_alpha = 0.48 print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities1 = model1.predict(test_images_ds) probabilities2 = model2.predict(test_images_ds) probabilities = best_alpha * probabilities1 +(1 - best_alpha)* probabilities2 predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' )
Petals to the Metal - Flower Classification on TPU
12,720,567
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<import_modules>
!pip install efficientnet --quiet
Petals to the Metal - Flower Classification on TPU
12,720,567
from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.model_selection import KFold from lightgbm import LGBMClassifier import lightgbm as lgb import optuna<categorify>
try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Running on TPU ', tpu.master()) except ValueError: tpu = None if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) else: strategy = tf.distribute.get_strategy() print("REPLICAS: ", strategy.num_replicas_in_sync )
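In newer TensorFlow releases (2.4 and later) the experimental names used above have stable counterparts; a minimal equivalent, assuming the same Kaggle TPU environment:

import tensorflow as tf

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()  # no args needed on Kaggle
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.TPUStrategy(tpu)  # stable name for the experimental TPUStrategy
except ValueError:
    strategy = tf.distribute.get_strategy()    # default strategy on CPU/GPU
print('REPLICAS:', strategy.num_replicas_in_sync)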
Petals to the Metal - Flower Classification on TPU
12,720,567
all_data_one_hot = pd.get_dummies(all_data[['Survived','Pclass','Sex','Age','Fare','Embarked','family_size','Cabin_class', 'Ticket_first_letter','family_survival']], columns=['Ticket_first_letter','Cabin_class','Sex','Pclass','Embarked'] )<choose_model_class>
GCS_DS_PATH = KaggleDatasets().get_gcs_path('tpu-getting-started') print(GCS_DS_PATH )
Petals to the Metal - Flower Classification on TPU
12,720,567
def objective(trial): params = { 'objective': 'binary', 'metric': 'binary_logloss', 'verbosity': -1, 'boosting_type': 'gbdt', 'learning_rate':trial.suggest_loguniform('learning_rate',0.01,0.05), 'num_leaves': trial.suggest_int('num_leaves', 2, 256), 'min_child_samples': trial.suggest_int('min_child_samples', 10, 60), 'max_depth':trial.suggest_int('max_depth',3,8) } kf = KFold(n_splits=5,shuffle=True,random_state=1) accuracy_scores = [] for train_index,valid_index in kf.split(train): X_train, X_valid = train.drop(columns='Survived' ).iloc[train_index],train.drop(columns='Survived' ).iloc[valid_index] y_train, y_valid = train.Survived.iloc[train_index],train.Survived.iloc[valid_index] lgb_train = lgb.Dataset(X_train,y_train) gbm = lgb.train(params,lgb_train) y_pred = np.round(gbm.predict(X_valid)).astype(int) accuracy_scores.append(accuracy_score(y_valid,y_pred)) return np.mean(accuracy_scores) study = optuna.create_study(direction='maximize',sampler=optuna.samplers.RandomSampler(seed=1)) optuna.logging.disable_default_handler() study.optimize(objective, n_trials=100 )<find_best_params>
IMAGE_SIZE = [224, 224] GCS_PATH = GCS_DS_PATH + '/tfrecords-jpeg-224x224' AUTO = tf.data.experimental.AUTOTUNE TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose']
Petals to the Metal - Flower Classification on TPU
12,720,567
print('Number of finished trials: {}'.format(len(study.trials))) print('Best trial:') trial = study.best_trial print(' Value: {}'.format(trial.value)) print(' Params: ') for key, value in trial.params.items() : print(' {}: {}'.format(key, value))<init_hyperparams>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset
Petals to the Metal - Flower Classification on TPU
12,720,567
params = { 'objective': 'binary', 'metric': 'binary_logloss', 'verbosity': -1, 'boosting_type': 'gbdt' } params.update(trial.params )<train_model>
def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift): rotation = math.pi * rotation / 180. shear = math.pi * shear / 180. c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = tf.reshape(tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3]) c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = tf.reshape(tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3]) zoom_matrix = tf.reshape(tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3]) shift_matrix = tf.reshape(tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3]) return K.dot(K.dot(rotation_matrix, shear_matrix), K.dot(zoom_matrix, shift_matrix)) def transform(image,label): DIM = IMAGE_SIZE[0] XDIM = DIM%2 rot = 15.* tf.random.normal([1],dtype='float32') shr = 5.* tf.random.normal([1],dtype='float32') h_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. w_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. h_shift = 16.* tf.random.normal([1],dtype='float32') w_shift = 16.* tf.random.normal([1],dtype='float32') m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(m,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image,tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]),label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled=True) dataset = dataset.map(transform, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset(ordered=False): dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset NUM_TRAINING_IMAGES = 12753 NUM_VALIDATION_IMAGES = 3712 NUM_TEST_IMAGES = 7382
Petals to the Metal - Flower Classification on TPU
12,720,567
kf = KFold(n_splits=5,shuffle=True,random_state=6) accuracy_scores = [] for train_index,valid_index in kf.split(train): X_train, X_valid = train.drop(columns='Survived' ).iloc[train_index],train.drop(columns='Survived' ).iloc[valid_index] y_train, y_valid = train.Survived.iloc[train_index],train.Survived.iloc[valid_index] lgb_train = lgb.Dataset(X_train,y_train) gbm = lgb.train(params,lgb_train,num_boost_round=100) y_pred = np.round(gbm.predict(X_valid)).astype(int) accuracy_scores.append(accuracy_score(y_valid,y_pred))<create_dataframe>
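The fold loop above fills accuracy_scores but never aggregates it; a minimal follow-up, assuming the loop has just run:

print('CV accuracy: {:.4f} +/- {:.4f}'.format(np.mean(accuracy_scores), np.std(accuracy_scores)))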
BATCH_SIZE = 32 * strategy.num_replicas_in_sync ds_train = get_training_dataset() ds_valid = get_validation_dataset() ds_test = get_test_dataset() print("Training:", ds_train) print("Validation:", ds_valid) print("Test:", ds_test )
Petals to the Metal - Flower Classification on TPU
12,720,567
all_train_data = lgb.Dataset(train.drop(columns=['Survived']),label=train.Survived )<train_model>
with strategy.scope() : pretrained_model1 = efficientnet.EfficientNetB7( weights='noisy-student', include_top=False , input_shape=[*IMAGE_SIZE, 3]) pretrained_model1.trainable = True model1 = tf.keras.Sequential([ pretrained_model1, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation='softmax') ]) pretrained_model2 = tf.keras.applications.Xception( weights='imagenet', include_top=False, input_shape=[*IMAGE_SIZE,3]) pretrained_model2.trainable = True model2 = tf.keras.Sequential([ pretrained_model2, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation='softmax') ]) pretrained_model3 = tf.keras.applications.InceptionResNetV2( weights='imagenet', include_top=False, input_shape=[*IMAGE_SIZE,3]) pretrained_model3.trainable = True model3 = tf.keras.Sequential([ pretrained_model3, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation='softmax') ]) pretrained_model4 = tf.keras.applications.DenseNet201( weights='imagenet', include_top=False, input_shape=[*IMAGE_SIZE,3]) pretrained_model4.trainable = True model4 = tf.keras.Sequential([ pretrained_model4, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation='softmax') ])
Petals to the Metal - Flower Classification on TPU
12,720,567
gbm_final = lgb.train(train_set=all_train_data, params = params, num_boost_round=100 )<predict_on_test>
model1.compile( optimizer='adam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'], ) model2.compile( optimizer='adam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'], ) model3.compile( optimizer='adam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'], ) model4.compile( optimizer='adam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'], )
Petals to the Metal - Flower Classification on TPU
12,720,567
y_pred = np.round(gbm_final.predict(test.drop(columns=['Survived'])) ).astype(int )<load_from_csv>
def exponential_lr(epoch, start_lr = 0.00001, min_lr = 0.00001, max_lr = 0.00005, rampup_epochs = 5, sustain_epochs = 0, exp_decay = 0.8): def lr(epoch, start_lr, min_lr, max_lr, rampup_epochs, sustain_epochs, exp_decay): if epoch < rampup_epochs: lr =(( max_lr - start_lr)/ rampup_epochs * epoch + start_lr) elif epoch < rampup_epochs + sustain_epochs: lr = max_lr else: lr =(( max_lr - min_lr)* exp_decay**(epoch - rampup_epochs - sustain_epochs)+ min_lr) return lr return lr(epoch,start_lr,min_lr,max_lr,rampup_epochs,sustain_epochs,exp_decay) lr_callback = tf.keras.callbacks.LearningRateScheduler(exponential_lr, verbose=True )
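Before training, the schedule's shape can be sanity-checked by evaluating it over the planned epochs — a minimal sketch, assuming matplotlib is available as in the other kernels here:

import matplotlib.pyplot as plt

epochs_range = list(range(15))
plt.plot(epochs_range, [exponential_lr(e) for e in epochs_range])  # 5-epoch ramp-up, then 0.8 decay
plt.xlabel('epoch')
plt.ylabel('learning rate')
plt.show()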
Petals to the Metal - Flower Classification on TPU
12,720,567
submission_df = pd.read_csv('../input/titanic/gender_submission.csv' )<prepare_output>
EPOCHS = 15 STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE history1 = model1.fit( ds_train, validation_data=ds_valid, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, callbacks=[lr_callback], )
Petals to the Metal - Flower Classification on TPU
12,720,567
submission_df['Survived'] = y_pred<save_to_csv>
history2 = model2.fit( ds_train, validation_data=ds_valid, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, callbacks=[lr_callback], )
Petals to the Metal - Flower Classification on TPU
12,720,567
JST = timezone(timedelta(hours=+9), 'JST') ts = datetime.now(JST ).strftime('%y%m%d%H%M') submission_df.to_csv(( ts+'lgb.csv'),index=False )<train_model>
history3 = model3.fit( ds_train, validation_data=ds_valid, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, callbacks=[lr_callback], )
Petals to the Metal - Flower Classification on TPU
12,720,567
kf = KFold(n_splits=5,shuffle=True,random_state=6) mean_accuracy = {} thresh_holds = np.arange(0.2,0.6,0.01) for thresh_hold in thresh_holds: accuracy_scores = [] for train_index,valid_index in kf.split(train): X_train, X_valid = train.drop(columns='Survived' ).iloc[train_index],train.drop(columns='Survived' ).iloc[valid_index] y_train, y_valid = train.Survived.iloc[train_index],train.Survived.iloc[valid_index] lgb_train = lgb.Dataset(X_train,y_train) gbm = lgb.train(params,lgb_train,num_boost_round=100) y_pred = np.where(gbm.predict(X_valid)<thresh_hold,0,1) accuracy_scores.append(accuracy_score(y_valid,y_pred)) mean_accuracy[thresh_hold] = np.mean(accuracy_scores )<install_modules>
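The sweep stores per-threshold CV accuracy in mean_accuracy but stops there; selecting the winner is one line, assuming the dictionary was populated by the loop above:

best_threshold = max(mean_accuracy, key=mean_accuracy.get)
print('Best threshold: {:.2f} (CV accuracy {:.4f})'.format(best_threshold, mean_accuracy[best_threshold]))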
history4 = model4.fit( ds_train, validation_data=ds_valid, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, callbacks=[lr_callback], )
Petals to the Metal - Flower Classification on TPU
12,720,567
<load_from_url>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities1 = model1.predict(test_images_ds) probabilities2 = model2.predict(test_images_ds) probabilities3 = model3.predict(test_images_ds) probabilities4 = model4.predict(test_images_ds) probabilities = probabilities1+probabilities2+probabilities3+probabilities4 predictions = np.argmax(probabilities, axis=-1) print(predictions )
Petals to the Metal - Flower Classification on TPU
12,720,567
<load_from_csv><EOS>
print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt( 'submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='', ) !head submission.csv
Petals to the Metal - Flower Classification on TPU
12,687,093
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<count_values>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
12,687,093
hasCabin =(train["Cabin"].notnull().astype('int')) sns.set_style('whitegrid') sns.countplot(x='Survived', hue=hasCabin, data=train) print("Percentage by has Cabin: ", train["Survived"][hasCabin == 1].value_counts(normalize=True)[1] * 100) print("Percentage of hasn't Cabin: ", train["Survived"][hasCabin == 0].value_counts(normalize=True)[1] * 100 )<data_type_conversions>
%matplotlib inline print("Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
12,687,093
train["Age"].fillna(train["Age"].median() , inplace = True) test["Age"].fillna(test["Age"].median() , inplace = True) train["Fare"].fillna(train["Fare"].median() , inplace = True) test["Fare"].fillna(test["Fare"].median() , inplace = True) train["Embarked"].fillna(( train["Embarked"].append(test["Embarked"])).mode() , inplace = True) test["Embarked"].fillna(( train["Embarked"].append(test["Embarked"])).mode() , inplace = True) drop_column = ["PassengerId","Cabin", "Ticket"] train.drop(drop_column, axis=1, inplace = True) print(train.isnull().sum()) print("-"*10) print(test.isnull().sum() )<feature_engineering>
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started" )
Petals to the Metal - Flower Classification on TPU
12,687,093
train["FamilySize"] = train["SibSp"] + train["Parch"] + 1 test["FamilySize"] = test["SibSp"] + test["Parch"] + 1 train["IsAlone"] = 1 test["IsAlone"] = 1 train["IsAlone"].loc[train["FamilySize"] > 1] = 0 test["IsAlone"].loc[test["FamilySize"] > 1] = 0 train["Title"] = train["Name"].str.split(",", expand = True)[1].str.split(".", expand = True)[0] test["Title"] = test["Name"].str.split(",", expand = True)[1].str.split(".", expand = True)[0] train["FareBin"] = pd.qcut(train["Fare"], 4) test["FareBin"] = pd.qcut(test["Fare"], 4) train["AgeBin"] = pd.cut(train["Age"].astype(int), 5) test["AgeBin"] = pd.cut(test["Age"].astype(int), 5 )<feature_engineering>
IMAGE_SIZE = [512, 512] BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') SEED = 2020
Petals to the Metal - Flower Classification on TPU
12,687,093
stat_min = 10 title_names =(train["Title"].value_counts() < stat_min) title_names_t =(test["Title"].value_counts() < stat_min) train["Title"] = train["Title"].apply(lambda x: "Misc" if title_names.loc[x] == True else x) test["Title"] = test["Title"].apply(lambda x: "Misc" if title_names_t.loc[x] == True else x) print(train["Title"].value_counts()) print("-"*10) train.info() test.info() train.sample(10 )<categorify>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
12,687,093
label = preprocessing.LabelEncoder() train["Sex_Code"] = label.fit_transform(train["Sex"]) test["Sex_Code"] = label.fit_transform(test["Sex"]) train["Embarked_Code"] = label.fit_transform(train["Embarked"].astype(str)) test["Embarked_Code"] = label.fit_transform(test["Embarked"].astype(str)) train["Title_Code"] = label.fit_transform(train["Title"]) test["Title_Code"] = label.fit_transform(test["Title"]) train["AgeBin_Code"] = label.fit_transform(train["AgeBin"]) test["AgeBin_Code"] = label.fit_transform(test["AgeBin"]) train["FareBin_Code"] = label.fit_transform(train["FareBin"]) test["FareBin_Code"] = label.fit_transform(test["FareBin"] )<define_variables>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) return model with strategy.scope() : model1 = get_model(DenseNet201) model1.load_weights("/kaggle/input/start-with-densenet201/my_densenet_201.h5" )
Petals to the Metal - Flower Classification on TPU
12,687,093
Target = ["Survived"] train_x = ["Sex", "Pclass", "Embarked", "Title", "SibSp", "Parch", "Age", "Fare", "FamilySize", "IsAlone"] train_x_calc = ["Sex_Code", "Pclass", "Embarked_Code", "Title_Code", "SibSp", "Parch", "Age", "Fare"] train_x_bin = ["Sex_Code", "Pclass", "Embarked_Code", "Title_Code", "FamilySize", "AgeBin_Code", "FareBin_Code"]<categorify>
def get_model(use_model): base_model = use_model(weights='noisy-student', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) return model with strategy.scope() : model2 = get_model(EfficientNetB7) model2.load_weights("/kaggle/input/start-with-pre-train/my_ef_net_b7.h5" )
Petals to the Metal - Flower Classification on TPU
12,687,093
train_dummy = pd.get_dummies(train[train_x]) train_x_dummy = train_dummy.columns.tolist() train_xy_dummy = Target + train_x_dummy print("Dummy X Y: ", train_xy_dummy, ' ') train_dummy.head()<categorify>
val_dataset = get_validation_dataset() images_ds = val_dataset.map(lambda image, label: image) labels_ds = val_dataset.map(lambda image, label: label ).unbatch() val_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy() m1 = model1.predict(images_ds) m2 = model2.predict(images_ds) alphas = np.linspace(0,1,100) scores = [] for alpha in alphas: val_probabilities = alpha*m1+(1-alpha)*m2 val_predictions = np.argmax(val_probabilities, axis=-1) scores.append(f1_score(val_labels, val_predictions, labels=range(104), average='macro')) best_alpha = alphas[np.argmax(scores)] print('Best alpha: ' + str(best_alpha))
Petals to the Metal - Flower Classification on TPU
12,687,093
test_dummy = pd.get_dummies(test[train_x]) test_x_dummy = test_dummy.columns.tolist() test_xy_dummy = Target + test_x_dummy print("Dummy X Y: ", test_xy_dummy, ' ') test_dummy.head()<split>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities1 = model1.predict(test_images_ds) probabilities2 = model2.predict(test_images_ds) probabilities = best_alpha * probabilities1 +(1 - best_alpha)* probabilities2 predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' )
Petals to the Metal - Flower Classification on TPU
10,264,576
x_train, x_test, y_train, y_test = train_test_split(train[train_x_calc], train[Target], test_size = 0.22, random_state = 0) x_train_bin, x_test_bin, y_train_bin, y_test_bin = train_test_split(train[train_x_bin], train[Target], test_size = 0.22, random_state = 0) x_train_dummy, x_test_dummy, y_train_dummy, y_test_dummy = train_test_split(train_dummy[train_x_dummy], train[Target], test_size = 0.22, random_state = 0) print("Original Train Shape: {}".format(train.shape)) print("Train Shape: {}".format(np.asarray(x_train ).shape)) print("Test Shape: {}".format(np.asarray(x_test ).shape))<normalization>
import os import sys import numpy as np import torch from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix import pandas as pd from kaggle_datasets import KaggleDatasets import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from PIL import Image, ImageFile import matplotlib.pyplot as plt
Petals to the Metal - Flower Classification on TPU
10,264,576
<train_model>
if 'google.colab' in sys.modules: %tensorflow_version 2.x print("Tensorflow version " + tf.__version__) AUTO = tf.data.experimental.AUTOTUNE
Petals to the Metal - Flower Classification on TPU
10,264,576
gaussian = GaussianNB() gaussian.fit(x_train_dummy, y_train_dummy) y_hat_train = gaussian.predict(x_train_dummy) y_hat_test = gaussian.predict(x_test_dummy) acc_gaussian = gaussian.score(x_test_dummy, y_test_dummy) print('Gaussian Naive Bayes train accuracy score: ', gaussian.score(x_train_dummy, y_train_dummy)) print('Gaussian Naive Bayes test accuracy score: ', acc_gaussian) print('Gaussian Naive Bayes train accuracy score: ', round(accuracy_score(y_hat_train, y_train_dummy)* 100, 2)) print('Gaussian Naive Bayes test accuracy score: ', round(accuracy_score(y_hat_test, y_test_dummy)* 100, 2))<train_model>
try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Running on TPU ', tpu.cluster_spec().as_dict() ['worker']) except ValueError: tpu = None gpus = tf.config.experimental.list_logical_devices("GPU") if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) elif len(gpus)> 1: strategy = tf.distribute.MirroredStrategy(gpus) else: strategy = tf.distribute.get_strategy() print("REPLICAS: ", strategy.num_replicas_in_sync )
Petals to the Metal - Flower Classification on TPU
10,264,576
logreg = LogisticRegression() logreg.fit(x_train_dummy, y_train_dummy) y_hat_train = logreg.predict(x_train_dummy) y_hat_test = logreg.predict(x_test_dummy) acc_logreg = logreg.score(x_test_dummy, y_test_dummy) print('Logistic Regression train accuracy score: ', logreg.score(x_train_dummy, y_train_dummy)) print('Logistic Regression test accuracy score: ', acc_logreg )<compute_train_metric>
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
Petals to the Metal - Flower Classification on TPU
10,264,576
svc = SVC() svc.fit(x_train_dummy, y_train_dummy) y_hat_train = svc.predict(x_train_dummy) y_hat_test = svc.predict(x_test_dummy) cv_score = cross_val_score(svc, x_train_dummy, y_train_dummy, cv=10) plt.plot(cv_score) acc_svc = svc.score(x_test_dummy, y_test_dummy) print('Support Vector Machines train accuracy score: ', svc.score(x_train_dummy, y_train_dummy)) print('Support Vector Machines test accuracy score: ', acc_svc) print('Support Vector Machines cross validation train accuracy score: ', cv_score.mean() )<train_model>
EPOCHS = 50 IMAGE_SIZE = [512, 512] FLOWERS_DATASETS = { 512: 'gs://kds-f0a1db95190f5af9d47fb82f7af36915a50096ee81e54178f8c49016/tfrecords-jpeg-512x512/*/*.tfrec', } assert IMAGE_SIZE[0] == IMAGE_SIZE[1], "only square images are supported" assert IMAGE_SIZE[0] in FLOWERS_DATASETS, "this image size is not supported" MIXED_PRECISION = False if MIXED_PRECISION: if tpu: policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') else: policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16') tf.config.optimizer.set_jit(True) tf.keras.mixed_precision.experimental.set_policy(policy) print('Mixed precision enabled') if strategy.num_replicas_in_sync == 8: BATCH_SIZE = 16 * strategy.num_replicas_in_sync VALIDATION_BATCH_SIZE = 16 * strategy.num_replicas_in_sync start_lr = 0.00001 min_lr = 0.00001 max_lr = 0.00005 * strategy.num_replicas_in_sync rampup_epochs = 5 sustain_epochs = 0 exp_decay =.8 elif strategy.num_replicas_in_sync == 1: BATCH_SIZE = 16 VALIDATION_BATCH_SIZE = 16 start_lr = 0.00001 min_lr = 0.00001 max_lr = 0.0002 rampup_epochs = 5 sustain_epochs = 0 exp_decay =.8 else: BATCH_SIZE = 8 * strategy.num_replicas_in_sync VALIDATION_BATCH_SIZE = 8 * strategy.num_replicas_in_sync start_lr = 0.00001 min_lr = 0.00001 max_lr = 0.00002 * strategy.num_replicas_in_sync rampup_epochs = 7 sustain_epochs = 0 exp_decay =.8 def lrfn(epoch): def lr(epoch, start_lr, min_lr, max_lr, rampup_epochs, sustain_epochs, exp_decay): if epoch < rampup_epochs: lr =(max_lr - start_lr)/rampup_epochs * epoch + start_lr elif epoch < rampup_epochs + sustain_epochs: lr = max_lr else: lr =(max_lr - min_lr)* exp_decay**(epoch-rampup_epochs-sustain_epochs)+ min_lr return lr return lr(epoch, start_lr, min_lr, max_lr, rampup_epochs, sustain_epochs, exp_decay) lr_callback = tf.keras.callbacks.LearningRateScheduler(lambda epoch: lrfn(epoch), verbose=True) rng = [i for i in range(EPOCHS)] y = [lrfn(x)for x in rng] plt.plot(rng, [lrfn(x)for x in rng]) print(y[0], y[-1] )
Petals to the Metal - Flower Classification on TPU
10,264,576
linear_svc = LinearSVC() linear_svc.fit(x_train_dummy, y_train_dummy) y_hat_train = linear_svc.predict(x_train_dummy) y_hat_test = linear_svc.predict(x_test_dummy) acc_linear_svc = linear_svc.score(x_test_dummy, y_test_dummy) print('Linear SVC train accuracy score: ', linear_svc.score(x_train_dummy, y_train_dummy)) print('Linear SVC test accuracy score: ', acc_linear_svc )<train_model>
def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n )
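The regex relies on Kaggle's shard naming, where the per-shard item count sits between the final '-' and the extension; a usage example with hypothetical filenames (re and numpy are assumed imported, as the function itself requires):

print(count_data_items(['flowers00-230.tfrec', 'flowers01-230.tfrec']))  # -> 460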
Petals to the Metal - Flower Classification on TPU
10,264,576
perceptron = Perceptron() perceptron.fit(x_train_dummy, y_train_dummy) y_hat_train = perceptron.predict(x_train_dummy) y_hat_test = perceptron.predict(x_test_dummy) acc_perceptron = perceptron.score(x_test_dummy, y_test_dummy) print('Perceptron train accuracy score: ', perceptron.score(x_train_dummy, y_train_dummy)) print('Perceptron test accuracy score: ', acc_perceptron )<train_model>
TRAIN_FILENAMES = tf.io.gfile.glob(GCS_DS_PATH +'/tfrecords-jpeg-512x512/train/*.tfrec') TRAIN_STEPS = count_data_items(TRAIN_FILENAMES)// BATCH_SIZE NUM_TEST_IMAGES = 7382
Petals to the Metal - Flower Classification on TPU
10,264,576
decisiontree = DecisionTreeClassifier() decisiontree.fit(x_train_dummy, y_train_dummy) y_hat_train = decisiontree.predict(x_train_dummy) y_hat_test = decisiontree.predict(x_test_dummy) acc_decisiontree = decisiontree.score(x_test_dummy, y_test_dummy) print('Decision Tree train accuracy score: ', decisiontree.score(x_train_dummy, y_train_dummy)) print('Decision Tree test accuracy score: ', acc_decisiontree )<train_model>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord) return dataset
Petals to the Metal - Flower Classification on TPU
10,264,576
randomforest = RandomForestClassifier() randomforest.fit(x_train_dummy, y_train_dummy) y_hat_train = randomforest.predict(x_train_dummy) y_hat_test = randomforest.predict(x_test_dummy) acc_randomforest = randomforest.score(x_test_dummy, y_test_dummy) print('Random Forest train accuracy score: ', randomforest.score(x_train_dummy, y_train_dummy)) print('Random Forest test accuracy score: ', acc_randomforest )<train_model>
def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift): rotation = math.pi * rotation / 180. shear = math.pi * shear / 180. c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = tf.reshape(tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3]) c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = tf.reshape(tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3]) zoom_matrix = tf.reshape(tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3]) shift_matrix = tf.reshape(tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3]) return K.dot(K.dot(rotation_matrix, shear_matrix), K.dot(zoom_matrix, shift_matrix))
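get_mat composes rotation, shear, zoom and shift as homogeneous 3x3 matrices, so neutral parameters should collapse to the identity — a cheap sanity check, assuming math and the Keras backend K are imported as the function requires:

import tensorflow as tf

zero1 = tf.constant([0.], dtype='float32')
one1 = tf.constant([1.], dtype='float32')
m = get_mat(zero1, zero1, one1, one1, zero1, zero1)  # no rotation/shear/zoom/shift
print(m.numpy().round(3))  # expect the 3x3 identity matrix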
Petals to the Metal - Flower Classification on TPU
10,264,576
knn = KNeighborsClassifier() knn.fit(x_train_dummy, y_train_dummy) y_hat_train = knn.predict(x_train_dummy) y_hat_test = knn.predict(x_test_dummy) acc_knn = knn.score(x_test_dummy, y_test_dummy) print('KNN train accuracy score: ', knn.score(x_train_dummy, y_train_dummy)) print('KNN test accuracy score: ', acc_knn )<train_model>
def data_transform(image,label): DIM = IMAGE_SIZE[0] XDIM = DIM%2 rot = 15.* tf.random.normal([1],dtype='float32') shr = 5.* tf.random.normal([1],dtype='float32') h_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. w_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. h_shift = 16.* tf.random.normal([1],dtype='float32') w_shift = 16.* tf.random.normal([1],dtype='float32') m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(m,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image,tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]),label
Petals to the Metal - Flower Classification on TPU
10,264,576
sgd = SGDClassifier() sgd.fit(x_train_dummy, y_train_dummy) y_hat_train = sgd.predict(x_train_dummy) y_hat_test = sgd.predict(x_test_dummy) acc_sgd = sgd.score(x_test_dummy, y_test_dummy) print('SGD train accuracy score: ', sgd.score(x_train_dummy, y_train_dummy)) print('SGD test accuracy score: ', acc_sgd )<train_model>
def random_blockout(img, sl=0.1, sh=0.2, rl=0.4): p=random.random() if p>=0.25: w, h, c = IMAGE_SIZE[0], IMAGE_SIZE[1], 3 origin_area = tf.cast(h*w, tf.float32) e_size_l = tf.cast(tf.round(tf.sqrt(origin_area * sl * rl)) , tf.int32) e_size_h = tf.cast(tf.round(tf.sqrt(origin_area * sh / rl)) , tf.int32) e_height_h = tf.minimum(e_size_h, h) e_width_h = tf.minimum(e_size_h, w) erase_height = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_height_h, dtype=tf.int32) erase_width = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_width_h, dtype=tf.int32) erase_area = tf.zeros(shape=[erase_height, erase_width, c]) erase_area = tf.cast(erase_area, tf.uint8) pad_h = h - erase_height pad_top = tf.random.uniform(shape=[], minval=0, maxval=pad_h, dtype=tf.int32) pad_bottom = pad_h - pad_top pad_w = w - erase_width pad_left = tf.random.uniform(shape=[], minval=0, maxval=pad_w, dtype=tf.int32) pad_right = pad_w - pad_left erase_mask = tf.pad([erase_area], [[0,0],[pad_top, pad_bottom], [pad_left, pad_right], [0,0]], constant_values=1) erase_mask = tf.squeeze(erase_mask, axis=0) erased_img = tf.multiply(tf.cast(img,tf.float32), tf.cast(erase_mask, tf.float32)) return tf.cast(erased_img, img.dtype) else: return tf.cast(img, img.dtype )
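A hedged usage sketch for random_blockout on a constant image (the random module is assumed imported, as the function requires); because the erased patch is zero-filled, the pixel mean shows whether a cutout was applied:

import tensorflow as tf

dummy = tf.ones([*IMAGE_SIZE, 3], dtype=tf.float32)  # IMAGE_SIZE as defined above
erased = random_blockout(dummy)
print(float(tf.reduce_mean(erased)))  # < 1.0 in the ~75% of calls that apply a cutout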
Petals to the Metal - Flower Classification on TPU
10,264,576
gbk = GradientBoostingClassifier() gbk.fit(x_train_dummy, y_train_dummy) y_hat_train = gbk.predict(x_train_dummy) y_hat_test = gbk.predict(x_test_dummy) acc_gbk = gbk.score(x_test_dummy, y_test_dummy) print('Gradient Boosting train accuracy score: ', gbk.score(x_train_dummy, y_train_dummy)) print('Gradient Boosting test accuracy score: ', acc_gbk )<train_model>
def get_training_dataset() : dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + '/tfrecords-jpeg-512x512/train/*.tfrec'), labeled=True) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.map(data_transform, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + '/tfrecords-jpeg-512x512/val/*.tfrec'), labeled=True, ordered=False) dataset = dataset.batch(VALIDATION_BATCH_SIZE) dataset = dataset.prefetch(AUTO) opt = tf.data.Options() opt.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA dataset = dataset.with_options(opt) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + '/tfrecords-jpeg-512x512/test/*.tfrec'), labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) return dataset training_dataset = get_training_dataset() validation_dataset = get_validation_dataset()
Petals to the Metal - Flower Classification on TPU
10,264,576
ada = AdaBoostClassifier() ada.fit(x_train_dummy, y_train_dummy) y_hat_train = ada.predict(x_train_dummy) y_hat_test = ada.predict(x_test_dummy) acc_ada = ada.score(x_test_dummy, y_test_dummy) print('Ada Boost train accuracy score: ', ada.score(x_train_dummy, y_train_dummy)) print('Ada Boost test accuracy score: ', acc_ada )<create_dataframe>
def dataset_to_numpy_util(dataset, N): dataset = dataset.unbatch().batch(N) for images, labels in dataset: numpy_images = images.numpy() numpy_labels = labels.numpy() break; return numpy_images, numpy_labels def title_from_label_and_target(label, correct_label): label = np.argmax(label, axis=-1) correct_label = np.argmax(correct_label, axis=-1) correct =(label == correct_label) return "{} [{}{}{}]".format(label, str(correct), ', should be ' if not correct else '', correct_label if not correct else ''), correct def display_one_flower(image, title, subplot, red=False): plt.subplot(subplot) plt.axis('off') plt.imshow(image) plt.title(title, fontsize=16, color='red' if red else 'black') return subplot+1 def display_9_images_from_dataset(dataset): subplot=331 plt.figure(figsize=(13,13)) images, labels = dataset_to_numpy_util(dataset, 9) for i, image in enumerate(images): title = labels[i] subplot = display_one_flower(image, title, subplot) if i >= 8: break; plt.tight_layout() plt.subplots_adjust(wspace=0.1, hspace=0.1) plt.show()
Petals to the Metal - Flower Classification on TPU
10,264,576
models = pd.DataFrame({ "Model": ["SVM", "KNN", "Logistic Regression", "Random Forest", "Naive Bayes", "Perceptron", "Linear SVC", "Decision Tree", "SGD", "GBK", "Ada Boost"], "Score": [acc_svc, acc_knn, acc_logreg, acc_randomforest, acc_gaussian, acc_perceptron, acc_linear_svc, acc_decisiontree, acc_sgd, acc_gbk, acc_ada] }) models.sort_values(by="Score", ascending=False )<choose_model_class>
display_9_images_from_dataset(training_dataset )
Petals to the Metal - Flower Classification on TPU
10,264,576
eclf_hard = VotingClassifier(estimators = [("DecisionTree", decisiontree), ("GBK", gbk), ("Logistic", logreg), ("Linear SVC", linear_svc), ("Ada Boost", ada), ("KNN", knn), ('SVM', svc)], voting="hard", weights=[6, 2, 1, 0.8, 3.5, 5, 4]) eclf_hard.fit(x_train_dummy, y_train_dummy) print('Ensemble Voting train accuracy score: ', eclf_hard.score(x_train_dummy, y_train_dummy)) print('Ensemble Voting test accuracy score: ', eclf_hard.score(x_test_dummy, y_test_dummy))<train_on_grid>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
10,264,576
mlp = MLPClassifier(max_iter=50) parameter_space = { 'hidden_layer_sizes': [(50,50,50),(50,100,50),(100,)], 'activation': ['tanh', 'relu'], 'solver': ['lbfgs', 'adam'], 'alpha': [0.0001, 0.05], 'learning_rate': ['constant','adaptive'], } gridsearch = GridSearchCV(mlp, parameter_space, n_jobs=-1, cv=3) gridsearch.fit(x_train_dummy, y_train_dummy) print("Best parameters found: ", gridsearch.best_params_) means = gridsearch.cv_results_['mean_test_score'] stds = gridsearch.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, gridsearch.cv_results_['params']): print("%0.3f(+/-%0.03f)for %r" %(mean, std * 2, params)) print('Results on the train set:') print(classification_report(y_train_dummy, gridsearch.predict(x_train_dummy))) print('Results on the test set:') print(classification_report(y_test_dummy, gridsearch.predict(x_test_dummy))) <train_model>
with strategy.scope() : pretrained_model = efn.EfficientNetB7(input_shape=[*IMAGE_SIZE, 3], weights='noisy-student', include_top=False) pretrained_model.trainable = True model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(104, activation='softmax') ]) model.compile( optimizer='adam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] )
Petals to the Metal - Flower Classification on TPU
10,264,576
xgbc = XGBClassifier() xgbc.fit(x_train_dummy, y_train_dummy) y_hat_train = xgbc.predict(x_train_dummy) y_hat_test = xgbc.predict(x_test_dummy) print('Xgboost train accuracy score: ', xgbc.score(x_train_dummy, y_train_dummy)) print('Xgboost test accuracy score: ', xgbc.score(x_test_dummy, y_test_dummy)) print('Results on the test set: ') print(classification_report(y_test_dummy, y_hat_test))<save_to_csv>
historical = model.fit(training_dataset, steps_per_epoch=TRAIN_STEPS, epochs= EPOCHS, callbacks=[lr_callback], validation_data=validation_dataset )
Petals to the Metal - Flower Classification on TPU
10,264,576
<save_to_csv><EOS>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities = model.predict(test_images_ds) predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' )
Petals to the Metal - Flower Classification on TPU
14,014,727
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<save_to_csv>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
14,014,727
predictions = eclf_hard.predict(test_dummy) output = pd.DataFrame({ 'PassengerId' : test["PassengerId"], "Survived": predictions }) output.to_csv("ensemble_submission.csv", index=False )<save_to_csv>
%matplotlib inline print("Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
14,014,727
predictions = gridsearch.predict(test_dummy) output = pd.DataFrame({ 'PassengerId' : test["PassengerId"], "Survived": predictions }) output.to_csv("gd_mlp_submission.csv", index=False )<train_model>
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started" )
Petals to the Metal - Flower Classification on TPU
14,014,727
<compute_test_metric>
IMAGE_SIZE = [512, 512] BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') SEED = 2020
Petals to the Metal - Flower Classification on TPU
14,014,727
<load_from_csv>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
14,014,727
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()<load_from_csv>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) return model with strategy.scope() : model1 = get_model(VGG16) model1.load_weights("/kaggle/input/start-with-pre-train-c4af96/my_ef_net_b7.h5" )
Petals to the Metal - Flower Classification on TPU
14,014,727
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head()<count_missing_values>
def get_model(use_model): base_model = use_model(weights='noisy-student', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) return model with strategy.scope() : model2 = get_model(EfficientNetB7) model2.load_weights("/kaggle/input/more-data-with-efficientnetb7/my_ef_net_b7.h5" )
Petals to the Metal - Flower Classification on TPU
14,014,727
<data_type_conversions><EOS>
test_ds = get_test_dataset(ordered=True) best_alpha = 0.48 print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities1 = model1.predict(test_images_ds) probabilities2 = model2.predict(test_images_ds) probabilities = best_alpha * probabilities1 +(1 - best_alpha)* probabilities2 predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' )
Petals to the Metal - Flower Classification on TPU
12,959,204
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<drop_column>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
12,959,204
train_data = train_data.drop(['Cabin'], axis=1) test_data = test_data.drop(['Cabin'], axis=1) train_data = train_data.drop(['Name'], axis=1) test_data = test_data.drop(['Name'], axis=1) train_data = train_data.drop(['Ticket'], axis=1) test_data = test_data.drop(['Ticket'], axis=1) train_data = train_data.drop(['PassengerId'], axis=1) train_data.head()<feature_engineering>
%matplotlib inline
print("Tensorflow version " + tf.__version__)
Petals to the Metal - Flower Classification on TPU
12,959,204
mean_value = 29
data = [train_data, test_data]
for dataset in data:
    dataset['Age'] = dataset['Age'].fillna(mean_value)
print(train_data['Age'].isnull().sum())
print(test_data['Age'].isnull().sum())
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started" )
Petals to the Metal - Flower Classification on TPU
12,959,204
data = [train_data, test_data]
for dataset in data:
    dataset['Age'] = dataset['Age'].astype(int)
    dataset.loc[dataset['Age'] <= 11, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 11) & (dataset['Age'] <= 18), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 18) & (dataset['Age'] <= 22), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 22) & (dataset['Age'] <= 27), 'Age'] = 3
    dataset.loc[(dataset['Age'] > 27) & (dataset['Age'] <= 33), 'Age'] = 4
    dataset.loc[(dataset['Age'] > 33) & (dataset['Age'] <= 40), 'Age'] = 5
    dataset.loc[(dataset['Age'] > 40) & (dataset['Age'] <= 66), 'Age'] = 6
    # note: ages above 66 map to the same bucket as 41-66, giving 7 buckets (0-6)
    dataset.loc[dataset['Age'] > 66, 'Age'] = 6

train_data['Age'].value_counts()
IMAGE_SIZE = [512, 512] BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') SEED = 2020
Petals to the Metal - Flower Classification on TPU
12,959,204
top_value = 'S'
data = [train_data, test_data]
for dataset in data:
    dataset['Embarked'] = dataset['Embarked'].fillna(top_value)
print(train_data['Embarked'].isnull().sum())
print(test_data['Embarked'].isnull().sum())
def decode_image(image_data):
    image = tf.image.decode_jpeg(image_data, channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.reshape(image, [*IMAGE_SIZE, 3])
    return image

def read_labeled_tfrecord(example):
    LABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "class": tf.io.FixedLenFeature([], tf.int64),
    }
    example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    label = tf.cast(example['class'], tf.int32)
    return image, label

def read_unlabeled_tfrecord(example):
    UNLABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "id": tf.io.FixedLenFeature([], tf.string),
    }
    example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    idnum = example['id']
    return image, idnum

def load_dataset(filenames, labeled=True, ordered=False):
    ignore_order = tf.data.Options()
    if not ordered:
        ignore_order.experimental_deterministic = False
    dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
    dataset = dataset.with_options(ignore_order)
    dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
    return dataset

def get_validation_dataset():
    dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.cache()
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_test_dataset(ordered=False):
    dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset

def count_data_items(filenames):
    n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1)) for filename in filenames]
    return np.sum(n)

NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
12,959,204
ports = {"S": 0, "C": 1, "Q": 2} data = [train_data, test_data] for dataset in data: dataset['Embarked'] = dataset['Embarked'].map(ports )<data_type_conversions>
def get_model(use_model):
    base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3))
    x = base_model.output
    predictions = Dense(104, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(
        optimizer='nadam',
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy']
    )
    return model

with strategy.scope():
    model1 = get_model(InceptionV3)
    model1.load_weights("../input/notebook1cf87d6468/my_ef_net_b7.h5")
Petals to the Metal - Flower Classification on TPU
12,959,204
mean_value = 32
data = [train_data, test_data]
for dataset in data:
    dataset['Fare'] = dataset['Fare'].fillna(mean_value)
print(train_data['Fare'].isnull().sum())
print(test_data['Fare'].isnull().sum())
def get_model(use_model):
    base_model = use_model(weights='noisy-student', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3))
    x = base_model.output
    predictions = Dense(104, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(
        optimizer='nadam',
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy']
    )
    return model

with strategy.scope():
    model2 = get_model(EfficientNetB7)
    model2.load_weights("../input/fork-of-notebook1cf87d6468-2965ed/this_is_my_ef_net_b7.h5")
Petals to the Metal - Flower Classification on TPU
12,959,204
<categorify><EOS>
test_ds = get_test_dataset(ordered=True)
best_alpha = 0.48

print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities1 = model1.predict(test_images_ds)
probabilities2 = model2.predict(test_images_ds)
probabilities = best_alpha * probabilities1 + (1 - best_alpha) * probabilities2
predictions = np.argmax(probabilities, axis=-1)
print(predictions)

print('Generating submission.csv...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
12,689,410
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<train_model>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
12,689,410
X_train = train_data.drop("Survived", axis=1)
Y_train = train_data["Survived"]
X_test = test_data.drop("PassengerId", axis=1).copy()

decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
decision_tree.score(X_train, Y_train)
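The score above is computed on the training data, which is optimistic for a decision tree since it can memorize the training set. A small sketch (added here, using scikit-learn as in the cell above) of a cross-validated estimate instead:

from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(decision_tree, X_train, Y_train, cv=5)
print(cv_scores.mean(), cv_scores.std())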
from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201 from tensorflow.keras.applications.xception import Xception from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2 from tensorflow.keras.applications.nasnet import NASNetLarge from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2, EfficientNetB0, EfficientNetB1
Petals to the Metal - Flower Classification on TPU
12,689,410
prediction = decision_tree.predict(X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': prediction})
output.to_csv('my_submission.csv', index=False)
print("Your submission was saved.")
%matplotlib inline
print("Tensorflow version " + tf.__version__)
Petals to the Metal - Flower Classification on TPU
12,689,410
warnings.filterwarnings("ignore")
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started") MORE_IMAGES_GCS_DS_PATH = KaggleDatasets().get_gcs_path('tf-flower-photo-tfrec' )
Petals to the Metal - Flower Classification on TPU
12,689,410
df_train = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv")
df_test = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv")
df_sub = pd.read_csv("../input/covid19-global-forecasting-week-3/submission.csv")
IMAGE_SIZE = [512, 512]
EPOCHS = 30
BATCH_SIZE = 16 * strategy.num_replicas_in_sync

GCS_PATH_SELECT = {
    192: GCS_DS_PATH + '/tfrecords-jpeg-192x192',
    224: GCS_DS_PATH + '/tfrecords-jpeg-224x224',
    331: GCS_DS_PATH + '/tfrecords-jpeg-331x331',
    512: GCS_DS_PATH + '/tfrecords-jpeg-512x512'
}
GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]
# the extra-data buckets expect only the size suffix, not the full GCS path,
# so MOREIMAGES_PATH is the '/tfrecords-jpeg-...' part on its own
MOREIMAGES_PATH = '/tfrecords-jpeg-{0}x{0}'.format(IMAGE_SIZE[0])

TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec')
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')

IMAGENET_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/imagenet' + MOREIMAGES_PATH + '/*.tfrec')
INATURELIST_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/inaturalist' + MOREIMAGES_PATH + '/*.tfrec')
OPENIMAGE_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/openimage' + MOREIMAGES_PATH + '/*.tfrec')
TRAINING_FILENAMES = TRAINING_FILENAMES + VALIDATION_FILENAMES + IMAGENET_FILES + INATURELIST_FILES + OPENIMAGE_FILES

SEED = 2020
Petals to the Metal - Flower Classification on TPU
12,689,410
print(df_train.shape) print(df_test.shape) print(df_sub.shape )<count_unique_values>
def decode_image(image_data):
    image = tf.image.decode_jpeg(image_data, channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.reshape(image, [*IMAGE_SIZE, 3])
    return image

def read_labeled_tfrecord(example):
    LABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "class": tf.io.FixedLenFeature([], tf.int64),
    }
    example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    label = tf.cast(example['class'], tf.int32)
    return image, label

def read_unlabeled_tfrecord(example):
    UNLABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "id": tf.io.FixedLenFeature([], tf.string),
    }
    example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    idnum = example['id']
    return image, idnum

def load_dataset(filenames, labeled=True, ordered=False):
    ignore_order = tf.data.Options()
    if not ordered:
        ignore_order.experimental_deterministic = False
    dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
    dataset = dataset.with_options(ignore_order)
    dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
    return dataset

def data_augment(image, label):
    # note: random.randint runs in Python at graph-tracing time, so one branch
    # and crop fraction get baked in per trace rather than drawn per image;
    # tf.random ops would vary per example
    flag = random.randint(1, 3)
    coef_1 = random.randint(70, 90) * 0.01
    coef_2 = random.randint(70, 90) * 0.01
    if flag == 1:
        image = tf.image.random_flip_left_right(image, seed=SEED)
    elif flag == 2:
        image = tf.image.random_flip_up_down(image, seed=SEED)
    else:
        image = tf.image.random_crop(image, [int(IMAGE_SIZE[0] * coef_1), int(IMAGE_SIZE[0] * coef_2), 3], seed=SEED)
        # resize back so every example keeps the fixed shape that batching expects
        image = tf.image.resize(image, IMAGE_SIZE)
    return image, label

def get_training_dataset():
    dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
    dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(2048)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_validation_dataset():
    dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=False)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.cache()
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_test_dataset(ordered=False):
    dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset

def count_data_items(filenames):
    n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1)) for filename in filenames]
    return np.sum(n)

NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
12,689,410
print(f"Unique Countries: {len(df_train.Country_Region.unique())}") train_dates=list(df_train.Date.unique()) print(f"Period : {len(df_train.Date.unique())} days") print(f"From : {df_train.Date.min() } To : {df_train.Date.max() }" )<count_values>
LR_START = 0.00001
LR_MAX = 0.00005 * strategy.num_replicas_in_sync
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 8
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = 0.8

def lrfn(epoch):
    if epoch < LR_RAMPUP_EPOCHS:
        lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
        lr = LR_MAX
    else:
        lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
    return lr

lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True)

rng = [i for i in range(EPOCHS)]
y = [lrfn(x) for x in rng]
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
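To make the three phases of the schedule concrete (linear ramp-up, optional hold, exponential decay toward LR_MIN), a few sampled epochs can be printed; the exact values scale with strategy.num_replicas_in_sync.

for epoch in (0, 4, 8, 12, 20, EPOCHS - 1):
    print(epoch, "{:.6f}".format(lrfn(epoch)))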
Petals to the Metal - Flower Classification on TPU
12,689,410
print(f"Unique Regions: {df_train.shape[0]/75}") df_train.Country_Region.value_counts()<feature_engineering>
def get_model(use_model):
    base_model = use_model(weights='noisy-student', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3))
    x = base_model.output
    predictions = Dense(104, activation='softmax')(x)
    return Model(inputs=base_model.input, outputs=predictions)

with strategy.scope():
    model = get_model(EfficientNetB7)
    model.compile(
        optimizer='nadam',
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy']
    )
Petals to the Metal - Flower Classification on TPU
12,689,410
print(f"Number of rows without Country_Region : {df_train.Country_Region.isna().sum() }") df_train["UniqueRegion"]=df_train.Country_Region df_train.UniqueRegion[df_train.Province_State.isna() ==False]=df_train.Province_State+" , "+df_train.Country_Region df_train[df_train.Province_State.isna() ==False]<drop_column>
history = model.fit(
    get_training_dataset(),
    steps_per_epoch=STEPS_PER_EPOCH,
    epochs=EPOCHS,
    callbacks=[lr_callback, ModelCheckpoint(filepath='my_ef_net_b7.h5', monitor='val_loss', save_best_only=True)],
    validation_data=get_validation_dataset(),
    workers=3
)
Petals to the Metal - Flower Classification on TPU
12,689,410
df_train.drop(labels=["Id","Province_State","Country_Region"], axis=1, inplace=True) df_train<define_variables>
model = tf.keras.models.load_model('my_ef_net_b7.h5')
Petals to the Metal - Flower Classification on TPU
12,689,410
<feature_engineering><EOS>
test_ds = get_test_dataset(ordered=True)

print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)

print('Generating submission.csv...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
12,508,579
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<feature_engineering>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
12,508,579
%%time
final_df = pd.DataFrame(columns=["Date", "ConfirmedCases", "Fatalities", "UniqueRegion"])

for region in df_train.UniqueRegion.unique():
    df_temp = df_train[df_train.UniqueRegion == region].reset_index()
    df_temp["Delta"] = 1.0
    size_train = df_temp.shape[0]

    # daily growth factor of cumulative confirmed cases
    for i in range(1, df_temp.shape[0]):
        if df_temp.ConfirmedCases[i - 1] > 0:
            df_temp.loc[i, "Delta"] = df_temp.ConfirmedCases[i] / df_temp.ConfirmedCases[i - 1]

    n = 5
    delta_avg = df_temp.tail(n).Delta.mean()
    delta_list = df_temp.tail(n).Delta
    death_rate = df_temp.tail(1).Fatalities.sum() / df_temp.tail(1).ConfirmedCases.sum()

    df_test_app = df_test_temp[df_test_temp.UniqueRegion == region]
    df_test_app = df_test_app[df_test_app.Date > df_temp.Date.max()]

    # fit a linear trend to the last n growth factors
    X = np.arange(1, n + 1).reshape(-1, 1)
    Y = delta_list
    model = LinearRegression()
    model.fit(X, Y)

    df_temp = pd.concat([df_temp, df_test_app])
    df_temp = df_temp.reset_index()
    for i in range(size_train, df_temp.shape[0]):
        n = n + 1
        # extrapolated growth factor, floored at 1 so cumulative counts never shrink
        df_temp.loc[i, "Delta"] = max(1, model.predict(np.array([n]).reshape(-1, 1))[0])
        df_temp.loc[i, "ConfirmedCases"] = round(df_temp.ConfirmedCases[i - 1] * df_temp.Delta[i], 0)
        df_temp.loc[i, "Fatalities"] = round(death_rate * df_temp.ConfirmedCases[i], 0)

    size_test = df_temp.shape[0] - df_test_temp[df_test_temp.UniqueRegion == region].shape[0]
    df_temp = df_temp.iloc[size_test:, :]
    df_temp = df_temp[["Date", "ConfirmedCases", "Fatalities", "UniqueRegion"]]
    final_df = pd.concat([final_df, df_temp], ignore_index=True)

final_df.shape
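A toy, self-contained illustration (synthetic numbers, not competition data) of the per-region logic above: compute daily growth factors, fit a line to the last few, then roll the series forward with the predicted factor floored at 1.

import numpy as np
from sklearn.linear_model import LinearRegression

cases = [100, 130, 160, 185, 205]  # hypothetical cumulative confirmed cases
deltas = [cases[i] / cases[i - 1] for i in range(1, len(cases))]
n = len(deltas)
trend = LinearRegression().fit(np.arange(1, n + 1).reshape(-1, 1), deltas)
for _ in range(3):  # extrapolate three days ahead
    n += 1
    delta = max(1, trend.predict(np.array([[n]]))[0])
    cases.append(round(cases[-1] * delta))
print(cases)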
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
from tensorflow.keras.applications.nasnet import NASNetLarge
from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2
Petals to the Metal - Flower Classification on TPU
12,508,579
df_sub.Fatalities = final_df.Fatalities
df_sub.ConfirmedCases = final_df.ConfirmedCases
df_sub.to_csv("submission.csv", index=False)
%matplotlib inline
print("Tensorflow version " + tf.__version__)
Petals to the Metal - Flower Classification on TPU
12,508,579
print(tf.__version__)
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
Petals to the Metal - Flower Classification on TPU
12,508,579
train_pd = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv') test_pd = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv' )<define_variables>
IMAGE_SIZE = [331, 331] EPOCHS = 30 BATCH_SIZE = 16 * strategy.num_replicas_in_sync NUM_TRAINING_IMAGES = 12753 NUM_TEST_IMAGES = 7382 STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
Petals to the Metal - Flower Classification on TPU
12,508,579
no_of_rows, no_of_cols = train_pd.shape<count_missing_values>
def decode_image(image_data):
    image = tf.image.decode_jpeg(image_data, channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.reshape(image, [*IMAGE_SIZE, 3])
    return image

def read_labeled_tfrecord(example):
    LABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "class": tf.io.FixedLenFeature([], tf.int64),
    }
    example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    label = tf.cast(example['class'], tf.int32)
    return image, label

def read_unlabeled_tfrecord(example):
    UNLABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "id": tf.io.FixedLenFeature([], tf.string),
    }
    example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    idnum = example['id']
    return image, idnum

def load_dataset(filenames, labeled=True, ordered=False):
    ignore_order = tf.data.Options()
    if not ordered:
        ignore_order.experimental_deterministic = False
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.with_options(ignore_order)
    dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord)
    return dataset

# the shard directory must match IMAGE_SIZE (331 here), otherwise the
# tf.reshape in decode_image fails; deriving it from IMAGE_SIZE keeps the two in sync
SIZE_DIR = '/tfrecords-jpeg-{0}x{0}'.format(IMAGE_SIZE[0])

def get_training_dataset():
    dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + SIZE_DIR + '/train/*.tfrec'), labeled=True)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(2048)
    dataset = dataset.batch(BATCH_SIZE)
    return dataset

def get_validation_dataset():
    dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + SIZE_DIR + '/val/*.tfrec'), labeled=True, ordered=False)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.cache()
    return dataset

def get_test_dataset(ordered=False):
    dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + SIZE_DIR + '/test/*.tfrec'), labeled=False, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    return dataset

training_dataset = get_training_dataset()
validation_dataset = get_validation_dataset()
Petals to the Metal - Flower Classification on TPU
12,508,579
print("keyword column analysis") print("-" * 30) no_na_keywords, _ = train_pd[train_pd['keyword'].isna() ].shape print(f"number of rows w/ value: NaN: {no_na_keywords} i.e {(no_na_keywords/no_of_rows)*100}%") print(f"number of rows w/0 value: NaN: {no_of_rows - no_na_keywords} i.e {(( no_of_rows-no_na_keywords)/no_of_rows)*100}%") print() print('*' * 80) print() print("location column analysis") print("-" * 30) no_na_location, _ = train_pd[train_pd['location'].isna() ].shape print(f"number of rows w/ value: NaN: {no_na_location} i.e {(no_na_location/no_of_rows)*100}%") print(f"number of rows w/0 value: NaN: {no_of_rows - no_na_location} i.e.{(( no_of_rows-no_na_location)/no_of_rows)*100}%" )<drop_column>
LR_START = 0.00001
LR_MAX = 0.0001
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 8
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = 0.8

def lrfn(epoch):
    if epoch < LR_RAMPUP_EPOCHS:
        lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
        lr = LR_MAX
    else:
        lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
    return lr

lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True)

rng = [i for i in range(EPOCHS)]
y = [lrfn(x) for x in rng]
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
12,508,579
train_pd = train_pd.drop(['location'], axis=1)
train_pd = train_pd[train_pd['keyword'].notna()]
train_pd = train_pd.reset_index(drop=True)
print(train_pd.head())
print(f"Number of rows: {len(train_pd)}")
def get_model(use_model):
    base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3))
    x = base_model.output
    predictions = Dense(104, activation='softmax')(x)
    return Model(inputs=base_model.input, outputs=predictions)

with strategy.scope():
    model = get_model(EfficientNetB7)
    model.compile(
        optimizer='nadam',
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy']
    )

history = model.fit(
    training_dataset,
    steps_per_epoch=STEPS_PER_EPOCH,
    epochs=EPOCHS,
    callbacks=[lr_callback, ModelCheckpoint(filepath='my_ef_net_b7.h5', monitor='val_loss', save_best_only=True)],
    validation_data=validation_dataset
)
Petals to the Metal - Flower Classification on TPU
12,508,579
test_pd = test_pd.drop(['location'], axis=1)
test_pd = test_pd.fillna(value={'keyword': ''})
print(test_pd.head())
print(f"Number of rows: {len(test_pd)}")
model = tf.keras.models.load_model('my_ef_net_b7.h5')
Petals to the Metal - Flower Classification on TPU
12,508,579
eng_stopwords = set(stopwords.words('english')) stemmer = PorterStemmer()<categorify>
test_ds = get_test_dataset(ordered=True)

print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)

print('Generating submission.csv...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
12,096,421
def parse_tweet(tweet): tweet = tweet.lstrip().rstrip() tweet = re.sub("http:\S+", "", tweet) tweet = re.sub("https:\S+", "", tweet) tweet = re.sub(r"[,.; tweet = tweet.replace(" ", " ") tweet = tweet.lower() tweet = tweet.replace('...', '...' ).strip() tweet = tweet.replace("'", " ' " ).strip() tweet = re.sub(r"\x89Û_", "", tweet) tweet = re.sub(r"\x89ÛÒ", "", tweet) tweet = re.sub(r"\x89ÛÓ", "", tweet) tweet = re.sub(r"\x89ÛÏWhen", "When", tweet) tweet = re.sub(r"\x89ÛÏ", "", tweet) tweet = re.sub(r"China\x89Ûªs", "China's", tweet) tweet = re.sub(r"let\x89Ûªs", "let's", tweet) tweet = re.sub(r"\x89Û÷", "", tweet) tweet = re.sub(r"\x89Ûª", "", tweet) tweet = re.sub(r"\x89Û\x9d", "", tweet) tweet = re.sub(r"å_", "", tweet) tweet = re.sub(r"\x89Û¢", "", tweet) tweet = re.sub(r"\x89Û¢åÊ", "", tweet) tweet = re.sub(r"fromåÊwounds", "from wounds", tweet) tweet = re.sub(r"åÊ", "", tweet) tweet = re.sub(r"åÈ", "", tweet) tweet = re.sub(r"JapÌ_n", "Japan", tweet) tweet = re.sub(r"Ì©", "e", tweet) tweet = re.sub(r"å¨", "", tweet) tweet = re.sub(r"Surṳ", "Suruc", tweet) tweet = re.sub(r"he's", "he is", tweet) tweet = re.sub(r"there's", "there is", tweet) tweet = re.sub(r"We're", "We are", tweet) tweet = re.sub(r"That's", "That is", tweet) tweet = re.sub(r"won't", "will not", tweet) tweet = re.sub(r"they're", "they are", tweet) tweet = re.sub(r"Can't", "Cannot", tweet) tweet = re.sub(r"wasn't", "was not", tweet) tweet = re.sub(r"don\x89Ûªt", "do not", tweet) tweet = re.sub(r"aren't", "are not", tweet) tweet = re.sub(r"isn't", "is not", tweet) tweet = re.sub(r"What's", "What is", tweet) tweet = re.sub(r"haven't", "have not", tweet) tweet = re.sub(r"hasn't", "has not", tweet) tweet = re.sub(r"There's", "There is", tweet) tweet = re.sub(r"He's", "He is", tweet) tweet = re.sub(r"It's", "It is", tweet) tweet = re.sub(r"You're", "You are", tweet) tweet = re.sub(r"I'M", "I am", tweet) tweet = re.sub(r"shouldn't", "should not", tweet) tweet = re.sub(r"wouldn't", "would not", tweet) tweet = re.sub(r"i'm", "I am", tweet) tweet = re.sub(r"I\x89Ûªm", "I am", tweet) tweet = re.sub(r"I'm", "I am", tweet) tweet = re.sub(r"Isn't", "is not", tweet) tweet = re.sub(r"Here's", "Here is", tweet) tweet = re.sub(r"you've", "you have", tweet) tweet = re.sub(r"you\x89Ûªve", "you have", tweet) tweet = re.sub(r"we're", "we are", tweet) tweet = re.sub(r"what's", "what is", tweet) tweet = re.sub(r"couldn't", "could not", tweet) tweet = re.sub(r"we've", "we have", tweet) tweet = re.sub(r"it\x89Ûªs", "it is", tweet) tweet = re.sub(r"doesn\x89Ûªt", "does not", tweet) tweet = re.sub(r"It\x89Ûªs", "It is", tweet) tweet = re.sub(r"Here\x89Ûªs", "Here is", tweet) tweet = re.sub(r"who's", "who is", tweet) tweet = re.sub(r"I\x89Ûªve", "I have", tweet) tweet = re.sub(r"y'all", "you all", tweet) tweet = re.sub(r"can\x89Ûªt", "cannot", tweet) tweet = re.sub(r"would've", "would have", tweet) tweet = re.sub(r"it'll", "it will", tweet) tweet = re.sub(r"we'll", "we will", tweet) tweet = re.sub(r"wouldn\x89Ûªt", "would not", tweet) tweet = re.sub(r"We've", "We have", tweet) tweet = re.sub(r"he'll", "he will", tweet) tweet = re.sub(r"Y'all", "You all", tweet) tweet = re.sub(r"Weren't", "Were not", tweet) tweet = re.sub(r"Didn't", "Did not", tweet) tweet = re.sub(r"they'll", "they will", tweet) tweet = re.sub(r"they'd", "they would", tweet) tweet = re.sub(r"DON'T", "DO NOT", tweet) tweet = re.sub(r"That\x89Ûªs", "That is", tweet) tweet = re.sub(r"they've", "they have", tweet) tweet = re.sub(r"i'd", "I would", tweet) tweet = re.sub(r"should've", 
"should have", tweet) tweet = re.sub(r"You\x89Ûªre", "You are", tweet) tweet = re.sub(r"where's", "where is", tweet) tweet = re.sub(r"Don\x89Ûªt", "Do not", tweet) tweet = re.sub(r"we'd", "we would", tweet) tweet = re.sub(r"i'll", "I will", tweet) tweet = re.sub(r"weren't", "were not", tweet) tweet = re.sub(r"They're", "They are", tweet) tweet = re.sub(r"Can\x89Ûªt", "Cannot", tweet) tweet = re.sub(r"you\x89Ûªll", "you will", tweet) tweet = re.sub(r"I\x89Ûªd", "I would", tweet) tweet = re.sub(r"let's", "let us", tweet) tweet = re.sub(r"&gt;", ">", tweet) tweet = re.sub(r"&lt;", "<", tweet) tweet = re.sub(r"&amp;", "&", tweet) tweet = re.sub(r"w/e", "whatever", tweet) tweet = re.sub(r"w/", "with", tweet) tweet = re.sub(r"USAgov", "USA government", tweet) tweet = re.sub(r"recentlu", "recently", tweet) tweet = re.sub(r"Ph0tos", "Photos", tweet) tweet = re.sub(r"exp0sed", "exposed", tweet) tweet = re.sub(r"<3", "love", tweet) tweet = re.sub(r"amageddon", "armageddon", tweet) tweet = re.sub(r"Trfc", "Traffic", tweet) tweet = re.sub(r"8/5/2015", "2015-08-05", tweet) tweet = re.sub(r"chest/torso", "chest / torso", tweet) tweet = re.sub(r"WindStorm", "Wind Storm", tweet) tweet = re.sub(r"8/6/2015", "2015-08-06", tweet) tweet = re.sub(r"10:38PM", "10:38 PM", tweet) tweet = re.sub(r"10:30pm", "10:30 PM", tweet) tweet = re.sub(r"MH370:", "MH370 :", tweet) tweet = re.sub(r"PM:", "Prime Minister :", tweet) tweet = re.sub(r"Legionnaires:", "Legionnaires :", tweet) tweet = re.sub(r"Latest:", "Latest :", tweet) tweet = re.sub(r"Crash:", "Crash :", tweet) tweet = re.sub(r"News:", "News :", tweet) tweet = re.sub(r"derailment:", "derailment :", tweet) tweet = re.sub(r"attack:", "attack :", tweet) tweet = re.sub(r"Saipan:", "Saipan :", tweet) tweet = re.sub(r"Photo:", "Photo :", tweet) tweet = re.sub(r"Funtenna:", "Funtenna :", tweet) tweet = re.sub(r"quiz:", "quiz :", tweet) tweet = re.sub(r"VIDEO:", "VIDEO :", tweet) tweet = re.sub(r"MP:", "MP :", tweet) tweet = re.sub(r"UTC2015-08-05", "UTC 2015-08-05", tweet) tweet = re.sub(r"California:", "California :", tweet) tweet = re.sub(r"horror:", "horror :", tweet) tweet = re.sub(r"Past:", "Past :", tweet) tweet = re.sub(r"Time2015-08-06", "Time 2015-08-06", tweet) tweet = re.sub(r"here:", "here :", tweet) tweet = re.sub(r"fires.", "fires.", tweet) tweet = re.sub(r"Forest:", "Forest :", tweet) tweet = re.sub(r"Cramer:", "Cramer :", tweet) tweet = re.sub(r"Chile:", "Chile :", tweet) tweet = re.sub(r"link:", "link :", tweet) tweet = re.sub(r"crash:", "crash :", tweet) tweet = re.sub(r"Video:", "Video :", tweet) tweet = re.sub(r"Bestnaijamade:", "bestnaijamade :", tweet) tweet = re.sub(r"NWS:", "National Weather Service :", tweet) tweet = re.sub(r".caught", ".caught", tweet) tweet = re.sub(r"Hobbit:", "Hobbit :", tweet) tweet = re.sub(r"2015:", "2015 :", tweet) tweet = re.sub(r"post:", "post :", tweet) tweet = re.sub(r"BREAKING:", "BREAKING :", tweet) tweet = re.sub(r"Island:", "Island :", tweet) tweet = re.sub(r"Med:", "Med :", tweet) tweet = re.sub(r"97/Georgia", "97 / Georgia", tweet) tweet = re.sub(r"Here:", "Here :", tweet) tweet = re.sub(r"horror;", "horror ;", tweet) tweet = re.sub(r"people;", "people ;", tweet) tweet = re.sub(r"refugees;", "refugees ;", tweet) tweet = re.sub(r"Genocide;", "Genocide ;", tweet) tweet = re.sub(r".POTUS", ".POTUS", tweet) tweet = re.sub(r"Collision-No", "Collision - No", tweet) tweet = re.sub(r"Rear-", "Rear -", tweet) tweet = re.sub(r"Broadway:", "Broadway :", tweet) tweet = re.sub(r"Correction:", "Correction :", tweet) tweet = 
re.sub(r"UPDATE:", "UPDATE :", tweet) tweet = re.sub(r"Times:", "Times :", tweet) tweet = re.sub(r"RT:", "RT :", tweet) tweet = re.sub(r"Police:", "Police :", tweet) tweet = re.sub(r"Training:", "Training :", tweet) tweet = re.sub(r"Hawaii:", "Hawaii :", tweet) tweet = re.sub(r"Selfies:", "Selfies :", tweet) tweet = re.sub(r"Content:", "Content :", tweet) tweet = re.sub(r"101:", "101 :", tweet) tweet = re.sub(r"story:", "story :", tweet) tweet = re.sub(r"injured:", "injured :", tweet) tweet = re.sub(r"poll:", "poll :", tweet) tweet = re.sub(r"Guide:", "Guide :", tweet) tweet = re.sub(r"Update:", "Update :", tweet) tweet = re.sub(r"alarm:", "alarm :", tweet) tweet = re.sub(r"floods:", "floods :", tweet) tweet = re.sub(r"Flood:", "Flood :", tweet) tweet = re.sub(r"MH370;", "MH370 ;", tweet) tweet = re.sub(r"life:", "life :", tweet) tweet = re.sub(r"crush:", "crush :", tweet) tweet = re.sub(r"now:", "now :", tweet) tweet = re.sub(r"Vote:", "Vote :", tweet) tweet = re.sub(r"Catastrophe.", "Catastrophe.", tweet) tweet = re.sub(r"library:", "library :", tweet) tweet = re.sub(r"Bush:", "Bush :", tweet) tweet = re.sub(r";ACCIDENT", "; ACCIDENT", tweet) tweet = re.sub(r"accident:", "accident :", tweet) tweet = re.sub(r"Taiwan;", "Taiwan ;", tweet) tweet = re.sub(r"Map:", "Map :", tweet) tweet = re.sub(r"failure:", "failure :", tweet) tweet = re.sub(r"150-Foot", "150 - Foot", tweet) tweet = re.sub(r"failure:", "failure :", tweet) tweet = re.sub(r"prefer:", "prefer :", tweet) tweet = re.sub(r"CNN:", "CNN :", tweet) tweet = re.sub(r"Oops:", "Oops :", tweet) tweet = re.sub(r"Disco:", "Disco :", tweet) tweet = re.sub(r"Disease:", "Disease :", tweet) tweet = re.sub(r"Grows:", "Grows :", tweet) tweet = re.sub(r"projected:", "projected :", tweet) tweet = re.sub(r"Pakistan.", "Pakistan.", tweet) tweet = re.sub(r"ministers:", "ministers :", tweet) tweet = re.sub(r"Photos:", "Photos :", tweet) tweet = re.sub(r"Disease:", "Disease :", tweet) tweet = re.sub(r"pres:", "press :", tweet) tweet = re.sub(r"winds.", "winds.", tweet) tweet = re.sub(r"MPH.", "MPH.", tweet) tweet = re.sub(r"PHOTOS:", "PHOTOS :", tweet) tweet = re.sub(r"Time2015-08-05", "Time 2015-08-05", tweet) tweet = re.sub(r"Denmark:", "Denmark :", tweet) tweet = re.sub(r"Articles:", "Articles :", tweet) tweet = re.sub(r"Crash:", "Crash :", tweet) tweet = re.sub(r"casualties.:", "casualties.:", tweet) tweet = re.sub(r"Afghanistan:", "Afghanistan :", tweet) tweet = re.sub(r"Day:", "Day :", tweet) tweet = re.sub(r"AVERTED:", "AVERTED :", tweet) tweet = re.sub(r"sitting:", "sitting :", tweet) tweet = re.sub(r"Multiplayer:", "Multiplayer :", tweet) tweet = re.sub(r"Kaduna:", "Kaduna :", tweet) tweet = re.sub(r"favorite:", "favorite :", tweet) tweet = re.sub(r"home:", "home :", tweet) tweet = re.sub(r"just:", "just :", tweet) tweet = re.sub(r"Collision-1141", "Collision - 1141", tweet) tweet = re.sub(r"County:", "County :", tweet) tweet = re.sub(r"Duty:", "Duty :", tweet) tweet = re.sub(r"page:", "page :", tweet) tweet = re.sub(r"Attack:", "Attack :", tweet) tweet = re.sub(r"Minecraft:", "Minecraft :", tweet) tweet = re.sub(r"wounds;", "wounds ;", tweet) tweet = re.sub(r"Shots:", "Shots :", tweet) tweet = re.sub(r"shots:", "shots :", tweet) tweet = re.sub(r"Gunfire:", "Gunfire :", tweet) tweet = re.sub(r"hike:", "hike :", tweet) tweet = re.sub(r"Email:", "Email :", tweet) tweet = re.sub(r"System:", "System :", tweet) tweet = re.sub(r"Radio:", "Radio :", tweet) tweet = re.sub(r"King:", "King :", tweet) tweet = re.sub(r"upheaval:", "upheaval :", tweet) 
tweet = re.sub(r"tragedy;", "tragedy ;", tweet) tweet = re.sub(r"HERE:", "HERE :", tweet) tweet = re.sub(r"terrorism:", "terrorism :", tweet) tweet = re.sub(r"police:", "police :", tweet) tweet = re.sub(r"Mosque:", "Mosque :", tweet) tweet = re.sub(r"Rightways:", "Rightways :", tweet) tweet = re.sub(r"Brooklyn:", "Brooklyn :", tweet) tweet = re.sub(r"Arrived:", "Arrived :", tweet) tweet = re.sub(r"Home:", "Home :", tweet) tweet = re.sub(r"Earth:", "Earth :", tweet) tweet = re.sub(r"three:", "three :", tweet) tweet = re.sub(r"IranDeal", "Iran Deal", tweet) tweet = re.sub(r"ArianaGrande", "Ariana Grande", tweet) tweet = re.sub(r"camilacabello97", "camila cabello", tweet) tweet = re.sub(r"RondaRousey", "Ronda Rousey", tweet) tweet = re.sub(r"MTVHottest", "MTV Hottest", tweet) tweet = re.sub(r"TrapMusic", "Trap Music", tweet) tweet = re.sub(r"ProphetMuhammad", "Prophet Muhammad", tweet) tweet = re.sub(r"PantherAttack", "Panther Attack", tweet) tweet = re.sub(r"StrategicPatience", "Strategic Patience", tweet) tweet = re.sub(r"socialnews", "social news", tweet) tweet = re.sub(r"NASAHurricane", "NASA Hurricane", tweet) tweet = re.sub(r"onlinecommunities", "online communities", tweet) tweet = re.sub(r"humanconsumption", "human consumption", tweet) tweet = re.sub(r"Typhoon-Devastated", "Typhoon Devastated", tweet) tweet = re.sub(r"Meat-Loving", "Meat Loving", tweet) tweet = re.sub(r"facialabuse", "facial abuse", tweet) tweet = re.sub(r"LakeCounty", "Lake County", tweet) tweet = re.sub(r"BeingAuthor", "Being Author", tweet) tweet = re.sub(r"withheavenly", "with heavenly", tweet) tweet = re.sub(r"thankU", "thank you", tweet) tweet = re.sub(r"iTunesMusic", "iTunes Music", tweet) tweet = re.sub(r"OffensiveContent", "Offensive Content", tweet) tweet = re.sub(r"WorstSummerJob", "Worst Summer Job", tweet) tweet = re.sub(r"HarryBeCareful", "Harry Be Careful", tweet) tweet = re.sub(r"NASASolarSystem", "NASA Solar System", tweet) tweet = re.sub(r"animalrescue", "animal rescue", tweet) tweet = re.sub(r"KurtSchlichter", "Kurt Schlichter", tweet) tweet = re.sub(r"aRmageddon", "armageddon", tweet) tweet = re.sub(r"Throwingknifes", "Throwing knives", tweet) tweet = re.sub(r"GodsLove", "God's Love", tweet) tweet = re.sub(r"bookboost", "book boost", tweet) tweet = re.sub(r"ibooklove", "I book love", tweet) tweet = re.sub(r"NestleIndia", "Nestle India", tweet) tweet = re.sub(r"realDonaldTrump", "Donald Trump", tweet) tweet = re.sub(r"DavidVonderhaar", "David Vonderhaar", tweet) tweet = re.sub(r"CecilTheLion", "Cecil The Lion", tweet) tweet = re.sub(r"weathernetwork", "weather network", tweet) tweet = re.sub(r"withBioterrorism&use", "with Bioterrorism & use", tweet) tweet = re.sub(r"Hostage&2", "Hostage & 2", tweet) tweet = re.sub(r"GOPDebate", "GOP Debate", tweet) tweet = re.sub(r"RickPerry", "Rick Perry", tweet) tweet = re.sub(r"frontpage", "front page", tweet) tweet = re.sub(r"NewsInTweets", "News In Tweets", tweet) tweet = re.sub(r"ViralSpell", "Viral Spell", tweet) tweet = re.sub(r"til_now", "until now", tweet) tweet = re.sub(r"volcanoinRussia", "volcano in Russia", tweet) tweet = re.sub(r"ZippedNews", "Zipped News", tweet) tweet = re.sub(r"MicheleBachman", "Michele Bachman", tweet) tweet = re.sub(r"53inch", "53 inch", tweet) tweet = re.sub(r"KerrickTrial", "Kerrick Trial", tweet) tweet = re.sub(r"abstorm", "Alberta Storm", tweet) tweet = re.sub(r"Beyhive", "Beyonce hive", tweet) tweet = re.sub(r"IDFire", "Idaho Fire", tweet) tweet = re.sub(r"DETECTADO", "Detected", tweet) tweet = re.sub(r"RockyFire", "Rocky 
Fire", tweet) tweet = re.sub(r"Listen/Buy", "Listen / Buy", tweet) tweet = re.sub(r"NickCannon", "Nick Cannon", tweet) tweet = re.sub(r"FaroeIslands", "Faroe Islands", tweet) tweet = re.sub(r"yycstorm", "Calgary Storm", tweet) tweet = re.sub(r"IDPs:", "Internally Displaced People :", tweet) tweet = re.sub(r"ArtistsUnited", "Artists United", tweet) tweet = re.sub(r"ClaytonBryant", "Clayton Bryant", tweet) tweet = re.sub(r"jimmyfallon", "jimmy fallon", tweet) words = [x.lstrip().rstrip() for x in tweet.split() ] words = [x for x in words if x and x not in eng_stopwords] if not words: return "" words = ['user' if x.startswith('@')else x for x in words] tweet = " ".join(words) tweet = re.sub(r"[,.;@ return tweet<feature_engineering>
!pip install tfa-nightly
Petals to the Metal - Flower Classification on TPU
12,096,421
train_pd['combined_filtered_data'] = train_pd['keyword'] + ' ' + train_pd['text']
train_pd['combined_filtered_data'] = train_pd['combined_filtered_data'].apply(parse_tweet)
train_pd.head()
!pip install -q pyyaml h5py
Petals to the Metal - Flower Classification on TPU
12,096,421
test_pd['combined_filtered_data'] = test_pd['keyword'] + ' ' + test_pd['text']
test_pd['combined_filtered_data'] = test_pd['combined_filtered_data'].apply(parse_tweet)
test_pd.head()
import tensorflow as tf from kaggle_datasets import KaggleDatasets from matplotlib import pyplot as plt import numpy as np import random import tensorflow_addons as tfa
Petals to the Metal - Flower Classification on TPU
12,096,421
test_pd = test_pd.fillna(value={'combined_filtered_data': ''} )<define_variables>
random.seed(1)
Petals to the Metal - Flower Classification on TPU
12,096,421
# maxlen for pad_sequences should be measured in tokens, not characters,
# so the longest text is measured after splitting on whitespace
max_len = max(len(text.split()) for text in train_pd['combined_filtered_data'])
max_len = max(max_len, max(len(text.split()) for text in test_pd['combined_filtered_data']))
print(max_len)
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
Petals to the Metal - Flower Classification on TPU
12,096,421
embedding_dim = 300 trunc_type='post' padding_type='post' oov_tok = "<OOV>"<train_model>
AUTO = tf.data.experimental.AUTOTUNE ignore_order = tf.data.Options() ignore_order.experimental_deterministic = False LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64)} UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } IMAGE_SIZE = [512,512] EPOCHS = 20 BATCH_SIZE = 16 * strategy.num_replicas_in_sync
Petals to the Metal - Flower Classification on TPU
12,096,421
tokenizer = Tokenizer(oov_token=oov_tok) tokenizer.fit_on_texts(train_pd['combined_filtered_data']) tokenizer.fit_on_texts(test_pd['combined_filtered_data']) word_index = tokenizer.word_index vocab_size = len(word_index )<string_transform>
training_data = tf.io.gfile.glob(GCS_DS_PATH + "/tfrecords-jpeg-512x512/train/*.tfrec") validation_data = tf.io.gfile.glob(GCS_DS_PATH + "/tfrecords-jpeg-512x512/val/*.tfrec") testing_data = tf.io.gfile.glob(GCS_DS_PATH + "/tfrecords-jpeg-512x512/test/*.tfrec") NUM_CLASSES = 104 NUM_TRAINING_IMAGES = 12753 NUM_TEST_IMAGES = 7382 STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
Petals to the Metal - Flower Classification on TPU
12,096,421
X = tokenizer.texts_to_sequences(train_pd['combined_filtered_data']) X_padded = pad_sequences(X, maxlen=max_len, padding=padding_type, truncating=trunc_type )<prepare_x_and_y>
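A small illustration (hypothetical sentence, added here) of the round trip: the fitted tokenizer maps known words to integer ids and unknown words to the <OOV> id, and pad_sequences right-pads every sequence to max_len.

sample = tokenizer.texts_to_sequences(["forest fire near the river"])
print(sample)
print(pad_sequences(sample, maxlen=max_len, padding=padding_type, truncating=trunc_type).shape)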
def augment_data(image , label): check = random.randint(0,9) if check == 1: image = flip(image) elif check == 2: image = color(image) elif check == 3: image = rotate(image) elif check == 4: image = shear(image) elif check == 5: image = random_all(image) return image , label
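flip, color, rotate, shear and random_all are called above but not defined in this excerpt; note also that random.randint runs once per graph trace rather than per image. A minimal sketch of what such helpers might look like (assumptions, not the author's originals), using tf.image and the tfa alias imported earlier in this kernel:

import math

def flip(image):
    # random horizontal mirror
    return tf.image.random_flip_left_right(image)

def color(image):
    # mild brightness/saturation jitter
    image = tf.image.random_brightness(image, max_delta=0.1)
    return tf.image.random_saturation(image, 0.8, 1.2)

def rotate(image):
    # fixed 15-degree rotation via tensorflow_addons
    return tfa.image.rotate(image, math.radians(15.0))

def shear(image):
    # projective transform encoding a horizontal shear of 0.1
    return tfa.image.transform(image, [1.0, 0.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

def random_all(image):
    return color(flip(image))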
Petals to the Metal - Flower Classification on TPU
12,096,421
# split factor 1.0 keeps every example for training, leaving valX/valY empty
split = int(1.0 * len(X_padded))
trainX, trainY = X_padded[:split], train_pd['target'][:split]
valX, valY = X_padded[split:], train_pd['target'][split:]
def decode_image(image_data):
    image = tf.image.decode_jpeg(image_data, channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.reshape(image, [*IMAGE_SIZE, 3])
    return image
Petals to the Metal - Flower Classification on TPU
12,096,421
testX = tokenizer.texts_to_sequences(test_pd['combined_filtered_data']) testX = pad_sequences(testX, maxlen=max_len, padding=padding_type, truncating=trunc_type )<load_pretrained>
def read_labeled_tfrecord(record):
    record = tf.io.parse_single_example(record, LABELED_TFREC_FORMAT)
    image = decode_image(record['image'])
    label = tf.cast(record['class'], tf.int32)
    return image, label
Petals to the Metal - Flower Classification on TPU