Dataset schema (one record per notebook cell pair):
kernel_id — int64, values 24.2k to 23.3M
prompt — string, lengths 8 to 1.85M
completion — string, lengths 1 to 182k
comp_name — string, lengths 5 to 57
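Each record pairs a notebook-cell prompt with its completion, the source competition name, and the numeric kernel id; task tags such as <categorify> or <count_missing_values> are embedded in the prompt text. A minimal sketch of loading and inspecting such a dump with pandas; the file name train.parquet is an assumption, not something the dump itself states.

import pandas as pd

# Hypothetical artifact name; the viewer dump does not say how the data is stored.
df = pd.read_parquet("train.parquet")

# Columns per the schema: kernel_id (int64) plus three string columns.
print(df.dtypes)
print(df["comp_name"].value_counts().head())

# Inspect one prompt/completion pair.
row = df.iloc[0]
print(row["kernel_id"], row["comp_name"])
print(row["prompt"][:200])
print(row["completion"][:200])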
13,858,092
print(train_df['Fare'].isnull().sum()) print(test_df['Fare'].isnull().sum())<categorify>
one_batch = next(ds_iter) display_batch_of_images(one_batch)
Petals to the Metal - Flower Classification on TPU
13,858,092
for data in train_test_data: data['Fare'].fillna(train_df.groupby(['Embarked', 'Pclass'])['Fare'].transform('median'), inplace=True)<count_missing_values>
[*IMAGE_SIZE, 3]
Petals to the Metal - Flower Classification on TPU
13,858,092
test_df['Fare'].isnull().sum()<data_type_conversions>
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
Petals to the Metal - Flower Classification on TPU
13,858,092
for data in train_test_data: data['Fare'] = pd.qcut(data['Fare'], 10).astype('category').cat.codes<count_values>
class LearningRateTracking(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): keys = list(logs.keys()) print("End epoch {} of training; got log keys: {}".format(epoch, keys))
Petals to the Metal - Flower Classification on TPU
13,858,092
train_df['Fare'].value_counts()<normalization>
use_efficientnet = False if use_efficientnet: !pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
13,858,092
minMaxScaler = MinMaxScaler() for data in train_test_data: data['Fare'] = minMaxScaler.fit_transform(data[['Fare']])<count_values>
weight_per_class = True if weight_per_class: gc.enable() def get_training_dataset_raw(): dataset = load_dataset(TRAINING_FILENAMES, labeled=True, ordered=False) return dataset raw_training_dataset = get_training_dataset_raw() label_counter = Counter() for images, labels in raw_training_dataset: label_counter.update([labels.numpy()]) del raw_training_dataset TARGET_NUM_PER_CLASS = 122 def get_weight_for_class(class_id): counting = label_counter[class_id] weight = TARGET_NUM_PER_CLASS / counting return weight weight_per_class = {class_id: get_weight_for_class(class_id) for class_id in range(104)}
Petals to the Metal - Flower Classification on TPU
13,858,092
train_df['Fare'].value_counts()<count_values>
using_ensemble_models = False
Petals to the Metal - Flower Classification on TPU
13,858,092
print(train_df['Cabin'].value_counts()) print('-'*80) print(train_df['Cabin'].unique().shape) print('-'*80) print(train_df['Cabin'].str[:1].value_counts())<count_values>
if not using_ensemble_models: with strategy.scope(): pretrained_model = tf.keras.applications.MobileNetV2( include_top=False, weights='imagenet', input_shape=[*IMAGE_SIZE, 3]) pretrained_model.trainable = True model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(len(CLASSES), activation='softmax')])
Petals to the Metal - Flower Classification on TPU
13,858,092
print(test_df['Cabin'].str[:1].value_counts())<feature_engineering>
if not using_ensemble_models: model.compile( optimizer='nadam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'], )
Petals to the Metal - Flower Classification on TPU
13,858,092
for data in train_test_data: data['Cabin'] = data['Cabin'].str[:1]<categorify>
if not using_ensemble_models: EPOCHS = 30 BATCH_SIZE = 16 * strategy.num_replicas_in_sync STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
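The training cell a few rows below passes a checkpoint callback and later calls model.load_weights(checkpoint_filepath), but neither name is defined in the rows shown here. A minimal sketch of what that callback presumably looks like; the file name and the exact options are assumptions.

# Assumed definition: only the names checkpoint_filepath and checkpoint are
# confirmed by later cells, not this configuration.
checkpoint_filepath = 'flower_classifier.h5'  # hypothetical file name
checkpoint = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    monitor='val_loss',
    save_best_only=True,
    save_weights_only=True,  # consistent with the later model.load_weights() call
)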
Petals to the Metal - Flower Classification on TPU
13,858,092
cabin_mapping = {"A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7, "T": 8} for data in train_test_data: data['Cabin'] = data['Cabin'].map(cabin_mapping)<count_values>
if not using_ensemble_models: def exponential_lr(epoch, start_lr=0.00001, min_lr=0.00001, max_lr=0.00005 * strategy.num_replicas_in_sync, rampup_epochs=5, sustain_epochs=0, exp_decay=0.75): def lr(epoch, start_lr, min_lr, max_lr, rampup_epochs, sustain_epochs, exp_decay): if epoch < rampup_epochs: lr = (max_lr - start_lr) / rampup_epochs * epoch + start_lr elif epoch < rampup_epochs + sustain_epochs: lr = max_lr else: lr = (max_lr - min_lr) * exp_decay**(epoch - rampup_epochs - sustain_epochs) + min_lr return lr return lr(epoch, start_lr, min_lr, max_lr, rampup_epochs, sustain_epochs, exp_decay) lr_callback = tf.keras.callbacks.LearningRateScheduler(exponential_lr, verbose=True) rng = [i for i in range(EPOCHS)] y = [exponential_lr(x) for x in rng] plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
13,858,092
print(train_df['Cabin'].value_counts()) print('-'*80) print(train_df.groupby(['Pclass'])['Cabin'].median())<categorify>
if not using_ensemble_models: history = model.fit( ds_train, validation_data=ds_valid, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, callbacks=[lr_callback, checkpoint], class_weight=weight_per_class)
Petals to the Metal - Flower Classification on TPU
13,858,092
for data in train_test_data: data['Cabin'].fillna(data.groupby(['Pclass'])['Cabin'].transform('median'), inplace=True)<count_missing_values>
if not using_ensemble_models: model.load_weights(checkpoint_filepath)
Petals to the Metal - Flower Classification on TPU
13,858,092
print(train_df['Cabin'].isnull().sum()) print('-'*80) print(train_df['Cabin'].value_counts())<normalization>
print(checkpoint_filepath) tflite_model_name = checkpoint_filepath.replace('.h5', '.tflite') tflite_model_name
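The cell above only derives tflite_model_name from the checkpoint path; the conversion itself does not appear in these rows. A minimal sketch of the standard Keras-to-TFLite conversion, assuming the trained model object is still in memory.

# Sketch of the conversion step the notebook presumably runs next.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open(tflite_model_name, 'wb') as f:
    f.write(tflite_model)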
Petals to the Metal - Flower Classification on TPU
13,858,092
minMaxScaler = MinMaxScaler() for data in train_test_data: data['Cabin'] = minMaxScaler.fit_transform(data[['Cabin']])<count_missing_values>
def get_pretrained_model(model_name, image_dataset_weights, trainable=True): pretrained_model = model_name( include_top=False, weights=image_dataset_weights, input_shape=[*IMAGE_SIZE, 3]) pretrained_model.trainable = trainable model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(len(CLASSES), activation='softmax')]) return model
Petals to the Metal - Flower Classification on TPU
13,858,092
print(train_df['Embarked'].isnull().sum()) print(test_df['Embarked'].isnull().sum())<data_type_conversions>
if using_ensemble_models: with strategy.scope(): model_EB7 = get_pretrained_model(EfficientNetB7, 'noisy-student', trainable=True) model_EB7.load_weights('../input/models/Petals_to_the_Metal-70K_images-trainable_True-EfficientNetB7.h5')
Petals to the Metal - Flower Classification on TPU
13,858,092
for data in train_test_data: data['Embarked'] = data['Embarked'].fillna('S')<data_type_conversions>
if using_ensemble_models: with strategy.scope(): model_D201 = get_pretrained_model(tf.keras.applications.DenseNet201, 'imagenet', trainable=True) model_D201.load_weights('../input/models/Petals_to_the_Metal-70K_images-trainable_True-DenseNet201.h5')
Petals to the Metal - Flower Classification on TPU
13,858,092
for data in train_test_data: data['Embarked'] = data['Embarked'].astype('category').cat.codes<normalization>
from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix
Petals to the Metal - Flower Classification on TPU
13,858,092
minMaxScaler = MinMaxScaler() for data in train_test_data: data['Embarked'] = minMaxScaler.fit_transform(data[['Embarked']])<prepare_x_and_y>
if using_ensemble_models: cmdataset = get_validation_dataset(ordered=True) images_ds = cmdataset.map(lambda image, label: image) labels_ds = cmdataset.map(lambda image, label: label).unbatch() cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES))).numpy() m1 = model_EB7.predict(images_ds) m2 = model_D201.predict(images_ds) alphas = np.linspace(0, 1, 100) scores = [] for alpha in alphas: cm_probabilities = alpha*m1 + (1-alpha)*m2 cm_predictions = np.argmax(cm_probabilities, axis=-1) scores.append(f1_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)), average='macro')) print("Correct labels: ", cm_correct_labels.shape, cm_correct_labels) print("Predicted labels: ", cm_predictions.shape, cm_predictions) plt.plot(scores) best_alpha = alphas[np.argmax(scores)] cm_probabilities = best_alpha*m1 + (1-best_alpha)*m2 cm_predictions = np.argmax(cm_probabilities, axis=-1)
Petals to the Metal - Flower Classification on TPU
13,858,092
y_train_s = train_df['Survived'] x_train_df = train_df.drop('Survived', axis=1)<split>
if using_ensemble_models: print(best_alpha, max(scores))
Petals to the Metal - Flower Classification on TPU
13,858,092
x_train, x_test, y_train, y_test = train_test_split(x_train_df, y_train_s, test_size=0.2, random_state=10)<compute_train_metric>
if using_ensemble_models: test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities1 = model_EB7.predict(test_images_ds) probabilities2 = model_D201.predict(test_images_ds) probabilities = best_alpha * probabilities1 + (1 - best_alpha) * probabilities2 predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U') np.savetxt( 'submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='', ) !head submission.csv
Petals to the Metal - Flower Classification on TPU
13,858,092
def cross_val_score_result(estimator, x, y, scoring, cv): clf_scores = cross_val_score(estimator, x, y, scoring=scoring, cv=cv) clf_scores_mean = np.round(np.mean(clf_scores), 4) return clf_scores_mean<choose_model_class>
cmdataset = get_validation_dataset(ordered=True) images_ds = cmdataset.map(lambda image, label: image) labels_ds = cmdataset.map(lambda image, label: label).unbatch() cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES))).numpy() if using_ensemble_models: print('using_ensemble_models') probabilities1 = model_EB7.predict(images_ds) probabilities2 = model_D201.predict(images_ds) cm_probabilities = best_alpha * probabilities1 + (1 - best_alpha) * probabilities2 else: cm_probabilities = model.predict(images_ds) cm_predictions = np.argmax(cm_probabilities, axis=-1) labels = range(len(CLASSES)) cmat = confusion_matrix( cm_correct_labels, cm_predictions, labels=labels, ) cmat = (cmat.T / cmat.sum(axis=1)).T
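The last statement above row-normalizes the confusion matrix: the double transpose makes the broadcast divide each row (true class) by its own total. A small self-contained check of that identity:

import numpy as np

cmat = np.array([[8., 2.],
                 [1., 9.]])
normalized = (cmat.T / cmat.sum(axis=1)).T
# Equivalent to dividing each row by its sum with keepdims:
assert np.allclose(normalized, cmat / cmat.sum(axis=1, keepdims=True))
print(normalized)  # rows [0.8, 0.2] and [0.1, 0.9], each summing to 1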
Petals to the Metal - Flower Classification on TPU
13,858,092
classifiers = [ DecisionTreeClassifier(), RandomForestClassifier(), GradientBoostingClassifier(), KNeighborsClassifier(), SVC(), LGBMClassifier(), XGBClassifier(), AdaBoostClassifier() ]<find_best_params>
score = f1_score( cm_correct_labels, cm_predictions, labels=labels, average='macro', ) precision = precision_score( cm_correct_labels, cm_predictions, labels=labels, average='macro', ) recall = recall_score( cm_correct_labels, cm_predictions, labels=labels, average='macro', ) display_confusion_matrix(cmat, score, precision, recall)
Petals to the Metal - Flower Classification on TPU
13,858,092
best_clf_score = 0 best_clf = None clf_name = [] clf_mean_score = [] for clf in classifiers: current_clf_score = cross_val_score_result(clf, x_train, y_train, 'accuracy', 10) clf_name.append(clf.__class__.__name__) clf_mean_score.append(current_clf_score) if current_clf_score > best_clf_score: best_clf_score = current_clf_score best_clf = clf<train_on_grid>
model_performance_report = pd.DataFrame(columns=['model-family', 'model', 'epochs', 'arg min loss', 'arg max accuracy', 'min loss', 'max accuracy', 'f1', 'precision', 'recall']) model_performance_report.loc[len(model_performance_report)]={ 'model-family': 'VGG', 'model':'VGG16', 'epochs':12, 'arg min loss':11, 'arg max accuracy':11, 'min loss':3.47, 'max accuracy':0.23, 'f1':0.123, 'precision':0.146, 'recall':0.226} model_performance_report.loc[len(model_performance_report)]={ 'model-family': 'DenseNet', 'model':'DenseNet201', 'epochs':12, 'arg min loss':11, 'arg max accuracy':10, 'min loss':1.31, 'max accuracy':0.74, 'f1':0.643, 'precision':0.761, 'recall':0.599} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'InceptionResNet', 'model':'InceptionResNetV2', 'epochs':12, 'arg min loss':11, 'arg max accuracy':11, 'min loss':1.57, 'max accuracy':0.66, 'f1':0.513, 'precision':0.640, 'recall':0.480} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'Inception', 'model':'InceptionV3', 'epochs':12, 'arg min loss':11, 'arg max accuracy':11, 'min loss':1.48, 'max accuracy':0.69, 'f1':0.581, 'precision':0.728, 'recall':0.538} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'MobileNet', 'model':'MobileNet', 'epochs':12, 'arg min loss':11, 'arg max accuracy':10, 'min loss':1.11, 'max accuracy':0.76, 'f1':0.717, 'precision':0.798, 'recall':0.679} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'MobileNet', 'model':'MobileNetV2', 'epochs':12, 'arg min loss':11, 'arg max accuracy':11, 'min loss':1.26, 'max accuracy':0.72, 'f1':0.650, 'precision':0.763, 'recall':0.606} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'NASNetMobile', 'model':'NASNetMobile', 'epochs':12, 'arg min loss':11, 'arg max accuracy':11, 'min loss':2.69, 'max accuracy':0.38, 'f1':0.224, 'precision':0.401, 'recall':0.203} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'ResNet', 'model':'ResNet50', 'epochs':12, 'arg min loss':11, 'arg max accuracy':11, 'min loss':3.85, 'max accuracy':0.12, 'f1':0.017, 'precision':0.035, 'recall':0.025} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'ResNet', 'model':'R101V2', 'epochs':12, 'arg min loss':11, 'arg max accuracy':9, 'min loss':0.87, 'max accuracy':0.83, 'f1':0.775, 'precision':0.842, 'recall':0.741} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'VGG', 'model':'VGG19', 'epochs':12, 'arg min loss':11, 'arg max accuracy':11, 'min loss':3.58, 'max accuracy':0.21, 'f1':0.031, 'precision':0.036, 'recall':0.048} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'Xception', 'model':'Xception', 'epochs':12, 'arg min loss':11, 'arg max accuracy':11, 'min loss':1.43, 'max accuracy':0.71, 'f1':0.575, 'precision':0.712, 'recall':0.536} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'ResNet', 'model':'R2 30e', 'epochs':30, 'arg min loss':29, 'arg max accuracy':28, 'min loss':0.83, 'max accuracy':0.83, 'f1':0.788, 'precision':0.863, 'recall':0.753} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'ResNet', 'model':'R101V2 1,2,3+OF', 'epochs':30, 'arg min loss':26, 'arg max accuracy':27, 'min loss':0.52, 'max accuracy':0.88, 'f1':0.864, 'precision':0.916, 'recall':0.842} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'DenseNet', 'model':'D 1,2', 'epochs':30, 
'arg min loss':29, 'arg max accuracy':29, 'min loss':0.92, 'max accuracy':0.81, 'f1':0.767, 'precision':0.833, 'recall':0.732} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'DenseNet', 'model':'D201 1,2,4', 'epochs':30, 'arg min loss':29, 'arg max accuracy':27, 'min loss':0.92, 'max accuracy':0.82, 'f1':0.772, 'precision':0.846, 'recall':0.734} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'ResNet', 'model':'R101V2 1,2,4', 'epochs':30, 'arg min loss':29, 'arg max accuracy':28, 'min loss':0.66, 'max accuracy':0.85, 'f1':0.829, 'precision':0.870, 'recall':0.802} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'ResNet', 'model':'R101V2 1,2,4,5', 'epochs':30, 'arg min loss':29, 'arg max accuracy':23, 'min loss':0.66, 'max accuracy':0.86, 'f1':0.829, 'precision':0.883, 'recall':0.802} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'DenseNet', 'model':'D 1,8', 'epochs':30, 'arg min loss':26, 'arg max accuracy':28, 'min loss':0.23, 'max accuracy':0.95, 'f1':0.945, 'precision':0.950, 'recall':0.946} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'ResNet', 'model':'R101V2 1,8', 'epochs':30, 'arg min loss':10, 'arg max accuracy':16, 'min loss':0.36, 'max accuracy':0.92, 'f1':0.909, 'precision':0.913, 'recall':0.911} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'ResNet', 'model':'D 1,2,8', 'epochs':30, 'arg min loss':10, 'arg max accuracy':11, 'min loss':0.21, 'max accuracy':0.95, 'f1':0.953, 'precision':0.960, 'recall':0.950} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'EfficientNet', 'model':'EB7 1,2,9,10', 'epochs':30, 'arg min loss':29, 'arg max accuracy':27, 'min loss':0.73, 'max accuracy':0.84, 'f1':0.779, 'precision':0.839, 'recall':0.755} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'EfficientNet', 'model':'EB7 +11', 'epochs':30, 'arg min loss':29, 'arg max accuracy':28, 'min loss':1.0, 'max accuracy':0.81, 'f1':0.775, 'precision':0.769, 'recall':0.821} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'EfficientNet', 'model':'EB7 1,2,8,9,10,11', 'epochs':30, 'arg min loss':15, 'arg max accuracy':18, 'min loss':0.25, 'max accuracy':0.96, 'f1':0.955, 'precision':0.950, 'recall':0.964} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'ResNet', 'model':'D 1,2,8,11', 'epochs':30, 'arg min loss':24, 'arg max accuracy':23, 'min loss':0.22, 'max accuracy':0.95, 'f1':0.956, 'precision':0.957, 'recall':0.958} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'Ensemble', 'model':'Ensemble EB7+D201', 'epochs':30, 'arg min loss':24, 'arg max accuracy':23, 'min loss':0.22, 'max accuracy':0.95, 'f1':0.962, 'precision':0.960, 'recall':0.966} extra_columns = ['total params', 'trainable params', 'non-trainable params','training time per epoch(sec)'] model_performance_report[extra_columns] = pd.DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=model_performance_report.index) model_performance_report.loc[len(model_performance_report)]={ 'model-family':'DenseNet', 'model':'D 1,2,6', 'total params':18_521_768, 'trainable params':199_784, 'non-trainable params':18_321_984, 'training time per epoch(sec)':114, 'epochs':30, 'arg min loss':29, 'arg max accuracy':29, 'min loss':0.71, 'max accuracy':0.85, 'f1':0.826, 'precision':0.791, 'recall':0.890} 
model_performance_report.loc[len(model_performance_report)]={ 'model-family':'DenseNet', 'model':'D 1,2,6,12', 'total params':18_521_768, 'trainable params':199_784, 'non-trainable params':18_321_984, 'training time per epoch(sec)':114, 'epochs':30, 'arg min loss':29, 'arg max accuracy':29, 'min loss':0.71, 'max accuracy':0.85, 'f1':0.826, 'precision':0.791, 'recall':0.890} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'DenseNet', 'model':'D 1,2,6,8', 'total params':18_521_768, 'trainable params':18_292_712, 'non-trainable params':229_056, 'training time per epoch(sec)':274, 'epochs':30, 'arg min loss':26, 'arg max accuracy':28, 'min loss':0.22, 'max accuracy':0.96, 'f1':0.948, 'precision':0.942, 'recall':0.957} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'DenseNet', 'model':'D 1,2,6,8,12', 'total params':18_521_768, 'trainable params':18_292_712, 'non-trainable params':229_056, 'training time per epoch(sec)':274, 'epochs':30, 'arg min loss':26, 'arg max accuracy':28, 'min loss':0.22, 'max accuracy':0.96, 'f1':0.948, 'precision':0.942, 'recall':0.957} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'EfficientNet', 'model':'EB7 1,2,6,8,9,10,11', 'total params':64_364_024, 'trainable params':64_053_304, 'non-trainable params':310_720, 'training time per epoch(sec)':511, 'epochs':30, 'arg min loss':20, 'arg max accuracy':28, 'min loss':0.24, 'max accuracy':0.96, 'f1':0.956, 'precision':0.949, 'recall':0.967} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'Ensemble', 'model':'Ensemble 6,12 EB7+D201', 'total params':82_885_792, 'trainable params':82_346_016, 'non-trainable params':539_776, 'training time per epoch(sec)':785, 'epochs':30, 'arg min loss':20, 'arg max accuracy':28, 'min loss':0.24, 'max accuracy':0.96, 'f1':0.962, 'precision':0.956, 'recall':0.971} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'MobileNet', 'model':'MobileNetV2 1,2,6', 'total params':2_391_208, 'trainable params':133_224, 'non-trainable params':2_257_984, 'training time per epoch(sec)':79, 'epochs':30, 'arg min loss':29, 'arg max accuracy':26, 'min loss':0.83, 'max accuracy':0.8, 'f1':0.781, 'precision':0.752, 'recall':0.850} model_performance_report.loc[len(model_performance_report)]={ 'model-family':'MobileNet', 'model':'MobileNetV2 1,2,6,8', 'total params':2_391_208, 'trainable params':2_357_096, 'non-trainable params':34_112, 'training time per epoch(sec)':102, 'epochs':30, 'arg min loss':24, 'arg max accuracy':27, 'min loss':0.27, 'max accuracy':0.95, 'f1':0.936, 'precision':0.929, 'recall':0.951}
Petals to the Metal - Flower Classification on TPU
13,858,092
lgbm_clf = LGBMClassifier() grid_param = { 'learning_rate': [0.005, 0.01, 0.015, 0.02], 'n_estimators': [100, 150, 200], 'boosting_type': ['rf', 'gbdt', 'dart', 'goss'], 'max_depth': [10, 15, 20] } lgbm_grid = GridSearchCV(lgbm_clf, grid_param, cv=10) lgbm_grid.fit(x_train, y_train)<find_best_params>
model_performance_report = model_performance_report.sort_values(by='max accuracy') model_performance_report
Petals to the Metal - Flower Classification on TPU
13,858,092
print('best_param:', lgbm_grid.best_params_) print('best_score:{:.4f}'.format(lgbm_grid.best_score_))<predict_on_test>
dataset = get_validation_dataset() dataset = dataset.unbatch().batch(20) batch = iter(dataset)
Petals to the Metal - Flower Classification on TPU
13,858,092
test_pred = lgbm_grid.best_estimator_.predict(test_df) submission = pd.DataFrame({ 'PassengerId': test_df_PId, 'Survived': test_pred })<save_to_csv>
images, labels = next(batch)
Petals to the Metal - Flower Classification on TPU
13,858,092
submission.to_csv('submission_test.csv', index=False)<load_from_csv>
if using_ensemble_models: probabilities1 = model_EB7.predict(images) probabilities2 = model_D201.predict(images) probabilities = best_alpha * probabilities1 + (1 - best_alpha) * probabilities2 else: probabilities = model.predict(images)
Petals to the Metal - Flower Classification on TPU
13,858,092
check_submission = pd.read_csv('submission_test.csv') check_submission<import_modules>
predictions = np.argmax(probabilities, axis=-1) display_batch_of_images((images, labels), predictions)
Petals to the Metal - Flower Classification on TPU
13,858,092
random.seed(42)<set_options>
mismatches = sum(cm_predictions != cm_correct_labels) print('Number of mismatches on validation data: {} out of {} or ({:.2%})'.format(mismatches, NUM_VALIDATION_IMAGES, mismatches/NUM_VALIDATION_IMAGES))
Petals to the Metal - Flower Classification on TPU
13,858,092
warnings.filterwarnings("ignore")<compute_test_metric>
cmdataset = get_validation_dataset(ordered=True) images_ds = cmdataset.map(lambda image, label: image) labels_ds = cmdataset.map(lambda image, label: label).unbatch() cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES))).numpy() mismatches_images, mismatches_predictions, mismatches_labels = [], [], [] mismatches_dataset = tf.data.Dataset.from_tensors([]) val_batch = iter(cmdataset.unbatch().batch(1)) for image_index in range(NUM_VALIDATION_IMAGES): batch = next(val_batch) if cm_predictions[image_index] != cm_correct_labels[image_index]: print('Predicted vs Correct labels: {}, {}'.format(cm_predictions[image_index], cm_correct_labels[image_index]))
Petals to the Metal - Flower Classification on TPU
13,858,092
def prim_set(names): def pDiv(left, right): try: return left / right except ZeroDivisionError: return 1 def pPow(left, right): try: return abs(left)** min(float(right),8) except ZeroDivisionError: return 1 except OverflowError: return 1 def pSqrt(inp): return math.sqrt(abs(inp)) def abs_(inp): return abs(inp) pset = gp.PrimitiveSet("MAIN", len(names)) pset.addPrimitive(pDiv, 2) pset.addPrimitive(pPow, 2) pset.addPrimitive(pSqrt, 1) pset.addPrimitive(abs_, 1) pset.addPrimitive(operator.add, 2) pset.addPrimitive(operator.sub, 2) pset.addPrimitive(operator.mul, 2) pset.addPrimitive(operator.neg, 1) pset.addPrimitive(math.floor, 1) pset.addPrimitive(math.tanh, 1) pset.addPrimitive(math.sin, 1) pset.addPrimitive(math.cos, 1) pset.addPrimitive(max, 2) pset.addPrimitive(min, 2) for i, a in enumerate(pset.arguments): new_name = names[i] pset.arguments[i] = new_name pset.mapping[new_name] = pset.mapping[a] pset.mapping[new_name].value = new_name del pset.mapping[a] return pset<feature_engineering>
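mydeap, called a few rows below as mydeap(mungedtrain, target, seed=n, mxvl=7, ngen=121), is never defined in these rows. A minimal sketch of what a DEAP genetic-programming driver with that signature could look like, built on the prim_set above; the fitness function, operator settings, population size, the reading of mxvl as a tree-height cap, and the contents of the returned Tbox are all assumptions.

import random
from deap import algorithms, base, creator, gp, tools

def mydeap(mungedtrain, target, seed=0, mxvl=7, ngen=100):
    # Hypothetical reconstruction: evolve a formula over the engineered
    # features and score it as a 0/1 survival classifier.
    random.seed(seed)
    pset = prim_set(list(mungedtrain.columns))
    X = mungedtrain.values.tolist()
    y = target.values.tolist()

    if not hasattr(creator, "FitnessMax"):
        creator.create("FitnessMax", base.Fitness, weights=(1.0,))
        creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax)

    Tbox = base.Toolbox()
    Tbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=3)
    Tbox.register("individual", tools.initIterate, creator.Individual, Tbox.expr)
    Tbox.register("population", tools.initRepeat, list, Tbox.individual)
    Tbox.register("compile", gp.compile, pset=pset)

    def eval_accuracy(individual):
        # Threshold the evolved expression at 0.5 and score training accuracy.
        func = Tbox.compile(expr=individual)
        preds = [1.0 if func(*row) > 0.5 else 0.0 for row in X]
        return sum(p == t for p, t in zip(preds, y)) / len(y),

    Tbox.register("evaluate", eval_accuracy)
    Tbox.register("select", tools.selTournament, tournsize=3)
    Tbox.register("mate", gp.cxOnePoint)
    Tbox.register("expr_mut", gp.genFull, min_=0, max_=2)
    Tbox.register("mutate", gp.mutUniform, expr=Tbox.expr_mut, pset=pset)
    # mxvl interpreted here as a height cap on crossover/mutation results.
    Tbox.decorate("mate", gp.staticLimit(key=lambda ind: ind.height, max_value=mxvl))
    Tbox.decorate("mutate", gp.staticLimit(key=lambda ind: ind.height, max_value=mxvl))

    pop = Tbox.population(n=300)
    hof = tools.HallOfFame(1)
    algorithms.eaSimple(pop, Tbox, cxpb=0.5, mutpb=0.2, ngen=ngen,
                        halloffame=hof, verbose=False)
    return hof, Tbox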
dataset = get_validation_dataset() dataset = dataset.unbatch().batch(20) batch = iter(dataset) images, labels = next(batch)
Petals to the Metal - Flower Classification on TPU
13,858,092
def get_title(name): title_search = re.search(r'([A-Za-z]+)\.', name) if title_search: return title_search.group(1) return "" def PrepData(data): data['IsNull'] = data.isnull().sum(axis=1) data['Ticket'] = data['Ticket'].str.lower().str.replace(r'\W', '', regex=True) data.Sex.fillna(0, inplace=True) data.loc[data.Sex != 'male', 'Sex'] = 1 data.loc[data.Sex == 'male', 'Sex'] = 0 data['NameLen'] = data['Name'].apply(len) bin_num = 4 data['NameLen'] = pd.qcut(data['NameLen'], bin_num, labels=list(range(bin_num))).astype(float) data['Has_Cabin'] = data["Cabin"].apply(lambda x: 0 if type(x) == float else 1) data['FamilySize'] = data['SibSp'] + data['Parch'] + 1 data['isFamily'] = 1 data.loc[data['isFamily'] == 1, 'notAlone'] = 0 data['Title'] = data['Name'].apply(get_title) mapping = {'Mlle': 'Rare', 'Major': 'Mr', 'Col': 'Mr', 'Sir': 'Rare', 'Rev': 'Mr', 'Don': 'Mr', 'Mme': 'Rare', 'Jonkheer': 'Mr', 'Lady': 'Mrs', 'Capt': 'Mr', 'Countess': 'Rare', 'Ms': 'Miss', 'Dona': 'Rare'} data.replace({'Title': mapping}, inplace=True) title_mapping = {"Mr": 0, "Miss": 1, "Mrs": 2, "Master": 3, "Rare": 4} data['Title'] = data['Title'].map(title_mapping) data['Title'] = data['Title'].fillna(0) data['Embarked'].fillna(method='backfill', inplace=True) data['Embarked'] = data['Embarked'].map({'C': 1, 'Q': 2, 'S': 0}).astype(int) data['Fare'] = data['Fare'].fillna(train['Fare'].median()) data.loc[data['Fare'] <= 7.91, 'Fare'] = 0 data.loc[(data['Fare'] > 7.91) & (data['Fare'] <= 14.454), 'Fare'] = 1 data.loc[(data['Fare'] > 14.454) & (data['Fare'] <= 31), 'Fare'] = 2 data.loc[data['Fare'] > 31, 'Fare'] = 3 data['Fare'] = data['Fare'].astype(int) data.Cabin.fillna('0', inplace=True) data.loc[data.Cabin.str[0] == 'A', 'Cabin'] = 1 data.loc[data.Cabin.str[0] == 'B', 'Cabin'] = 2 data.loc[data.Cabin.str[0] == 'C', 'Cabin'] = 3 data.loc[data.Cabin.str[0] == 'D', 'Cabin'] = 4 data.loc[data.Cabin.str[0] == 'E', 'Cabin'] = 5 data.loc[data.Cabin.str[0] == 'F', 'Cabin'] = 6 data.loc[data.Cabin.str[0] == 'G', 'Cabin'] = 7 data.loc[data.Cabin.str[0] == 'T', 'Cabin'] = 8 data['Cabin'] = data['Cabin'].astype(int) grouped = data.groupby(['Sex', 'Pclass', 'Title']) data['Age'] = grouped['Age'].apply(lambda x: x.fillna(x.median())) data['Age'] = data['Age'].astype(int) boy = (data['Name'].str.contains('Master')) | ((data['Sex'] == 0) & (data['Age'] < 13)) female = data['Sex'] == 1 boy_or_female = boy | female n_ticket = data[boy_or_female].groupby('Ticket').Survived.count() tick_surv = data[boy_or_female].groupby('Ticket').Survived.mean() data['Boy'] = (data['Name'].str.contains('Master')) | ((data['Sex'] == 0) & (data['Age'] < 13)) data['NTicket'] = data['Ticket'].replace(n_ticket) data.loc[~data.Ticket.isin(n_ticket.index), 'NTicket'] = 0 data['TicketSurv'] = data['Ticket'].replace(tick_surv) data.loc[~data.Ticket.isin(tick_surv.index), 'TicketSurv'] = 0 data['TicketSurv'].fillna(0, inplace=True) data.loc[data['Age'] <= 16, 'Age'] = 5 data.loc[(data['Age'] > 16) & (data['Age'] <= 32), 'Age'] = 1 data.loc[(data['Age'] > 32) & (data['Age'] <= 48), 'Age'] = 2 data.loc[(data['Age'] > 48) & (data['Age'] <= 64), 'Age'] = 3 data.loc[data['Age'] > 64, 'Age'] = 4 data['manual_tree'] = 0 data.loc[boy_or_female, 'manual_tree'] = 1 data.loc[(data['Sex'] == 1) & (data['Pclass'] == 3) & (data['Embarked'] == 0) & (data['Fare'] > 0), 'manual_tree'] = 0 data.loc[(data['Sex'] == 0) & (data['Title'] == 3), 'manual_tree'] = 1 tfidf_vec = TfidfVectorizer(max_features=15, token_pattern=r"\w+") svd = TruncatedSVD(n_components=10) tfidf_array = svd.fit_transform(tfidf_vec.fit_transform(data["Name"])) for i in range(tfidf_array.shape[1]): data.insert(len(data.columns), column='Name_' + str(i), value=tfidf_array[:, i]) tfidf_vec = TfidfVectorizer(max_features=5, analyzer="char") svd = TruncatedSVD(n_components=3) tfidf_array = svd.fit_transform(tfidf_vec.fit_transform(data["Ticket"])) for i in range(tfidf_array.shape[1]): data.insert(len(data.columns), column='Ticket_' + str(i), value=tfidf_array[:, i]) data['Ticket'] = data['Ticket'].str.extract(r'(\d+)', expand=False).fillna(0).astype(float) data['Ticket'] = np.round(np.log1p(data['Ticket']) * 10) data.drop(['Name'], axis=1, inplace=True) return data.astype(float)<load_from_csv>
for i in range(3): display_batch_of_images((images, labels), predictions, display_mismatches_only=True) images, labels = next(batch)
Petals to the Metal - Flower Classification on TPU
13,858,092
train = pd.read_csv("../input/titanic/train.csv", dtype={"Age": np.float64}, index_col='PassengerId') test = pd.read_csv("../input/titanic/test.csv", dtype={"Age": np.float64}, index_col='PassengerId')<load_from_csv>
one_batch = next(ds_iter) display_batch_of_images(one_batch)
Petals to the Metal - Flower Classification on TPU
13,858,092
test_data_with_labels = pd.read_csv('../input/titanic-test-data/titanic.csv') test_data = pd.read_csv('../input/titanic/test.csv') for i, name in enumerate(test_data_with_labels['name']): if '"' in name: test_data_with_labels['name'][i] = re.sub('"', '', name) for i, name in enumerate(test_data['Name']): if '"' in name: test_data['Name'][i] = re.sub('"', '', name) survived = [] for name in test_data['Name']: survived.append(int(test_data_with_labels.loc[test_data_with_labels['name'] == name]['survived'].values[-1])) test['Survived'] = survived<data_type_conversions>
using_tta = False tta_iterations = 3
Petals to the Metal - Flower Classification on TPU
13,858,092
df = pd.concat((train, test), axis=0) target = train['Survived'].astype(float) df = PrepData(df) df['Ticket'] = df['Ticket'].astype(int).astype('category') col_to_use = ['Embarked', 'Fare', 'Parch', 'Pclass', 'Sex', 'Age', 'NameLen', 'Has_Cabin', 'Cabin', 'FamilySize', 'isFamily', 'Title', 'TicketSurv', 'NTicket', 'Boy', 'manual_tree', 'Ticket_0', 'Ticket_1', 'Ticket_2', 'Name_0', 'Name_1', 'Name_2', 'Name_3', 'Name_4', 'Name_5', 'Name_6', 'Name_7', 'Name_8', 'Name_9'] df = pd.get_dummies(df[col_to_use]) df[col_to_use] += 0.0 mungedtrain = df[:train.shape[0]].copy() mungedtest = df[train.shape[0]:].copy() mytrain = mungedtrain.values.tolist() mytest = mungedtest.values.tolist()<define_search_space>
if using_tta: def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset
Petals to the Metal - Flower Classification on TPU
13,858,092
GPhof = [] for n in [5, 7, 14, 21, 28, 42, 57]: hof, Tbox = mydeap(mungedtrain, target, seed=n, mxvl=7, ngen=121) GPhof.append(hof)<drop_column>
def predict_tta(model, tta_iterations): probs = [] for i in range(tta_iterations): print('TTA iteration ', i) test_ds = get_test_dataset(ordered=True) test_images_ds = test_ds.map(lambda image, idnum: image) if using_ensemble_models: print('using_ensemble_models') probabilities1 = model_EB7.predict(test_images_ds) probabilities2 = model_D201.predict(test_images_ds) probabilities = best_alpha * probabilities1 + (1 - best_alpha) * probabilities2 probs.append(probabilities) else: probs.append(model.predict(test_images_ds, verbose=0)) return probs
Petals to the Metal - Flower Classification on TPU
13,858,092
test = test.reset_index()<compute_test_metric>
test_ds = get_test_dataset(ordered=True) test_images_ds = test_ds.map(lambda image, idnum: image) if using_tta: print('Computing predictions using TTA...') probabilities = np.mean(predict_tta(model, tta_iterations), axis=0) else: print('Computing predictions...') probabilities = model.predict(test_images_ds) predictions = np.argmax(probabilities, axis=-1) print(predictions)
Petals to the Metal - Flower Classification on TPU
13,858,092
<save_to_csv><EOS>
print('using_ensemble_models:', using_ensemble_models) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U') np.savetxt( 'submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='', ) !head submission.csv
Petals to the Metal - Flower Classification on TPU
12,712,310
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<load_from_csv>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
12,712,310
df_train = pd.read_csv('/kaggle/input/titanic/train.csv') df_train.shape<load_from_csv>
Petals to the Metal - Flower Classification on TPU
12,712,310
df_test = pd.read_csv('/kaggle/input/titanic/test.csv') df_test.shape<load_from_csv>
%matplotlib inline print("Tensorflow version " + tf.__version__)
Petals to the Metal - Flower Classification on TPU
12,712,310
df_sub = pd.read_csv('/kaggle/input/titanic/gender_submission.csv') df_sub.shape<count_missing_values>
MODEL_CLASS = DenseNet201
Petals to the Metal - Flower Classification on TPU
12,712,310
df_train.isnull().sum()<define_variables>
GCS_DS_PATH = KaggleDatasets().get_gcs_path('tpu-getting-started') GCS_DS_PATH_EXT = KaggleDatasets().get_gcs_path('tf-flower-photo-tfrec') IMAGE_SIZE = [512, 512] EPOCHS = 35 BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] GCS_PATH_SELECT_EXT = { 192: '/tfrecords-jpeg-192x192', 224: '/tfrecords-jpeg-224x224', 331: '/tfrecords-jpeg-331x331', 512: '/tfrecords-jpeg-512x512' } GCS_PATH_EXT = GCS_PATH_SELECT_EXT[IMAGE_SIZE[0]] IMAGENET_FILES = tf.io.gfile.glob(GCS_DS_PATH_EXT + '/imagenet' + GCS_PATH_EXT + '/*.tfrec') INATURELIST_FILES = tf.io.gfile.glob(GCS_DS_PATH_EXT + '/inaturalist' + GCS_PATH_EXT + '/*.tfrec') OPENIMAGE_FILES = tf.io.gfile.glob(GCS_DS_PATH_EXT + '/openimage' + GCS_PATH_EXT + '/*.tfrec') OXFORD_FILES = tf.io.gfile.glob(GCS_DS_PATH_EXT + '/oxford_102' + GCS_PATH_EXT + '/*.tfrec') TENSORFLOW_FILES = tf.io.gfile.glob(GCS_DS_PATH_EXT + '/tf_flowers' + GCS_PATH_EXT + '/*.tfrec') ADDITIONAL_TRAINING_FILENAMES = IMAGENET_FILES + INATURELIST_FILES + OPENIMAGE_FILES + OXFORD_FILES + TENSORFLOW_FILES CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose'] TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') TRAINING_FILENAMES = TRAINING_FILENAMES + ADDITIONAL_TRAINING_FILENAMES
Petals to the Metal - Flower Classification on TPU
12,712,310
women = df_train.loc[df_train.Sex == 'female']["Survived"] rate_women = sum(women)/len(women) print("% of women who survived:", rate_women)<define_variables>
def random_blockout(img, sl=0.1, sh=0.2, rl=0.4): p = random.random() if p >= 0.25: w, h, c = IMAGE_SIZE[0], IMAGE_SIZE[1], 3 origin_area = tf.cast(h*w, tf.float32) e_size_l = tf.cast(tf.round(tf.sqrt(origin_area * sl * rl)), tf.int32) e_size_h = tf.cast(tf.round(tf.sqrt(origin_area * sh / rl)), tf.int32) e_height_h = tf.minimum(e_size_h, h) e_width_h = tf.minimum(e_size_h, w) erase_height = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_height_h, dtype=tf.int32) erase_width = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_width_h, dtype=tf.int32) erase_area = tf.zeros(shape=[erase_height, erase_width, c]) erase_area = tf.cast(erase_area, tf.uint8) pad_h = h - erase_height pad_top = tf.random.uniform(shape=[], minval=0, maxval=pad_h, dtype=tf.int32) pad_bottom = pad_h - pad_top pad_w = w - erase_width pad_left = tf.random.uniform(shape=[], minval=0, maxval=pad_w, dtype=tf.int32) pad_right = pad_w - pad_left erase_mask = tf.pad([erase_area], [[0,0],[pad_top, pad_bottom], [pad_left, pad_right], [0,0]], constant_values=1) erase_mask = tf.squeeze(erase_mask, axis=0) erased_img = tf.multiply(tf.cast(img, tf.float32), tf.cast(erase_mask, tf.float32)) return tf.cast(erased_img, img.dtype) else: return tf.cast(img, img.dtype)
Petals to the Metal - Flower Classification on TPU
12,712,310
men = df_train.loc[df_train.Sex == 'male']["Survived"] rate_men = sum(men)/len(men) print("% of men who survived:", rate_men)<categorify>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def data_augment(image, label): flag = random.randint(1,3) coef_1 = random.randint(70, 90)* 0.01 coef_2 = random.randint(70, 90)* 0.01 if flag == 1: image = tf.image.random_flip_left_right(image, seed=SEED) elif flag == 2: image = tf.image.random_flip_up_down(image, seed=SEED) else: image = tf.image.random_crop(image, [int(IMAGE_SIZE[0]*coef_1), int(IMAGE_SIZE[0]*coef_2), 3],seed=SEED) image = random_blockout(image) return image, label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled=True) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=False) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
12,712,310
df_train = df_train.fillna(-999) df_test = df_test.fillna(-999)<split>
LR_START = 0.00001 LR_MAX = 0.00005 * strategy.num_replicas_in_sync LR_MIN = 0.00001 LR_RAMPUP_EPOCHS = 5 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY = .75 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) rng = [i for i in range(EPOCHS)] y = [lrfn(x) for x in rng] plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
12,712,310
X = df_train.drop(["Survived"], axis=1) y = df_train["Survived"] X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75, random_state=42)<save_to_csv>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax')(x) return Model(inputs=base_model.input, outputs=predictions) with strategy.scope(): model = get_model(MODEL_CLASS) model.compile( optimizer='nadam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] )
Petals to the Metal - Flower Classification on TPU
12,712,310
<train_model>
history = model.fit(get_training_dataset(), steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, callbacks=[lr_callback, ModelCheckpoint(filepath='my_denceNet_201.h5', monitor='val_loss', save_best_only=True)], validation_data=get_validation_dataset(), workers=3)
Petals to the Metal - Flower Classification on TPU
12,712,310
cat_features_indices = np.where(X.dtypes != float)[0] model = CatBoostClassifier( eval_metric='Accuracy', loss_function='Logloss', use_best_model=True, random_seed=42, logging_level='Silent' ) model.fit(X_train, y_train, cat_features=cat_features_indices, eval_set=(X_test, y_test), plot=True)<compute_train_metric>
model = tf.keras.models.load_model('my_denceNet_201.h5')
Petals to the Metal - Flower Classification on TPU
12,712,310
cv_params = model.get_params() cv_params.update({ 'loss_function': 'Logloss' }) cv_data = cv( Pool(X, y, cat_features=cat_features_indices), cv_params, plot=True )<compute_test_metric>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities = model.predict(test_images_ds) predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
12,742,576
print('Best validation accuracy score: {:.2f}±{:.2f} on step {}'.format( np.max(cv_data['test-Accuracy-mean']), cv_data['test-Accuracy-std'][np.argmax(cv_data['test-Accuracy-mean'])], np.argmax(cv_data['test-Accuracy-mean']) ))<compute_test_metric>
%matplotlib inline print("Tensorflow version " + tf.__version__)
Petals to the Metal - Flower Classification on TPU
12,742,576
print('Precise validation accuracy score: {}'.format(np.max(cv_data['test-Accuracy-mean'])))<compute_test_metric>
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started")
Petals to the Metal - Flower Classification on TPU
12,742,576
train_pool = Pool(X_train, y_train, cat_features=cat_features_indices) validate_pool = Pool(X_test, y_test, cat_features=cat_features_indices)<features_selection>
IMAGE_SIZE = [512, 512] BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') SEED = 2020
Petals to the Metal - Flower Classification on TPU
12,742,576
feature_importances = model.get_feature_importance(train_pool) feature_names = X_train.columns for score, name in sorted(zip(feature_importances, feature_names), reverse=True): print('{}: {}'.format(name, score))<compute_train_metric>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
12,742,576
eval_metrics = model.eval_metrics(validate_pool, ['AUC'], plot=True)<train_model>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax')(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile( optimizer='nadam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) return model with strategy.scope(): model1 = get_model(DenseNet201) model1.load_weights("/kaggle/input/densenet201-aug-additional-data/my_denceNet_201.h5")
Petals to the Metal - Flower Classification on TPU
12,742,576
<predict_on_test>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax')(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile( optimizer='nadam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) return model with strategy.scope(): model2 = get_model(Xception) model2.load_weights("/kaggle/input/xception-aug-additional-data/my_Xception.h5")
Petals to the Metal - Flower Classification on TPU
12,742,576
<save_to_csv><EOS>
test_ds = get_test_dataset(ordered=True) best_alpha = 0.45 print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities1 = model1.predict(test_images_ds) probabilities2 = model2.predict(test_images_ds) probabilities = best_alpha * probabilities1 + (1 - best_alpha) * probabilities2 predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
13,540,576
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<set_options>
!pip install -q efficientnet print("Tensorflow version " + tf.__version__) print("Imported packages")
Petals to the Metal - Flower Classification on TPU
13,540,576
%matplotlib inline <load_from_csv>
def seed_everything(seed=0): random.seed(seed) np.random.seed(seed) tf.random.set_seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) os.environ['TF_DETERMINISTIC_OPS'] = '1' seed = 0 seed_everything(seed)
Petals to the Metal - Flower Classification on TPU
13,540,576
df = pd.read_csv('/kaggle/input/titanic/train.csv') test_df = pd.read_csv('/kaggle/input/titanic/test.csv')<groupby>
try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Device:', tpu.master()) tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) except: strategy = tf.distribute.get_strategy() print('Number of replicas:', strategy.num_replicas_in_sync)
Petals to the Metal - Flower Classification on TPU
13,540,576
embarked = df.groupby('Embarked').count()['PassengerId'] embarked_max = embarked[embarked == embarked.max()].index[0] df.loc[df['Embarked'].isnull(), 'Embarked'] = embarked_max embarked = test_df.groupby('Embarked').count()['PassengerId'] embarked_max = embarked[embarked == embarked.max()].index[0] test_df.loc[test_df['Embarked'].isnull(), 'Embarked'] = embarked_max<feature_engineering>
IMAGE_SIZE = [512, 512] HEIGHT = 512 WIDTH = 512 GCS_DS_PATH = KaggleDatasets().get_gcs_path() GCS_PATH = GCS_DS_PATH + '/tfrecords-jpeg-512x512' AUTO = tf.data.experimental.AUTOTUNE EPOCHS = 30 CHANNELS = 3 model_path = f'model_{HEIGHT}x{WIDTH}.h5' BATCH_SIZE = 16 * strategy.num_replicas_in_sync WARMUP_LEARNING_RATE = 1e-4 * strategy.num_replicas_in_sync LEARNING_RATE = 3e-5 * strategy.num_replicas_in_sync NUM_CLASSES = 104 ES_PATIENCE = 5 TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose']
Petals to the Metal - Flower Classification on TPU
13,540,576
avg = np.mean(df['Age']) test_avg = np.mean(test_df['Age']) df['Age'] = df['Age'].fillna(avg) test_df['Age'] = test_df['Age'].fillna(test_avg)<feature_engineering>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset
Petals to the Metal - Flower Classification on TPU
13,540,576
test_avg = np.mean(test_df['Fare']) test_df['Fare'] = test_df['Fare'].fillna(test_avg)<categorify>
def transform_rotation(image, height, rotation): DIM = height XDIM = DIM%2 rotation = rotation * tf.random.uniform([1],dtype='float32') rotation = math.pi * rotation / 180. c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = tf.reshape(tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3]) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(rotation_matrix,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image, tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]) def transform_shear(image, height, shear): DIM = height XDIM = DIM%2 shear = shear * tf.random.uniform([1],dtype='float32') shear = math.pi * shear / 180. one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = tf.reshape(tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3]) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(shear_matrix,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image, tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]) def transform_shift(image, height, h_shift, w_shift): DIM = height XDIM = DIM%2 height_shift = h_shift * tf.random.uniform([1],dtype='float32') width_shift = w_shift * tf.random.uniform([1],dtype='float32') one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') shift_matrix = tf.reshape(tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3]) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(shift_matrix,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image, tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3] )
Petals to the Metal - Flower Classification on TPU
13,540,576
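These three helpers all work the same way: build a 3x3 homogeneous transform, apply it to the *destination* pixel grid (centered on the image), clip, and gather source pixels. This is inverse mapping, which leaves no holes in the output. A minimal NumPy sketch of the same idea for rotation (toy values, nearest-neighbour only; not the notebook's implementation):

import numpy as np

def rotate_nn(img, degrees):
    # For each destination pixel, rotate its centered coordinate and
    # gather the nearest source pixel, mirroring the tf logic above.
    dim = img.shape[0]
    theta = np.deg2rad(degrees)
    c, s = np.cos(theta), np.sin(theta)
    ys, xs = np.mgrid[0:dim, 0:dim]
    yc, xc = ys - dim // 2, xs - dim // 2
    src_y = np.clip((c * yc - s * xc).round().astype(int) + dim // 2, 0, dim - 1)
    src_x = np.clip((s * yc + c * xc).round().astype(int) + dim // 2, 0, dim - 1)
    return img[src_y, src_x]

img = np.arange(16).reshape(4, 4)
print(rotate_nn(img, 90))  # the 4x4 grid rotated by a quarter turn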
le = LabelEncoder()
# Fit on the training column once and reuse the same mapping for test;
# refitting on test can silently assign different integer codes to the
# same category (here both columns share the same category sets).
le.fit(df['Sex'])
df['Sex label'] = le.transform(df['Sex'])
test_df['Sex label'] = le.transform(test_df['Sex'])
le.fit(df['Embarked'])
df['Embarked label'] = le.transform(df['Embarked'])
test_df['Embarked label'] = le.transform(test_df['Embarked'])<prepare_x_and_y>
def data_augment(image, label):
    image = tf.image.random_flip_left_right(image)
    return image, label

def augment(image, label):
    p_rotation = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_pixel = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_shear = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_shift = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    if p_spatial >= .2:
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_flip_up_down(image)
    if p_rotate > .75:
        image = tf.image.rot90(image, k=3)
    elif p_rotate > .5:
        image = tf.image.rot90(image, k=2)
    elif p_rotate > .25:
        image = tf.image.rot90(image, k=1)
    if p_rotation >= .3:
        image = transform_rotation(image, height=HEIGHT, rotation=45.)
    if p_shift >= .3:
        image = transform_shift(image, height=HEIGHT, h_shift=15., w_shift=15.)
    if p_shear >= .3:
        image = transform_shear(image, height=HEIGHT, shear=20.)
    # Check the narrower range first: written as `if p_crop > .4 ... elif p_crop > .7`,
    # the central-crop branch was unreachable (any p_crop > .7 already satisfies > .4).
    if p_crop > .7:
        if p_crop > .9:
            image = tf.image.central_crop(image, central_fraction=.7)
        elif p_crop > .8:
            image = tf.image.central_crop(image, central_fraction=.8)
        else:
            image = tf.image.central_crop(image, central_fraction=.9)
    elif p_crop > .4:
        crop_size = tf.random.uniform([], int(HEIGHT*.7), HEIGHT, dtype=tf.int32)
        image = tf.image.random_crop(image, size=[crop_size, crop_size, CHANNELS])
    image = tf.image.resize(image, size=[HEIGHT, WIDTH])  # restore a fixed shape so batching works
    if p_pixel >= .2:
        if p_pixel >= .8:
            image = tf.image.random_saturation(image, lower=0, upper=2)
        elif p_pixel >= .6:
            image = tf.image.random_contrast(image, lower=.8, upper=2)
        elif p_pixel >= .4:
            image = tf.image.random_brightness(image, max_delta=.2)
        else:
            image = tf.image.adjust_gamma(image, gamma=.6)
    return image, label

def get_training_dataset(aug='old'):
    if aug == 'new':
        aug_arg = augment
    else:
        aug_arg = data_augment
    dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
    dataset = dataset.map(aug_arg, num_parallel_calls=AUTO)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(2048)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_training_dataset_subset(ordered=False):
    dataset = load_dataset(TRAINING_FILENAMES, labeled=True, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.cache()
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_validation_dataset(ordered=False):
    dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.cache()
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_test_dataset(ordered=False):
    dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset

def count_data_items(filenames):
    n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1)) for filename in filenames]
    return np.sum(n)
Petals to the Metal - Flower Classification on TPU
13,540,576
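Since every augmentation gates on its own uniform draw, the output of `augment` is random but its shape is deterministic: the unconditional resize at the end restores [HEIGHT, WIDTH], so batching never breaks. A small eager-mode check, assuming the functions above are in scope and using assumed sizes:

import tensorflow as tf
from tensorflow.keras import backend as K  # the transform_* helpers above rely on K.dot / K.cast

HEIGHT = WIDTH = 512  # assumed to match IMAGE_SIZE above
CHANNELS = 3

dummy = tf.random.uniform([HEIGHT, WIDTH, CHANNELS])
for _ in range(3):
    out, _ = augment(dummy, tf.constant(0))  # `augment` and transform_* defined above
    print(out.shape)  # always (512, 512, 3), whichever random branches fired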
features = ['Pclass', 'Sex label', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked label']
x_tr = df[features]
y_tr = df['Survived']
x_t = test_df[features]
# Standardise both sets with the *training* statistics; scaling the test
# set with its own mean/std would put the two sets on different scales.
train_mean, train_std = np.mean(x_tr), np.std(x_tr)
x_tr = (x_tr - train_mean) / train_std
x_t = (x_t - train_mean) / train_std<choose_model_class>
def batch_to_numpy_images_and_labels(data):
    images, labels = data
    numpy_images = images.numpy()
    numpy_labels = labels.numpy()
    if numpy_labels.dtype == object:  # unlabeled test data carries string ids, not class ints
        numpy_labels = [None for _ in enumerate(numpy_images)]
    return numpy_images, numpy_labels

def title_from_label_and_target(label, correct_label):
    if correct_label is None:
        return CLASSES[label], True
    correct = (label == correct_label)
    return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO',
                                u"\u2192" if not correct else '',
                                CLASSES[correct_label] if not correct else ''), correct

def display_one_flower(image, title, subplot, red=False, titlesize=16):
    plt.subplot(*subplot)
    plt.axis('off')
    plt.imshow(image)
    if len(title) > 0:
        plt.title(title, fontsize=int(titlesize) if not red else int(titlesize/1.2),
                  color='red' if red else 'black',
                  fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5))
    return (subplot[0], subplot[1], subplot[2]+1)

def display_batch_of_images(databatch, predictions=None):
    images, labels = batch_to_numpy_images_and_labels(databatch)
    if labels is None:
        labels = [None for _ in enumerate(images)]
    rows = int(math.sqrt(len(images)))
    cols = len(images)//rows
    FIGSIZE = 13.0
    SPACING = 0.1
    subplot = (rows, cols, 1)
    if rows < cols:
        plt.figure(figsize=(FIGSIZE, FIGSIZE/cols*rows))
    else:
        plt.figure(figsize=(FIGSIZE/rows*cols, FIGSIZE))
    for i, (image, label) in enumerate(zip(images[:rows*cols], labels[:rows*cols])):
        title = '' if label is None else CLASSES[label]
        correct = True
        if predictions is not None:
            title, correct = title_from_label_and_target(predictions[i], label)
        dynamic_titlesize = FIGSIZE*SPACING/max(rows, cols)*40 + 3
        subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize)
    plt.tight_layout()
    if label is None and predictions is None:
        plt.subplots_adjust(wspace=0, hspace=0)
    else:
        plt.subplots_adjust(wspace=SPACING, hspace=SPACING)
    plt.show()

def display_training_curves(training, validation, title, subplot):
    if subplot % 10 == 1:  # set up the figure on the first call
        plt.subplots(figsize=(10, 10), facecolor='#F0F0F0')  # hex color lost in extraction; restored from the standard starter utilities
        plt.tight_layout()
    ax = plt.subplot(subplot)
    ax.set_facecolor('#F8F8F8')  # hex color lost in extraction; restored from the standard starter utilities
    ax.plot(training)
    ax.plot(validation)
    ax.set_title('model ' + title)
    ax.set_ylabel(title)
    ax.set_xlabel('epoch')
    ax.legend(['train', 'valid.'])

def display_confusion_matrix(cmat, score, precision, recall):
    plt.figure(figsize=(15, 15))
    ax = plt.gca()
    ax.matshow(cmat, cmap='Reds')
    ax.set_xticks(range(len(CLASSES)))
    ax.set_xticklabels(CLASSES, fontdict={'fontsize': 7})
    plt.setp(ax.get_xticklabels(), rotation=45, ha="left", rotation_mode="anchor")
    ax.set_yticks(range(len(CLASSES)))
    ax.set_yticklabels(CLASSES, fontdict={'fontsize': 7})
    plt.setp(ax.get_yticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    titlestring = ""
    if score is not None:
        titlestring += 'f1 = {:.3f} '.format(score)
    if precision is not None:
        titlestring += ' precision = {:.3f} '.format(precision)
    if recall is not None:
        titlestring += ' recall = {:.3f} '.format(recall)
    if len(titlestring) > 0:
        ax.text(101, 1, titlestring,
                fontdict={'fontsize': 18, 'horizontalalignment': 'right',
                          'verticalalignment': 'top',
                          'color': '#804040'})  # hex color lost in extraction; restored from the standard starter utilities
    plt.show()
Petals to the Metal - Flower Classification on TPU
13,540,576
parameters = { 'max_depth': range(2, 5, 1), 'n_estimators': range(100, 600, 10), 'learning_rate': [0.01, 0.05], 'colsample_bytree': [.9, 1] } gbm = XGBClassifier() model = GridSearchCV( estimator=gbm, param_grid=parameters, scoring = 'accuracy', n_jobs = 10, cv = 10, verbose=True ) <find_best_params>
def create_model(input_shape, NUM_CLASSES):
    pretrained_model = efn.EfficientNetB6(
        weights='noisy-student',
        include_top=False,
        input_shape=input_shape
    )
    pretrained_model.trainable = False  # freeze the backbone for the warm-up phase
    model = tf.keras.Sequential([
        pretrained_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')  # use the parameter instead of the global len(CLASSES)
    ])
    return model
Petals to the Metal - Flower Classification on TPU
13,540,576
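With the backbone frozen, only the pooled classification head trains during warm-up. A quick sketch (assumed input shape and class count; requires the `efn` import above) to confirm how few weights are actually trainable:

import tensorflow as tf

model = create_model((512, 512, 3), 104)  # `create_model` defined above
trainable = sum(tf.keras.backend.count_params(w) for w in model.trainable_weights)
frozen = sum(tf.keras.backend.count_params(w) for w in model.non_trainable_weights)
print(f'trainable: {trainable:,}  frozen: {frozen:,}')  # only the Dense head should be trainable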
print("Лучшие параметры: ", model.best_params_) print("Лучшая точность: ", xgb_random.best_score_ )<train_model>
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) train_dataset = get_training_dataset_subset(ordered=True) y_train = next(iter(train_dataset.unbatch().map(lambda image, label: label ).batch(NUM_TRAINING_IMAGES)) ).numpy() print(f'Number of training images {NUM_TRAINING_IMAGES}') NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) valid_dataset = get_validation_dataset(ordered=True) y_valid = next(iter(valid_dataset.unbatch().map(lambda image, label: label ).batch(NUM_VALIDATION_IMAGES)) ).numpy() print(f'Number of validation images {NUM_VALIDATION_IMAGES}') NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print(f'Number of test images {NUM_TEST_IMAGES}') test_dataset = get_test_dataset(ordered=True )
Petals to the Metal - Flower Classification on TPU
13,540,576
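The shard filenames encode how many images each file holds, which is exactly what the regex in `count_data_items` extracts: the digits between the last '-' and the extension. A toy check with hypothetical filenames:

import re
import numpy as np

def count_data_items(filenames):
    # '-<count>.' just before the extension is the per-shard image count
    n = [int(re.compile(r"-([0-9]*)\.").search(f).group(1)) for f in filenames]
    return np.sum(n)

print(count_data_items(['flowers-00-230.tfrec', 'flowers-01-230.tfrec']))  # 460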
model = XGBClassifier(n_estimators=180, max_depth = 3 , learning_rate= 0.01, colsample_bytree = 1) model.fit(x_tr, y_tr) predictions = model.predict(x_t) pr = pd.DataFrame(test_df['PassengerId']) pr['Survived'] = pd.DataFrame(predictions )<save_to_csv>
train_iter = iter(train_dataset.unbatch().batch(20)) train_batch = next(train_iter) display_batch_of_images(train_batch)
Petals to the Metal - Flower Classification on TPU
13,540,576
pr.to_csv('submission.csv', index = False )<import_modules>
valid_iter = iter(valid_dataset.unbatch().batch(20)) valid_batch = next(valid_iter) display_batch_of_images(valid_batch)
Petals to the Metal - Flower Classification on TPU
13,540,576
plotly.offline.init_notebook_mode() %matplotlib inline def RMSLE(pred,actual): return np.sqrt(np.mean(np.power(( np.log(pred+1)-np.log(actual+1)) ,2)) )<load_from_csv>
test_iter = iter(test_dataset.unbatch().batch(20)) test_batch = next(test_iter) display_batch_of_images(test_batch)
Petals to the Metal - Flower Classification on TPU
13,540,576
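RMSLE compares log-counts, so it punishes relative rather than absolute error -- useful when case counts span several orders of magnitude. A quick numeric check of the definition above:

import numpy as np

def RMSLE(pred, actual):
    return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))

# A 10x over-prediction costs roughly the same at any scale:
print(RMSLE(np.array([100.]), np.array([10.])))        # ~2.22
print(RMSLE(np.array([100000.]), np.array([10000.])))  # ~2.30 (the +1 matters less at scale)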
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("../input/covid19-global-forecasting-week-2/test.csv")
train = pd.read_csv("../input/covid19-global-forecasting-week-2/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region','Province_State','Date'])
test = test.sort_values(['Country_Region','Province_State','Date'])<categorify>
display_batch_of_images(next(iter(get_training_dataset().unbatch().batch(20))))
Petals to the Metal - Flower Classification on TPU
13,540,576
train[['ConfirmedCases', 'Fatalities']] = train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']].transform('cummax') <feature_engineering>
display_batch_of_images(next(iter(get_training_dataset(aug='new' ).unbatch().batch(20))))
Petals to the Metal - Flower Classification on TPU
13,540,576
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() with tqdm(total=len(train['Country_Region'].unique())) as pbar: for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) feature_use = X_pred.columns[0] for i in range(X_pred.shape[1] - 1,0,-1): if(X_pred.iloc[0,i] > 0): feature_use = X_pred.columns[i] break idx = X_train[X_train[feature_use] == 0].shape[0] adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] adjusted_X_pred = X_pred[feature_use].values.reshape(-1, 1) model = make_pipeline(PolynomialFeatures(2), BayesianRidge()) model.fit(adjusted_X_train,adjusted_y_train_confirmed) y_hat_confirmed = model.predict(adjusted_X_pred) model.fit(adjusted_X_train,adjusted_y_train_fatalities) y_hat_fatalities = model.predict(adjusted_X_pred) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) pbar.update(1) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_1 = df_val.copy()<compute_test_metric>
with strategy.scope() : model = create_model(( None, None, CHANNELS), NUM_CLASSES) model.compile( optimizer=optimizers.Adam(lr=WARMUP_LEARNING_RATE), loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'], ) model.summary()
Petals to the Metal - Flower Classification on TPU
13,540,576
RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values )<compute_test_metric>
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE warmup_history = model.fit(x=get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, validation_data=get_validation_dataset() , epochs=3, verbose=2 ).history
Petals to the Metal - Flower Classification on TPU
13,540,576
RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values )<groupby>
for layer in model.layers:
    layer.trainable = True  # unfreeze the backbone for fine-tuning

checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', save_best_only=True)  # was monitor='val_lines', which is not a logged metric
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
lr_callback = LearningRateScheduler(exponential_schedule_with_warmup, verbose=0)
callback_list = [checkpoint, es, lr_callback]

model.compile(optimizer=optimizers.Adam(lr=LEARNING_RATE),
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
Petals to the Metal - Flower Classification on TPU
13,540,576
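`exponential_schedule_with_warmup` is referenced above but not shown in this cell. A plausible sketch of such a schedule -- a linear warm-up into exponential decay -- with assumed hyperparameters (every constant below is an assumption, not the notebook's actual values):

WARMUP_EPOCHS = 3            # assumption
WARMUP_LEARNING_RATE = 1e-5  # assumption
LEARNING_RATE = 3e-5         # assumption
DECAY = 0.8                  # assumption

def exponential_schedule_with_warmup(epoch, lr=None):
    # Ramp linearly to the full rate, then decay it exponentially.
    if epoch < WARMUP_EPOCHS:
        return WARMUP_LEARNING_RATE + (LEARNING_RATE - WARMUP_LEARNING_RATE) * epoch / WARMUP_EPOCHS
    return LEARNING_RATE * DECAY ** (epoch - WARMUP_EPOCHS)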
country = "Vietnam" df_val = df_val_1 df_val[df_val['Country_Region'] == country].groupby(['Date','Country_Region'] ).sum().reset_index()<save_model>
history = model.fit( get_training_dataset() , validation_data=valid_dataset, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, callbacks=[lr_callback], verbose=2 ).history
Petals to the Metal - Flower Classification on TPU
13,540,576
animator.save('confirm_animation.gif', writer='imagemagick', fps=2) display(Image(url='confirm_animation.gif'))<feature_engineering>
cmdataset = get_validation_dataset(ordered=True) images_ds = cmdataset.map(lambda image, label: image) labels_ds = cmdataset.map(lambda image, label: label ).unbatch() cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy() cm_probabilities = model.predict(images_ds) cm_predictions = np.argmax(cm_probabilities, axis=-1) labels = range(len(CLASSES)) cmat = confusion_matrix( cm_correct_labels, cm_predictions, labels=labels, ) cmat =(cmat.T / cmat.sum(axis=1)).T
Petals to the Metal - Flower Classification on TPU
13,540,576
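The last line of that cell converts raw counts to per-class rates: `(cmat.T / cmat.sum(axis=1)).T` divides each row by its total, so entry (i, j) becomes the fraction of true class i predicted as class j. A toy check:

import numpy as np

cmat = np.array([[8., 2.],
                 [1., 9.]])
print((cmat.T / cmat.sum(axis=1)).T)  # [[0.8 0.2] [0.1 0.9]] -- rows sum to 1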
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() with tqdm(total=len(train['Country_Region'].unique())) as pbar: for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : with warnings.catch_warnings() : warnings.filterwarnings("ignore") df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) feature_use = X_pred.columns[0] for i in range(X_pred.shape[1] - 1,0,-1): if(X_pred.iloc[0,i] > 0): feature_use = X_pred.columns[i] break idx = X_train[X_train[feature_use] == 0].shape[0] adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = ExponentialSmoothing(adjusted_y_train_confirmed, trend = 'additive' ).fit() y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = ExponentialSmoothing(adjusted_y_train_fatalities, trend = 'additive' ).fit() y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) pbar.update(1) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_2 = df_val.copy()<feature_engineering>
score = f1_score( cm_correct_labels, cm_predictions, labels=labels, average='macro', ) precision = precision_score( cm_correct_labels, cm_predictions, labels=labels, average='macro', ) recall = recall_score( cm_correct_labels, cm_predictions, labels=labels, average='macro', )
Petals to the Metal - Flower Classification on TPU
13,540,576
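Macro averaging scores every class equally, which is why it suits a 104-class problem with rare flowers: missing a rare class hurts as much as missing a common one. A toy illustration (sklearn will warn about the undefined precision for the missed class):

from sklearn.metrics import f1_score

y_true = [0, 0, 0, 0, 1]
y_pred = [0, 0, 0, 0, 0]  # the rare class 1 is never predicted
print(f1_score(y_true, y_pred, average='macro'))     # ~0.444: the missed rare class halves the score
print(f1_score(y_true, y_pred, average='weighted'))  # ~0.711: dominated by the common class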
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() with tqdm(total=len(train['Country_Region'].unique())) as pbar: for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : with warnings.catch_warnings() : warnings.filterwarnings("ignore") df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) feature_use = X_pred.columns[0] for i in range(X_pred.shape[1] - 1,0,-1): if(X_pred.iloc[0,i] > 0): feature_use = X_pred.columns[i] break idx = X_train[X_train[feature_use] == 0].shape[0] adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) pbar.update(1) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_3 = df_val.copy()<compute_test_metric>
dataset = get_validation_dataset() dataset = dataset.unbatch().batch(20) batch = iter(dataset )
Petals to the Metal - Flower Classification on TPU
13,540,576
method_list = ['Poly Bayesian Ridge','Exponential Smoothing','SARIMA'] method_val = [df_val_1,df_val_2,df_val_3] for i in range(0,3): df_val = method_val[i] method_score = [method_list[i]] + [RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)] + [RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)] print(method_score )<save_to_csv>
images, labels = next(batch) probabilities = model.predict(images) predictions = np.argmax(probabilities, axis=-1) display_batch_of_images(( images, labels), predictions )
Petals to the Metal - Flower Classification on TPU
13,540,576
df_val = df_val_3 submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']] submission.columns = ['ForecastId','ConfirmedCases','Fatalities'] submission = submission.round({'ConfirmedCases': 0, 'Fatalities': 0}) submission.to_csv('submission.csv', index=False) submission<filter>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities = model.predict(test_images_ds) predictions = np.argmax(probabilities, axis=-1) print(predictions )
Petals to the Metal - Flower Classification on TPU
13,540,576
<import_modules><EOS>
print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt( 'submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='', )
Petals to the Metal - Flower Classification on TPU
13,081,600
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<load_from_csv>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
13,081,600
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')<concatenate>
%matplotlib inline print("Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
13,081,600
all_data = pd.concat([train,test],sort=False,ignore_index=True) all_data.info()<groupby>
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started" )
Petals to the Metal - Flower Classification on TPU
13,081,600
train.groupby('Pclass' ).Survived.agg(['mean','size'] )<groupby>
IMAGE_SIZE = [512, 512] BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') SEED = 2020
Petals to the Metal - Flower Classification on TPU
13,081,600
train.groupby('Sex')['Survived'].mean()<feature_engineering>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
13,081,600
train['family_size'] = train.SibSp+train.Parch+1 all_data['family_size'] = all_data.SibSp+all_data.Parch+1<groupby>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile(optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) return model with strategy.scope() : model1 = get_model(InceptionV3) model1.load_weights("/kaggle/input/fork-of-start-with-pre-train/my_ef_net_b7.h5" )
Petals to the Metal - Flower Classification on TPU
13,081,600
train.groupby('family_size' ).Survived.mean()<feature_engineering>
def get_model(use_model):
    base_model = use_model(weights='imagenet',
                           include_top=False,
                           pooling='avg',
                           input_shape=(*IMAGE_SIZE, 3))
    x = base_model.output
    predictions = Dense(104, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(optimizer='nadam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
    return model

with strategy.scope():
    model2 = get_model(InceptionV3)
    model2.load_weights("/kaggle/input/start-with-pre-train/my_ef_net_b7.h5")  # was model1.load_weights, which would overwrite the first model and leave model2 untrained
Petals to the Metal - Flower Classification on TPU
13,081,600
all_data.loc[all_data.Fare.isnull(), 'Fare'] = train.Fare.median()  # .loc avoids chained-indexing assignment<feature_engineering>
val_dataset = get_validation_dataset()
images_ds = val_dataset.map(lambda image, label: image)
labels_ds = val_dataset.map(lambda image, label: label).unbatch()
val_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES))).numpy()

m1 = model1.predict(images_ds)
m2 = model2.predict(images_ds)

alphas = np.linspace(0, 1, 100)
scores = []
for alpha in alphas:
    val_probabilities = alpha*m1 + (1-alpha)*m2
    val_predictions = np.argmax(val_probabilities, axis=-1)
    scores.append(f1_score(val_labels, val_predictions, labels=range(104), average='macro'))
# Index back into the grid: np.argmax(scores)/100 does not recover the alpha
# actually tried, since np.linspace(0, 1, 100) steps by 1/99.
best_alpha = alphas[np.argmax(scores)]
print('Best alpha: ' + str(best_alpha))
Petals to the Metal - Flower Classification on TPU
13,081,600
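The grid search above generalizes directly to any pair of models. A reusable sketch (hypothetical helper, toy data) that returns both the best blend weight and its score:

import numpy as np
from sklearn.metrics import f1_score

def best_blend_weight(p1, p2, y_true, n_grid=101):
    # Scan blend weights on a grid and index back into it -- the same
    # idea as above, packaged as a helper for illustration.
    alphas = np.linspace(0, 1, n_grid)
    scores = [f1_score(y_true, np.argmax(a * p1 + (1 - a) * p2, axis=-1),
                       average='macro') for a in alphas]
    best = int(np.argmax(scores))
    return alphas[best], scores[best]

# Toy check with random "probabilities" for 5 samples over 3 classes:
rng = np.random.default_rng(0)
p1, p2 = rng.random((5, 3)), rng.random((5, 3))
print(best_blend_weight(p1, p2, y_true=[0, 1, 2, 0, 1]))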
<groupby><EOS>
test_ds = get_test_dataset(ordered=True)

print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities1 = model1.predict(test_images_ds)
probabilities2 = model2.predict(test_images_ds)
probabilities = best_alpha * probabilities1 + (1 - best_alpha) * probabilities2
predictions = np.argmax(probabilities, axis=-1)
print(predictions)

print('Generating submission.csv file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
13,377,373
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<groupby>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
13,377,373
train.groupby('Embarked' ).Survived.agg(['mean','size'] )<set_options>
%matplotlib inline print("Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
13,377,373
# Plain assignment instead of an in-place fill on an attribute access:
all_data['Embarked'] = all_data['Embarked'].fillna(train.Embarked.mode()[0])<groupby>
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started" )
Petals to the Metal - Flower Classification on TPU
13,377,373
train.groupby(train.Ticket.map(lambda x: str(x)[0])).Survived.agg(['mean','count'] )<data_type_conversions>
IMAGE_SIZE = [512, 512] BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') SEED = 2020
Petals to the Metal - Flower Classification on TPU
13,377,373
ticket_num = train.Ticket.str.extract('^(\d)|^\D.*\s(\d)\d*$' ).fillna(0 ).astype(int ).sum(axis=1 )<groupby>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
13,377,373
train.Survived.groupby(ticket_num ).agg(['mean','count'] )<groupby>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) return model with strategy.scope() : model1 = get_model(ResNet152V2) model1.load_weights("/kaggle/input/fork-of-start-with-pre-train-255147/my_ef_net_b7.h5" )
Petals to the Metal - Flower Classification on TPU
13,377,373
test.groupby(test.Ticket.map(lambda x: str(x)[0])).size()<data_type_conversions>
def get_model(use_model): base_model = use_model(weights='noisy-student', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) return model with strategy.scope() : model2 = get_model(EfficientNetB7) model2.load_weights("/kaggle/input/more-data-with-efficientnetb7/my_ef_net_b7.h5" )
Petals to the Metal - Flower Classification on TPU
13,377,373
all_data['Ticket_first_letter'] = all_data.Ticket.map(lambda x: str(x)[0]) all_data['Ticket_first_num'] = all_data.Ticket.str.extract('^(\d)|^\D.*\s(\d)\d*$' ).fillna(0 ).astype(int ).sum(axis=1 )<feature_engineering>
val_dataset = get_validation_dataset()
images_ds = val_dataset.map(lambda image, label: image)
labels_ds = val_dataset.map(lambda image, label: label).unbatch()
val_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES))).numpy()

m1 = model1.predict(images_ds)
m2 = model2.predict(images_ds)

alphas = np.linspace(0, 1, 100)
scores = []
for alpha in alphas:
    val_probabilities = alpha*m1 + (1-alpha)*m2
    val_predictions = np.argmax(val_probabilities, axis=-1)
    scores.append(f1_score(val_labels, val_predictions, labels=range(104), average='macro'))
best_alpha = alphas[np.argmax(scores)]  # index back into the grid rather than dividing by 100
print('Best alpha: ' + str(best_alpha))
Petals to the Metal - Flower Classification on TPU