Dataset columns (name / dtype / min / max):
kernel_id      int64          24.2k   23.3M
prompt         stringlengths  8       1.85M
completetion   stringlengths  1       182k
comp_name      stringlengths  5       57
10,218,760
models = [
    ('Linear Regression', regressor),
    ('Linear Regression Ridge', ridge),
    ('Linear Regression Lasso', lasso),
    ('Linear Regression ElasticNet', en),
    ('Decision Tree Regression', dtr),
    ('Random Forest Regression', rf),
]
for i, (name, model) in enumerate(models):
    if i > 0:
        print('-' * 25)
    print(name)
    print("RMSE on Training set :", rmse_cv(model, X_train, y_train).mean())
    print("RMSE on Test set :", rmse_cv(model, X_test, y_test).mean())
    print('RMSLE score on train data: ', rmsle(y_train, model.predict(X_train)))
    print('RMSLE score on test data: ', rmsle(y_test, model.predict(X_test)))
<choose_model_class>
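The `rmse_cv` and `rmsle` helpers used above are never defined in this excerpt. A minimal sketch of what they plausibly look like, assuming `cross_val_score` with negated MSE for `rmse_cv`, targets that are already log1p-transformed (the `np.expm1` calls later in the notebook suggest this), and an assumed fold count:

import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score

def rmse_cv(model, X, y, cv=5):  # cv=5 is an assumed default
    # Cross-validated RMSE; sklearn reports negative MSE, so negate before sqrt.
    mse = -cross_val_score(model, X, y, scoring='neg_mean_squared_error', cv=cv)
    return np.sqrt(mse)

def rmsle(y, y_pred):
    # With log1p-transformed targets, RMSLE reduces to plain RMSE in log space.
    return np.sqrt(mean_squared_error(y, y_pred))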
if not SKIP_VALIDATION:
    history2 = model2.fit(get_training_dataset(),
                          steps_per_epoch=STEPS_PER_EPOCH,
                          epochs=EPOCHS,
                          validation_data=get_validation_dataset(),
                          callbacks=[lr_callback])
else:
    history2 = model2.fit(get_training_dataset(),
                          steps_per_epoch=STEPS_PER_EPOCH,
                          epochs=EPOCHS,
                          callbacks=[lr_callback])
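`lr_callback`, `STEPS_PER_EPOCH`, and `EPOCHS` come from cells outside this excerpt. Flower-classification TPU notebooks usually drive the learning rate with a ramp-up/hold/decay schedule; a sketch with illustrative constants (all values are assumptions, not the author's):

from tensorflow.keras.callbacks import LearningRateScheduler

LR_START, LR_MAX, LR_MIN = 1e-5, 4e-5, 1e-5                # assumed values
LR_RAMPUP_EPOCHS, LR_SUSTAIN_EPOCHS, LR_EXP_DECAY = 5, 0, 0.8

def lrfn(epoch):
    # Linear warm-up, optional plateau, then exponential decay toward LR_MIN.
    if epoch < LR_RAMPUP_EPOCHS:
        return (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    if epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
        return LR_MAX
    return (LR_MAX - LR_MIN) * LR_EXP_DECAY ** (epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN

lr_callback = LearningRateScheduler(lrfn, verbose=True)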
Petals to the Metal - Flower Classification on TPU
10,218,760
# cv_*[cv_* == min(cv_*)].index[0] is equivalent to cv_*.idxmin(): the alpha with the lowest CV score.
ridge = Ridge(alpha=cv_ridge[cv_ridge == min(cv_ridge)].index[0])
ridge.fit(X_train, y_train)
ridge_train_lm = ridge.predict(X_train)
ridge_test_lm = ridge.predict(X_test)

lasso = Lasso(max_iter=500, alpha=cv_lasso[cv_lasso == min(cv_lasso)].index[0])
lasso.fit(X_train, y_train)
lasso_train_lm = lasso.predict(X_train)
lasso_test_lm = lasso.predict(X_test)

en = ElasticNet(max_iter=500, alpha=cv_elasticnet[cv_elasticnet == min(cv_elasticnet)].index[0])
en.fit(X_train, y_train)
en_train_lm = en.predict(X_train)
en_test_lm = en.predict(X_test)
<compute_train_metric>
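`cv_ridge`, `cv_lasso`, and `cv_elasticnet` are not built in this excerpt; indexing the minimum with `.index[0]` suggests pandas Series of CV scores indexed by alpha. A sketch for the ridge case, reusing the `rmse_cv` helper and an alpha grid like the one in the search cell below (the grid here is an assumption):

import pandas as pd
from sklearn.linear_model import Ridge

alphas = [2.5, 5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25, 27.5, 30]
# Mean CV RMSE per alpha; the Series index carries the alpha values,
# so the minimum's index recovers the best alpha.
cv_ridge = pd.Series([rmse_cv(Ridge(alpha=a), X_train, y_train).mean() for a in alphas],
                     index=alphas)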
if not SKIP_VALIDATION:
    cmdataset = get_validation_dataset(ordered=True)
    images_ds = cmdataset.map(lambda image, label: image)
    labels_ds = cmdataset.map(lambda image, label: label).unbatch()
    cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES))).numpy()
    m1 = model1.predict(images_ds)
    m2 = model2.predict(images_ds)
    alphas = np.linspace(0, 1, 100)
    scores = []
    for alpha in alphas:
        cm_probabilities = alpha * m1 + (1 - alpha) * m2
        cm_predictions = np.argmax(cm_probabilities, axis=-1)
        scores.append(f1_score(cm_correct_labels, cm_predictions,
                               labels=range(len(CLASSES)), average='macro'))
    # Index back into the same grid: the original np.argmax(scores)/100 assumed a
    # step of 0.01, but np.linspace(0, 1, 100) actually steps by 1/99.
    best_alpha = alphas[np.argmax(scores)]
else:
    best_alpha = 0.51
print('Best alpha: ' + str(best_alpha))
Petals to the Metal - Flower Classification on TPU
10,218,760
for i, (name, model) in enumerate([('Linear Regression Ridge', ridge),
                                   ('Linear Regression Lasso', lasso),
                                   ('Linear Regression ElasticNet', en)]):
    if i > 0:
        print('-' * 25)
    print(name)
    print("RMSE on Training set :", rmse_cv(model, X_train, y_train).mean())
    print("RMSE on Test set :", rmse_cv(model, X_test, y_test).mean())
    print('RMSLE score on train data: ', rmsle(y_train, model.predict(X_train)))
    print('RMSLE score on test data: ', rmsle(y_test, model.predict(X_test)))
<define_search_model>
if not SKIP_VALIDATION:
    cmat = confusion_matrix(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)))
    score = f1_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)), average='macro')
    precision = precision_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)), average='macro')
    recall = recall_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)), average='macro')
    display_confusion_matrix(cmat, score, precision, recall)
    print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(score, precision, recall))
Petals to the Metal - Flower Classification on TPU
10,218,760
alphas_ridge = [2.5, 5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25, 27.5, 30]
alphas_lasso = [0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.0011]
alphas_en = [0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.0011, 0.0012, 0.0013, 0.0014, 0.0015, 0.0016]
l1ratio_en = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.99, 1]

ridge = make_pipeline(RidgeCV(alphas=alphas_ridge, cv=kfolds))
lasso = make_pipeline(LassoCV(max_iter=500, alphas=alphas_lasso, random_state=42, cv=kfolds))
elasticnet = make_pipeline(ElasticNetCV(max_iter=500, alphas=alphas_en, cv=kfolds, l1_ratio=l1ratio_en))
<train_model>
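`kfolds` is defined outside this excerpt; a plausible splitter, with the fold count and seed as assumptions:

from sklearn.model_selection import KFold

kfolds = KFold(n_splits=10, shuffle=True, random_state=42)  # assumed configuration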
def predict_tta(model, n_iter):
    # Run the model n_iter times over a freshly built test pipeline and
    # collect one probability matrix per pass.
    probs = []
    for i in range(n_iter):
        test_ds = get_test_dataset(ordered=True)
        test_images_ds = test_ds.map(lambda image, idnum: image)
        probs.append(model.predict(test_images_ds, verbose=0))
    return probs
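Note that `get_test_dataset` (defined later in this excerpt) applies no random transform, so the `n_iter` passes would produce identical probabilities; test-time augmentation only pays off when each pass sees a differently augmented copy of the test images. A sketch of the kind of map that could be added to the test pipeline (the specific transforms are assumptions):

import tensorflow as tf

def tta_augment(image, idnum):
    # Random per-pass perturbations so repeated predictions actually differ.
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_saturation(image, 0.8, 1.2)
    return image, idnum

# e.g. inside get_test_dataset: dataset = dataset.map(tta_augment, num_parallel_calls=AUTO)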
Petals to the Metal - Flower Classification on TPU
10,218,760
<compute_train_metric><EOS>
test_ds = get_test_dataset(ordered=True)
print('Calculating predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probs1 = np.mean(predict_tta(model1, TTA_NUM), axis=0)
probs2 = np.mean(predict_tta(model2, TTA_NUM), axis=0)
probabilities = best_alpha * probs1 + (1 - best_alpha) * probs2
predictions = np.argmax(probabilities, axis=-1)
print('Generating submission file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]),
           fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
14,153,282
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<compute_test_metric>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
14,153,282
print('Ensembled Model')
print('RMSLE score on train data: ', rmsle(y_train, blend_models_predict(X_train, [ridge_full, lasso_full, en_full])))
print('RMSLE score on test data: ', rmsle(y_test, blend_models_predict(X_test, [ridge_full, lasso_full, en_full])))
<create_dataframe>
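`blend_models_predict` is not defined in this excerpt; its call sites take a feature matrix plus a list of fitted models and return a single prediction vector. A minimal sketch, assuming an unweighted average in log space:

import numpy as np

def blend_models_predict(X, models):
    # Hypothetical reconstruction: equal-weight average of each model's predictions.
    return np.mean([model.predict(X) for model in models], axis=0)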
%matplotlib inline
import tensorflow as tf
print("Tensorflow version " + tf.__version__)
Petals to the Metal - Flower Classification on TPU
14,153,282
blended_train = pd.DataFrame({
    'Id': X_train.index.values,
    # np.expm1 inverts the log1p transform applied to SalePrice during training.
    'SalePrice': np.expm1(blend_models_predict(X_train, [ridge_full, lasso_full, en_full])),
})
blended_test = pd.DataFrame({
    'Id': X_test.index.values,
    'SalePrice': np.expm1(blend_models_predict(X_test, [ridge_full, lasso_full, en_full])),
})
<feature_engineering>
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started")
Petals to the Metal - Flower Classification on TPU
14,153,282
q1_tr = blended_train['SalePrice'].quantile(0.01)
q2_tr = blended_train['SalePrice'].quantile(0.99)
q1_te = blended_test['SalePrice'].quantile(0.01)
q2_te = blended_test['SalePrice'].quantile(0.99)
# Shrink predictions at or below the 1st percentile and stretch those at or above the 99th.
blended_train['SalePrice'] = blended_train['SalePrice'].apply(lambda x: x if x > q1_tr else x * 0.8)
blended_train['SalePrice'] = blended_train['SalePrice'].apply(lambda x: x if x < q2_tr else x * 1.1)
blended_test['SalePrice'] = blended_test['SalePrice'].apply(lambda x: x if x > q1_te else x * 0.8)
blended_test['SalePrice'] = blended_test['SalePrice'].apply(lambda x: x if x < q2_te else x * 1.1)
<compute_test_metric>
IMAGE_SIZE = [512, 512]
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
GCS_PATH_SELECT = {
    192: GCS_DS_PATH + '/tfrecords-jpeg-192x192',
    224: GCS_DS_PATH + '/tfrecords-jpeg-224x224',
    331: GCS_DS_PATH + '/tfrecords-jpeg-331x331',
    512: GCS_DS_PATH + '/tfrecords-jpeg-512x512',
}
GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')
SEED = 2020
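`strategy` is assumed to come from the standard TPU bootstrap shared by these notebooks; a sketch that falls back to the default strategy when no TPU is attached:

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()  # detects the Kaggle TPU
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
    strategy = tf.distribute.get_strategy()  # CPU/GPU fallback
print('Replicas:', strategy.num_replicas_in_sync)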
Petals to the Metal - Flower Classification on TPU
14,153,282
print('Ensembled Model')
print('RMSLE score on train data: ', rmsle(y_train, np.log1p(blended_train['SalePrice'])))
print('RMSLE score on test data: ', rmsle(y_test, np.log1p(blended_test['SalePrice'])))
<train_model>
# Assumed from an earlier cell of this notebook: the parallelism constant and imports below.
import re
import numpy as np
AUTO = tf.data.experimental.AUTOTUNE

def decode_image(image_data):
    image = tf.image.decode_jpeg(image_data, channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.reshape(image, [*IMAGE_SIZE, 3])
    return image

def read_labeled_tfrecord(example):
    LABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "class": tf.io.FixedLenFeature([], tf.int64),
    }
    example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    label = tf.cast(example['class'], tf.int32)
    return image, label

def read_unlabeled_tfrecord(example):
    UNLABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "id": tf.io.FixedLenFeature([], tf.string),
    }
    example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    idnum = example['id']
    return image, idnum

def load_dataset(filenames, labeled=True, ordered=False):
    ignore_order = tf.data.Options()
    if not ordered:
        # Disable order preservation so interleaved reads can stream faster.
        ignore_order.experimental_deterministic = False
    dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
    dataset = dataset.with_options(ignore_order)
    dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord,
                          num_parallel_calls=AUTO)
    return dataset

def get_validation_dataset(ordered=True):
    # Callers pass ordered=True (see the alpha-search cell above), so accept it here.
    dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.cache()
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_test_dataset(ordered=False):
    dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset

def count_data_items(filenames):
    # Filenames encode the record count, e.g. "flowers00-230.tfrec" holds 230 items.
    n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1)) for filename in filenames]
    return np.sum(n)

NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
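`get_training_dataset`, used in the fit cell above, sits outside this excerpt; a sketch consistent with the loaders here (the train-file glob and the augmentation choice are assumptions):

def data_augment(image, label):
    # Light random augmentation, re-drawn every epoch.
    image = tf.image.random_flip_left_right(image, seed=SEED)
    return image, label

def get_training_dataset():
    dataset = load_dataset(tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec'), labeled=True)
    dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
    dataset = dataset.repeat()          # the training loop decides when to stop
    dataset = dataset.shuffle(2048)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset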
Petals to the Metal - Flower Classification on TPU
14,153,282
ridge_full = ridge.fit(train, y)
lasso_full = lasso.fit(train, y)
en_full = elasticnet.fit(train, y)
<create_dataframe>
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model

def get_model(use_model):
    base_model = use_model(weights='imagenet', include_top=False,
                           pooling='avg', input_shape=(*IMAGE_SIZE, 3))
    x = base_model.output
    predictions = Dense(104, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(optimizer='nadam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['sparse_categorical_accuracy'])
    return model

with strategy.scope():
    model1 = get_model(DenseNet201)
    model1.load_weights("/kaggle/input/start-with-pre-train-0895d8/my_ef_net_b7.h5")
Petals to the Metal - Flower Classification on TPU
14,153,282
submission = pd.DataFrame({
    'Id': df_test.index.values,
    'SalePrice': np.expm1(blend_models_predict(test, [ridge_full, lasso_full, en_full])),
})
<feature_engineering>
def get_model(use_model):
    base_model = use_model(weights='noisy-student', include_top=False,
                           pooling='avg', input_shape=(*IMAGE_SIZE, 3))
    x = base_model.output
    predictions = Dense(104, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(optimizer='nadam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['sparse_categorical_accuracy'])
    return model

with strategy.scope():
    model2 = get_model(EfficientNetB7)
    model2.load_weights("/kaggle/input/fork-of-start-with-pre-train-0895d8/my_ef_net_b7.h5")
Petals to the Metal - Flower Classification on TPU
14,153,282
<save_to_csv><EOS>
test_ds = get_test_dataset(ordered=True)
best_alpha = 0.48
print('Calculating predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities1 = model1.predict(test_images_ds)
probabilities2 = model2.predict(test_images_ds)
probabilities = best_alpha * probabilities1 + (1 - best_alpha) * probabilities2
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
print('Creating submission.csv...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]),
           fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
14,041,455
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<install_modules>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
14,041,455
!pip install --upgrade pandas
<set_options>
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
from tensorflow.keras.applications.nasnet import NASNetLarge
from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2, EfficientNetB0, EfficientNetB1
Petals to the Metal - Flower Classification on TPU
14,041,455
%matplotlib inline
import os
import multiprocessing
import warnings
import pandas as pd
import tensorflow_hub as hub

warnings.filterwarnings("ignore")
embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder-large/5")
print("No.of Processors are ", multiprocessing.cpu_count())
pd.set_option('max_colwidth', 500)
pd.set_option('max_columns', 500)
pd.set_option('max_rows', 100)
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
<categorify>
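`embed` is the Universal Sentence Encoder (large, v5) from TF Hub: it maps a batch of strings to 512-dimensional vectors. A quick usage sketch with illustrative inputs:

vectors = embed(["Forest fire near La Ronge Sask. Canada", "I love fruits"])
print(vectors.shape)  # (2, 512)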
%matplotlib inline
import tensorflow as tf
print("Tensorflow version " + tf.__version__)
Petals to the Metal - Flower Classification on TPU
14,041,455
def clean(tweet): tweet = re.sub(r"\x89Û_", "", tweet) tweet = re.sub(r"\x89ÛÒ", "", tweet) tweet = re.sub(r"\x89ÛÓ", "", tweet) tweet = re.sub(r"\x89ÛÏWhen", "When", tweet) tweet = re.sub(r"\x89ÛÏ", "", tweet) tweet = re.sub(r"China\x89Ûªs", "China's", tweet) tweet = re.sub(r"let\x89Ûªs", "let's", tweet) tweet = re.sub(r"\x89Û÷", "", tweet) tweet = re.sub(r"\x89Ûª", "", tweet) tweet = re.sub(r"\x89Û\x9d", "", tweet) tweet = re.sub(r"å_", "", tweet) tweet = re.sub(r"\x89Û¢", "", tweet) tweet = re.sub(r"\x89Û¢åÊ", "", tweet) tweet = re.sub(r"fromåÊwounds", "from wounds", tweet) tweet = re.sub(r"åÊ", "", tweet) tweet = re.sub(r"åÈ", "", tweet) tweet = re.sub(r"JapÌ_n", "Japan", tweet) tweet = re.sub(r"Ì©", "e", tweet) tweet = re.sub(r"å¨", "", tweet) tweet = re.sub(r"Surṳ", "Suruc", tweet) tweet = re.sub(r"åÇ", "", tweet) tweet = re.sub(r"å£3million", "3 million", tweet) tweet = re.sub(r"åÀ", "", tweet) tweet = re.sub(r"he's", "he is", tweet) tweet = re.sub(r"there's", "there is", tweet) tweet = re.sub(r"We're", "We are", tweet) tweet = re.sub(r"That's", "That is", tweet) tweet = re.sub(r"won't", "will not", tweet) tweet = re.sub(r"they're", "they are", tweet) tweet = re.sub(r"Can't", "Cannot", tweet) tweet = re.sub(r"wasn't", "was not", tweet) tweet = re.sub(r"don\x89Ûªt", "do not", tweet) tweet = re.sub(r"aren't", "are not", tweet) tweet = re.sub(r"isn't", "is not", tweet) tweet = re.sub(r"What's", "What is", tweet) tweet = re.sub(r"haven't", "have not", tweet) tweet = re.sub(r"hasn't", "has not", tweet) tweet = re.sub(r"There's", "There is", tweet) tweet = re.sub(r"He's", "He is", tweet) tweet = re.sub(r"It's", "It is", tweet) tweet = re.sub(r"You're", "You are", tweet) tweet = re.sub(r"I'M", "I am", tweet) tweet = re.sub(r"shouldn't", "should not", tweet) tweet = re.sub(r"wouldn't", "would not", tweet) tweet = re.sub(r"i'm", "I am", tweet) tweet = re.sub(r"I\x89Ûªm", "I am", tweet) tweet = re.sub(r"I'm", "I am", tweet) tweet = re.sub(r"Isn't", "is not", tweet) tweet = re.sub(r"Here's", "Here is", tweet) tweet = re.sub(r"you've", "you have", tweet) tweet = re.sub(r"you\x89Ûªve", "you have", tweet) tweet = re.sub(r"we're", "we are", tweet) tweet = re.sub(r"what's", "what is", tweet) tweet = re.sub(r"couldn't", "could not", tweet) tweet = re.sub(r"we've", "we have", tweet) tweet = re.sub(r"it\x89Ûªs", "it is", tweet) tweet = re.sub(r"doesn\x89Ûªt", "does not", tweet) tweet = re.sub(r"It\x89Ûªs", "It is", tweet) tweet = re.sub(r"Here\x89Ûªs", "Here is", tweet) tweet = re.sub(r"who's", "who is", tweet) tweet = re.sub(r"I\x89Ûªve", "I have", tweet) tweet = re.sub(r"y'all", "you all", tweet) tweet = re.sub(r"can\x89Ûªt", "cannot", tweet) tweet = re.sub(r"would've", "would have", tweet) tweet = re.sub(r"it'll", "it will", tweet) tweet = re.sub(r"we'll", "we will", tweet) tweet = re.sub(r"wouldn\x89Ûªt", "would not", tweet) tweet = re.sub(r"We've", "We have", tweet) tweet = re.sub(r"he'll", "he will", tweet) tweet = re.sub(r"Y'all", "You all", tweet) tweet = re.sub(r"Weren't", "Were not", tweet) tweet = re.sub(r"Didn't", "Did not", tweet) tweet = re.sub(r"they'll", "they will", tweet) tweet = re.sub(r"they'd", "they would", tweet) tweet = re.sub(r"DON'T", "DO NOT", tweet) tweet = re.sub(r"That\x89Ûªs", "That is", tweet) tweet = re.sub(r"they've", "they have", tweet) tweet = re.sub(r"i'd", "I would", tweet) tweet = re.sub(r"should've", "should have", tweet) tweet = re.sub(r"You\x89Ûªre", "You are", tweet) tweet = re.sub(r"where's", "where is", tweet) tweet = re.sub(r"Don\x89Ûªt", "Do not", tweet) 
tweet = re.sub(r"we'd", "we would", tweet) tweet = re.sub(r"i'll", "I will", tweet) tweet = re.sub(r"weren't", "were not", tweet) tweet = re.sub(r"They're", "They are", tweet) tweet = re.sub(r"Can\x89Ûªt", "Cannot", tweet) tweet = re.sub(r"you\x89Ûªll", "you will", tweet) tweet = re.sub(r"I\x89Ûªd", "I would", tweet) tweet = re.sub(r"let's", "let us", tweet) tweet = re.sub(r"it's", "it is", tweet) tweet = re.sub(r"can't", "cannot", tweet) tweet = re.sub(r"don't", "do not", tweet) tweet = re.sub(r"you're", "you are", tweet) tweet = re.sub(r"i've", "I have", tweet) tweet = re.sub(r"that's", "that is", tweet) tweet = re.sub(r"i'll", "I will", tweet) tweet = re.sub(r"doesn't", "does not", tweet) tweet = re.sub(r"i'd", "I would", tweet) tweet = re.sub(r"didn't", "did not", tweet) tweet = re.sub(r"ain't", "am not", tweet) tweet = re.sub(r"you'll", "you will", tweet) tweet = re.sub(r"I've", "I have", tweet) tweet = re.sub(r"Don't", "do not", tweet) tweet = re.sub(r"I'll", "I will", tweet) tweet = re.sub(r"I'd", "I would", tweet) tweet = re.sub(r"Let's", "Let us", tweet) tweet = re.sub(r"you'd", "You would", tweet) tweet = re.sub(r"It's", "It is", tweet) tweet = re.sub(r"Ain't", "am not", tweet) tweet = re.sub(r"Haven't", "Have not", tweet) tweet = re.sub(r"Could've", "Could have", tweet) tweet = re.sub(r"youve", "you have", tweet) tweet = re.sub(r"donå«t", "do not", tweet) tweet = re.sub(r"&gt;", ">", tweet) tweet = re.sub(r"&lt;", "<", tweet) tweet = re.sub(r"&amp;", "&", tweet) tweet = re.sub(r"w/e", "whatever", tweet) tweet = re.sub(r"w/", "with", tweet) tweet = re.sub(r"USAgov", "USA government", tweet) tweet = re.sub(r"recentlu", "recently", tweet) tweet = re.sub(r"Ph0tos", "Photos", tweet) tweet = re.sub(r"amirite", "am I right", tweet) tweet = re.sub(r"exp0sed", "exposed", tweet) tweet = re.sub(r"<3", "love", tweet) tweet = re.sub(r"amageddon", "armageddon", tweet) tweet = re.sub(r"Trfc", "Traffic", tweet) tweet = re.sub(r"8/5/2015", "2015-08-05", tweet) tweet = re.sub(r"WindStorm", "Wind Storm", tweet) tweet = re.sub(r"8/6/2015", "2015-08-06", tweet) tweet = re.sub(r"10:38PM", "10:38 PM", tweet) tweet = re.sub(r"10:30pm", "10:30 PM", tweet) tweet = re.sub(r"16yr", "16 year", tweet) tweet = re.sub(r"lmao", "laughing my ass off", tweet) tweet = re.sub(r"TRAUMATISED", "traumatized", tweet) tweet = re.sub(r"IranDeal", "Iran Deal", tweet) tweet = re.sub(r"ArianaGrande", "Ariana Grande", tweet) tweet = re.sub(r"camilacabello97", "camila cabello", tweet) tweet = re.sub(r"RondaRousey", "Ronda Rousey", tweet) tweet = re.sub(r"MTVHottest", "MTV Hottest", tweet) tweet = re.sub(r"TrapMusic", "Trap Music", tweet) tweet = re.sub(r"ProphetMuhammad", "Prophet Muhammad", tweet) tweet = re.sub(r"PantherAttack", "Panther Attack", tweet) tweet = re.sub(r"StrategicPatience", "Strategic Patience", tweet) tweet = re.sub(r"socialnews", "social news", tweet) tweet = re.sub(r"NASAHurricane", "NASA Hurricane", tweet) tweet = re.sub(r"onlinecommunities", "online communities", tweet) tweet = re.sub(r"humanconsumption", "human consumption", tweet) tweet = re.sub(r"Typhoon-Devastated", "Typhoon Devastated", tweet) tweet = re.sub(r"Meat-Loving", "Meat Loving", tweet) tweet = re.sub(r"facialabuse", "facial abuse", tweet) tweet = re.sub(r"LakeCounty", "Lake County", tweet) tweet = re.sub(r"BeingAuthor", "Being Author", tweet) tweet = re.sub(r"withheavenly", "with heavenly", tweet) tweet = re.sub(r"thankU", "thank you", tweet) tweet = re.sub(r"iTunesMusic", "iTunes Music", tweet) tweet = re.sub(r"OffensiveContent", 
"Offensive Content", tweet) tweet = re.sub(r"WorstSummerJob", "Worst Summer Job", tweet) tweet = re.sub(r"HarryBeCareful", "Harry Be Careful", tweet) tweet = re.sub(r"NASASolarSystem", "NASA Solar System", tweet) tweet = re.sub(r"animalrescue", "animal rescue", tweet) tweet = re.sub(r"KurtSchlichter", "Kurt Schlichter", tweet) tweet = re.sub(r"aRmageddon", "armageddon", tweet) tweet = re.sub(r"Throwingknifes", "Throwing knives", tweet) tweet = re.sub(r"GodsLove", "God's Love", tweet) tweet = re.sub(r"bookboost", "book boost", tweet) tweet = re.sub(r"ibooklove", "I book love", tweet) tweet = re.sub(r"NestleIndia", "Nestle India", tweet) tweet = re.sub(r"realDonaldTrump", "Donald Trump", tweet) tweet = re.sub(r"DavidVonderhaar", "David Vonderhaar", tweet) tweet = re.sub(r"CecilTheLion", "Cecil The Lion", tweet) tweet = re.sub(r"weathernetwork", "weather network", tweet) tweet = re.sub(r"withBioterrorism&use", "with Bioterrorism & use", tweet) tweet = re.sub(r"Hostage&2", "Hostage & 2", tweet) tweet = re.sub(r"GOPDebate", "GOP Debate", tweet) tweet = re.sub(r"RickPerry", "Rick Perry", tweet) tweet = re.sub(r"frontpage", "front page", tweet) tweet = re.sub(r"NewsInTweets", "News In Tweets", tweet) tweet = re.sub(r"ViralSpell", "Viral Spell", tweet) tweet = re.sub(r"til_now", "until now", tweet) tweet = re.sub(r"volcanoinRussia", "volcano in Russia", tweet) tweet = re.sub(r"ZippedNews", "Zipped News", tweet) tweet = re.sub(r"MicheleBachman", "Michele Bachman", tweet) tweet = re.sub(r"53inch", "53 inch", tweet) tweet = re.sub(r"KerrickTrial", "Kerrick Trial", tweet) tweet = re.sub(r"abstorm", "Alberta Storm", tweet) tweet = re.sub(r"Beyhive", "Beyonce hive", tweet) tweet = re.sub(r"IDFire", "Idaho Fire", tweet) tweet = re.sub(r"DETECTADO", "Detected", tweet) tweet = re.sub(r"RockyFire", "Rocky Fire", tweet) tweet = re.sub(r"Listen/Buy", "Listen / Buy", tweet) tweet = re.sub(r"NickCannon", "Nick Cannon", tweet) tweet = re.sub(r"FaroeIslands", "Faroe Islands", tweet) tweet = re.sub(r"yycstorm", "Calgary Storm", tweet) tweet = re.sub(r"IDPs:", "Internally Displaced People :", tweet) tweet = re.sub(r"ArtistsUnited", "Artists United", tweet) tweet = re.sub(r"ClaytonBryant", "Clayton Bryant", tweet) tweet = re.sub(r"jimmyfallon", "jimmy fallon", tweet) tweet = re.sub(r"justinbieber", "justin bieber", tweet) tweet = re.sub(r"UTC2015", "UTC 2015", tweet) tweet = re.sub(r"Time2015", "Time 2015", tweet) tweet = re.sub(r"djicemoon", "dj icemoon", tweet) tweet = re.sub(r"LivingSafely", "Living Safely", tweet) tweet = re.sub(r"FIFA16", "Fifa 2016", tweet) tweet = re.sub(r"thisiswhywecanthavenicethings", "this is why we cannot have nice things", tweet) tweet = re.sub(r"bbcnews", "bbc news", tweet) tweet = re.sub(r"UndergroundRailraod", "Underground Railraod", tweet) tweet = re.sub(r"c4news", "c4 news", tweet) tweet = re.sub(r"OBLITERATION", "obliteration", tweet) tweet = re.sub(r"MUDSLIDE", "mudslide", tweet) tweet = re.sub(r"NoSurrender", "No Surrender", tweet) tweet = re.sub(r"NotExplained", "Not Explained", tweet) tweet = re.sub(r"greatbritishbakeoff", "great british bake off", tweet) tweet = re.sub(r"LondonFire", "London Fire", tweet) tweet = re.sub(r"KOTAWeather", "KOTA Weather", tweet) tweet = re.sub(r"LuchaUnderground", "Lucha Underground", tweet) tweet = re.sub(r"KOIN6News", "KOIN 6 News", tweet) tweet = re.sub(r"LiveOnK2", "Live On K2", tweet) tweet = re.sub(r"9NewsGoldCoast", "9 News Gold Coast", tweet) tweet = re.sub(r"nikeplus", "nike plus", tweet) tweet = re.sub(r"david_cameron", "David Cameron", 
tweet) tweet = re.sub(r"peterjukes", "Peter Jukes", tweet) tweet = re.sub(r"JamesMelville", "James Melville", tweet) tweet = re.sub(r"megynkelly", "Megyn Kelly", tweet) tweet = re.sub(r"cnewslive", "C News Live", tweet) tweet = re.sub(r"JamaicaObserver", "Jamaica Observer", tweet) tweet = re.sub(r"TweetLikeItsSeptember11th2001", "Tweet like it is september 11th 2001", tweet) tweet = re.sub(r"cbplawyers", "cbp lawyers", tweet) tweet = re.sub(r"fewmoretweets", "few more tweets", tweet) tweet = re.sub(r"BlackLivesMatter", "Black Lives Matter", tweet) tweet = re.sub(r"cjoyner", "Chris Joyner", tweet) tweet = re.sub(r"ENGvAUS", "England vs Australia", tweet) tweet = re.sub(r"ScottWalker", "Scott Walker", tweet) tweet = re.sub(r"MikeParrActor", "Michael Parr", tweet) tweet = re.sub(r"4PlayThursdays", "Foreplay Thursdays", tweet) tweet = re.sub(r"TGF2015", "Tontitown Grape Festival", tweet) tweet = re.sub(r"realmandyrain", "Mandy Rain", tweet) tweet = re.sub(r"GraysonDolan", "Grayson Dolan", tweet) tweet = re.sub(r"ApolloBrown", "Apollo Brown", tweet) tweet = re.sub(r"saddlebrooke", "Saddlebrooke", tweet) tweet = re.sub(r"TontitownGrape", "Tontitown Grape", tweet) tweet = re.sub(r"AbbsWinston", "Abbs Winston", tweet) tweet = re.sub(r"ShaunKing", "Shaun King", tweet) tweet = re.sub(r"MeekMill", "Meek Mill", tweet) tweet = re.sub(r"TornadoGiveaway", "Tornado Giveaway", tweet) tweet = re.sub(r"GRupdates", "GR updates", tweet) tweet = re.sub(r"SouthDowns", "South Downs", tweet) tweet = re.sub(r"braininjury", "brain injury", tweet) tweet = re.sub(r"auspol", "Australian politics", tweet) tweet = re.sub(r"PlannedParenthood", "Planned Parenthood", tweet) tweet = re.sub(r"calgaryweather", "Calgary Weather", tweet) tweet = re.sub(r"weallheartonedirection", "we all heart one direction", tweet) tweet = re.sub(r"edsheeran", "Ed Sheeran", tweet) tweet = re.sub(r"TrueHeroes", "True Heroes", tweet) tweet = re.sub(r"S3XLEAK", "sex leak", tweet) tweet = re.sub(r"ComplexMag", "Complex Magazine", tweet) tweet = re.sub(r"TheAdvocateMag", "The Advocate Magazine", tweet) tweet = re.sub(r"CityofCalgary", "City of Calgary", tweet) tweet = re.sub(r"EbolaOutbreak", "Ebola Outbreak", tweet) tweet = re.sub(r"SummerFate", "Summer Fate", tweet) tweet = re.sub(r"RAmag", "Royal Academy Magazine", tweet) tweet = re.sub(r"offers2go", "offers to go", tweet) tweet = re.sub(r"foodscare", "food scare", tweet) tweet = re.sub(r"MNPDNashville", "Metropolitan Nashville Police Department", tweet) tweet = re.sub(r"TfLBusAlerts", "TfL Bus Alerts", tweet) tweet = re.sub(r"GamerGate", "Gamer Gate", tweet) tweet = re.sub(r"IHHen", "Humanitarian Relief", tweet) tweet = re.sub(r"spinningbot", "spinning bot", tweet) tweet = re.sub(r"ModiMinistry", "Modi Ministry", tweet) tweet = re.sub(r"TAXIWAYS", "taxi ways", tweet) tweet = re.sub(r"Calum5SOS", "Calum Hood", tweet) tweet = re.sub(r"po_st", "po.st", tweet) tweet = re.sub(r"scoopit", "scoop.it", tweet) tweet = re.sub(r"UltimaLucha", "Ultima Lucha", tweet) tweet = re.sub(r"JonathanFerrell", "Jonathan Ferrell", tweet) tweet = re.sub(r"aria_ahrary", "Aria Ahrary", tweet) tweet = re.sub(r"rapidcity", "Rapid City", tweet) tweet = re.sub(r"OutBid", "outbid", tweet) tweet = re.sub(r"lavenderpoetrycafe", "lavender poetry cafe", tweet) tweet = re.sub(r"EudryLantiqua", "Eudry Lantiqua", tweet) tweet = re.sub(r"15PM", "15 PM", tweet) tweet = re.sub(r"OriginalFunko", "Funko", tweet) tweet = re.sub(r"rightwaystan", "Richard Tan", tweet) tweet = re.sub(r"CindyNoonan", "Cindy Noonan", tweet) tweet = 
re.sub(r"RT_America", "RT America", tweet) tweet = re.sub(r"narendramodi", "Narendra Modi", tweet) tweet = re.sub(r"BakeOffFriends", "Bake Off Friends", tweet) tweet = re.sub(r"TeamHendrick", "Hendrick Motorsports", tweet) tweet = re.sub(r"alexbelloli", "Alex Belloli", tweet) tweet = re.sub(r"itsjustinstuart", "Justin Stuart", tweet) tweet = re.sub(r"gunsense", "gun sense", tweet) tweet = re.sub(r"DebateQuestionsWeWantToHear", "debate questions we want to hear", tweet) tweet = re.sub(r"RoyalCarribean", "Royal Carribean", tweet) tweet = re.sub(r"samanthaturne19", "Samantha Turner", tweet) tweet = re.sub(r"JonVoyage", "Jon Stewart", tweet) tweet = re.sub(r"renew911health", "renew 911 health", tweet) tweet = re.sub(r"SuryaRay", "Surya Ray", tweet) tweet = re.sub(r"pattonoswalt", "Patton Oswalt", tweet) tweet = re.sub(r"minhazmerchant", "Minhaz Merchant", tweet) tweet = re.sub(r"TLVFaces", "Israel Diaspora Coalition", tweet) tweet = re.sub(r"pmarca", "Marc Andreessen", tweet) tweet = re.sub(r"pdx911", "Portland Police", tweet) tweet = re.sub(r"jamaicaplain", "Jamaica Plain", tweet) tweet = re.sub(r"Japton", "Arkansas", tweet) tweet = re.sub(r"RouteComplex", "Route Complex", tweet) tweet = re.sub(r"INSubcontinent", "Indian Subcontinent", tweet) tweet = re.sub(r"NJTurnpike", "New Jersey Turnpike", tweet) tweet = re.sub(r"Politifiact", "PolitiFact", tweet) tweet = re.sub(r"Hiroshima70", "Hiroshima", tweet) tweet = re.sub(r"GMMBC", "Greater Mt Moriah Baptist Church", tweet) tweet = re.sub(r"versethe", "verse the", tweet) tweet = re.sub(r"TubeStrike", "Tube Strike", tweet) tweet = re.sub(r"MissionHills", "Mission Hills", tweet) tweet = re.sub(r"ProtectDenaliWolves", "Protect Denali Wolves", tweet) tweet = re.sub(r"NANKANA", "Nankana", tweet) tweet = re.sub(r"SAHIB", "Sahib", tweet) tweet = re.sub(r"PAKPATTAN", "Pakpattan", tweet) tweet = re.sub(r"Newz_Sacramento", "News Sacramento", tweet) tweet = re.sub(r"gofundme", "go fund me", tweet) tweet = re.sub(r"pmharper", "Stephen Harper", tweet) tweet = re.sub(r"IvanBerroa", "Ivan Berroa", tweet) tweet = re.sub(r"LosDelSonido", "Los Del Sonido", tweet) tweet = re.sub(r"bancodeseries", "banco de series", tweet) tweet = re.sub(r"timkaine", "Tim Kaine", tweet) tweet = re.sub(r"IdentityTheft", "Identity Theft", tweet) tweet = re.sub(r"AllLivesMatter", "All Lives Matter", tweet) tweet = re.sub(r"mishacollins", "Misha Collins", tweet) tweet = re.sub(r"BillNeelyNBC", "Bill Neely", tweet) tweet = re.sub(r"BeClearOnCancer", "be clear on cancer", tweet) tweet = re.sub(r"Kowing", "Knowing", tweet) tweet = re.sub(r"ScreamQueens", "Scream Queens", tweet) tweet = re.sub(r"AskCharley", "Ask Charley", tweet) tweet = re.sub(r"BlizzHeroes", "Heroes of the Storm", tweet) tweet = re.sub(r"BradleyBrad47", "Bradley Brad", tweet) tweet = re.sub(r"HannaPH", "Typhoon Hanna", tweet) tweet = re.sub(r"meinlcymbals", "MEINL Cymbals", tweet) tweet = re.sub(r"Ptbo", "Peterborough", tweet) tweet = re.sub(r"cnnbrk", "CNN Breaking News", tweet) tweet = re.sub(r"IndianNews", "Indian News", tweet) tweet = re.sub(r"savebees", "save bees", tweet) tweet = re.sub(r"GreenHarvard", "Green Harvard", tweet) tweet = re.sub(r"StandwithPP", "Stand with planned parenthood", tweet) tweet = re.sub(r"hermancranston", "Herman Cranston", tweet) tweet = re.sub(r"WMUR9", "WMUR-TV", tweet) tweet = re.sub(r"RockBottomRadFM", "Rock Bottom Radio", tweet) tweet = re.sub(r"ameenshaikh3", "Ameen Shaikh", tweet) tweet = re.sub(r"ProSyn", "Project Syndicate", tweet) tweet = re.sub(r"Daesh", "ISIS", tweet) tweet = 
re.sub(r"s2g", "swear to god", tweet) tweet = re.sub(r"listenlive", "listen live", tweet) tweet = re.sub(r"CDCgov", "Centers for Disease Control and Prevention", tweet) tweet = re.sub(r"FoxNew", "Fox News", tweet) tweet = re.sub(r"CBSBigBrother", "Big Brother", tweet) tweet = re.sub(r"JulieDiCaro", "Julie DiCaro", tweet) tweet = re.sub(r"theadvocatemag", "The Advocate Magazine", tweet) tweet = re.sub(r"RohnertParkDPS", "Rohnert Park Police Department", tweet) tweet = re.sub(r"THISIZBWRIGHT", "Bonnie Wright", tweet) tweet = re.sub(r"Popularmmos", "Popular MMOs", tweet) tweet = re.sub(r"WildHorses", "Wild Horses", tweet) tweet = re.sub(r"FantasticFour", "Fantastic Four", tweet) tweet = re.sub(r"HORNDALE", "Horndale", tweet) tweet = re.sub(r"PINER", "Piner", tweet) tweet = re.sub(r"BathAndNorthEastSomerset", "Bath and North East Somerset", tweet) tweet = re.sub(r"thatswhatfriendsarefor", "that is what friends are for", tweet) tweet = re.sub(r"residualincome", "residual income", tweet) tweet = re.sub(r"YahooNewsDigest", "Yahoo News Digest", tweet) tweet = re.sub(r"MalaysiaAirlines", "Malaysia Airlines", tweet) tweet = re.sub(r"AmazonDeals", "Amazon Deals", tweet) tweet = re.sub(r"MissCharleyWebb", "Charley Webb", tweet) tweet = re.sub(r"shoalstraffic", "shoals traffic", tweet) tweet = re.sub(r"GeorgeFoster72", "George Foster", tweet) tweet = re.sub(r"pop2015", "pop 2015", tweet) tweet = re.sub(r"_PokemonCards_", "Pokemon Cards", tweet) tweet = re.sub(r"DianneG", "Dianne Gallagher", tweet) tweet = re.sub(r"KashmirConflict", "Kashmir Conflict", tweet) tweet = re.sub(r"BritishBakeOff", "British Bake Off", tweet) tweet = re.sub(r"FreeKashmir", "Free Kashmir", tweet) tweet = re.sub(r"mattmosley", "Matt Mosley", tweet) tweet = re.sub(r"BishopFred", "Bishop Fred", tweet) tweet = re.sub(r"EndConflict", "End Conflict", tweet) tweet = re.sub(r"EndOccupation", "End Occupation", tweet) tweet = re.sub(r"UNHEALED", "unhealed", tweet) tweet = re.sub(r"CharlesDagnall", "Charles Dagnall", tweet) tweet = re.sub(r"Latestnews", "Latest news", tweet) tweet = re.sub(r"KindleCountdown", "Kindle Countdown", tweet) tweet = re.sub(r"NoMoreHandouts", "No More Handouts", tweet) tweet = re.sub(r"datingtips", "dating tips", tweet) tweet = re.sub(r"charlesadler", "Charles Adler", tweet) tweet = re.sub(r"twia", "Texas Windstorm Insurance Association", tweet) tweet = re.sub(r"txlege", "Texas Legislature", tweet) tweet = re.sub(r"WindstormInsurer", "Windstorm Insurer", tweet) tweet = re.sub(r"Newss", "News", tweet) tweet = re.sub(r"hempoil", "hemp oil", tweet) tweet = re.sub(r"CommoditiesAre", "Commodities are", tweet) tweet = re.sub(r"tubestrike", "tube strike", tweet) tweet = re.sub(r"JoeNBC", "Joe Scarborough", tweet) tweet = re.sub(r"LiteraryCakes", "Literary Cakes", tweet) tweet = re.sub(r"TI5", "The International 5", tweet) tweet = re.sub(r"thehill", "the hill", tweet) tweet = re.sub(r"3others", "3 others", tweet) tweet = re.sub(r"stighefootball", "Sam Tighe", tweet) tweet = re.sub(r"whatstheimportantvideo", "what is the important video", tweet) tweet = re.sub(r"ClaudioMeloni", "Claudio Meloni", tweet) tweet = re.sub(r"DukeSkywalker", "Duke Skywalker", tweet) tweet = re.sub(r"carsonmwr", "Fort Carson", tweet) tweet = re.sub(r"offdishduty", "off dish duty", tweet) tweet = re.sub(r"andword", "and word", tweet) tweet = re.sub(r"rhodeisland", "Rhode Island", tweet) tweet = re.sub(r"easternoregon", "Eastern Oregon", tweet) tweet = re.sub(r"WAwildfire", "Washington Wildfire", tweet) tweet = re.sub(r"fingerrockfire", "Finger 
Rock Fire", tweet) tweet = re.sub(r"57am", "57 am", tweet) tweet = re.sub(r"fingerrockfire", "Finger Rock Fire", tweet) tweet = re.sub(r"JacobHoggard", "Jacob Hoggard", tweet) tweet = re.sub(r"newnewnew", "new new new", tweet) tweet = re.sub(r"under50", "under 50", tweet) tweet = re.sub(r"getitbeforeitsgone", "get it before it is gone", tweet) tweet = re.sub(r"freshoutofthebox", "fresh out of the box", tweet) tweet = re.sub(r"amwriting", "am writing", tweet) tweet = re.sub(r"Bokoharm", "Boko Haram", tweet) tweet = re.sub(r"Nowlike", "Now like", tweet) tweet = re.sub(r"seasonfrom", "season from", tweet) tweet = re.sub(r"epicente", "epicenter", tweet) tweet = re.sub(r"epicenterr", "epicenter", tweet) tweet = re.sub(r"sicklife", "sick life", tweet) tweet = re.sub(r"yycweather", "Calgary Weather", tweet) tweet = re.sub(r"calgarysun", "Calgary Sun", tweet) tweet = re.sub(r"approachng", "approaching", tweet) tweet = re.sub(r"evng", "evening", tweet) tweet = re.sub(r"Sumthng", "something", tweet) tweet = re.sub(r"EllenPompeo", "Ellen Pompeo", tweet) tweet = re.sub(r"shondarhimes", "Shonda Rhimes", tweet) tweet = re.sub(r"ABCNetwork", "ABC Network", tweet) tweet = re.sub(r"SushmaSwaraj", "Sushma Swaraj", tweet) tweet = re.sub(r"pray4japan", "Pray for Japan", tweet) tweet = re.sub(r"hope4japan", "Hope for Japan", tweet) tweet = re.sub(r"Illusionimagess", "Illusion images", tweet) tweet = re.sub(r"SummerUnderTheStars", "Summer Under The Stars", tweet) tweet = re.sub(r"ShallWeDance", "Shall We Dance", tweet) tweet = re.sub(r"TCMParty", "TCM Party", tweet) tweet = re.sub(r"marijuananews", "marijuana news", tweet) tweet = re.sub(r"onbeingwithKristaTippett", "on being with Krista Tippett", tweet) tweet = re.sub(r"Beingtweets", "Being tweets", tweet) tweet = re.sub(r"newauthors", "new authors", tweet) tweet = re.sub(r"remedyyyy", "remedy", tweet) tweet = re.sub(r"44PM", "44 PM", tweet) tweet = re.sub(r"HeadlinesApp", "Headlines App", tweet) tweet = re.sub(r"40PM", "40 PM", tweet) tweet = re.sub(r"myswc", "Severe Weather Center", tweet) tweet = re.sub(r"ithats", "that is", tweet) tweet = re.sub(r"icouldsitinthismomentforever", "I could sit in this moment forever", tweet) tweet = re.sub(r"FatLoss", "Fat Loss", tweet) tweet = re.sub(r"02PM", "02 PM", tweet) tweet = re.sub(r"MetroFmTalk", "Metro Fm Talk", tweet) tweet = re.sub(r"Bstrd", "bastard", tweet) tweet = re.sub(r"bldy", "bloody", tweet) tweet = re.sub(r"MetrofmTalk", "Metro Fm Talk", tweet) tweet = re.sub(r"terrorismturn", "terrorism turn", tweet) tweet = re.sub(r"BBCNewsAsia", "BBC News Asia", tweet) tweet = re.sub(r"BehindTheScenes", "Behind The Scenes", tweet) tweet = re.sub(r"GeorgeTakei", "George Takei", tweet) tweet = re.sub(r"WomensWeeklyMag", "Womens Weekly Magazine", tweet) tweet = re.sub(r"SurvivorsGuidetoEarth", "Survivors Guide to Earth", tweet) tweet = re.sub(r"incubusband", "incubus band", tweet) tweet = re.sub(r"Babypicturethis", "Baby picture this", tweet) tweet = re.sub(r"BombEffects", "Bomb Effects", tweet) tweet = re.sub(r"win10", "Windows 10", tweet) tweet = re.sub(r"idkidk", "I do not know I do not know", tweet) tweet = re.sub(r"TheWalkingDead", "The Walking Dead", tweet) tweet = re.sub(r"amyschumer", "Amy Schumer", tweet) tweet = re.sub(r"crewlist", "crew list", tweet) tweet = re.sub(r"Erdogans", "Erdogan", tweet) tweet = re.sub(r"BBCLive", "BBC Live", tweet) tweet = re.sub(r"TonyAbbottMHR", "Tony Abbott", tweet) tweet = re.sub(r"paulmyerscough", "Paul Myerscough", tweet) tweet = re.sub(r"georgegallagher", "George Gallagher", 
tweet) tweet = re.sub(r"JimmieJohnson", "Jimmie Johnson", tweet) tweet = re.sub(r"pctool", "pc tool", tweet) tweet = re.sub(r"DoingHashtagsRight", "Doing Hashtags Right", tweet) tweet = re.sub(r"ThrowbackThursday", "Throwback Thursday", tweet) tweet = re.sub(r"SnowBackSunday", "Snowback Sunday", tweet) tweet = re.sub(r"LakeEffect", "Lake Effect", tweet) tweet = re.sub(r"RTphotographyUK", "Richard Thomas Photography UK", tweet) tweet = re.sub(r"BigBang_CBS", "Big Bang CBS", tweet) tweet = re.sub(r"writerslife", "writers life", tweet) tweet = re.sub(r"NaturalBirth", "Natural Birth", tweet) tweet = re.sub(r"UnusualWords", "Unusual Words", tweet) tweet = re.sub(r"wizkhalifa", "Wiz Khalifa", tweet) tweet = re.sub(r"acreativedc", "a creative DC", tweet) tweet = re.sub(r"vscodc", "vsco DC", tweet) tweet = re.sub(r"VSCOcam", "vsco camera", tweet) tweet = re.sub(r"TheBEACHDC", "The beach DC", tweet) tweet = re.sub(r"buildingmuseum", "building museum", tweet) tweet = re.sub(r"WorldOil", "World Oil", tweet) tweet = re.sub(r"redwedding", "red wedding", tweet) tweet = re.sub(r"AmazingRaceCanada", "Amazing Race Canada", tweet) tweet = re.sub(r"WakeUpAmerica", "Wake Up America", tweet) tweet = re.sub(r"\\Allahuakbar\", "Allahu Akbar", tweet) tweet = re.sub(r"bleased", "blessed", tweet) tweet = re.sub(r"nigeriantribune", "Nigerian Tribune", tweet) tweet = re.sub(r"HIDEO_KOJIMA_EN", "Hideo Kojima", tweet) tweet = re.sub(r"FusionFestival", "Fusion Festival", tweet) tweet = re.sub(r"50Mixed", "50 Mixed", tweet) tweet = re.sub(r"NoAgenda", "No Agenda", tweet) tweet = re.sub(r"WhiteGenocide", "White Genocide", tweet) tweet = re.sub(r"dirtylying", "dirty lying", tweet) tweet = re.sub(r"SyrianRefugees", "Syrian Refugees", tweet) tweet = re.sub(r"changetheworld", "change the world", tweet) tweet = re.sub(r"Ebolacase", "Ebola case", tweet) tweet = re.sub(r"mcgtech", "mcg technologies", tweet) tweet = re.sub(r"withweapons", "with weapons", tweet) tweet = re.sub(r"advancedwarfare", "advanced warfare", tweet) tweet = re.sub(r"letsFootball", "let us Football", tweet) tweet = re.sub(r"LateNiteMix", "late night mix", tweet) tweet = re.sub(r"PhilCollinsFeed", "Phil Collins", tweet) tweet = re.sub(r"RudyHavenstein", "Rudy Havenstein", tweet) tweet = re.sub(r"22PM", "22 PM", tweet) tweet = re.sub(r"54am", "54 AM", tweet) tweet = re.sub(r"38am", "38 AM", tweet) tweet = re.sub(r"OldFolkExplainStuff", "Old Folk Explain Stuff", tweet) tweet = re.sub(r"BlacklivesMatter", "Black Lives Matter", tweet) tweet = re.sub(r"InsaneLimits", "Insane Limits", tweet) tweet = re.sub(r"youcantsitwithus", "you cannot sit with us", tweet) tweet = re.sub(r"2k15", "2015", tweet) tweet = re.sub(r"TheIran", "Iran", tweet) tweet = re.sub(r"JimmyFallon", "Jimmy Fallon", tweet) tweet = re.sub(r"AlbertBrooks", "Albert Brooks", tweet) tweet = re.sub(r"defense_news", "defense news", tweet) tweet = re.sub(r"nuclearrcSA", "Nuclear Risk Control Self Assessment", tweet) tweet = re.sub(r"Auspol", "Australia Politics", tweet) tweet = re.sub(r"NuclearPower", "Nuclear Power", tweet) tweet = re.sub(r"WhiteTerrorism", "White Terrorism", tweet) tweet = re.sub(r"truthfrequencyradio", "Truth Frequency Radio", tweet) tweet = re.sub(r"ErasureIsNotEquality", "Erasure is not equality", tweet) tweet = re.sub(r"ProBonoNews", "Pro Bono News", tweet) tweet = re.sub(r"JakartaPost", "Jakarta Post", tweet) tweet = re.sub(r"toopainful", "too painful", tweet) tweet = re.sub(r"melindahaunton", "Melinda Haunton", tweet) tweet = re.sub(r"NoNukes", "No Nukes", tweet) tweet = 
re.sub(r"curryspcworld", "Currys PC World", tweet) tweet = re.sub(r"ineedcake", "I need cake", tweet) tweet = re.sub(r"blackforestgateau", "black forest gateau", tweet) tweet = re.sub(r"BBCOne", "BBC One", tweet) tweet = re.sub(r"AlexxPage", "Alex Page", tweet) tweet = re.sub(r"jonathanserrie", "Jonathan Serrie", tweet) tweet = re.sub(r"SocialJerkBlog", "Social Jerk Blog", tweet) tweet = re.sub(r"ChelseaVPeretti", "Chelsea Peretti", tweet) tweet = re.sub(r"irongiant", "iron giant", tweet) tweet = re.sub(r"RonFunches", "Ron Funches", tweet) tweet = re.sub(r"TimCook", "Tim Cook", tweet) tweet = re.sub(r"sebastianstanisaliveandwell", "Sebastian Stan is alive and well", tweet) tweet = re.sub(r"Madsummer", "Mad summer", tweet) tweet = re.sub(r"NowYouKnow", "Now you know", tweet) tweet = re.sub(r"concertphotography", "concert photography", tweet) tweet = re.sub(r"TomLandry", "Tom Landry", tweet) tweet = re.sub(r"showgirldayoff", "show girl day off", tweet) tweet = re.sub(r"Yougslavia", "Yugoslavia", tweet) tweet = re.sub(r"QuantumDataInformatics", "Quantum Data Informatics", tweet) tweet = re.sub(r"FromTheDesk", "From The Desk", tweet) tweet = re.sub(r"TheaterTrial", "Theater Trial", tweet) tweet = re.sub(r"CatoInstitute", "Cato Institute", tweet) tweet = re.sub(r"EmekaGift", "Emeka Gift", tweet) tweet = re.sub(r"LetsBe_Rational", "Let us be rational", tweet) tweet = re.sub(r"Cynicalreality", "Cynical reality", tweet) tweet = re.sub(r"FredOlsenCruise", "Fred Olsen Cruise", tweet) tweet = re.sub(r"NotSorry", "not sorry", tweet) tweet = re.sub(r"UseYourWords", "use your words", tweet) tweet = re.sub(r"WordoftheDay", "word of the day", tweet) tweet = re.sub(r"Dictionarycom", "Dictionary.com", tweet) tweet = re.sub(r"TheBrooklynLife", "The Brooklyn Life", tweet) tweet = re.sub(r"jokethey", "joke they", tweet) tweet = re.sub(r"nflweek1picks", "NFL week 1 picks", tweet) tweet = re.sub(r"uiseful", "useful", tweet) tweet = re.sub(r"JusticeDotOrg", "The American Association for Justice", tweet) tweet = re.sub(r"autoaccidents", "auto accidents", tweet) tweet = re.sub(r"SteveGursten", "Steve Gursten", tweet) tweet = re.sub(r"MichiganAutoLaw", "Michigan Auto Law", tweet) tweet = re.sub(r"birdgang", "bird gang", tweet) tweet = re.sub(r"nflnetwork", "NFL Network", tweet) tweet = re.sub(r"NYDNSports", "NY Daily News Sports", tweet) tweet = re.sub(r"RVacchianoNYDN", "Ralph Vacchiano NY Daily News", tweet) tweet = re.sub(r"EdmontonEsks", "Edmonton Eskimos", tweet) tweet = re.sub(r"david_brelsford", "David Brelsford", tweet) tweet = re.sub(r"TOI_India", "The Times of India", tweet) tweet = re.sub(r"hegot", "he got", tweet) tweet = re.sub(r"SkinsOn9", "Skins on 9", tweet) tweet = re.sub(r"sothathappened", "so that happened", tweet) tweet = re.sub(r"LCOutOfDoors", "LC Out Of Doors", tweet) tweet = re.sub(r"NationFirst", "Nation First", tweet) tweet = re.sub(r"IndiaToday", "India Today", tweet) tweet = re.sub(r"HLPS", "helps", tweet) tweet = re.sub(r"HOSTAGESTHROSW", "hostages throw", tweet) tweet = re.sub(r"SNCTIONS", "sanctions", tweet) tweet = re.sub(r"BidTime", "Bid Time", tweet) tweet = re.sub(r"crunchysensible", "crunchy sensible", tweet) tweet = re.sub(r"RandomActsOfRomance", "Random acts of romance", tweet) tweet = re.sub(r"MomentsAtHill", "Moments at hill", tweet) tweet = re.sub(r"eatshit", "eat shit", tweet) tweet = re.sub(r"liveleakfun", "live leak fun", tweet) tweet = re.sub(r"SahelNews", "Sahel News", tweet) tweet = re.sub(r"abc7newsbayarea", "ABC 7 News Bay Area", tweet) tweet = 
re.sub(r"facilitiesmanagement", "facilities management", tweet) tweet = re.sub(r"facilitydude", "facility dude", tweet) tweet = re.sub(r"CampLogistics", "Camp logistics", tweet) tweet = re.sub(r"alaskapublic", "Alaska public", tweet) tweet = re.sub(r"MarketResearch", "Market Research", tweet) tweet = re.sub(r"AccuracyEsports", "Accuracy Esports", tweet) tweet = re.sub(r"TheBodyShopAust", "The Body Shop Australia", tweet) tweet = re.sub(r"yychail", "Calgary hail", tweet) tweet = re.sub(r"yyctraffic", "Calgary traffic", tweet) tweet = re.sub(r"eliotschool", "eliot school", tweet) tweet = re.sub(r"TheBrokenCity", "The Broken City", tweet) tweet = re.sub(r"OldsFireDept", "Olds Fire Department", tweet) tweet = re.sub(r"RiverComplex", "River Complex", tweet) tweet = re.sub(r"fieldworksmells", "field work smells", tweet) tweet = re.sub(r"IranElection", "Iran Election", tweet) tweet = re.sub(r"glowng", "glowing", tweet) tweet = re.sub(r"kindlng", "kindling", tweet) tweet = re.sub(r"riggd", "rigged", tweet) tweet = re.sub(r"slownewsday", "slow news day", tweet) tweet = re.sub(r"MyanmarFlood", "Myanmar Flood", tweet) tweet = re.sub(r"abc7chicago", "ABC 7 Chicago", tweet) tweet = re.sub(r"copolitics", "Colorado Politics", tweet) tweet = re.sub(r"AdilGhumro", "Adil Ghumro", tweet) tweet = re.sub(r"netbots", "net bots", tweet) tweet = re.sub(r"byebyeroad", "bye bye road", tweet) tweet = re.sub(r"massiveflooding", "massive flooding", tweet) tweet = re.sub(r"EndofUS", "End of United States", tweet) tweet = re.sub(r"35PM", "35 PM", tweet) tweet = re.sub(r"greektheatrela", "Greek Theatre Los Angeles", tweet) tweet = re.sub(r"76mins", "76 minutes", tweet) tweet = re.sub(r"publicsafetyfirst", "public safety first", tweet) tweet = re.sub(r"livesmatter", "lives matter", tweet) tweet = re.sub(r"myhometown", "my hometown", tweet) tweet = re.sub(r"tankerfire", "tanker fire", tweet) tweet = re.sub(r"MEMORIALDAY", "memorial day", tweet) tweet = re.sub(r"MEMORIAL_DAY", "memorial day", tweet) tweet = re.sub(r"instaxbooty", "instagram booty", tweet) tweet = re.sub(r"Jerusalem_Post", "Jerusalem Post", tweet) tweet = re.sub(r"WayneRooney_INA", "Wayne Rooney", tweet) tweet = re.sub(r"VirtualReality", "Virtual Reality", tweet) tweet = re.sub(r"OculusRift", "Oculus Rift", tweet) tweet = re.sub(r"OwenJones84", "Owen Jones", tweet) tweet = re.sub(r"jeremycorbyn", "Jeremy Corbyn", tweet) tweet = re.sub(r"paulrogers002", "Paul Rogers", tweet) tweet = re.sub(r"mortalkombatx", "Mortal Kombat X", tweet) tweet = re.sub(r"mortalkombat", "Mortal Kombat", tweet) tweet = re.sub(r"FilipeCoelho92", "Filipe Coelho", tweet) tweet = re.sub(r"OnlyQuakeNews", "Only Quake News", tweet) tweet = re.sub(r"kostumes", "costumes", tweet) tweet = re.sub(r"YEEESSSS", "yes", tweet) tweet = re.sub(r"ToshikazuKatayama", "Toshikazu Katayama", tweet) tweet = re.sub(r"IntlDevelopment", "Intl Development", tweet) tweet = re.sub(r"ExtremeWeather", "Extreme Weather", tweet) tweet = re.sub(r"WereNotGruberVoters", "We are not gruber voters", tweet) tweet = re.sub(r"NewsThousands", "News Thousands", tweet) tweet = re.sub(r"EdmundAdamus", "Edmund Adamus", tweet) tweet = re.sub(r"EyewitnessWV", "Eye witness WV", tweet) tweet = re.sub(r"PhiladelphiaMuseu", "Philadelphia Museum", tweet) tweet = re.sub(r"DublinComicCon", "Dublin Comic Con", tweet) tweet = re.sub(r"NicholasBrendon", "Nicholas Brendon", tweet) tweet = re.sub(r"Alltheway80s", "All the way 80s", tweet) tweet = re.sub(r"FromTheField", "From the field", tweet) tweet = re.sub(r"NorthIowa", "North Iowa", 
tweet) tweet = re.sub(r"WillowFire", "Willow Fire", tweet) tweet = re.sub(r"MadRiverComplex", "Mad River Complex", tweet) tweet = re.sub(r"feelingmanly", "feeling manly", tweet) tweet = re.sub(r"stillnotoverit", "still not over it", tweet) tweet = re.sub(r"FortitudeValley", "Fortitude Valley", tweet) tweet = re.sub(r"CoastpowerlineTramTr", "Coast powerline", tweet) tweet = re.sub(r"ServicesGold", "Services Gold", tweet) tweet = re.sub(r"NewsbrokenEmergency", "News broken emergency", tweet) tweet = re.sub(r"Evaucation", "evacuation", tweet) tweet = re.sub(r"leaveevacuateexitbe", "leave evacuate exit be", tweet) tweet = re.sub(r"P_EOPLE", "PEOPLE", tweet) tweet = re.sub(r"Tubestrike", "tube strike", tweet) tweet = re.sub(r"CLASS_SICK", "CLASS SICK", tweet) tweet = re.sub(r"localplumber", "local plumber", tweet) tweet = re.sub(r"awesomejobsiri", "awesome job siri", tweet) tweet = re.sub(r"PayForItHow", "Pay for it how", tweet) tweet = re.sub(r"ThisIsAfrica", "This is Africa", tweet) tweet = re.sub(r"crimeairnetwork", "crime air network", tweet) tweet = re.sub(r"KimAcheson", "Kim Acheson", tweet) tweet = re.sub(r"cityofcalgary", "City of Calgary", tweet) tweet = re.sub(r"prosyndicate", "pro syndicate", tweet) tweet = re.sub(r"660NEWS", "660 NEWS", tweet) tweet = re.sub(r"BusInsMagazine", "Business Insurance Magazine", tweet) tweet = re.sub(r"wfocus", "focus", tweet) tweet = re.sub(r"ShastaDam", "Shasta Dam", tweet) tweet = re.sub(r"go2MarkFranco", "Mark Franco", tweet) tweet = re.sub(r"StephGHinojosa", "Steph Hinojosa", tweet) tweet = re.sub(r"Nashgrier", "Nash Grier", tweet) tweet = re.sub(r"NashNewVideo", "Nash new video", tweet) tweet = re.sub(r"IWouldntGetElectedBecause", "I would not get elected because", tweet) tweet = re.sub(r"SHGames", "Sledgehammer Games", tweet) tweet = re.sub(r"bedhair", "bed hair", tweet) tweet = re.sub(r"JoelHeyman", "Joel Heyman", tweet) tweet = re.sub(r"viaYouTube", "via YouTube", tweet) tweet = re.sub(r"https?:\/\/t.co\/[A-Za-z0-9]+", "", tweet) punctuations = '@ for p in punctuations: tweet = tweet.replace(p, f' {p} ') tweet = tweet.replace('...', '...') if '...' not in tweet: tweet = tweet.replace('.. ', '...') tweet = re.sub(r"MH370", "Malaysia Airlines Flight 370", tweet) tweet = re.sub(r"m̼sica", "music", tweet) tweet = re.sub(r"okwx", "Oklahoma City Weather", tweet) tweet = re.sub(r"arwx", "Arkansas Weather", tweet) tweet = re.sub(r"gawx", "Georgia Weather", tweet) tweet = re.sub(r"scwx", "South Carolina Weather", tweet) tweet = re.sub(r"cawx", "California Weather", tweet) tweet = re.sub(r"tnwx", "Tennessee Weather", tweet) tweet = re.sub(r"azwx", "Arizona Weather", tweet) tweet = re.sub(r"alwx", "Alabama Weather", tweet) tweet = re.sub(r"wordpressdotcom", "wordpress", tweet) tweet = re.sub(r"usNWSgov", "United States National Weather Service", tweet) tweet = re.sub(r"Suruc", "Sanliurfa", tweet) tweet = re.sub(r"Bestnaijamade", "bestnaijamade", tweet) tweet = re.sub(r"SOUDELOR", "Soudelor", tweet) return tweet def remove_emoji(text): emoji_pattern = re.compile("[" u"\U0001F600-\U0001F64F" u"\U0001F300-\U0001F5FF" u"\U0001F680-\U0001F6FF" u"\U0001F1E0-\U0001F1FF" u"\U00002702-\U000027B0" u"\U000024C2-\U0001F251" "]+", flags=re.UNICODE) return emoji_pattern.sub(r'', text) def remove_punct(text): table=str.maketrans('','',string.punctuation) return text.translate(table )<define_variables>
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
Petals to the Metal - Flower Classification on TPU
14,041,455
class DataPrepare(object):
    def __init__(self, path_train='/kaggle/input/nlp-getting-started/train.csv',
                 path_test='/kaggle/input/nlp-getting-started/test.csv',
                 path_submit='/kaggle/input/nlp-getting-started/sample_submission.csv',
                 path_leaked='/kaggle/input/a-real-disaster-leaked-label/submission.csv',
                 use_stemmer=False, use_filter=True, path_2=None,
                 use_pretraining=False, use_tokenizer=True,
                 clear_re="@\S+|https?:\S+|http?:\S|[^A-Za-z]+",
                 min_cv_value=5, drop_duplicates=True):
        self.path_train = path_train
        self.path_test = path_test
        self.path_submit = path_submit
        self.pretrain = use_pretraining
        self.path_2 = path_2
        self.train_data = pd.read_csv(path_train)
        self.test_data = pd.read_csv(path_test)
        self.submit_data = pd.read_csv(path_submit)
        self.leaked_data = pd.read_csv(path_leaked)
        self.clear_re = clear_re
        self.stop_words = stopwords.words("english")
        self.stemmer = SnowballStemmer("english")
        self.use_filter = use_filter
        self.use_stemmer = use_stemmer
        self.use_tokenizer = use_tokenizer
        self.hashtags_filter = re.compile(r"#(\w+)")  # pattern was cut off in extraction; restored from context
        self.at_filter = re.compile(r"@(\w+)")
        self.url_filter = re.compile(r"http:\/\/t.co\/(\w+)")
        self.drop_duplicates = drop_duplicates
        self.duplicates = [246, 270, 266, 259, 253, 251, 250, 271, 3930, 3933, 3924, 3917, 3136,
                           3133, 3913, 3914, 3936, 3921, 3941, 3937, 3938, 3667, 3674, 3688, 3696,
                           601, 576, 584, 608, 606, 603, 592, 604, 591, 587, 6828, 6831, 4415,
                           4400, 4399, 4403, 4397, 4396, 4394, 4414, 4393, 4392, 4404, 4407, 4420,
                           4412, 4408, 4391, 4405, 6840, 6834, 6837, 6841, 6816]
        if self.drop_duplicates:
            self.drop_dup()
        self.get_hashtags()
        self.get_urls()
        self.get_ats()
        self.clear_keyword()
        self.c_vector = CountVectorizer(min_df=min_cv_value, lowercase=False)
        if self.pretrain:
            self.sentiment = pd.read_csv(path_2, encoding='latin-1',
                                         names=['target', 'id', 'date', 'flag', 'user', 'text'])
            self.sentiment.drop(columns=['id', 'date', 'flag', 'user'], inplace=True)
            self.sentiment.target.replace({0: 1, 4: 0}, inplace=True)
        self.clear_text()
        self.vector_values(columns=['keyword', 'location', 'hashtags', 'urls', 'ats', 'text_re'])
        self.train_df_gaf, self.test_df_gaf = self.get_additional_features(self.train_df, self.test_df)
        if use_tokenizer:
            self.tokenizer = Tokenizer()
            self.train_tokenizer()
            self.data_max_len()
            self.SEQUENCE_LENGTH = max(self.train_data.text_size.max(), self.test_data.text_size.max())

    def drop_dup(self):
        self.train_data.drop(index=self.duplicates, inplace=True)

    def preprocess(self, text):
        text = re.sub(self.clear_re, ' ', str(text).lower()).strip()
        tokens = []
        for token in text.split():
            if token not in self.stop_words:
                if self.use_stemmer:
                    tokens.append(self.stemmer.stem(token))
                else:
                    tokens.append(token)
        return " ".join(tokens)

    def preprocess_keyword(self, text):
        text = str(text).replace("%20", " ")
        text = text.replace(" ", "_")
        text = re.sub("[^A-Za-z_ ]", ' ', str(text).lower()).strip()
        return text

    def get_additional_features(self, train, test, ID=False):
        train_new = pd.DataFrame(index=self.train_df.index)
        test_new = pd.DataFrame(index=self.test_df.index)
        col = list(test.columns)
        n_comp = 12
        tsvd = TruncatedSVD(n_components=n_comp, random_state=98)
        tsvd_result_train = tsvd.fit_transform(train[col])
        tsvd_result_test = tsvd.transform(test[col])
        pca = PCA(n_components=n_comp, random_state=98)
        pca_result_train = pca.fit_transform(train[col])
        pca_result_test = pca.transform(test[col])
        ica = FastICA(n_components=n_comp, random_state=98)
        ica_result_train = ica.fit_transform(train[col])
        ica_result_test = ica.transform(test[col])
        grp = GaussianRandomProjection(n_components=n_comp, random_state=98)
        grp_result_train = grp.fit_transform(train[col])
        grp_result_test = grp.transform(test[col])
        srp = SparseRandomProjection(n_components=n_comp, random_state=98, dense_output=True)
        srp_result_train = srp.fit_transform(train[col])
        srp_result_test = srp.transform(test[col])
        for i in range(1, n_comp + 1):
            train_new['tsvd_' + str(i)] = tsvd_result_train[:, i - 1]
            test_new['tsvd_' + str(i)] = tsvd_result_test[:, i - 1]
            train_new['pca_' + str(i)] = pca_result_train[:, i - 1]
            test_new['pca_' + str(i)] = pca_result_test[:, i - 1]
            train_new['ica_' + str(i)] = ica_result_train[:, i - 1]
            test_new['ica_' + str(i)] = ica_result_test[:, i - 1]
            train_new['grp_' + str(i)] = grp_result_train[:, i - 1]
            test_new['grp_' + str(i)] = grp_result_test[:, i - 1]
            train_new['srp_' + str(i)] = srp_result_train[:, i - 1]
            test_new['srp_' + str(i)] = srp_result_test[:, i - 1]
        return train_new, test_new

    def vector_values(self, columns):
        self.train_df = pd.DataFrame(index=self.train_data.index)
        self.test_df = pd.DataFrame(index=self.test_data.index)
        for col in columns:
            # Note: the vectorizer is fit on the test column and only applied to train.
            self.test_df = self.test_df.join(
                pd.DataFrame(self.c_vector.fit_transform(self.test_data[col].fillna('None')).toarray(),
                             columns=self.c_vector.get_feature_names(),
                             index=self.test_df.index).add_prefix(col + '_'))
            self.train_df = self.train_df.join(
                pd.DataFrame(self.c_vector.transform(self.train_data[col].fillna('None')).toarray(),
                             columns=self.c_vector.get_feature_names(),
                             index=self.train_df.index).add_prefix(col + '_'))

    def get_hashtags(self):
        self.train_data['hashtags'] = self.train_data.text.apply(lambda x: str(self.hashtags_filter.findall(str(x).lower())))
        self.test_data['hashtags'] = self.test_data.text.apply(lambda x: str(self.hashtags_filter.findall(str(x).lower())))

    def get_urls(self):
        self.train_data['urls'] = self.train_data.text.apply(lambda x: str(self.url_filter.findall(str(x))))
        self.test_data['urls'] = self.test_data.text.apply(lambda x: str(self.url_filter.findall(str(x))))

    def get_ats(self):
        self.train_data['ats'] = self.train_data.text.apply(lambda x: str(self.at_filter.findall(str(x).lower())))
        self.test_data['ats'] = self.test_data.text.apply(lambda x: str(self.at_filter.findall(str(x).lower())))

    def clear_keyword(self):
        self.train_data.keyword = self.train_data.keyword.apply(lambda x: self.preprocess_keyword(x))
        self.test_data.keyword = self.test_data.keyword.apply(lambda x: self.preprocess_keyword(x))
        self.train_data.location = self.train_data.location.apply(lambda x: self.preprocess_keyword(x))
        self.test_data.location = self.test_data.location.apply(lambda x: self.preprocess_keyword(x))

    def clear_text(self):
        if self.use_filter:
            self.train_data['text_re'] = self.train_data.text.apply(lambda x: clean(x))
            self.test_data['text_re'] = self.test_data.text.apply(lambda x: clean(x))
            self.train_data['text_re'] = self.train_data.text_re.apply(lambda x: remove_emoji(x))
            self.test_data['text_re'] = self.test_data.text_re.apply(lambda x: remove_emoji(x))
            self.train_data['text_re'] = self.train_data.text_re.apply(lambda x: self.preprocess(x))
            self.test_data['text_re'] = self.test_data.text_re.apply(lambda x: self.preprocess(x))
        else:
            self.train_data['text_re'] = self.train_data.text.apply(lambda x: self.preprocess(x))
            self.test_data['text_re'] = self.test_data.text.apply(lambda x: self.preprocess(x))

    def train_tokenizer(self):
        self.tokenizer.fit_on_texts(pd.concat([self.train_data['text_re'], self.test_data['text_re']], axis=0))
        self.vocab_size = len(self.tokenizer.word_index) + 1

    def data_max_len(self):
        self.train_data['text_size'] = self.train_data.text_re.apply(lambda x: len(x.split(' ')))
        self.test_data['text_size'] = self.test_data.text_re.apply(lambda x: len(x.split(' ')))
<define_variables>
IMAGE_SIZE = [512, 512] EPOCHS = 30 BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') SEED = 2020
Petals to the Metal - Flower Classification on TPU
14,041,455
%%time data = DataPrepare(use_filter = False) EPOCHS = 12 BATCH_SIZE = 128 W2V_SIZE = 300<prepare_x_and_y>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def data_augment(image, label): flag = 3 coef_1 = random.randint(70, 90)* 0.01 coef_2 = random.randint(70, 90)* 0.01 if flag == 1: image = tf.image.random_flip_left_right(image, seed=SEED) elif flag == 2: image = tf.image.random_flip_up_down(image, seed=SEED) else: image = tf.image.random_crop(image, [int(IMAGE_SIZE[0]*coef_1), int(IMAGE_SIZE[0]*coef_2), 3],seed=SEED) return image, label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled=True) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=False) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
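A note on count_data_items above: the per-shard item count is read straight from the TFRecord file name, via the number between the last '-' and the extension. A minimal standalone check with made-up shard names (only the '-NNN.' naming convention is assumed):

import re
import numpy as np

def count_data_items(filenames):
    # 'train-00-230.tfrec' -> 230 items in that shard
    n = [int(re.compile(r"-([0-9]*)\.").search(f).group(1)) for f in filenames]
    return np.sum(n)

print(count_data_items(['train-00-230.tfrec', 'train-01-230.tfrec']))  # 460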
Petals to the Metal - Flower Classification on TPU
14,041,455
%%time x_data = pad_sequences(data.tokenizer.texts_to_sequences(data.train_data.text_re), maxlen = data.SEQUENCE_LENGTH) y_data = data.train_data.target y_data = y_data.values.reshape(-1,1) x_test = pad_sequences(data.tokenizer.texts_to_sequences(data.test_data.text_re), maxlen = data.SEQUENCE_LENGTH )<feature_engineering>
LR_START = 0.00001 LR_MAX = 0.00005 * strategy.num_replicas_in_sync LR_MIN = 0.00001 LR_RAMPUP_EPOCHS = 8 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY =.75 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) rng = [i for i in range(EPOCHS)] y = [lrfn(x)for x in rng] plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
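As a quick sanity check of the schedule above: the rate climbs linearly from LR_START to LR_MAX over the first 8 epochs, then decays by a factor of 0.75 per epoch toward LR_MIN. A minimal standalone evaluation (constants copied from the cell above, taking LR_MAX for a single replica):

LR_START, LR_MAX, LR_MIN = 1e-5, 5e-5, 1e-5    # LR_MAX as above with num_replicas_in_sync == 1
LR_RAMPUP_EPOCHS, LR_SUSTAIN_EPOCHS, LR_EXP_DECAY = 8, 0, 0.75

def lrfn(epoch):
    if epoch < LR_RAMPUP_EPOCHS:
        return (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    if epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
        return LR_MAX
    return (LR_MAX - LR_MIN) * LR_EXP_DECAY ** (epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN

for e in (0, 4, 8, 12, 29):
    print(e, round(lrfn(e), 8))   # 1e-05, 3e-05, 5e-05 at the peak, 2.27e-05 at 12, ~1e-05 by 29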
Petals to the Metal - Flower Classification on TPU
14,041,455
%%time w2v_model = gensim.models.KeyedVectors.load_word2vec_format('/kaggle/input/nlpword2vecembeddingspretrained/GoogleNews-vectors-negative300.bin', binary=True) embedding_matrix = np.zeros(( data.vocab_size , W2V_SIZE)) word_counter = 0 for word , i in data.tokenizer.word_index.items() : if word in w2v_model.wv: word_counter += 1 embedding_matrix[i] = w2v_model.wv[word] print('Embedding Matrix Shape - > ', embedding_matrix.shape) print('Number of Words found in Embedding Layers are' , word_counter) print('The Ratio of words Found is ' , word_counter/embedding_matrix.shape[0] )<set_options>
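A minimal sketch of how such a matrix is usually consumed: a frozen Keras Embedding layer that looks up the pretrained word2vec vectors by token index without updating them (assuming embedding_matrix, W2V_SIZE and the data object from the cells above; this layer is not part of the original cell):

from tensorflow.keras.layers import Embedding

embedding_layer = Embedding(
    input_dim=data.vocab_size,           # rows of embedding_matrix
    output_dim=W2V_SIZE,                 # 300-d word2vec vectors
    weights=[embedding_matrix],          # initialize from the pretrained matrix
    input_length=data.SEQUENCE_LENGTH,
    trainable=False)                     # keep the pretrained vectors fixed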
def get_model(use_model): base_model = use_model(weights='noisy-student', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) return Model(inputs=base_model.input, outputs=predictions) with strategy.scope() : model = get_model(EfficientNetB7) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] )
Petals to the Metal - Flower Classification on TPU
14,041,455
del w2v_model gc.collect()<categorify>
history = model.fit(get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, callbacks=[lr_callback, ModelCheckpoint(filepath='my_ef_net_b7.h5', monitor='val_loss', save_best_only=True)], validation_data=get_validation_dataset() , workers = 3 )
Petals to the Metal - Flower Classification on TPU
14,041,455
%%time x_train_embeddings = embed(data.train_data.text_re) x_test_embeddings = embed(data.test_data.text_re )<choose_model_class>
model = tf.keras.models.load_model('my_ef_net_b7.h5' )
Petals to the Metal - Flower Classification on TPU
14,041,455
<choose_model_class><EOS>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities = model.predict(test_images_ds) predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' )
Petals to the Metal - Flower Classification on TPU
13,462,264
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<choose_model_class>
%matplotlib inline print("Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
13,462,264
model_1 = build_model()<define_variables>
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started" )
Petals to the Metal - Flower Classification on TPU
13,462,264
utils.plot_model(model_1, to_file='model_1_plot.png', show_shapes=True, show_layer_names=True) <choose_model_class>
IMAGE_SIZE = [512, 512] BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') SEED = 2020
Petals to the Metal - Flower Classification on TPU
13,462,264
model = define_model() model.compile(loss = 'binary_crossentropy' , optimizer = 'adam' , metrics = ['accuracy' , 'mse'] )<save_to_csv>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
13,462,264
def func(x , thres): if x>thres: return 1 else: return 0 def save_csv(df , name): pd.DataFrame(df ).to_csv(name , index = False) def train_model(data =data , model = model , train_data_x = [data.train_df_gaf , x_train_embeddings.numpy() , data.train_df,x_data] , train_data_y = y_data , bs = BATCH_SIZE , epochs_each_step = 1 ,total_epochs = 2 , vd = [[data.test_df_gaf , x_test_embeddings.numpy() , data.test_df , x_test], data.leaked_data.target.values] , verb = 2 , cbs = callbacks): submit = [] for i in range(total_epochs//epochs_each_step): model.fit(train_data_x , train_data_y , batch_size = bs , epochs = epochs_each_step ,validation_data = vd , verbose = verb , callbacks = cbs) data.submit_data['target'] = model.predict(vd[0]) median = data.submit_data.sort_values(by = ['target'], ascending = True)['target'].values[1861] data.submit_data.target = data.submit_data.target.apply(func , thres = median) f1_s= f1_score(data.leaked_data.target , data.submit_data.target) name = '/kaggle/working/disaster_submission_'+str(i)+'_f1_'+str(int(f1_s*10000)) +'.h5' model.save(name) submit.append(data.submit_data) print("F1 Score of This Layer is ", f1_score(data.leaked_data.target , data.submit_data.target)) return submit<train_model>
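func above binarizes predictions against a cutoff taken from a fixed rank of the sorted scores (index 1861 of the sorted test predictions), which pins the number of positives instead of using a plain 0.5 threshold. A tiny numeric illustration of the same rank-based cut (toy scores, hypothetical rank):

import numpy as np

scores = np.array([0.1, 0.9, 0.4, 0.7, 0.2, 0.8])
rank = 3                               # hypothetical counterpart of the 1861 above
thres = np.sort(scores)[rank]          # value at the chosen rank
labels = (scores > thres).astype(int)  # exactly len(scores) - rank - 1 positives
print(thres, labels)                   # 0.7 [0 1 0 0 0 1]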
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) return model with strategy.scope() : model1 = get_model(DenseNet201) model1.load_weights("/kaggle/input/start-with-densenet201/my_densenet_201.h5" )
Petals to the Metal - Flower Classification on TPU
13,462,264
submit = train_model()<choose_model_class>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) return model with strategy.scope() : model2 = get_model(ResNet50) model2.load_weights("/kaggle/input/start-with-pre-train/my_ef_net_b7.h5" )
Petals to the Metal - Flower Classification on TPU
13,462,264
MLA = [ neighbors.KNeighborsClassifier(n_jobs = -1), neighbors.NearestCentroid() , tree.DecisionTreeClassifier() , tree.ExtraTreeClassifier() , naive_bayes.GaussianNB() , naive_bayes.BernoulliNB() , semi_supervised.LabelPropagation() , discriminant_analysis.LinearDiscriminantAnalysis() , discriminant_analysis.QuadraticDiscriminantAnalysis() , svm.LinearSVC(multi_class='crammer_singer'), svm.SVC(probability=True), svm.NuSVC() , linear_model.LogisticRegressionCV(multi_class='multinomial'), linear_model.RidgeClassifier() , linear_model.RidgeClassifierCV() , linear_model.Perceptron() , linear_model.PassiveAggressiveClassifier() , linear_model.SGDClassifier() , neural_network.MLPClassifier() , ensemble.RandomForestClassifier(n_jobs = -1), ensemble.ExtraTreesClassifier(n_jobs = -1), ensemble.AdaBoostClassifier() , ensemble.GradientBoostingClassifier() , ensemble.BaggingClassifier(n_jobs = -1), gaussian_process.GaussianProcessClassifier(n_jobs = -1), calibration.CalibratedClassifierCV() , ] MLA_new = [ linear_model.LogisticRegressionCV(multi_class='multinomial'), ensemble.RandomForestClassifier() , linear_model.RidgeClassifier() , naive_bayes.BernoulliNB() , ]<compute_train_metric>
val_dataset = get_validation_dataset() images_ds = val_dataset.map(lambda image, label: image) labels_ds = val_dataset.map(lambda image, label: label ).unbatch() val_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy() m1 = model1.predict(images_ds) m2 = model2.predict(images_ds) scores = [] for alpha in np.linspace(0,1,100): val_probabilities = alpha*m1+(1-alpha)*m2 val_predictions = np.argmax(val_probabilities, axis=-1) scores.append(f1_score(val_labels, val_predictions, labels=range(104), average='macro')) best_alpha = np.linspace(0,1,100)[np.argmax(scores)] print('Best alpha: ' + str(best_alpha))
Petals to the Metal - Flower Classification on TPU
13,462,264
<sort_values><EOS>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities1 = model1.predict(test_images_ds) probabilities2 = model2.predict(test_images_ds) probabilities = best_alpha * probabilities1 +(1 - best_alpha)* probabilities2 predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' )
Petals to the Metal - Flower Classification on TPU
13,292,818
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<set_options>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
13,292,818
py.init_notebook_mode(connected=True) %matplotlib inline warnings.filterwarnings('ignore') for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) <load_from_csv>
from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201 from tensorflow.keras.applications.xception import Xception from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2 from tensorflow.keras.applications.nasnet import NASNetLarge from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2, EfficientNetB0, EfficientNetB1
Petals to the Metal - Flower Classification on TPU
13,292,818
train=pd.read_csv('/kaggle/input/titanic/train.csv') test=pd.read_csv('/kaggle/input/titanic/test.csv') PassengerId=test['PassengerId'] train.head()<feature_engineering>
%matplotlib inline print("Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
13,292,818
full_data=[train,test] train['name_length']=train['Name'].apply(len) test['name_length']=test['Name'].apply(len) train['has_cabin']=train["Cabin"].apply(lambda x:0 if type(x)==float else 1) test['has_cabin']=test["Cabin"].apply(lambda x:0 if type(x)==float else 1) for dataline in full_data: dataline["familysize"]=dataline['SibSp']+dataline['Parch']+1 for dataline in full_data: dataline["isalone"]=0 dataline.loc[dataline['familysize']==1,'isalone']=1 dataline['Embarked']= dataline['Embarked'].fillna('S') dataline['Fare']=dataline['Fare'].fillna(train['Fare'].median()) train['categoricalfare']=pd.cut(train['Fare'],4) for dataset in full_data: age_avg=dataset['Age'].mean() age_std=dataset['Age'].std() age_null_count=dataset['Age'].isnull().sum() age_null_random_list=np.random.randint(age_avg-age_std,age_avg+age_std,size=age_null_count) dataset['Age'][np.isnan(dataset['Age'])]=age_null_random_list dataset['Age']=dataset['Age'].astype(int) train['categoricalage']=pd.cut(train['Age'],5) def get_title(name): title_search=re.search('([A-Za-z]+)\.',name) if title_search: return title_search.group(1) return "" for dataset in full_data: dataset['Title']=dataset['Name'].apply(get_title) for dataset in full_data: dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') train.head() <categorify>
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
Petals to the Metal - Flower Classification on TPU
13,292,818
for dataset in full_data: dataset['Sex']=dataset['Sex'].map({'female':0,'male':1} ).astype(int) title_mapping={"Mr":1,"Miss":2,"Mrs":3,"Master":4,"Rare":5} dataset['Title']=dataset['Title'].map(title_mapping) dataset['Title']=dataset['Title'].fillna(0) dataset['Embarked']=dataset['Embarked'].map({'S':0,'C':1,'Q':2}) dataset.loc[dataset['Fare']<=7.91,'Fare']=0 dataset.loc[(dataset['Fare']>7.91)&(dataset['Fare']<=14.454),'Fare']=1 dataset.loc[(dataset['Fare']>14.454)&(dataset['Fare']<=31),'Fare']=2 dataset.loc[dataset['Fare']>31,'Fare']=3 dataset['Fare']=dataset['Fare'].astype(int) dataset.loc[dataset['Age']<=16,'Age']=0 dataset.loc[(dataset['Age']>16)&(dataset['Age']<=32),'Age']=1 dataset.loc[(dataset['Age']>32)&(dataset['Age']<=48),'Age']=2 dataset.loc[(dataset['Age']>48)&(dataset['Age']<=64),'Age']=3 dataset.loc[dataset['Age']>64,'Age']=4 train.head()<drop_column>
IMAGE_SIZE = [512, 512] EPOCHS = 30 BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') SEED = 2020
Petals to the Metal - Flower Classification on TPU
13,292,818
drop_elements=['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp'] train=train.drop(drop_elements,axis=1) train=train.drop(['categoricalage', 'categoricalfare'],axis=1) test=test.drop(drop_elements,axis=1) <find_best_model_class>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def data_augment(image, label): flag = 3 coef_1 = random.randint(70, 90)* 0.01 coef_2 = random.randint(70, 90)* 0.01 if flag == 1: image = tf.image.random_flip_left_right(image, seed=SEED) elif flag == 2: image = tf.image.random_flip_up_down(image, seed=SEED) else: image = tf.image.random_crop(image, [int(IMAGE_SIZE[0]*coef_1), int(IMAGE_SIZE[0]*coef_2), 3],seed=SEED) return image, label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled=True) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=False) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
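One subtlety in data_augment above: flag = random.randint(1,3) executes in plain Python when tf.data traces the function, so a single branch is picked once for the whole pipeline rather than per image. A sketch of a per-image random choice using TensorFlow ops instead (an illustrative alternative, not the original kernel's code):

import tensorflow as tf

def data_augment_per_image(image, label):
    # Drawn inside the graph, so it varies element by element
    flip_lr = tf.random.uniform([]) < 0.5
    image = tf.cond(flip_lr,
                    lambda: tf.image.random_flip_left_right(image),
                    lambda: tf.image.random_flip_up_down(image))
    return image, label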
Petals to the Metal - Flower Classification on TPU
13,292,818
ntrain=train.shape[0] ntest=test.shape[0] SEED=0 nfolds=5 kf=KFold(n_splits=nfolds,shuffle=True,random_state=SEED) class SKlearnHelper(object): def __init__(self,clf,seed=0,params=None): self.clf=clf(**params) def train(self,x_train,y_train): self.clf.fit(x_train,y_train) def predict(self,x): return self.clf.predict(x) def fit(self,x,y): return self.clf.fit(x,y) def feature_importances(self,x,y): print(self.clf.fit(x,y ).feature_importances_) <predict_on_test>
LR_START = 0.00001 LR_MAX = 0.00005 * strategy.num_replicas_in_sync LR_MIN = 0.00001 LR_RAMPUP_EPOCHS = 5 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY =.75 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) rng = [i for i in range(EPOCHS)] y = [lrfn(x)for x in rng] plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
13,292,818
def get_oof(clf,x_train,y_train,x_test): oof_train=np.zeros(( ntrain,)) oof_test=np.zeros(( ntest,)) oof_test_skf=np.empty(( nfolds,ntest)) for i,(train_index, test_index)in enumerate(kf.split(x_train)) : x_tr=x_train[train_index] y_tr=y_train[train_index] x_te=x_train[test_index] clf.train(x_tr,y_tr) oof_train[test_index]=clf.predict(x_te) oof_test_skf[i,:]=clf.predict(x_test) oof_test[:]=oof_test_skf.mean(axis=0) return oof_train.reshape(-1,1),oof_test.reshape(-1,1) <init_hyperparams>
def get_model(use_model): base_model = use_model(weights='noisy-student', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) return Model(inputs=base_model.input, outputs=predictions) with strategy.scope() : model = get_model(EfficientNetB1) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] )
Petals to the Metal - Flower Classification on TPU
13,292,818
rf_params={ 'n_jobs':-1, 'n_estimators':500, 'warm_start':True, 'max_depth':6, 'min_samples_leaf':2, 'max_features':'sqrt', 'verbose':0 } et_params={ 'n_jobs':-1, 'n_estimators':500, 'max_depth':8, 'min_samples_leaf':2, 'verbose':0 } ada_params={ 'n_estimators':500, 'learning_rate':0.75 } gb_params={ 'n_estimators':500, 'max_depth':8, 'min_samples_leaf':2, 'verbose':0 } svc_params={ 'kernel':'linear', 'C':0.025 } knn_params={ 'n_neighbors':3 } dst_params={ 'max_depth':8 }<choose_model_class>
history = model.fit(get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, callbacks=[lr_callback, ModelCheckpoint(filepath='my_ef_net_b1.h5', monitor='val_loss', save_best_only=True)], validation_data=get_validation_dataset() , workers = 3 )
Petals to the Metal - Flower Classification on TPU
13,292,818
rf=SKlearnHelper(clf=RandomForestClassifier,seed=SEED,params=rf_params) et=SKlearnHelper(clf=ExtraTreesClassifier,seed=SEED,params=et_params) ada=SKlearnHelper(clf=AdaBoostClassifier,seed=SEED,params=ada_params) gb=SKlearnHelper(clf=GradientBoostingClassifier,seed=SEED,params=gb_params) svc=SKlearnHelper(clf=SVC,seed=SEED,params=svc_params) knn=SKlearnHelper(clf=KNeighborsClassifier,seed=SEED,params=knn_params) dst=SKlearnHelper(clf=DecisionTreeClassifier,seed=SEED,params=dst_params )<prepare_x_and_y>
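With the helpers above in place, each base model goes through get_oof (defined earlier) to produce one out-of-fold feature column; a minimal usage sketch for two of them, the others follow the same pattern (x_train, y_train, x_test are assumed to be the numpy feature arrays):

# Shapes: (ntrain, 1) for the OOF train column, (ntest, 1) for the fold-averaged test column
rf_oof_train, rf_oof_test = get_oof(rf, x_train, y_train, x_test)
et_oof_train, et_oof_test = get_oof(et, x_train, y_train, x_test)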
model = tf.keras.models.load_model('my_ef_net_b1.h5' )
Petals to the Metal - Flower Classification on TPU
13,292,818
<categorify><EOS>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities = model.predict(test_images_ds) predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' )
Petals to the Metal - Flower Classification on TPU
13,198,271
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<prepare_output>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
13,198,271
base_prediction_train=pd.DataFrame({'randomforest':rf_oof_train.ravel() , 'extratrees':et_oof_train.ravel() , 'adaboost':ada_oof_train.ravel() , 'gradientboost':gb_oof_train.ravel() , }) base_prediction_train.head()<concatenate>
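A common sanity check on the frame above is the correlation between base-model predictions, since stacking gains the most when the models disagree; a short sketch (seaborn and matplotlib assumed to be imported as usual):

import seaborn as sns
import matplotlib.pyplot as plt

sns.heatmap(base_prediction_train.astype(float).corr(), annot=True, cmap="coolwarm")
plt.title("Correlation of base-model OOF predictions")
plt.show()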
from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201 from tensorflow.keras.applications.xception import Xception from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2 from tensorflow.keras.applications.nasnet import NASNetLarge from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2, EfficientNetB0, EfficientNetB1
Petals to the Metal - Flower Classification on TPU
13,198,271
x_train=np.concatenate(( et_oof_train,rf_oof_train,gb_oof_train,ada_oof_train,svc_oof_train,knn_oof_train,dst_oof_train),axis=1) x_test=np.concatenate(( et_oof_test,rf_oof_test,gb_oof_test,ada_oof_test,svc_oof_test,knn_oof_test,dst_oof_test),axis=1) <train_model>
%matplotlib inline print("Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
13,198,271
gbm=xgb.XGBClassifier( n_estimators=2000, max_depth=4, min_child_weight=2, gamma=0.9, subsample=0.8, colsample_bytree=0.8, objective='binary:logistic', nthread=-1, scale_pos_weight=1 ).fit(x_train,y_train) predictions=gbm.predict(x_test) <save_to_csv>
GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started") MORE_IMAGES_GCS_DS_PATH = KaggleDatasets().get_gcs_path('tf-flower-photo-tfrec' )
Petals to the Metal - Flower Classification on TPU
13,198,271
stackingsubmission=pd.DataFrame({'PassengerId':PassengerId,'Survived':predictions}) stackingsubmission.to_csv("stackingsubmission.csv",index=False )<set_options>
IMAGE_SIZE = [331, 331] EPOCHS = 30 BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] MOREIMAGES_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') IMAGENET_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/imagenet' + MOREIMAGES_PATH + '/*.tfrec') INATURELIST_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/inaturalist' + MOREIMAGES_PATH + '/*.tfrec') OPENIMAGE_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/openimage' + MOREIMAGES_PATH + '/*.tfrec') OXFORD_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/oxford_102' + MOREIMAGES_PATH + '/*.tfrec') TRAINING_FILENAMES = TRAINING_FILENAMES + VALIDATION_FILENAMES + IMAGENET_FILES + INATURELIST_FILES + OPENIMAGE_FILES + OXFORD_FILES SEED = 2020
Petals to the Metal - Flower Classification on TPU
13,198,271
%matplotlib inline warnings.filterwarnings('ignore' )<load_from_csv>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def data_augment(image, label): flag = random.randint(1,3) coef_1 = random.randint(70, 90)* 0.01 coef_2 = random.randint(70, 90)* 0.01 if flag == 1: image = tf.image.random_flip_left_right(image, seed=SEED) elif flag == 2: image = tf.image.random_flip_up_down(image, seed=SEED) else: image = tf.image.random_crop(image, [int(IMAGE_SIZE[0]*coef_1), int(IMAGE_SIZE[0]*coef_2), 3],seed=SEED) return image, label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled=True) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset() : dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=False) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
13,198,271
train = pd.read_csv("/kaggle/input/titanic/train.csv") test = pd.read_csv("/kaggle/input/titanic/test.csv") train.describe(include="all" )<count_missing_values>
LR_START = 0.00001 LR_MAX = 0.00005 * strategy.num_replicas_in_sync LR_MIN = 0.00001 LR_RAMPUP_EPOCHS = 3 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY =.8 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) rng = [i for i in range(EPOCHS)] y = [lrfn(x)for x in rng] plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
13,198,271
print(pd.isnull(train ).sum() )<drop_column>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) return Model(inputs=base_model.input, outputs=predictions) with strategy.scope() : model = get_model(DenseNet201) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] )
Petals to the Metal - Flower Classification on TPU
13,198,271
train = train.drop(['Cabin'], axis = 1) test = test.drop(['Cabin'], axis = 1 )<drop_column>
history = model.fit(get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, callbacks=[lr_callback, ModelCheckpoint(filepath='my_ef_net_b7.h5', monitor='val_loss', save_best_only=True)], validation_data=get_validation_dataset() , workers = 3 )
Petals to the Metal - Flower Classification on TPU
13,198,271
train = train.drop(['Ticket'], axis = 1) test = test.drop(['Ticket'], axis = 1 )<data_type_conversions>
model = tf.keras.models.load_model('my_ef_net_b7.h5' )
Petals to the Metal - Flower Classification on TPU
13,198,271
train = train.fillna({"Embarked": "S"}) <feature_engineering>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities = model.predict(test_images_ds) predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' )
Petals to the Metal - Flower Classification on TPU
12,689,718
for dataset in combine: dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare') dataset['Title'] = dataset['Title'].replace(['Countess', 'Lady', 'Sir'], 'Royal') dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') train[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean()<categorify>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
12,689,718
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Royal": 5, "Rare": 6} for dataset in combine: dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0) train.head() <categorify>
import math, re, os import tensorflow as tf import numpy as np from matplotlib import pyplot as plt from kaggle_datasets import KaggleDatasets import efficientnet.tfkeras as efn from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix
Petals to the Metal - Flower Classification on TPU
12,689,718
mr_age = train[train["Title"] == 1]["AgeGroup"].mode() miss_age = train[train["Title"] == 2]["AgeGroup"].mode() mrs_age = train[train["Title"] == 3]["AgeGroup"].mode() master_age = train[train["Title"] == 4]["AgeGroup"].mode() royal_age = train[train["Title"] == 5]["AgeGroup"].mode() rare_age = train[train["Title"] == 6]["AgeGroup"].mode() age_title_mapping = {1: "Young Adult", 2: "Student", 3: "Adult", 4: "Baby", 5: "Adult", 6: "Adult"} for x in range(len(train["AgeGroup"])) : if train["AgeGroup"][x] == "Unknown": train.loc[x, "AgeGroup"] = age_title_mapping[train["Title"][x]] for x in range(len(test["AgeGroup"])) : if test["AgeGroup"][x] == "Unknown": test.loc[x, "AgeGroup"] = age_title_mapping[test["Title"][x]]<categorify>
AUTO = tf.data.experimental.AUTOTUNE
Petals to the Metal - Flower Classification on TPU
12,689,718
age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7} train['AgeGroup'] = train['AgeGroup'].map(age_mapping) test['AgeGroup'] = test['AgeGroup'].map(age_mapping) train.head() train = train.drop(['Age'], axis = 1) test = test.drop(['Age'], axis = 1 )<drop_column>
tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu )
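The cell above assumes a TPU is attached to the runtime; a defensive variant often used with these kernels falls back to the default strategy when the resolver fails (a sketch, not part of the original kernel):

import tensorflow as tf

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()   # raises ValueError if no TPU
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
    strategy = tf.distribute.get_strategy()                     # default CPU/GPU strategy
print("Replicas:", strategy.num_replicas_in_sync)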
Petals to the Metal - Flower Classification on TPU
12,689,718
train = train.drop(['Name'], axis = 1) test = test.drop(['Name'], axis = 1 )<categorify>
GCS_DS_PATH = KaggleDatasets().get_gcs_path() IMAGE_SIZE = [512, 512] EPOCHS = 50 BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec' )
Petals to the Metal - Flower Classification on TPU
12,689,718
sex_mapping = {"male": 0, "female": 1} train['Sex'] = train['Sex'].map(sex_mapping) test['Sex'] = test['Sex'].map(sex_mapping) train.head() <categorify>
CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose']
Petals to the Metal - Flower Classification on TPU
12,689,718
embarked_mapping = {"S": 1, "C": 2, "Q": 3} train['Embarked'] = train['Embarked'].map(embarked_mapping) test['Embarked'] = test['Embarked'].map(embarked_mapping) train.head()<split>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def data_augment(image, label): image = tf.image.random_flip_left_right(image) return image, label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled=True) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset(ordered=False): dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
12,689,718
predictors = train.drop(['Survived', 'PassengerId'], axis=1) target = train["Survived"] x_train, x_val, y_train, y_val = train_test_split(predictors, target, test_size = 0.22, random_state = 0 )<predict_on_test>
with strategy.scope() : enet = efn.EfficientNetB7( input_shape=(*IMAGE_SIZE, 3), weights='imagenet', include_top=False ) model = tf.keras.Sequential([ enet, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation='softmax') ]) model.compile( optimizer=tf.keras.optimizers.Adam(lr=0.0001), loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) model.summary()
Petals to the Metal - Flower Classification on TPU
12,689,718
gaussian = GaussianNB() gaussian.fit(x_train, y_train) y_pred = gaussian.predict(x_val) acc_gaussian = round(accuracy_score(y_pred, y_val)* 100, 2) print(acc_gaussian )<compute_train_metric>
scheduler = tf.keras.callbacks.ReduceLROnPlateau(patience=3, verbose=1) history = model.fit( get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, callbacks=[scheduler], validation_data=get_validation_dataset() )
Petals to the Metal - Flower Classification on TPU
12,689,718
<compute_train_metric><EOS>
test_ds = get_test_dataset(ordered=True) print('Computing predictions...') test_images_ds = test_ds.map(lambda image, idnum: image) probabilities = model.predict(test_images_ds) predictions = np.argmax(probabilities, axis=-1) print(predictions) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
12,486,043
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<compute_train_metric>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
12,486,043
linear_svc = LinearSVC() linear_svc.fit(x_train, y_train) y_pred = linear_svc.predict(x_val) acc_linear_svc = round(accuracy_score(y_pred, y_val)* 100, 2) print(acc_linear_svc )<compute_train_metric>
from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201 from tensorflow.keras.applications.xception import Xception from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2 from tensorflow.keras.applications.nasnet import NASNetLarge from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2, EfficientNetB0
Petals to the Metal - Flower Classification on TPU
12,486,043
perceptron = Perceptron() perceptron.fit(x_train, y_train) y_pred = perceptron.predict(x_val) acc_perceptron = round(accuracy_score(y_pred, y_val)* 100, 2) print(acc_perceptron )<train_model>
%matplotlib inline print("Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
12,486,043
decisiontree = DecisionTreeClassifier() decisiontree.fit(x_train, y_train) y_pred = decisiontree.predict(x_val) acc_decisiontree = round(accuracy_score(y_pred, y_val)* 100, 2) print(acc_decisiontree )<train_model>
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
Petals to the Metal - Flower Classification on TPU
12,486,043
randomforest = RandomForestClassifier() randomforest.fit(x_train, y_train) y_pred = randomforest.predict(x_val) acc_randomforest = round(accuracy_score(y_pred, y_val)* 100, 2) print(acc_randomforest )<compute_train_metric>
IMAGE_SIZE = [512, 512] EPOCHS = 30 BATCH_SIZE = 16 * strategy.num_replicas_in_sync SEED = 2020 NUM_TRAINING_IMAGES = 12753 NUM_TEST_IMAGES = 7382 STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
Petals to the Metal - Flower Classification on TPU
12,486,043
knn = KNeighborsClassifier() knn.fit(x_train, y_train) y_pred = knn.predict(x_val) acc_knn = round(accuracy_score(y_pred, y_val)* 100, 2) print(acc_knn )<predict_on_test>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord) return dataset def data_augment(image, label): image = tf.image.random_crop(image, size=[int(IMAGE_SIZE[0]*0.75), int(IMAGE_SIZE[0]*0.75), 3], seed=SEED) return image, label def get_training_dataset() : dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + f'/tfrecords-jpeg-{IMAGE_SIZE[0]}x{IMAGE_SIZE[0]}/train/*.tfrec'), labeled=True) dataset = dataset.map(data_augment) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(-1) return dataset def get_validation_dataset() : dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + f'/tfrecords-jpeg-{IMAGE_SIZE[0]}x{IMAGE_SIZE[0]}/val/*.tfrec'), labeled=True, ordered=False) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(-1) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + f'/tfrecords-jpeg-{IMAGE_SIZE[0]}x{IMAGE_SIZE[0]}/test/*.tfrec'), labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(-1) return dataset training_dataset = get_training_dataset() validation_dataset = get_validation_dataset()
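Worth noting about data_augment above: the crop yields 384x384 images (75% of 512), while the model defined below declares a fixed (*IMAGE_SIZE, 3) input. A sketch of an augmentation that crops and then resizes back to the declared size (assuming IMAGE_SIZE and SEED from the cells above; whether the original kernel intended this is an assumption):

import tensorflow as tf

def data_augment_resized(image, label):
    side = int(IMAGE_SIZE[0] * 0.75)
    image = tf.image.random_crop(image, size=[side, side, 3], seed=SEED)
    image = tf.image.resize(image, IMAGE_SIZE)   # back to the size the model expects
    return image, label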
Petals to the Metal - Flower Classification on TPU
12,486,043
sgd = SGDClassifier() sgd.fit(x_train, y_train) y_pred = sgd.predict(x_val) acc_sgd = round(accuracy_score(y_pred, y_val)* 100, 2) print(acc_sgd )<predict_on_test>
LR_START = 0.00001 LR_MAX = 0.0001 LR_MIN = 0.00001 LR_RAMPUP_EPOCHS = 8 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY =.8 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) rng = [i for i in range(EPOCHS)] y = [lrfn(x)for x in rng] plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
12,486,043
gbk = GradientBoostingClassifier() gbk.fit(x_train, y_train) y_pred = gbk.predict(x_val) acc_gbk = round(accuracy_score(y_pred, y_val)* 100, 2) print(acc_gbk )<create_dataframe>
def get_model(use_model): base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3)) x = base_model.output predictions = Dense(104, activation='softmax' )(x) return Model(inputs=base_model.input, outputs=predictions) with strategy.scope() : model = get_model(InceptionV3) model.compile( optimizer='nadam', loss = 'sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'] ) tf.keras.utils.plot_model( model, to_file='model.png', show_shapes=True, show_layer_names=True, )
Petals to the Metal - Flower Classification on TPU
12,486,043
models = pd.DataFrame({ 'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', 'Random Forest', 'Naive Bayes', 'Perceptron', 'Linear SVC', 'Decision Tree', 'Stochastic Gradient Descent', 'Gradient Boosting Classifier'], 'Score': [acc_svc, acc_knn, acc_logreg, acc_randomforest, acc_gaussian, acc_perceptron,acc_linear_svc, acc_decisiontree, acc_sgd, acc_gbk]}) models.sort_values(by='Score', ascending=False )<save_to_csv>
history = model.fit(training_dataset, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, callbacks=[lr_callback, ModelCheckpoint(filepath='my_ef_net_b7.h5', monitor='val_loss', save_best_only=True)], validation_data=validation_dataset )
Petals to the Metal - Flower Classification on TPU
12,486,043
ids = test['PassengerId']
predictions = gbk.predict(test.drop('PassengerId', axis=1))
output = pd.DataFrame({'PassengerId': ids, 'Survived': predictions})
output.to_csv('submission.csv', index=False)<load_from_csv>
model = tf.keras.models.load_model('my_ef_net_b7.h5' )
Petals to the Metal - Flower Classification on TPU
12,486,043
<rename_columns><EOS>
test_ds = get_test_dataset(ordered=True)

print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)

print('Writing submission.csv...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]),
           fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
12,393,699
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<data_type_conversions>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
12,393,699
df_train['Date'] = pd.to_datetime(df_train['Date'], infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(df_test['Date'], infer_datetime_format=True)<something_strange>
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
from tensorflow.keras.applications.nasnet import NASNetLarge
from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2
# Not in the original cell, but required by get_model and the fit callbacks below:
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
Petals to the Metal - Flower Classification on TPU
12,393,699
EMPTY_VAL = "EMPTY_VAL"

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state<data_type_conversions>
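A tiny usage check for the helper above (illustrative only; the values are hypothetical):

assert fillState('California', 'US') == 'California'  # a real state is kept
assert fillState(EMPTY_VAL, 'France') == 'France'     # missing state falls back to the country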
%matplotlib inline
print("Tensorflow version " + tf.__version__)
Petals to the Metal - Flower Classification on TPU
12,393,699
X_Train = df_train.copy()
X_Train['State'].fillna(EMPTY_VAL, inplace=True)
X_Train['State'] = X_Train.loc[:, ['State', 'Country']].apply(lambda x: fillState(x['State'], x['Country']), axis=1)
X_Train.loc[:, 'Date'] = X_Train.Date.dt.strftime("%m%d")
X_Train["Date"] = X_Train["Date"].astype(int)
X_Train.head()<feature_engineering>
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
Petals to the Metal - Flower Classification on TPU
12,393,699
X_Test = df_test.copy()
X_Test['State'].fillna(EMPTY_VAL, inplace=True)
X_Test['State'] = X_Test.loc[:, ['State', 'Country']].apply(lambda x: fillState(x['State'], x['Country']), axis=1)
X_Test.loc[:, 'Date'] = X_Test.Date.dt.strftime("%m%d")
X_Test["Date"] = X_Test["Date"].astype(int)
X_Test.head()<categorify>
IMAGE_SIZE = [512, 512]
EPOCHS = 30
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
SEED = 2020
NUM_TRAINING_IMAGES = 12753
NUM_TEST_IMAGES = 7382
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
Petals to the Metal - Flower Classification on TPU
12,393,699
le = preprocessing.LabelEncoder()<categorify>
def decode_image(image_data):
    image = tf.image.decode_jpeg(image_data, channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.reshape(image, [*IMAGE_SIZE, 3])
    return image

def read_labeled_tfrecord(example):
    LABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "class": tf.io.FixedLenFeature([], tf.int64),
    }
    example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    label = tf.cast(example['class'], tf.int32)
    return image, label

def read_unlabeled_tfrecord(example):
    UNLABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "id": tf.io.FixedLenFeature([], tf.string),
    }
    example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    idnum = example['id']
    return image, idnum

def load_dataset(filenames, labeled=True, ordered=False):
    ignore_order = tf.data.Options()
    if not ordered:
        ignore_order.experimental_deterministic = False
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.with_options(ignore_order)
    dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord)
    return dataset

def data_augment(image, label):
    image = tf.image.random_crop(image, [150, 150, 3], seed=SEED)
    # Resize back: the model is built for IMAGE_SIZE (512x512) inputs,
    # so feeding raw 150x150 crops would fail at fit time.
    image = tf.image.resize(image, IMAGE_SIZE)
    return image, label

def get_training_dataset():
    dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + f'/tfrecords-jpeg-{IMAGE_SIZE[0]}x{IMAGE_SIZE[0]}/train/*.tfrec'), labeled=True)
    dataset = dataset.map(data_augment)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(2048)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(-1)
    return dataset

def get_validation_dataset():
    dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + f'/tfrecords-jpeg-{IMAGE_SIZE[0]}x{IMAGE_SIZE[0]}/val/*.tfrec'), labeled=True, ordered=False)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.cache()
    dataset = dataset.prefetch(-1)
    return dataset

def get_test_dataset(ordered=False):
    dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + f'/tfrecords-jpeg-{IMAGE_SIZE[0]}x{IMAGE_SIZE[0]}/test/*.tfrec'), labeled=False, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(-1)
    return dataset

training_dataset = get_training_dataset()
validation_dataset = get_validation_dataset()
Petals to the Metal - Flower Classification on TPU
12,393,699
X_Train.Country = le.fit_transform(X_Train.Country)
X_Train['State'] = le.fit_transform(X_Train['State'])
X_Train.head()<categorify>
LR_START = 0.00001
LR_MAX = 0.0001
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 9
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = .8

def lrfn(epoch):
    if epoch < LR_RAMPUP_EPOCHS:
        lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
        lr = LR_MAX
    else:
        lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
    return lr

lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True)

rng = [i for i in range(EPOCHS)]
y = [lrfn(x) for x in rng]
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
12,393,699
X_Test.Country = le.fit_transform(X_Test.Country)
X_Test['State'] = le.fit_transform(X_Test['State'])
X_Test.head()<filter>
def get_model(use_model):
    base_model = use_model(weights='imagenet', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3))
    x = base_model.output
    predictions = Dense(104, activation='softmax')(x)
    return Model(inputs=base_model.input, outputs=predictions)

with strategy.scope():
    model = get_model(InceptionV3)

model.compile(
    optimizer='nadam',
    loss='sparse_categorical_crossentropy',
    metrics=['sparse_categorical_accuracy']
)
tf.keras.utils.plot_model(
    model,
    to_file='model.png',
    show_shapes=True,
    show_layer_names=True,
)
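The factory above accepts any backbone with the standard Keras applications signature; a minimal sketch (an assumed variant, not in the kernel, reusing Xception from this kernel's imports) to compare architectures:

with strategy.scope():
    xception_model = get_model(Xception)  # hypothetical swap of the backbone
xception_model.compile(optimizer='nadam',
                       loss='sparse_categorical_crossentropy',
                       metrics=['sparse_categorical_accuracy'])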
Petals to the Metal - Flower Classification on TPU
12,393,699
df_train.loc[df_train.Country == 'Afghanistan', :]<set_options>
history = model.fit(training_dataset, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, callbacks=[lr_callback, ModelCheckpoint(filepath='my_ef_net_b7.h5', monitor='val_loss', save_best_only=True)], validation_data=validation_dataset )
Petals to the Metal - Flower Classification on TPU
12,393,699
filterwarnings('ignore' )<categorify>
model = tf.keras.models.load_model('my_ef_net_b7.h5' )
Petals to the Metal - Flower Classification on TPU
12,393,699
<import_modules><EOS>
test_ds = get_test_dataset(ordered=True)

print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)

print('Writing submission.csv...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]),
           fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
12,042,833
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<data_type_conversions>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
12,042,833
df_out.ForecastId = df_out.ForecastId.astype('int')
df_out2.ForecastId = df_out2.ForecastId.astype('int')<feature_engineering>
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
from tensorflow.keras.applications.nasnet import NASNetLarge
from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2, EfficientNetB0, EfficientNetB1
# Not in the original cell, but required by get_model and the fit callbacks below:
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
Petals to the Metal - Flower Classification on TPU
12,042,833
df_out['ConfirmedCases'] = (1/2) * (df_out['ConfirmedCases'] + df_out2['ConfirmedCases'])
df_out['Fatalities'] = (1/2) * (df_out['Fatalities'] + df_out2['Fatalities'])<data_type_conversions>
%matplotlib inline
print("Tensorflow version " + tf.__version__)
Petals to the Metal - Flower Classification on TPU
12,042,833
df_out['ConfirmedCases'] = df_out['ConfirmedCases'].round().astype(int)
df_out['Fatalities'] = df_out['Fatalities'].round().astype(int)<save_to_csv>
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
Petals to the Metal - Flower Classification on TPU
12,042,833
df_out.to_csv('submission.csv', index=False )<import_modules>
IMAGE_SIZE = [512, 512]
EPOCHS = 30
BATCH_SIZE = 16 * strategy.num_replicas_in_sync

GCS_PATH_SELECT = {
    192: GCS_DS_PATH + '/tfrecords-jpeg-192x192',
    224: GCS_DS_PATH + '/tfrecords-jpeg-224x224',
    331: GCS_DS_PATH + '/tfrecords-jpeg-331x331',
    512: GCS_DS_PATH + '/tfrecords-jpeg-512x512'
}
GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]

TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec')
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')

SEED = 2020
Petals to the Metal - Flower Classification on TPU
12,042,833
import numpy as np
import pandas as pd<load_from_csv>
import random  # used by data_augment below (may already be imported earlier in the kernel)
import re      # used by count_data_items below

def decode_image(image_data):
    image = tf.image.decode_jpeg(image_data, channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.reshape(image, [*IMAGE_SIZE, 3])
    return image

def read_labeled_tfrecord(example):
    LABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "class": tf.io.FixedLenFeature([], tf.int64),
    }
    example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    label = tf.cast(example['class'], tf.int32)
    return image, label

def read_unlabeled_tfrecord(example):
    UNLABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "id": tf.io.FixedLenFeature([], tf.string),
    }
    example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    idnum = example['id']
    return image, idnum

def load_dataset(filenames, labeled=True, ordered=False):
    ignore_order = tf.data.Options()
    if not ordered:
        ignore_order.experimental_deterministic = False
    dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
    dataset = dataset.with_options(ignore_order)
    dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
    return dataset

def data_augment(image, label):
    # Caveat: Python's `random` runs once when tf.data traces this function,
    # so the branch and crop fractions are fixed per pipeline build rather
    # than drawn fresh for every image.
    flag = random.randint(1, 3)
    coef_1 = random.randint(70, 90) * 0.01
    coef_2 = random.randint(70, 90) * 0.01
    if flag == 1:
        image = tf.image.random_flip_left_right(image, seed=SEED)
    elif flag == 2:
        image = tf.image.random_flip_up_down(image, seed=SEED)
    else:
        image = tf.image.random_crop(image, [int(IMAGE_SIZE[0]*coef_1), int(IMAGE_SIZE[0]*coef_2), 3], seed=SEED)
        # Resize back so every example keeps the model's fixed input shape
        # and differently sized crops can still be batched together.
        image = tf.image.resize(image, IMAGE_SIZE)
    return image, label

def get_training_dataset():
    dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
    dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(2048)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_validation_dataset():
    dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=False)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.cache()
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_test_dataset(ordered=False):
    dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset

def count_data_items(filenames):
    # Shard names encode their record count, e.g. 'train00-230.tfrec' -> 230.
    n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1)) for filename in filenames]
    return np.sum(n)

NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
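A small check of the shard-name convention count_data_items relies on (assumption: shards are named like 'train00-230.tfrec', with the per-shard record count after the dash):

import re
m = re.compile(r"-([0-9]*)\.").search('train00-230.tfrec')  # hypothetical filename
assert int(m.group(1)) == 230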
Petals to the Metal - Flower Classification on TPU
12,042,833
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')<define_variables>
LR_START = 0.00001
LR_MAX = 0.00005 * strategy.num_replicas_in_sync
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = .75

def lrfn(epoch):
    if epoch < LR_RAMPUP_EPOCHS:
        lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
        lr = LR_MAX
    else:
        lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
    return lr

lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True)

rng = [i for i in range(EPOCHS)]
y = [lrfn(x) for x in rng]
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
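Unlike the earlier kernels, the peak LR here scales with the accelerator size; a worked example under the assumption of an 8-replica TPU v3-8:

replicas = 8  # hypothetical value of strategy.num_replicas_in_sync
print(0.00005 * replicas)  # -> 0.0004, the peak LR matching a 16*8 global batch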
Petals to the Metal - Flower Classification on TPU
12,042,833
women = train_data[train_data['Sex'] == 'female']['Survived']
rate_women = sum(women) / len(women)
print('% of women who survived:', rate_women)<define_variables>
def get_model(use_model):
    base_model = use_model(weights='noisy-student', include_top=False, pooling='avg', input_shape=(*IMAGE_SIZE, 3))
    x = base_model.output
    predictions = Dense(104, activation='softmax')(x)
    return Model(inputs=base_model.input, outputs=predictions)

with strategy.scope():
    model = get_model(EfficientNetB7)

model.compile(
    optimizer='nadam',
    loss='sparse_categorical_crossentropy',
    metrics=['sparse_categorical_accuracy']
)
Petals to the Metal - Flower Classification on TPU
12,042,833
men = train_data[train_data.Sex == 'male']['Survived']
rate_men = sum(men) / len(men)
print('% of men who survived:', rate_men)<groupby>
history = model.fit(get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, callbacks=[lr_callback, ModelCheckpoint(filepath='my_ef_net_b7.h5', monitor='val_loss', save_best_only=True)], validation_data=get_validation_dataset() , workers = 3 )
Petals to the Metal - Flower Classification on TPU
12,042,833
train_data[['Sex', 'Survived']].groupby(['Sex'] ).mean()<groupby>
model = tf.keras.models.load_model('my_ef_net_b7.h5' )
Petals to the Metal - Flower Classification on TPU
12,042,833
<groupby><EOS>
test_ds = get_test_dataset(ordered=True)

print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)

print('Writing submission.csv...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]),
           fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
11,896,796
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<groupby>
print(tf.__version__)
warnings.filterwarnings('ignore')
Petals to the Metal - Flower Classification on TPU
11,896,796
train_data[['Embarked', 'Survived']].groupby(['Embarked'] ).mean()<groupby>
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    print('Device:', tpu.master())
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except:
    strategy = tf.distribute.get_strategy()
    print("No tpu present")

print('Number of replicas:', strategy.num_replicas_in_sync)
AUTO = tf.data.experimental.AUTOTUNE
Petals to the Metal - Flower Classification on TPU
11,896,796
train_data[['Embarked', 'Pclass']].groupby(['Embarked'] ).mean()<define_variables>
IMAGE_SIZE = [224, 224]
EPOCHS = 10
FOLDS = 5
SEED = 777
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
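Worked batch-size arithmetic for the constants above (a sketch; assumes an 8-replica TPU v3-8):

replicas = 8  # hypothetical value of strategy.num_replicas_in_sync
print(16 * replicas)             # -> 128 images per global batch
print(12753 // (16 * replicas))  # -> 99 steps/epoch for the 12753-image training set quoted earlier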
Petals to the Metal - Flower Classification on TPU