Dataset schema (column: type, observed range):
kernel_id: int64, values from 24.2k to 23.3M
prompt: string, lengths from 8 to 1.85M
completetion: string, lengths from 1 to 182k
comp_name: string, lengths from 5 to 57
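To make the schema concrete, here is a minimal sketch of how a dump with these four columns could be loaded and inspected with pandas. The file name cells.parquet is a placeholder; the schema above describes the columns, not how or where the rows are stored.

import pandas as pd

# Placeholder file name: the storage format of this dump is an assumption.
df = pd.read_parquet("cells.parquet")

# Columns per the schema: kernel_id (int64), prompt, completetion, comp_name (strings).
print(df.dtypes)
print(df["comp_name"].value_counts().head())

# Inspect the prompt/completion pair of one kernel_id that appears in the rows below.
row = df.loc[df["kernel_id"] == 12096421].iloc[0]
print(row["prompt"][:200])
print(row["completetion"][:200])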
12,096,421
embeddings_glove = np.load('../input/pickled-glove840b300d-for-10sec-loading/glove.840B.300d.pkl', allow_pickle=True )<categorify>
def read_unlabeled_tfrecord(record): record = tf.io.parse_single_example(record , UNLABELED_TFREC_FORMAT) image = decode_image(record['image']) id_num = record['id'] return image , id_num
Petals to the Metal - Flower Classification on TPU
12,096,421
embedding_matrix = np.zeros(( vocab_size + 1, embedding_dim)) for word, i in word_index.items() : embedding_vector = embeddings_glove.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector<normalization>
def load_dataset(filenames , labeled=True , ordered = False): if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord) return dataset
Petals to the Metal - Flower Classification on TPU
12,096,421
class F1Score(tf.keras.metrics.Metric): def __init__(self, name='f1_score', **kwargs): super(F1Score, self ).__init__(name=name, **kwargs) self.p = tf.keras.metrics.Precision() self.r = tf.keras.metrics.Recall() def update_state(self, *args, **kwargs): self.p.update_state(*args, **kwargs) self.r.update_state(*args, **kwargs) def reset_states(self): self.p.reset_states() self.r.reset_states() def result(self): p_res, r_res = self.p.result() , self.r.result() return(2 * p_res * r_res)/(p_res + r_res )<choose_model_class>
def load_augmented_dataset(filenames , labeled=True , ordered = False): if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord) dataset = dataset.map(augment_data) return dataset
Petals to the Metal - Flower Classification on TPU
12,096,421
def create_model() : tf.keras.backend.clear_session() model = tf.keras.Sequential([ tf.keras.layers.Embedding( vocab_size + 1, embedding_dim, weights=[embedding_matrix], input_length=max_len, trainable=False ), tf.keras.layers.Bidirectional( tf.keras.layers.LSTM(300) ), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(30, activation='relu'), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(1, activation='sigmoid') ]) model.compile( loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam() , metrics=[F1Score() ] ) return model<feature_engineering>
def get_training_dataset() : dataset = load_dataset(training_data , labeled = True , ordered = False) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) return dataset
Petals to the Metal - Flower Classification on TPU
12,096,421
final_predictions = np.average(predictions, axis=0) test_pd['target'] = final_predictions test_pd['target'] = test_pd['target'].apply(lambda x: 1 if x >= 0.5 else 0 )<save_to_csv>
def get_augmented_dataset() : dataset = load_augmented_dataset(training_data , labeled = True , ordered = False) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) return dataset
Petals to the Metal - Flower Classification on TPU
12,096,421
test_pd.to_csv('/kaggle/working/submission.csv', columns=['id', 'target'], index=False )<import_modules>
def get_validation_dataset() : dataset = load_dataset(validation_data , labeled = True , ordered = False) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() return dataset
Petals to the Metal - Flower Classification on TPU
12,096,421
<install_modules>
def get_test_dataset(ordered = False): dataset = load_dataset(testing_data , labeled = False , ordered = ordered) dataset = dataset.batch(BATCH_SIZE) return dataset
Petals to the Metal - Flower Classification on TPU
12,096,421
<import_modules>
training_dataset = get_training_dataset() validation_dataset = get_validation_dataset()
Petals to the Metal - Flower Classification on TPU
12,096,421
<import_modules>
!pip install efficientnet
Petals to the Metal - Flower Classification on TPU
12,096,421
from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import make_scorer, accuracy_score from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from xgboost import XGBClassifier from sklearn.model_selection import GridSearchCV from sklearn.preprocessing import LabelEncoder from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler<load_from_csv>
import efficientnet.tfkeras as efn
Petals to the Metal - Flower Classification on TPU
12,096,421
df = pd.read_csv('../input/train.csv', header = 0, dtype={'Age': np.float64}) test = pd.read_csv('../input/test.csv' , header = 0, dtype={'Age': np.float64} )<groupby>
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss' , patience = 4) learning_rate_start = 0.00001 learning_rate_max = 0.00005 * strategy.num_replicas_in_sync learning_rate_min = 0.0001 learning_rate_boost_epochs = 3 learning_rate_sustain_epochs = 0 learning_rate_decay = 0.9 def learning_rate_schedule(epoch): if epoch < learning_rate_boost_epochs: lr =(learning_rate_max - learning_rate_start)/ learning_rate_boost_epochs * epoch + learning_rate_start elif epoch < learning_rate_boost_epochs + learning_rate_sustain_epochs: lr = learning_rate_max else: lr =(learning_rate_max - learning_rate_min)* learning_rate_decay **(epoch - learning_rate_boost_epochs - learning_rate_sustain_epochs)+ learning_rate_min return lr learning_rate_callback = tf.keras.callbacks.LearningRateScheduler(learning_rate_schedule , verbose = True)
Petals to the Metal - Flower Classification on TPU
12,096,421
df.groupby(by=['Pclass'])['Survived'].agg(['mean','count'] )<set_options>
def create_model() : with strategy.scope() : input_layer = tf.keras.layers.Input(shape =(*IMAGE_SIZE,3)) pretrained_model = efn.EfficientNetB7(include_top = False , weights = 'noisy-student' , input_shape =(*IMAGE_SIZE,3), input_tensor = input_layer , pooling='avg') for layer in pretrained_model.layers: layer.trainable = True X = tf.keras.layers.Dropout(0.2 )(pretrained_model.layers[-1].output) X = tf.keras.layers.Dense(NUM_CLASSES , activation = 'softmax' , dtype= 'float32' )(X) model = tf.keras.Model(inputs = input_layer , outputs = X) optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False) loss = tf.keras.losses.SparseCategoricalCrossentropy() model.compile(loss = loss , optimizer = optimizer , metrics=['sparse_categorical_accuracy']) return model
Petals to the Metal - Flower Classification on TPU
12,096,421
Image(url= "https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/2bc37b51-c9e4-402e-938e-70d3145815f2/d787jna-1b3767d2-f297-4b73-a874-7cfa6d1e8a69.png/v1/fill/w_1600,h_460,q_80,strp/r_m_s__titanic_class_system_by_monroegerman_d787jna-fullview.jpg?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9NDYwIiwicGF0aCI6IlwvZlwvMmJjMzdiNTEtYzllNC00MDJlLTkzOGUtNzBkMzE0NTgxNWYyXC9kNzg3am5hLTFiMzc2N2QyLWYyOTctNGI3My1hODc0LTdjZmE2ZDFlOGE2OS5wbmciLCJ3aWR0aCI6Ijw9MTYwMCJ9XV0sImF1ZCI6WyJ1cm46c2VydmljZTppbWFnZS5vcGVyYXRpb25zIl19.6krQcPQvsfcQ_ZJ_CGvufi9MT-PJkkg1I8-grLy7Hiw" )<groupby>
model.fit(training_dataset , epochs = EPOCHS , validation_data = validation_dataset , steps_per_epoch = STEPS_PER_EPOCH , callbacks = [learning_rate_callback])
Petals to the Metal - Flower Classification on TPU
12,096,421
sex_survived= df.groupby(by=['Sex','Survived'])['Survived'].agg(['count'] ).reset_index() sex_survived<set_options>
augmented_dataset = get_augmented_dataset()
Petals to the Metal - Flower Classification on TPU
12,096,421
def configure_plotly_browser_state() : display(IPython.core.display.HTML() )<create_dataframe>
model.fit(augmented_dataset , epochs = EPOCHS , validation_data = validation_dataset , steps_per_epoch = STEPS_PER_EPOCH , callbacks = [learning_rate_callback] )
Petals to the Metal - Flower Classification on TPU
12,096,421
male_survived=pd.DataFrame(df['Age'][(df['Sex']=='male')&(df['Survived']==1)].value_counts().sort_index(ascending=False)).reset_index().rename(columns={'index':'Age','Age':'Number'}) female_survived=pd.DataFrame(df['Age'][(df['Sex']=='female')&(df['Survived']==1)].value_counts().sort_index(ascending=False)).reset_index().rename(columns={'index':'Age','Age':'Number'}) male_not_survived=pd.DataFrame(df['Age'][(df['Sex']=='male')&(df['Survived']==0)].value_counts().sort_index(ascending=False)).reset_index().rename(columns={'index':'Age','Age':'Number'}) female_not_survived=pd.DataFrame(df['Age'][(df['Sex']=='female')&(df['Survived']==0)].value_counts().sort_index(ascending=False)).reset_index().rename(columns={'index':'Age','Age':'Number'}) <define_variables>
test_dataset = get_test_dataset(ordered=True )
Petals to the Metal - Flower Classification on TPU
12,096,421
df_all = [df,test]<count_missing_values>
print("Predicting") test_images_ds = test_dataset.map(lambda image , idnum : image) prob = model.predict(test_images_ds) pred = np.argmax(prob , axis = -1) print(pred )
Petals to the Metal - Flower Classification on TPU
12,096,421
<feature_engineering><EOS>
print("Generating Csv") test_ids_ds = test_dataset.map(lambda image , idnum : idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv' , np.rec.fromarrays([test_ids , pred]), fmt=['%s' , '%d'] , delimiter=',',header='id,label' , comments='' )
Petals to the Metal - Flower Classification on TPU
12,028,553
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<count_values>
!pip install efficientnet print("Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
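The record above gives the competition metric as macro F-score. For reference only, a minimal sketch of that metric using scikit-learn's f1_score with average='macro' (illustrative toy labels, not taken from the kernels in this dump):

import numpy as np
from sklearn.metrics import f1_score

# Macro F-score: compute the F1 score for each class separately,
# then average the per-class scores with equal weight.
y_true = np.array([0, 1, 2, 2, 1, 0])
y_pred = np.array([0, 2, 2, 2, 0, 0])
print(f1_score(y_true, y_pred, average="macro"))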
12,028,553
test.isAlone.value_counts()<drop_column>
AUTO = tf.data.experimental.AUTOTUNE tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu )
Petals to the Metal - Flower Classification on TPU
12,028,553
for data in df_all: data.drop(columns=['PassengerId','Name','Cabin','Ticket','SibSp','Parch'],inplace=True,axis=1 )<count_missing_values>
SEED = 42 def seed_everything(seed): os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) tf.random.set_seed(seed) seed_everything(SEED )
Petals to the Metal - Flower Classification on TPU
12,028,553
for data in df_all: print(f" -------- {data.index } ------- ") print(data.isnull().sum() )<categorify>
IMAGE_SIZE = [[512, 512]] EPOCHS = 30 BATCH_SIZE = 32 * strategy.num_replicas_in_sync AUG_BATCH = BATCH_SIZE LEARNING_RATE = 1e-3 TTA_NUM = 3 RESUME_TRAINING = True print("Batch size used: ", BATCH_SIZE )
Petals to the Metal - Flower Classification on TPU
12,028,553
test = pd.get_dummies(test,columns=['Sex','Embarked']) df = pd.get_dummies(df,columns=['Sex','Embarked'] )<prepare_x_and_y>
GCS_DS_PATH = KaggleDatasets().get_gcs_path('tpu-getting-started') MORE_IMAGES_GCS_DS_PATH = KaggleDatasets().get_gcs_path('tf-flower-photo-tfrec') GCS_PATH_SELECT = { 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } MOREIMAGES_PATH_SELECT = { 512: '/tfrecords-jpeg-512x512' }
Petals to the Metal - Flower Classification on TPU
12,028,553
y=df['Survived'] X=df.drop(columns=['Survived'],axis=1 )<split>
def decode_image(image_data, img_size): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*img_size, 3]) return image def read_labeled_tfrecord(example, img_size): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image'], img_size) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example, img_size): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image'], img_size) idnum = example['id'] return image, idnum def data_augment(image, label, seed=SEED): image = tf.image.random_flip_left_right(image, seed=seed) image = tf.image.random_brightness(image, 0.1, seed=seed) image = tf.image.random_hue(image, 0.01) image = tf.image.random_saturation(image, 0.7, 1.3) image = tf.image.random_contrast(image, 0.8, 1.2) image = tf.image.random_brightness(image, 0.1) image = tf.image.random_saturation(image, 0, 2) return image, label def get_training_dataset(IMG_SIZE=None, do_aug=True, cutmixup=True, mataug=False): dataset = load_dataset(TRAINING_FILENAMES, labeled=True, IMG_SIZE= IMG_SIZE) if do_aug: dataset = dataset.map(data_augment, num_parallel_calls=AUTO) if mataug: dataset = dataset.map(transform2, num_parallel_calls=AUTO) dataset = dataset.repeat() if cutmixup: dataset = dataset.batch(AUG_BATCH) dataset = dataset.map(lambda image, label: transform(image, label, DIM=IMG_SIZE[0]), num_parallel_calls=AUTO) dataset = dataset.unbatch() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset(ordered=False, IMG_SIZE=None): dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered, IMG_SIZE=IMG_SIZE) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_train_valid_datasets(IMG_SIZE=None): dataset = load_dataset(TRAINING_FILENAMES + VALIDATION_FILENAMES, labeled=True, IMG_SIZE=IMG_SIZE) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False, IMG_SIZE=None): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered, IMG_SIZE=IMG_SIZE) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." 
).search(filename ).group(1)) for filename in filenames] return np.sum(n) def load_dataset(filenames, labeled=True, ordered=False, IMG_SIZE=None): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(lambda example: read_labeled_tfrecord(example, IMG_SIZE)if labeled else read_unlabeled_tfrecord(example, IMG_SIZE), num_parallel_calls=AUTO) return dataset def plot_training(H): with plt.xkcd() : plt.figure() plt.plot(H.history["loss"], label="train_loss") plt.title("Training Loss") plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend(loc="lower left") plt.show()
Petals to the Metal - Flower Classification on TPU
12,028,553
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3 )<train_model>
GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0][0]] TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') MOREIMAGES_PATH = MOREIMAGES_PATH_SELECT[IMAGE_SIZE[0][0]] IMAGENET_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/imagenet' + MOREIMAGES_PATH + '/*.tfrec') INATURELIST_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/inaturalist' + MOREIMAGES_PATH + '/*.tfrec') OPENIMAGE_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/openimage' + MOREIMAGES_PATH + '/*.tfrec') SKIP_VALIDATION = True if SKIP_VALIDATION: TRAINING_FILENAMES = TRAINING_FILENAMES + VALIDATION_FILENAMES + IMAGENET_FILES + INATURELIST_FILES + OPENIMAGE_FILES NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH =(NUM_TRAINING_IMAGES + NUM_VALIDATION_IMAGES)// BATCH_SIZE print('Dataset: {} training images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES+NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
12,028,553
DT= DecisionTreeClassifier() DT.fit(X_train, y_train) DT.score(X_test,y_test )<import_modules>
def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift): rotation = math.pi * rotation / 180. shear = math.pi * shear / 180. c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = tf.reshape(tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3]) c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = tf.reshape(tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3]) zoom_matrix = tf.reshape(tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3]) shift_matrix = tf.reshape(tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3]) return K.dot(K.dot(rotation_matrix, shear_matrix), K.dot(zoom_matrix, shift_matrix)) def transform2(image, label, DIM = IMAGE_SIZE[0][0]): XDIM = DIM % 2 rot = 15.* tf.random.normal([1],dtype='float32') shr = 5.* tf.random.normal([1],dtype='float32') h_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. w_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. h_shift = 16.* tf.random.normal([1],dtype='float32') w_shift = 16.* tf.random.normal([1],dtype='float32') m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(m,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image,tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]),label
Petals to the Metal - Flower Classification on TPU
12,028,553
IFrame("https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html<define_search_space>
def cutmix(image, label, PROBABILITY = 1.0, DIM = None): CLASSES = 104 imgs = []; labs = [] for j in range(AUG_BATCH): P = tf.cast(tf.random.uniform([],0,1)<=PROBABILITY, tf.int32) k = tf.cast(tf.random.uniform([],0,AUG_BATCH),tf.int32) x = tf.cast(tf.random.uniform([],0,DIM),tf.int32) y = tf.cast(tf.random.uniform([],0,DIM),tf.int32) b = tf.random.uniform([],0,1) WIDTH = tf.cast(DIM * tf.math.sqrt(1-b),tf.int32)* P ya = tf.math.maximum(0,y-WIDTH//2) yb = tf.math.minimum(DIM,y+WIDTH//2) xa = tf.math.maximum(0,x-WIDTH//2) xb = tf.math.minimum(DIM,x+WIDTH//2) one = image[j,ya:yb,0:xa,:] two = image[k,ya:yb,xa:xb,:] three = image[j,ya:yb,xb:DIM,:] middle = tf.concat([one,two,three],axis=1) img = tf.concat([image[j,0:ya,:,:],middle,image[j,yb:DIM,:,:]],axis=0) imgs.append(img) a = tf.cast(WIDTH*WIDTH/DIM/DIM,tf.float32) if len(label.shape)==1: lab1 = tf.one_hot(label[j],CLASSES) lab2 = tf.one_hot(label[k],CLASSES) else: lab1 = label[j,] lab2 = label[k,] labs.append(( 1-a)*lab1 + a*lab2) image2 = tf.reshape(tf.stack(imgs),(AUG_BATCH,DIM,DIM,3)) label2 = tf.reshape(tf.stack(labs),(AUG_BATCH,CLASSES)) return image2,label2
Petals to the Metal - Flower Classification on TPU
12,028,553
parameters1 = [{'max_depth':np.linspace(1, 15, 15),'min_samples_split': np.linspace(0.1, 1.0, 5, endpoint=True)}]<train_on_grid>
def mixup(image, label, PROBABILITY = 1.0, DIM=None): CLASSES = 104 imgs = []; labs = [] for j in range(AUG_BATCH): P = tf.cast(tf.random.uniform([],0,1)<=PROBABILITY, tf.float32) k = tf.cast(tf.random.uniform([],0,AUG_BATCH),tf.int32) a = tf.random.uniform([],0,1)*P img1 = image[j,] img2 = image[k,] imgs.append(( 1-a)*img1 + a*img2) if len(label.shape)==1: lab1 = tf.one_hot(label[j],CLASSES) lab2 = tf.one_hot(label[k],CLASSES) else: lab1 = label[j,] lab2 = label[k,] labs.append(( 1-a)*lab1 + a*lab2) image2 = tf.reshape(tf.stack(imgs),(AUG_BATCH,DIM,DIM,3)) label2 = tf.reshape(tf.stack(labs),(AUG_BATCH,CLASSES)) return image2,label2
Petals to the Metal - Flower Classification on TPU
12,028,553
Grid1 = GridSearchCV(DT, parameters1, cv=4,return_train_score=True,iid=True) Grid1.fit(X_train,y_train )<find_best_params>
def transform(image,label, DIM=None): CLASSES = 104 SWITCH = 0.5 CUTMIX_PROB = 0.666 MIXUP_PROB = 0.666 image2, label2 = cutmix(image, label, CUTMIX_PROB, DIM) image3, label3 = mixup(image, label, MIXUP_PROB, DIM) imgs = []; labs = [] for j in range(AUG_BATCH): P = tf.cast(tf.random.uniform([],0,1)<=SWITCH, tf.float32) imgs.append(P*image2[j,]+(1-P)*image3[j,]) labs.append(P*label2[j,]+(1-P)*label3[j,]) image4 = tf.reshape(tf.stack(imgs),(AUG_BATCH,DIM,DIM,3)) label4 = tf.reshape(tf.stack(labs),(AUG_BATCH,CLASSES)) return image4,label4
Petals to the Metal - Flower Classification on TPU
12,028,553
scores = Grid1.cv_results_<compute_test_metric>
LR_START = 0.00001 LR_MAX = 0.00005 * strategy.num_replicas_in_sync LR_MIN = 0.00001 LR_RAMPUP_EPOCHS = 5 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY =.7 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) rng = [i for i in range(EPOCHS)] y = [lrfn(x)for x in rng] plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
12,028,553
for param, mean_train in zip(scores['params'],scores['mean_train_score']): print(f"{param} accuracy on training data is {mean_train}" )<find_best_params>
def get_efficientnet() : with strategy.scope() : efficient = efn.EfficientNetB5( input_shape =(IMAGE_SIZE[0][0], IMAGE_SIZE[0][1], 3), weights = 'noisy-student', include_top = False ) efficient.trainable = True model = tf.keras.Sequential([ efficient, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(104, activation = 'softmax',dtype = 'float32') ]) model.compile(optimizer='adam', loss = 'categorical_crossentropy', metrics=['categorical_accuracy']) return model
Petals to the Metal - Flower Classification on TPU
12,028,553
Grid1.best_estimator_<choose_model_class>
%%time if RESUME_TRAINING: with strategy.scope() : model1 = load_model('../input/train-efficientnet/Effnet_save.h5') else: model1 = get_efficientnet() Checkpoint=tf.keras.callbacks.ModelCheckpoint(f"Effnet_30ep.h5", verbose=1, mode='max') train_history0 = model1.fit( get_training_dataset(IMAGE_SIZE[0], mataug=True, cutmixup=True, do_aug=True), steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, initial_epoch=20, callbacks=[lr_callback, Checkpoint], )
Petals to the Metal - Flower Classification on TPU
12,028,553
XGB = XGBClassifier()<define_search_space>
def predict(model, img_size, n_iter): probs = [] data = get_test_dataset(ordered=True, IMG_SIZE=img_size) for i in range(n_iter): test_images_ds = data.map(lambda image, idnum: image) probs.append(model.predict(test_images_ds,verbose=0)) return probs
Petals to the Metal - Flower Classification on TPU
12,028,553
parameters3 =[{"learning_rate": [0.05, 0.10, 0.15, 0.20] ,"max_depth": [ 3, 4, 5, 6], "min_child_weight": [3,5,7],"gamma": [ 0.0, 0.1, 0.2 ,0.3],"colsample_bytree" : [ 0.4, 0.5]}]<train_on_grid>
print('Calculating predictions...') probs1 = np.mean(predict(model1, IMAGE_SIZE[0], TTA_NUM), axis=0) predictions = np.argmax(probs1, axis=-1) print('Generating submission file...') test_ds = get_test_dataset(ordered=True, IMG_SIZE=IMAGE_SIZE[0]) test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='')
Petals to the Metal - Flower Classification on TPU
10,949,329
Grid1 = GridSearchCV(XGB, parameters3, cv=2,return_train_score=True) Grid1.fit(X_train,y_train )<find_best_params>
def seed_everything(seed=0): np.random.seed(seed) tf.random.set_seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) os.environ['TF_DETERMINISTIC_OPS'] = '1' seed = 0 seed_everything(seed) warnings.filterwarnings("ignore" )
Petals to the Metal - Flower Classification on TPU
10,949,329
scores = Grid1.cv_results_<find_best_params>
try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Running on TPU ', tpu.master()) except ValueError: tpu = None if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) else: strategy = tf.distribute.get_strategy() print("REPLICAS: ", strategy.num_replicas_in_sync )
Petals to the Metal - Flower Classification on TPU
10,949,329
Grid1.best_estimator_<choose_model_class>
BATCH_SIZE = 32 * strategy.num_replicas_in_sync WARMUP_EPOCHS = 3 WARMUP_LEARNING_RATE = 1e-4 * strategy.num_replicas_in_sync EPOCHS = 20 LEARNING_RATE = 3e-5 * strategy.num_replicas_in_sync HEIGHT = 512 WIDTH = 512 CHANNELS = 3 N_CLASSES = 104 ES_PATIENCE = 6 RLROP_PATIENCE = 3 DECAY_DROP = 0.3 model_path = 'DenseNet201_%sx%s.h5' %(HEIGHT, WIDTH) GCS_PATH = KaggleDatasets().get_gcs_path() + '/tfrecords-jpeg-%sx%s' %(HEIGHT, WIDTH) TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') CLASSES = [ 'pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose']
Petals to the Metal - Flower Classification on TPU
10,949,329
XGB = XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bytree=0.5, gamma=0.0, learning_rate=0.1, max_delta_step=0, max_depth=3, min_child_weight=5, missing=None, n_estimators=100, n_jobs=1, nthread=None, objective='binary:logistic', random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None, silent=True, subsample=1 )<train_model>
np.set_printoptions(threshold=15, linewidth=80) def batch_to_numpy_images_and_labels(data): images, labels = data numpy_images = images.numpy() numpy_labels = labels.numpy() if numpy_labels.dtype == object: numpy_labels = [None for _ in enumerate(numpy_images)] return numpy_images, numpy_labels def title_from_label_and_target(label, correct_label): if correct_label is None: return CLASSES[label], True correct =(label == correct_label) return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '', CLASSES[correct_label] if not correct else ''), correct def display_one_flower(image, title, subplot, red=False, titlesize=16): plt.subplot(*subplot) plt.axis('off') plt.imshow(image) if len(title)> 0: plt.title(title, fontsize=int(titlesize)if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5)) return(subplot[0], subplot[1], subplot[2]+1) def display_batch_of_images(databatch, predictions=None): images, labels = batch_to_numpy_images_and_labels(databatch) if labels is None: labels = [None for _ in enumerate(images)] rows = int(math.sqrt(len(images))) cols = len(images)//rows FIGSIZE = 13.0 SPACING = 0.1 subplot=(rows,cols,1) if rows < cols: plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows)) else: plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE)) for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) : title = '' if label is None else CLASSES[label] correct = True if predictions is not None: title, correct = title_from_label_and_target(predictions[i], label) dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3 subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize) plt.tight_layout() if label is None and predictions is None: plt.subplots_adjust(wspace=0, hspace=0) else: plt.subplots_adjust(wspace=SPACING, hspace=SPACING) plt.show() def dataset_to_numpy_util(dataset, N): dataset = dataset.unbatch().batch(N) for images, labels in dataset: numpy_images = images.numpy() numpy_labels = labels.numpy() break; return numpy_images, numpy_labels def title_from_label_and_target(label, correct_label): label = np.argmax(label, axis=-1) correct =(label == correct_label) return "{} [{}{}{}]".format(CLASSES[label], str(correct), ', shoud be ' if not correct else '', CLASSES[correct_label] if not correct else ''), correct def display_one_flower_eval(image, title, subplot, red=False): plt.subplot(subplot) plt.axis('off') plt.imshow(image) plt.title(title, fontsize=14, color='red' if red else 'black') return subplot+1 def display_9_images_with_predictions(images, predictions, labels): subplot=331 plt.figure(figsize=(13,13)) for i, image in enumerate(images): title, correct = title_from_label_and_target(predictions[i], labels[i]) subplot = display_one_flower_eval(image, title, subplot, not correct) if i >= 8: break; plt.tight_layout() plt.subplots_adjust(wspace=0.1, hspace=0.1) plt.show()
Petals to the Metal - Flower Classification on TPU
10,949,329
XGB.fit(X_train, y_train )<compute_test_metric>
AUTO = tf.data.experimental.AUTOTUNE def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [HEIGHT, WIDTH, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def data_augment(image, label): image = tf.image.random_flip_left_right(image, seed=seed) image = tf.image.random_flip_up_down(image, seed=seed) image = tf.image.random_saturation(image, lower=0, upper=2, seed=seed) image = tf.image.random_crop(image, size=[int(HEIGHT*.8), int(WIDTH*.8), CHANNELS], seed=seed) return image, label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled=True) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_training_dataset_preview(ordered=True): dataset = load_dataset(TRAINING_FILENAMES, labeled=True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset(ordered=False): dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n)
Petals to the Metal - Flower Classification on TPU
10,949,329
XGB.score(X_test,y_test )<load_from_csv>
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) train_dataset = get_training_dataset_preview(ordered=True) y_train = next(iter(train_dataset.unbatch().map(lambda image, label: label ).batch(NUM_TRAINING_IMAGES)) ).numpy() print('Number of training images %d' % NUM_TRAINING_IMAGES )
Petals to the Metal - Flower Classification on TPU
10,949,329
<load_from_csv>
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) valid_dataset = get_validation_dataset(ordered=True) y_valid = next(iter(valid_dataset.unbatch().map(lambda image, label: label ).batch(NUM_VALIDATION_IMAGES)) ).numpy() print('Number of validation images %d' % NUM_VALIDATION_IMAGES)
Petals to the Metal - Flower Classification on TPU
10,949,329
df = pd.read_csv('../input/train.csv' , header = 0,dtype={'Age': np.float64}) test = pd.read_csv('../input/test.csv' , header = 0,dtype={'Age': np.float64} )<feature_engineering>
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print('Number of test images %d' % NUM_TEST_IMAGES) test_dataset = get_test_dataset(ordered=True )
Petals to the Metal - Flower Classification on TPU
10,949,329
for data in [df,test]: data['isAlone']=1 data['Family_No'] = data['Parch'] + data['SibSp'] + 1 data['isAlone'].loc[data['Family_No']>1]=0 data['Age'].fillna(data['Age'].mean() , inplace=True) data['Embarked'].fillna(data['Embarked'].mode().iloc[0], inplace=True) data['Fare'] = df.groupby('Pclass')['Fare'].apply(lambda x: x.fillna(x.mean()))<feature_engineering>
display_batch_of_images(next(iter(train_dataset.unbatch().batch(15))))
Petals to the Metal - Flower Classification on TPU
10,949,329
<feature_engineering>
display_batch_of_images(next(iter(valid_dataset.unbatch().batch(15))))
Petals to the Metal - Flower Classification on TPU
10,949,329
<categorify>
display_batch_of_images(next(iter(test_dataset.unbatch().batch(15))))
Petals to the Metal - Flower Classification on TPU
10,949,329
LE = LabelEncoder() for data in [test,df]: bins = [-1,0,5,10, 15, 25, 50,100] labels = ['Unknown','Baby','Child','Young','Teen','Adult','Old'] data['Age'] = pd.cut(data['Age'], bins=bins,labels=labels) data['Age'] = data['Age'].astype(str) test['Age'] = LE.fit_transform(test['Age']) df['Age'] = LE.fit_transform(df['Age']) <string_transform>
def create_model(input_shape, N_CLASSES): base_model = applications.DenseNet201(weights='imagenet', include_top=False, input_shape=input_shape) base_model.trainable = False model = tf.keras.Sequential([ base_model, layers.GlobalAveragePooling2D() , layers.Dense(N_CLASSES, activation='softmax') ]) return model
Petals to the Metal - Flower Classification on TPU
10,949,329
for data in [test,df]: for i,k in enumerate(data['Name']): x=k.split(",")[1] data['Name'].replace(data['Name'][i],x.split(" ")[1],inplace=True) <count_values>
with strategy.scope() : model = create_model(( None, None, CHANNELS), N_CLASSES) metric_list = ['sparse_categorical_accuracy'] optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE) model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=metric_list) model.summary()
Petals to the Metal - Flower Classification on TPU
10,949,329
df['Name'].value_counts()<feature_engineering>
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE warmup_history = model.fit(x=get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, validation_data=get_validation_dataset() , epochs=WARMUP_EPOCHS, verbose=2 ).history
Petals to the Metal - Flower Classification on TPU
10,949,329
all_data = [df,test] Known = ['Mr.','Miss.','Mrs.','Master.','Ms.','Mlle.','Mme.'] for k in(all_data): for i,data in enumerate(k['Name']): if(data)in Known: if(data=='Mlle.'): k['Name'] = k['Name'].replace('Mlle.','Miss.') elif(data=='Ms.'): k['Name'] = k['Name'].replace('Ms.','Miss.') elif(data=='Mme.'): k['Name'] = k['Name'].replace('Mme.','Mrs.') else: continue else: k['Name'] = k['Name'].replace(data,'not_known') <count_values>
LR_START = 0.00000001 LR_MIN = 0.000001 LR_MAX = LEARNING_RATE LR_RAMPUP_EPOCHS = 3 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY =.8 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN return lr rng = [i for i in range(EPOCHS)] y = [lrfn(x)for x in rng] sns.set(style="whitegrid") fig, ax = plt.subplots(figsize=(20, 6)) plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
10,949,329
df['Name'][df['Survived']==1].value_counts() /df['Name'].value_counts()<categorify>
for layer in model.layers: layer.trainable = True checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', save_best_only=True) es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1) lr_callback = LearningRateScheduler(lrfn, verbose=1) callback_list = [checkpoint, es, lr_callback] optimizer = optimizers.Adam(lr=LEARNING_RATE) model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=metric_list) model.summary()
Petals to the Metal - Flower Classification on TPU
10,949,329
test=pd.get_dummies(test,columns=['Embarked','Name']) df=pd.get_dummies(df,columns=['Embarked','Name']) test['Sex'] = LE.fit_transform(test['Sex']) df['Sex'] = LE.fit_transform(df['Sex'] )<drop_column>
history = model.fit(x=get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, validation_data=get_validation_dataset() , callbacks=callback_list, epochs=EPOCHS, verbose=2 ).history
Petals to the Metal - Flower Classification on TPU
10,949,329
for data in [df,test]: data.drop(columns=['Ticket','Cabin','SibSp','Parch','PassengerId'], inplace=True, axis=1 )<drop_column>
x_train = train_dataset.map(lambda image, label: image) train_preds = model.predict(x_train) train_preds = np.argmax(train_preds, axis=-1) print(classification_report(y_train, train_preds, target_names=CLASSES))
Petals to the Metal - Flower Classification on TPU
10,949,329
df.drop(columns=['Embarked_Q'],axis=1,inplace=True) test.drop(columns=['Embarked_Q'],axis=1,inplace=True )<normalization>
x_valid = valid_dataset.map(lambda image, label: image) valid_preds = model.predict(x_valid) valid_preds = np.argmax(valid_preds, axis=-1) print(classification_report(y_valid, valid_preds, target_names=CLASSES))
Petals to the Metal - Flower Classification on TPU
10,949,329
for data in [df,test]: scale = StandardScaler().fit(data[['Fare']]) data[['Fare']] = scale.transform(data[['Fare']]) <prepare_x_and_y>
x_valid_samp, y_valid_samp = dataset_to_numpy_util(valid_dataset, 9) valid_samp_preds = model.predict(x_valid_samp, batch_size=9) display_9_images_with_predictions(x_valid_samp, valid_samp_preds, y_valid_samp )
Petals to the Metal - Flower Classification on TPU
10,949,329
y=df['Survived'] X=df.drop(columns=['Survived'],axis=1) <split>
x_test = test_dataset.map(lambda image, idnum: image) test_preds = model.predict(x_test) test_preds = np.argmax(test_preds, axis=-1)
Petals to the Metal - Flower Classification on TPU
10,949,329
<import_modules><EOS>
test_ids_ds = test_dataset.map(lambda image, idnum: idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') submission = pd.DataFrame(test_ids, columns=['id']) submission['label'] = test_preds submission.to_csv('submission.csv', index=False) display(submission.head(10))
Petals to the Metal - Flower Classification on TPU
11,634,419
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<define_search_space>
print("TF version ", tf.__version__) AUTO = tf.data.experimental.AUTOTUNE
Petals to the Metal - Flower Classification on TPU
11,634,419
parameters_DC = [{'max_depth':[50,100],'min_samples_split': [0.1,0.2,0.5,0.8,0.9]}] paramaters_RF = [{'max_depth':[2,5,10,15,20,50],'min_samples_split': [0.1,0.2,0.5,0.8],'n_estimators':[100]}] parameters_XGB =[{"learning_rate": [0.2,0.5,0.8,0.9] ,"max_depth": [1, 3,5, 10], "min_child_weight": [3,5,7,10,20],"gamma": [0.1, 0.2 ,0.4,0.7],'reg_alpha':[0, 0.001, 0.005, 0.01, 0.05],'n_estimator':[100,1000,2000,4000]}] parameters_GBC =[{"learning_rate": [0.5, 0.25, 0.1, 0.05, 0.01] ,"max_depth": [ 3, 4, 5, 6], "min_samples_leaf" :[50,100,150],"n_estimators" : [16, 32, 64, 128]}] parameters_ADA =[{'algorithm':['SAMME'],"base_estimator__criterion" : ["gini"],"base_estimator__splitter" : ["best", "random"],"n_estimators": [500,1000],"learning_rate": [ 0.01, 0.1, 1.0]}] <train_on_grid>
def seed_everything(seed=0): np.random.seed(seed) tf.random.set_seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) os.environ['TF_DETERMINISTIC_OPS'] = '1' seed = 0 seed_everything(seed) warnings.filterwarnings("ignore" )
Petals to the Metal - Flower Classification on TPU
11,634,419
DC = DecisionTreeClassifier() Grid_DC = GridSearchCV(DC, parameters_DC, cv=4,scoring="accuracy", n_jobs= 4,return_train_score=True, verbose = 1) Grid_DC.fit(X_train,y_train) DC_best = Grid_DC.best_estimator_ Grid_DC.best_score_<train_on_grid>
try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print("Running on TPU ", tpu.master()) except ValueError: tpu = None if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) else: strategy = tf.distribute.get_strategy() print("REPLICAS : ", strategy.num_replicas_in_sync )
Petals to the Metal - Flower Classification on TPU
11,634,419
RF = RandomForestClassifier() Grid_RF = GridSearchCV(RF, paramaters_RF, cv=4,scoring="accuracy", n_jobs= 4,return_train_score=True, verbose = 1) Grid_RF.fit(X_train,y_train) RF_best = Grid_RF.best_estimator_ Grid_RF.best_score_ <train_on_grid>
BATCH_SIZE = 16 * strategy.num_replicas_in_sync WARMUP_EPOCHS = 3 WARMUP_LEARNING_RATE = 1e-4 * strategy.num_replicas_in_sync EPOCHS = 20 LEARNING_RATE = 3e-5 * strategy.num_replicas_in_sync HEIGHT = 512 WIDTH = 512 CHANNELS = 3 N_CLASSES = 104 ES_PATIENCE = 6 RLROP_PATIENCE = 3 DECAY_DROP = 0.3
Petals to the Metal - Flower Classification on TPU
11,634,419
XGB = XGBClassifier() Grid_XGB = GridSearchCV(XGB, parameters_XGB, cv=4,scoring="accuracy", n_jobs= 4,return_train_score=True, verbose = 1) Grid_XGB.fit(X_train,y_train) XGB_best = Grid_XGB.best_estimator_ Grid_XGB.best_score_<train_on_grid>
model_path = 'DenseNet201_%sx%s.h5' %(HEIGHT, WIDTH) GCS_PATH = KaggleDatasets().get_gcs_path() + '/tfrecords-jpeg-%sx%s' %(HEIGHT, WIDTH) TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec' )
Petals to the Metal - Flower Classification on TPU
11,634,419
GBC = GradientBoostingClassifier() Grid_GBC = GridSearchCV(GBC,parameters_GBC, cv=4, scoring="accuracy", n_jobs= 4, return_train_score=True,verbose = 1) Grid_GBC.fit(X_train,y_train) GBC_best = Grid_GBC.best_estimator_ Grid_GBC.best_score_<train_on_grid>
CLASSES = [ 'pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose' ]
Petals to the Metal - Flower Classification on TPU
11,634,419
ADA = AdaBoostClassifier(DC_best) Grid_ADA = GridSearchCV(ADA,parameters_ADA, cv=4, scoring="accuracy", n_jobs= 4, return_train_score=True,verbose = 1) Grid_ADA.fit(X_train,y_train) ADA_best = Grid_ADA.best_estimator_ Grid_ADA.best_score_<define_search_space>
AUTO = tf.data.experimental.AUTOTUNE def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels = 3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [HEIGHT, WIDTH, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { 'image' : tf.io.FixedLenFeature([], tf.string), 'class' : tf.io.FixedLenFeature([], tf.int64) } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { 'image' : tf.io.FixedLenFeature([], tf.string), 'id' : tf.io.FixedLenFeature([], tf.string) } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum
Petals to the Metal - Flower Classification on TPU
11,634,419
parameters_SVM = {'C': [0.1, 1, 10,50,100], 'gamma' : [0.001, 0.01, 0.1, 1,10]} <train_on_grid>
def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset
Petals to the Metal - Flower Classification on TPU
11,634,419
SVMC =SVC(probability=True) Grid_SVC = GridSearchCV(SVMC, parameters_SVM, scoring="accuracy", return_train_score=True,verbose = 1,cv=2) Grid_SVC.fit(X_train, y_train) SVM_best = Grid_SVC.best_estimator_ Grid_SVC.best_score_<train_model>
def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled = True) dataset = dataset.map(data_augment, num_parallel_calls = AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset
Petals to the Metal - Flower Classification on TPU
11,634,419
voting = VotingClassifier(estimators=[('ADA', ADA_best),('DC', DC_best),('RF', RF_best),('GBC',GBC_best),('XGB',XGB_best),('SVC',SVM_best)],weights=[3,0,0,1,3,3], voting='hard', n_jobs=4) voting_result = voting.fit(X_train, y_train )<compute_test_metric>
def get_training_dataset_preview(ordered=True): dataset = load_dataset(TRAINING_FILENAMES, labeled=True, ordered = ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset
Petals to the Metal - Flower Classification on TPU
11,634,419
voting.score(X_test,y_test )<predict_on_test>
def get_validation_dataset(ordered=True): dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset
Petals to the Metal - Flower Classification on TPU
11,634,419
pred = voting.predict(test )<load_from_csv>
def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset
Petals to the Metal - Flower Classification on TPU
11,634,419
test_2 = pd.read_csv('../input/test.csv' , header = 0, dtype={'Age': np.float64} )<create_dataframe>
def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n )
Petals to the Metal - Flower Classification on TPU
11,634,419
result = pd.DataFrame(pred,columns=['Survived']) submission13 = result.join(test_2['PassengerId'] ).iloc[:,::-1]<save_to_csv>
np.set_printoptions(threshold=15, linewidth=80) def batch_to_numpy_images_and_labels(data): images, labels = data numpy_images = images.numpy() numpy_labels = labels.numpy() if numpy_labels.dtype == object: numpy_labels = [None for _ in enumerate(numpy_images)] return numpy_images, numpy_labels def title_from_label_and_target(label, correct_label): if correct_label is None: return CLASSES[label], True correct =(label == correct_label) return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '', CLASSES[correct_label] if not correct else ''), correct def display_one_flower(image, title, subplot, red=False, titlesize=16): plt.subplot(*subplot) plt.axis('off') plt.imshow(image) if len(title)> 0: plt.title(title, fontsize=int(titlesize)if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5)) return(subplot[0], subplot[1], subplot[2]+1) def display_batch_of_images(databatch, predictions=None): images, labels = batch_to_numpy_images_and_labels(databatch) if labels is None: labels = [None for _ in enumerate(images)] rows = int(math.sqrt(len(images))) cols = len(images)//rows FIGSIZE = 13.0 SPACING = 0.1 subplot=(rows,cols,1) if rows < cols: plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows)) else: plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE)) for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) : title = '' if label is None else CLASSES[label] correct = True if predictions is not None: title, correct = title_from_label_and_target(predictions[i], label) dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3 subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize) plt.tight_layout() if label is None and predictions is None: plt.subplots_adjust(wspace=0, hspace=0) else: plt.subplots_adjust(wspace=SPACING, hspace=SPACING) plt.show() def dataset_to_numpy_util(dataset, N): dataset = dataset.unbatch().batch(N) for images, labels in dataset: numpy_images = images.numpy() numpy_labels = labels.numpy() break; return numpy_images, numpy_labels def title_from_label_and_target(label, correct_label): label = np.argmax(label, axis=-1) correct =(label == correct_label) return "{} [{}{}{}]".format(CLASSES[label], str(correct), ', shoud be ' if not correct else '', CLASSES[correct_label] if not correct else ''), correct def display_one_flower_eval(image, title, subplot, red=False): plt.subplot(subplot) plt.axis('off') plt.imshow(image) plt.title(title, fontsize=14, color='red' if red else 'black') return subplot+1 def display_9_images_with_predictions(images, predictions, labels): subplot=331 plt.figure(figsize=(13,13)) for i, image in enumerate(images): title, correct = title_from_label_and_target(predictions[i], labels[i]) subplot = display_one_flower_eval(image, title, subplot, not correct) if i >= 8: break; plt.tight_layout() plt.subplots_adjust(wspace=0.1, hspace=0.1) plt.show()
Petals to the Metal - Flower Classification on TPU
11,634,419
submission13.to_csv('submission13.csv', index=False )<load_from_csv>
AUTO = tf.data.experimental.AUTOTUNE def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [HEIGHT, WIDTH, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def data_augment(image, label): image = tf.image.random_flip_left_right(image, seed=seed) image = tf.image.random_flip_up_down(image, seed=seed) image = tf.image.random_saturation(image, lower=0, upper=2, seed=seed) image = tf.image.random_crop(image, size=[int(HEIGHT*.8), int(WIDTH*.8), CHANNELS], seed=seed) return image, label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled=True) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_training_dataset_preview(ordered=True): dataset = load_dataset(TRAINING_FILENAMES, labeled=True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset(ordered=False): dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n )
Petals to the Metal - Flower Classification on TPU
11,634,419
warnings.filterwarnings("ignore") train_file = '../input/train.csv' data = pd.read_csv(train_file, delimiter=',') test_file = '../input/test.csv' tdata = pd.read_csv(test_file, delimiter=',') train_data = data[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Cabin', 'Fare', 'Survived', 'Name', 'Ticket']] test_data = tdata[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Cabin', 'Fare', 'Name', 'Ticket']] def update_name(df): df['Lname'] = df.Name.apply(lambda x: x.split(',')[0]) return df def update_sex(df): df.Sex = df.Sex.str[0] return df def update_cabin(df): df.Cabin = df.Cabin.fillna('N') df.Cabin = df.Cabin.str[0] return df def update_fare(df): df.Fare = df.Fare.fillna(0) bins =(-1, 0, 8, 15, 31, 60, 513) bin_names = [0, 1, 2, 3, 4, 5] df.Fare = pd.cut(df.Fare, bins, labels=bin_names) return df def update_age(df): df.Age = df.Age.fillna(0) bins =(-1, 0, 5, 12, 20, 28, 38, 60, 90) bin_names = [0, 1, 2, 3, 4, 5, 6, 7] df.Age = pd.cut(df.Age, bins, labels=bin_names) return df def update_columns(df): df = update_name(df) df = update_sex(df) df = update_cabin(df) df = update_fare(df) df = update_age(df) df = df.drop(['Name'], axis=1) return df train_data = update_columns(train_data) test_data = update_columns(test_data) def encode_features(df): features = ['Sex', 'Ticket', 'Cabin', 'Lname'] df_combined = df[features] for feature in features: le = preprocessing.LabelEncoder() le = le.fit(df_combined[feature]) df[feature] = le.transform(df[feature]) return df train_data = encode_features(train_data) test_data = encode_features(test_data) X_all = train_data.drop(['Survived'], axis=1) y_all = train_data['Survived']<save_to_csv>
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) train_dataset = get_training_dataset_preview(ordered=True) y_train = next(iter(train_dataset.unbatch().map(lambda image, label: label ).batch(NUM_TRAINING_IMAGES)) ).numpy() print('Number of training images %d' % NUM_TRAINING_IMAGES )
Petals to the Metal - Flower Classification on TPU
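The cell above collects every training label by unbatching the pipeline, keeping only the label component, and re-batching into a single batch the size of the whole set. The same trick on a tiny dummy dataset (toy tensors, not the flower data):
import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices((tf.zeros([10, 2, 2, 3]), tf.range(10))).batch(4)
labels = next(iter(ds.unbatch().map(lambda image, label: label).batch(10))).numpy()
print(labels)  # [0 1 2 3 4 5 6 7 8 9]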
11,634,419
clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=100) clf.fit(X_all, y_all) test_predictions = clf.predict(test_data) preds = {'PassengerId': tdata['PassengerId'], 'Survived': test_predictions} df = pd.DataFrame(data=preds) df.to_csv('preds.csv', sep=',', index=False )<import_modules>
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) valid_dataset = get_validation_dataset(ordered=True) y_valid = next(iter(valid_dataset.unbatch().map(lambda image, label: label ).batch(NUM_VALIDATION_IMAGES)) ).numpy() print('Number of validation images %d' % NUM_VALIDATION_IMAGES )
Petals to the Metal - Flower Classification on TPU
11,634,419
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.kernel_ridge import KernelRidge from sklearn.pipeline import make_pipeline from sklearn.preprocessing import RobustScaler from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone from sklearn.model_selection import KFold, cross_val_score, train_test_split from sklearn.metrics import mean_squared_error import xgboost as xgb import lightgbm as lgb<load_from_csv>
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print('Number of test images %d' % NUM_TEST_IMAGES) test_dataset = get_test_dataset(ordered=True )
Petals to the Metal - Flower Classification on TPU
11,634,419
dataset=pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv' )<load_from_csv>
display_batch_of_images(next(iter(train_dataset.unbatch().batch(20))))
Petals to the Metal - Flower Classification on TPU
11,634,419
test=pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv' )<count_missing_values>
display_batch_of_images(next(iter(valid_dataset.unbatch().batch(20))))
Petals to the Metal - Flower Classification on TPU
11,634,419
dataset['GarageYrBlt'].isna().sum()<count_missing_values>
display_batch_of_images(next(iter(test_dataset.unbatch().batch(20))))
Petals to the Metal - Flower Classification on TPU
11,634,419
dataset['TotRmsAbvGrd'].isna().sum()<count_missing_values>
def create_model(input_shape, N_CLASSES): base_model = applications.DenseNet201(weights = 'imagenet', include_top = False, input_shape = input_shape) base_model.trainable = False model = tf.keras.Sequential([ base_model, layers.GlobalAveragePooling2D() , layers.Dense(N_CLASSES, activation='softmax') ]) return model
Petals to the Metal - Flower Classification on TPU
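The model cell above freezes an ImageNet-pretrained DenseNet201 and adds a pooled softmax head. A shape-only sketch of that head follows; weights=None, the fixed 224x224 input, and N_CLASSES=104 are assumptions made here so the sketch builds offline, not values read from the cell itself:
import tensorflow as tf
from tensorflow.keras import applications, layers

base = applications.DenseNet201(weights=None, include_top=False, input_shape=(224, 224, 3))
base.trainable = False  # frozen backbone, as in the cell above
model = tf.keras.Sequential([base, layers.GlobalAveragePooling2D(), layers.Dense(104, activation='softmax')])
print(model.output_shape)  # (None, 104)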
11,634,419
dataset['GarageCars'].isna().sum()<drop_column>
with strategy.scope() : model = create_model(( None, None, CHANNELS), N_CLASSES) metric_list = ['sparse_categorical_accuracy'] optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE) model.compile(optimizer=optimizer, loss = 'sparse_categorical_crossentropy', metrics = metric_list) model.summary()
Petals to the Metal - Flower Classification on TPU
11,634,419
del dataset['GarageYrBlt'] del test['GarageYrBlt'] <import_modules>
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE warmup_history = model.fit(x=get_training_dataset() , steps_per_epoch=STEPS_PER_EPOCH, validation_data=get_validation_dataset() , epochs=WARMUP_EPOCHS, verbose=2 ).history
Petals to the Metal - Flower Classification on TPU
11,634,419
import scipy.stats<set_options>
LR_START = 0.00000001 LR_MIN = 0.000001 LR_MAX = LEARNING_RATE LR_RAMPUP_EPOCHS = 3 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY =.8 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN return lr rng = [i for i in range(EPOCHS)] y = [lrfn(x)for x in rng] sns.set(style='whitegrid') fig, ax = plt.subplots(figsize =(20, 6)) plt.plot(rng, y) print("Learning rate schedule : {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
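The lrfn schedule above ramps up linearly for 3 epochs and then decays exponentially toward LR_MIN. A worked illustration of the first few epochs, assuming LEARNING_RATE = 3e-4 (that constant is defined elsewhere in the notebook; 3e-4 is only a guess here):
LR_START, LR_MIN, LR_MAX = 1e-8, 1e-6, 3e-4  # LR_MAX assumed
LR_RAMPUP_EPOCHS, LR_SUSTAIN_EPOCHS, LR_EXP_DECAY = 3, 0, 0.8

def lrfn(epoch):
    if epoch < LR_RAMPUP_EPOCHS:
        return (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    if epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
        return LR_MAX
    return (LR_MAX - LR_MIN) * LR_EXP_DECAY ** (epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN

for e in range(6):
    print(e, lrfn(e))  # 1e-08, ~1.0e-04, ~2.0e-04, 3.0e-04, ~2.4e-04, ~1.9e-04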
11,634,419
class column_similarity(object): def __init__(self, data , filter_col): num_columns=[] cat_columns=[] self.data=data.fillna('') for col in data.columns: if self._check_numberic(data[col]): num_columns.append(col) else: cat_columns.append(col) self.cat_columns=np.array(cat_columns) self.num_columns=np.array(num_columns) self.cat_dist=None self.num_dist=None self.dist_reset() def dist_reset(self): if len(self.cat_columns)>0: self.cat_dist=np.zeros(( len(self.cat_columns),len(self.cat_columns))) if len(self.num_columns)>0: self.num_dist=np.zeros(( len(self.num_columns),len(self.num_columns))) def _check_numberic(self,data): return pd.to_numeric(data[data.notna() ], errors='coerce' ).notnull().all() def filtered_similarity(self,delimiter,threshold_chi,threshold_cor, filt=None): self.dist_reset() Cat_dist_frame=pd.DataFrame() Num_dist_frame=pd.DataFrame() chi_column1=[] chi_column2=[] chi_usecase1=[] chi_usecase2=[] chi_score=[] if self.cat_columns is not None: for i in range(len(self.cat_columns)) : for j in range(i,len(self.cat_columns)) : contingency_table=None contingency_table=pd.crosstab(self.data[self.cat_columns[i]],self.data[self.cat_columns[j]]) chi2, p, dof, ex=scipy.stats.chi2_contingency(contingency_table) self.cat_dist[i][j]=chi2 self.cat_dist[j][i]=chi2 if self.cat_dist[i][i]>0: self.cat_dist[i]= self.cat_dist[i]/self.cat_dist[i][i] selected_elems=list(np.where(self.cat_dist[i]>=threshold_chi)[0]) if len(selected_elems)>0: selected_elems.remove(i) chi_column1=chi_column1+[self.cat_columns[i]]*len(selected_elems) chi_column2=chi_column2+list(self.cat_columns[selected_elems]) chi_score=chi_score+list(self.cat_dist[i][selected_elems]) Cat_dist_frame['Column1']=[col.split(delimiter)[1] if delimiter in col else col for col in chi_column1] Cat_dist_frame['Column2']=[col.split(delimiter)[1] if delimiter in col else col for col in chi_column2] Cat_dist_frame['Score']=chi_score return Cat_dist_frame.copy() def similarity(self,delimiter='_',threshold_chi=0.0,threshold_cor=0.0): global_Cat_dist_frame=self.filtered_similarity(delimiter,threshold_chi,threshold_cor) return global_Cat_dist_frame<compute_test_metric>
for layer in model.layers: layer.trainable = True checkpoint = ModelCheckpoint(model_path, monitor = 'val_loss', mode='min', save_best_only=True) es = EarlyStopping(monitor = 'val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights = True, verbose=1) lr_callback = LearningRateScheduler(lrfn, verbose=1) callback_list = [checkpoint, es, lr_callback] optimizer = optimizers.Adam(lr=LEARNING_RATE) model.compile(optimizer = optimizer, loss = 'sparse_categorical_crossentropy', metrics = metric_list) model.summary()
Petals to the Metal - Flower Classification on TPU
11,634,419
sm=column_similarity(dataset,dataset.columns )<compute_test_metric>
history = model.fit( x = get_training_dataset() , steps_per_epoch = STEPS_PER_EPOCH, validation_data = get_validation_dataset() , callbacks = callback_list, epochs = EPOCHS, verbose = 2 ).history
Petals to the Metal - Flower Classification on TPU
11,634,419
similarity=sm.similarity()<drop_column>
x_train = train_dataset.map(lambda image, label : image) train_preds = model.predict(x_train) train_preds = np.argmax(train_preds, axis=-1) print(classification_report(y_train, train_preds, target_names=CLASSES))
Petals to the Metal - Flower Classification on TPU
11,634,419
<count_values>
x_valid = valid_dataset.map(lambda image, label : image) valid_preds = model.predict(x_valid) valid_preds = np.argmax(valid_preds, axis=-1) print(classification_report(y_valid, valid_preds, target_names=CLASSES))
Petals to the Metal - Flower Classification on TPU
11,634,419
dataset['GarageCond'].value_counts()<data_type_conversions>
x_train_samp, y_train_samp = dataset_to_numpy_util(train_dataset, 9) train_samp_preds = model.predict(x_train_samp, batch_size=9) display_9_images_with_predictions(x_train_samp, train_samp_preds, y_train_samp )
Petals to the Metal - Flower Classification on TPU
11,634,419
dataset['GarageFinish']=dataset['GarageFinish'].fillna('Unf') dataset['GarageCond']=dataset['GarageCond'].fillna('TA') test['GarageFinish']=test['GarageFinish'].fillna('Unf') test['GarageCond']=test['GarageCond'].fillna('TA' )
x_valid_samp, y_valid_samp = dataset_to_numpy_util(valid_dataset, 9) valid_samp_preds = model.predict(x_valid_samp, batch_size=9) display_9_images_with_predictions(x_valid_samp, valid_samp_preds, y_valid_samp )
Petals to the Metal - Flower Classification on TPU
11,634,419
cols_null=dataset.isna().sum(axis=0) cols_null=cols_null[cols_null>0]<sort_values>
x_test = test_dataset.map(lambda image, idnum:image) test_preds = model.predict(x_test) test_preds = np.argmax(test_preds, axis=-1 )
Petals to the Metal - Flower Classification on TPU
11,634,419
<drop_column><EOS>
test_ids_ds = test_dataset.map(lambda image, idnum:idnum ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') submission = pd.DataFrame(test_ids, columns=['id']) submission['label'] = test_preds submission.to_csv('submission.csv', index=False) display(submission.head(10))
Petals to the Metal - Flower Classification on TPU
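The submission cell above writes a two-column id/label CSV. A minimal sketch of the expected file layout with placeholder ids (the real ids come from the test TFRecords):
import pandas as pd

sub = pd.DataFrame({'id': ['00a1b2c3d', '00d4e5f6a'], 'label': [67, 103]})  # placeholder rows
sub.to_csv('submission.csv', index=False)
print(sub.to_string(index=False))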
11,220,203
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<groupby>
print("Currently using Tensorflow version " + tf.__version__ )
Petals to the Metal - Flower Classification on TPU
11,220,203
dataset[['FireplaceQu','SalePrice']].fillna('O' ).groupby('FireplaceQu' ).agg(['mean','min','max','count'] )<drop_column>
DEVICE = 'TPU' if DEVICE == "TPU": print("connecting to TPU...") try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Running on TPU ', tpu.master()) except ValueError: print("Could not connect to TPU") tpu = None if tpu: try: print("initializing TPU...") tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) print("TPU initialized") except Exception: print("failed to initialize TPU") else: DEVICE = "GPU" if DEVICE != "TPU": print("Using default strategy for CPU and single GPU") strategy = tf.distribute.get_strategy() if DEVICE == "GPU": print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU'))) AUTO = tf.data.experimental.AUTOTUNE REPLICAS = strategy.num_replicas_in_sync print(f'REPLICAS: {REPLICAS}' )
Petals to the Metal - Flower Classification on TPU
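When no TPU is available, the cell above falls back to the default distribution strategy, and the replica count it reports is what later scales the batch size. A quick way to see that fallback value on CPU or a single GPU:
import tensorflow as tf

strategy = tf.distribute.get_strategy()  # default strategy off-TPU
print("replicas:", strategy.num_replicas_in_sync)           # 1 on CPU / single GPU
print("scaled batch size:", 16 * strategy.num_replicas_in_sync)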
11,220,203
del dataset['FireplaceQu'] del test['FireplaceQu']<data_type_conversions>
GCS_DS_PATH = KaggleDatasets().get_gcs_path('tpu-getting-started' )
Petals to the Metal - Flower Classification on TPU
11,220,203
dataset['LotFrontage']=dataset['LotFrontage'].fillna(dataset['LotFrontage'].median()) test['LotFrontage']=test['LotFrontage'].fillna(dataset['LotFrontage'].median() )<sort_values>
SEED = 34 IMAGE_SIZE = [512, 512] BATCH_SIZE = 16 * strategy.num_replicas_in_sync AUG_BATCH = BATCH_SIZE FOLDS = 3 TTA = 5 GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]
Petals to the Metal - Flower Classification on TPU
11,220,203
cols_null=dataset.isna().sum(axis=0) cols_null=cols_null[cols_null>0] cols_null.sort_values(ascending=False )<create_dataframe>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example, return_image_name): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum if return_image_name else 0 def data_augment(image, label): image = tf.image.random_flip_left_right(image) return image, label def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n )
Petals to the Metal - Flower Classification on TPU
11,220,203
dataset[['GarageQual','SalePrice']].fillna('O' ).groupby('GarageQual' ).agg(['mean','min','max','count'] )<data_type_conversions>
AUTO = tf.data.experimental.AUTOTUNE TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec')+ tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') NUM_TRAINING_IMAGES = int(count_data_items(TRAINING_FILENAMES)*(FOLDS-1.) /FOLDS) NUM_VALIDATION_IMAGES = int(count_data_items(TRAINING_FILENAMES)*(1./FOLDS)) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
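The counts above carve the combined train+val shards into FOLDS pieces, keeping (FOLDS-1)/FOLDS of the images for training. The arithmetic, using an assumed total of 16465 labeled images (illustrative only, not read from this cell):
FOLDS = 3
total_labeled = 16465  # assumed combined train+val image count
num_train = int(total_labeled * (FOLDS - 1.) / FOLDS)
num_valid = int(total_labeled * (1. / FOLDS))
print(num_train, num_valid)  # 10976 5488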
11,220,203
dataset['GarageQual']=dataset['GarageQual'].fillna('TA') test['GarageQual']=test['GarageQual'].fillna('TA' )<data_type_conversions>
def get_train_ds(files, tta_aug = False, cutmix_aug = False, shuffle = True, repeat = True, labeled=True, return_image_names = True): ds = tf.data.TFRecordDataset(files, num_parallel_reads=AUTO) ds = ds.cache() if repeat: ds = ds.repeat() if shuffle: ds = ds.shuffle(1024*8) opt = tf.data.Options() opt.experimental_deterministic = False ds = ds.with_options(opt) if labeled: ds = ds.map(read_labeled_tfrecord, num_parallel_calls=AUTO) else: ds = ds.map(lambda example: read_unlabeled_tfrecord(example, return_image_names), num_parallel_calls=AUTO) if tta_aug: ds = ds.map(data_augment, num_parallel_calls = AUTO) ds = ds.map(transform, num_parallel_calls=AUTO) if cutmix_aug: ds = ds.batch(AUG_BATCH) ds = ds.map(mixup_and_cutmix, num_parallel_calls=AUTO) ds = ds.unbatch() ds = ds.batch(BATCH_SIZE) ds = ds.prefetch(AUTO) return ds def get_val_ds(files, shuffle = True, labeled=True, return_image_names=False): ds = tf.data.TFRecordDataset(files, num_parallel_reads=AUTO) ds = ds.cache() if shuffle: ds = ds.shuffle(1024*8) opt = tf.data.Options() opt.experimental_deterministic = False ds = ds.with_options(opt) if labeled: ds = ds.map(read_labeled_tfrecord, num_parallel_calls=AUTO) else: ds = ds.map(lambda example: read_unlabeled_tfrecord(example, return_image_names), num_parallel_calls=AUTO) ds = ds.batch(BATCH_SIZE) ds = ds.map(onehot, num_parallel_calls=AUTO) ds = ds.prefetch(AUTO) return ds
Petals to the Metal - Flower Classification on TPU
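get_train_ds above batches to AUG_BATCH before calling mixup_and_cutmix, whose definition (like transform and onehot) lives in cells not shown in this excerpt. Purely as an illustration of that batch-level pattern, a simplified mixup-style map might look like the sketch below; it is a stand-in, not the notebook's actual augmentation:
import tensorflow as tf

N_CLASSES = 104  # assumed class count for this competition

def simple_mixup(images, labels, alpha=0.2):
    # Blend each image and one-hot label with a shuffled partner from the same batch.
    batch_size = tf.shape(images)[0]
    lam = tf.random.uniform([], 1.0 - alpha, 1.0)  # crude stand-in for a Beta(alpha, alpha) draw
    idx = tf.random.shuffle(tf.range(batch_size))
    onehot = tf.one_hot(labels, N_CLASSES)
    mixed_images = lam * images + (1.0 - lam) * tf.gather(images, idx)
    mixed_labels = lam * onehot + (1.0 - lam) * tf.gather(onehot, idx)
    return mixed_images, mixed_labels

# usage sketch: ds = ds.batch(AUG_BATCH).map(simple_mixup).unbatch().batch(BATCH_SIZE)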
11,220,203
dataset['GarageType']=dataset['GarageType'].fillna('Attchd') test['GarageType']=test['GarageType'].fillna('Attchd' )<sort_values>
classes = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose']
Petals to the Metal - Flower Classification on TPU
11,220,203
cols_null=dataset.isna().sum(axis=0) cols_null=cols_null[cols_null>0] cols_null.sort_values(ascending=False )<groupby>
np.set_printoptions(threshold=15, linewidth=80) def batch_to_numpy_images_and_labels(data): images, labels = data numpy_images = images.numpy() numpy_labels = labels.numpy() if numpy_labels.dtype == object: numpy_labels = [None for _ in enumerate(numpy_images)] return numpy_images, numpy_labels def title_from_label_and_target(label, correct_label): if correct_label is None: return classes[label], True correct =(label == correct_label) return "{} [{}{}{}]".format(classes[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '', classes[correct_label] if not correct else ''), correct def display_one_flower(image, title, subplot, red=False, titlesize=16): plt.subplot(*subplot) plt.axis('off') plt.imshow(image) if len(title)> 0: plt.title(title, fontsize=int(titlesize)if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5)) return(subplot[0], subplot[1], subplot[2]+1) def display_batch_of_images(databatch, predictions=None): images, labels = batch_to_numpy_images_and_labels(databatch) if labels is None: labels = [None for _ in enumerate(images)] rows = int(math.sqrt(len(images))) cols = len(images)//rows FIGSIZE = 13.0 SPACING = 0.1 subplot=(rows,cols,1) if rows < cols: plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows)) else: plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE)) for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) : title = '' if label is None else classes[label] correct = True if predictions is not None: title, correct = title_from_label_and_target(predictions[i], label) dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3 subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize) plt.tight_layout() if label is None and predictions is None: plt.subplots_adjust(wspace=0, hspace=0) else: plt.subplots_adjust(wspace=SPACING, hspace=SPACING) plt.show()
Petals to the Metal - Flower Classification on TPU
11,220,203
dataset[['BsmtFinType2','SalePrice']].fillna('O' ).groupby('BsmtFinType2' ).agg(['mean','median','min','max','count'] )<data_type_conversions>
training_dataset = get_train_ds(TRAINING_FILENAMES, cutmix_aug = False, tta_aug = False, labeled = True, shuffle = True, repeat = True) training_dataset = training_dataset.unbatch().batch(20) train_batch = iter(training_dataset )
Petals to the Metal - Flower Classification on TPU
11,220,203
dataset['BsmtFinType2']=dataset['BsmtFinType2'].fillna('Unf') test['BsmtFinType2']=test['BsmtFinType2'].fillna('Unf' )<groupby>
test_dataset = get_train_ds(TEST_FILENAMES, labeled = False, shuffle = True, repeat = False) test_dataset = test_dataset.unbatch().batch(20) test_batch = iter(test_dataset )
Petals to the Metal - Flower Classification on TPU
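The configuration earlier sets TTA = 5 and get_train_ds exposes a tta_aug flag, which points to averaging predictions over several augmented passes of the test set. A hedged sketch of that averaging step only (the notebook's own TTA loop is not shown in this excerpt):
import numpy as np

def average_tta(probs_per_pass):
    # probs_per_pass: list of (num_images, num_classes) softmax arrays, one per augmented pass
    mean_probs = np.mean(np.stack(probs_per_pass, axis=0), axis=0)
    return np.argmax(mean_probs, axis=-1)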