kernel_id: int64 (values 24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
3,401,239
<SOS> metric: MacroFScore Kaggle data source: iwildcam-2019-fgvc6<data_type_conversions>
import os import json import numpy as np import pandas as pd import keras from keras import layers from keras.applications import DenseNet121 from keras.callbacks import Callback, ModelCheckpoint from keras.preprocessing.image import ImageDataGenerator from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.models import Sequential from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
iWildCam 2019 - FGVC6
3,401,239
ans_dict = dict(zip(submids, subm_preds.astype(str)) )<prepare_output>
x_train = np.load('../input/reducing-image-sizes-to-32x32/X_train.npy') x_test = np.load('../input/reducing-image-sizes-to-32x32/X_test.npy') y_train = np.load('../input/reducing-image-sizes-to-32x32/y_train.npy') print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples')
iWildCam 2019 - FGVC6
3,401,239
df_to_process =( pd.DataFrame .from_dict(ans_dict, orient='index', columns=['Category']) .reset_index() .rename({'index':'Id'}, axis=1) ) df_to_process['Id'] = df_to_process['Id'].map(lambda x: str(x)[:-4]) df_to_process.head()<categorify>
x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255. x_test /= 255.
iWildCam 2019 - FGVC6
3,401,239
def process_one_id(id_classes_str): if id_classes_str: return REVERSE_CLASSMAP[int(id_classes_str)] else: return id_classes_str<feature_engineering>
class Metrics(Callback): def on_train_begin(self, logs={}): self.val_f1s = [] self.val_recalls = [] self.val_precisions = [] def on_epoch_end(self, epoch, logs={}): X_val, y_val = self.validation_data[:2] y_pred = self.model.predict(X_val) y_pred_cat = keras.utils.to_categorical( y_pred.argmax(axis=1), num_classes=14 ) _val_f1 = f1_score(y_val, y_pred_cat, average='macro') _val_recall = recall_score(y_val, y_pred_cat, average='macro') _val_precision = precision_score(y_val, y_pred_cat, average='macro') self.val_f1s.append(_val_f1) self.val_recalls.append(_val_recall) self.val_precisions.append(_val_precision) print(( f"val_f1: {_val_f1:.4f}" f" — val_precision: {_val_precision:.4f}" f" — val_recall: {_val_recall:.4f}")) return f1_metrics = Metrics()
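Note: the Metrics callback above reads self.validation_data, which older standalone Keras populated on callbacks; newer tf.keras releases no longer set it. A minimal sketch of an equivalent macro-F1 callback that is handed the validation split explicitly (the names x_val / y_val are placeholders, not from the original kernel):
# Hedged sketch: macro-F1 callback that receives validation data explicitly,
# since self.validation_data is not populated in newer tf.keras versions.
import numpy as np
from sklearn.metrics import f1_score
from tensorflow.keras.callbacks import Callback

class MacroF1(Callback):
    def __init__(self, x_val, y_val):
        super().__init__()
        self.x_val = x_val          # validation images
        self.y_val = y_val          # one-hot validation labels
        self.scores = []

    def on_epoch_end(self, epoch, logs=None):
        preds = self.model.predict(self.x_val).argmax(axis=1)
        truth = self.y_val.argmax(axis=1)
        score = f1_score(truth, preds, average='macro')
        self.scores.append(score)
        print(f"epoch {epoch + 1}: val macro-F1 = {score:.4f}")

# hypothetical usage: model.fit(x_tr, y_tr, callbacks=[MacroF1(x_val, y_val)])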
iWildCam 2019 - FGVC6
3,401,239
df_to_process['Category'] = df_to_process['Category'].apply(process_one_id) df_to_process.head()<save_to_csv>
densenet = DenseNet121( weights='../input/densenet-keras/DenseNet-BC-121-32-no-top.h5', include_top=False, input_shape=(32,32,3) )
iWildCam 2019 - FGVC6
3,401,239
df_to_process.to_csv('submission.csv', index=False )<set_options>
model = Sequential() model.add(densenet) model.add(layers.GlobalAveragePooling2D()) model.add(layers.Dense(14, activation='softmax'))
iWildCam 2019 - FGVC6
3,401,239
%load_ext Cython<init_hyperparams>
model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] ) checkpoint = ModelCheckpoint( 'model.h5', monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto' ) history = model.fit( x=x_train, y=y_train, batch_size=64, epochs=7, callbacks=[checkpoint, f1_metrics], validation_split=0.1 )
iWildCam 2019 - FGVC6
3,401,239
%%cython import numpy as np cimport cython @cython.cdivision(True) @cython.boundscheck(False) @cython.nonecheck(False) @cython.wraparound(False) cdef int calc_neighs(unsigned char[:, :] field, int i, int j, int n, int k): cdef: int neighs = 0; int i_min = i - 1; int i_pl = i + 1; int j_min = j - 1; int j_pl = j + 1; neighs = 0 if i_min >= 0: if j_min >= 0: neighs += field[i_min, j_min] neighs += field[i_min, j] if j_pl < k: neighs += field[i_min, j_pl] if j_min >= 0: neighs += field[i, j_min] if j_pl < k: neighs += field[i, j_pl] if i_pl < n: if j_min >= 0: neighs += field[i_pl, j_min] neighs += field[i_pl, j] if j_pl < k: neighs += field[i_pl, j_pl] return neighs @cython.cdivision(True) @cython.boundscheck(False) @cython.nonecheck(False) @cython.wraparound(False) cpdef make_move(unsigned char[:, :] field, int moves): cdef: int _, i, j, neighs; int n, k; int switch = 0; unsigned char[:, :] cur_field; unsigned char[:, :] next_field; cur_field = np.copy(field) next_field = np.zeros_like(field, 'uint8') n = field.shape[0] k = field.shape[1] for _ in range(moves): if switch == 0: for i in range(n): for j in range(k): neighs = calc_neighs(cur_field, i, j, n, k) if cur_field[i, j] and neighs == 2: next_field[i, j] = 1 elif neighs == 3: next_field[i, j] = 1 else: next_field[i, j] = 0 else: for i in range(n): for j in range(k): neighs = calc_neighs(next_field, i, j, n, k) if next_field[i, j] and neighs == 2: cur_field[i, j] = 1 elif neighs == 3: cur_field[i, j] = 1 else: cur_field[i, j] = 0 switch = (switch + 1) % 2 return np.array(next_field if switch else cur_field)<categorify>
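The %%cython cell above defines make_move(field, moves), a Conway's Game of Life stepper over a uint8 2-D board. A minimal usage sketch, assuming the cell compiled in the same notebook session:
import numpy as np
# random 25x25 starting board of 0/1 cells (uint8, as expected by make_move)
board = np.random.randint(0, 2, size=(25, 25), dtype=np.uint8)
advanced = make_move(board, 5)   # advance the board five generations
print(advanced.shape, advanced.dtype)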
model.load_weights('model.h5') y_test = model.predict(x_test) submission_df = pd.read_csv('../input/iwildcam-2019-fgvc6/sample_submission.csv') submission_df['Predicted'] = y_test.argmax(axis=1) print(submission_df.shape) submission_df.head()
iWildCam 2019 - FGVC6
3,401,239
<choose_model_class><EOS>
submission_df.to_csv('submission.csv',index=False )
iWildCam 2019 - FGVC6
3,399,907
<SOS> metric: MacroFScore Kaggle data source: iwildcam-2019-fgvc6<train_model>
import os import json import numpy as np import pandas as pd import keras from keras.callbacks import Callback from keras.datasets import cifar10 from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score from sklearn.model_selection import train_test_split
iWildCam 2019 - FGVC6
3,399,907
models = [] for delta in range(1, 6): model = create_model(n_hidden_convs=6, n_hidden_filters=256) es = EarlyStopping(monitor='loss', patience=9, min_delta=0.001) model.fit_generator(data_generator(delta=delta, ravel=False), steps_per_epoch=500, epochs=50, verbose=1, callbacks=[es]) models.append(model )<load_from_csv>
x_train = np.load('../input/reducing-image-sizes-to-32x32/X_train.npy') x_test = np.load('../input/reducing-image-sizes-to-32x32/X_test.npy') y_train = np.load('../input/reducing-image-sizes-to-32x32/y_train.npy') print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples')
iWildCam 2019 - FGVC6
3,399,907
train_df = pd.read_csv('../input/train.csv', index_col=0) test_df = pd.read_csv('../input/test.csv', index_col=0)<create_dataframe>
x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255. x_test /= 255.
iWildCam 2019 - FGVC6
3,399,907
submit_df = pd.DataFrame(index=test_df.index, columns=['start.' + str(_)for _ in range(1, 401)] )<categorify>
class Metrics(Callback): def on_train_begin(self, logs={}): self.val_f1s = [] self.val_recalls = [] self.val_precisions = [] def on_epoch_end(self, epoch, logs={}): X_val, y_val = self.validation_data[:2] y_pred = self.model.predict(X_val) y_pred_cat = keras.utils.to_categorical( y_pred.argmax(axis=1), num_classes=num_classes ) _val_f1 = f1_score(y_val, y_pred_cat, average='macro') _val_recall = recall_score(y_val, y_pred_cat, average='macro') _val_precision = precision_score(y_val, y_pred_cat, average='macro') self.val_f1s.append(_val_f1) self.val_recalls.append(_val_recall) self.val_precisions.append(_val_precision) print(( f"val_f1: {_val_f1:.4f}" f" — val_precision: {_val_precision:.4f}" f" — val_recall: {_val_recall:.4f}")) return
iWildCam 2019 - FGVC6
3,399,907
for delta in range(1, 6): mod = models[delta-1] delta_df = test_df[test_df.delta == delta].iloc[:, 1:].values.reshape(-1, 20, 20, 1) submit_df[test_df.delta == delta] = mod.predict(delta_df ).reshape(-1, 400 ).round(0 ).astype('uint8' )<save_to_csv>
batch_size = 64 num_classes = 14 epochs = 30 val_split = 0.1 save_dir = os.path.join(os.getcwd() , 'models') model_name = 'keras_cnn_model.h5'
iWildCam 2019 - FGVC6
3,399,907
submit_df.to_csv('cnns_40.csv' )<import_modules>
model = Sequential() model.add(Conv2D(32,(3, 3), padding='same', input_shape=x_train.shape[1:])) model.add(Activation('relu')) model.add(Conv2D(32,(3, 3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64,(3, 3), padding='same')) model.add(Activation('relu')) model.add(Conv2D(64,(3, 3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes)) model.add(Activation('softmax'))
iWildCam 2019 - FGVC6
3,399,907
import os import sys import operator import numpy as np import pandas as pd from scipy import sparse import xgboost as xgb from sklearn import model_selection, preprocessing, ensemble from sklearn.metrics import log_loss from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer <train_model>
f1_metrics = Metrics() model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] ) hist = model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=[f1_metrics], validation_split=val_split, shuffle=True )
iWildCam 2019 - FGVC6
3,399,907
def runXGB(train_X, train_y, test_X, test_y=None, feature_names=None, seed_val=0, num_rounds=1000): param = {} param['objective'] = 'multi:softprob' param['eta'] = 0.1 param['max_depth'] = 6 param['silent'] = 1 param['num_class'] = 3 param['eval_metric'] = "mlogloss" param['min_child_weight'] = 1 param['subsample'] = 0.7 param['colsample_bytree'] = 0.7 param['seed'] = seed_val num_rounds = num_rounds plst = list(param.items()) xgtrain = xgb.DMatrix(train_X, label=train_y) if test_y is not None: xgtest = xgb.DMatrix(test_X, label=test_y) watchlist = [(xgtrain,'train'),(xgtest, 'test')] model = xgb.train(plst, xgtrain, num_rounds, watchlist, early_stopping_rounds=20) else: xgtest = xgb.DMatrix(test_X) model = xgb.train(plst, xgtrain, num_rounds) pred_test_y = model.predict(xgtest) return pred_test_y, model <load_from_disk>
if not os.path.isdir(save_dir): os.makedirs(save_dir) model_path = os.path.join(save_dir, model_name) model.save(model_path) print('Saved trained model at %s ' % model_path )
iWildCam 2019 - FGVC6
3,399,907
data_path = ".. /input/" train_file = data_path + "train.json" test_file = data_path + "test.json" train_df = pd.read_json(train_file) test_df = pd.read_json(test_file) print(train_df.shape) print(test_df.shape) <define_variables>
y_test = model.predict(x_test) submission_df = pd.read_csv('.. /input/iwildcam-2019-fgvc6/sample_submission.csv') submission_df['Predicted'] = y_test.argmax(axis=1) print(submission_df.shape) submission_df.head()
iWildCam 2019 - FGVC6
3,399,907
<feature_engineering><EOS>
submission_df.to_csv('submission.csv', index=False) history_df = pd.DataFrame(hist.history) history_df.to_csv('history.csv', index=False) with open('history.json', 'w') as f: json.dump(hist.history, f)
iWildCam 2019 - FGVC6
4,570,315
<SOS> metric: MacroFScore Kaggle data source: iwildcam-2019-fgvc6<categorify>
%reload_ext autoreload %autoreload 2 %matplotlib inline
iWildCam 2019 - FGVC6
4,570,315
categorical = ["display_address", "manager_id", "building_id", "street_address"] for f in categorical: if train_df[f].dtype=='object': lbl = preprocessing.LabelEncoder() lbl.fit(list(train_df[f].values)+ list(test_df[f].values)) train_df[f] = lbl.transform(list(train_df[f].values)) test_df[f] = lbl.transform(list(test_df[f].values)) features_to_use.append(f) <feature_engineering>
from fastai import * from fastai.vision import * import pandas as pd from fastai.utils.mem import *
iWildCam 2019 - FGVC6
4,570,315
train_df['features'] = train_df["features"].apply(lambda x: " ".join(["_".join(i.split(" ")) for i in x])) test_df['features'] = test_df["features"].apply(lambda x: " ".join(["_".join(i.split(" ")) for i in x])) print(train_df["features"].head()) tfidf = CountVectorizer(stop_words='english', max_features=200) tr_sparse = tfidf.fit_transform(train_df["features"]) te_sparse = tfidf.transform(test_df["features"]) <prepare_x_and_y>
path = Path('/kaggle/input/iwildcam-2019-fgvc6') debug =1 if debug: train_pct=0.04 else: train_pct=0.5
iWildCam 2019 - FGVC6
4,570,315
train_X = sparse.hstack([train_df[features_to_use], tr_sparse] ).tocsr() test_X = sparse.hstack([test_df[features_to_use], te_sparse] ).tocsr() target_num_map = {'high':0, 'medium':1, 'low':2} train_y = np.array(train_df['interest_level'].apply(lambda x: target_num_map[x])) print(train_X.shape, test_X.shape) <compute_train_metric>
train_df = pd.read_csv(path/'train.csv') train_df = pd.concat([train_df['id'],train_df['category_id']],axis=1,keys=['id','category_id']) train_df.head()
iWildCam 2019 - FGVC6
4,570,315
cv_scores = [] kf = model_selection.KFold(n_splits=7, shuffle=True, random_state=2016) for dev_index, val_index in kf.split(range(train_X.shape[0])) : dev_X, val_X = train_X[dev_index,:], train_X[val_index,:] dev_y, val_y = train_y[dev_index], train_y[val_index] preds, model = runXGB(dev_X, dev_y, val_X, val_y) cv_scores.append(log_loss(val_y, preds)) print(cv_scores) break <save_to_csv>
test_df = pd.read_csv(path/'test.csv') test_df = pd.DataFrame(test_df['id']) test_df['predicted'] = 0 test_df.head()
iWildCam 2019 - FGVC6
4,570,315
preds, model = runXGB(train_X, train_y, test_X, num_rounds=400) out_df = pd.DataFrame(preds) out_df.columns = ["high", "medium", "low"] out_df["listing_id"] = test_df.listing_id.values out_df.to_csv("xgb_starter2.csv", index=False) <set_options>
free = gpu_mem_get_free_no_cache() if free > 8200: bs=64 else: bs=32 print(f"using bs={bs}, have {free}MB of GPU RAM free") tfms = get_transforms(max_rotate=20, max_zoom=1.3, max_lighting=0.4, max_warp=0.4, p_affine=1., p_lighting=1.)
iWildCam 2019 - FGVC6
4,570,315
%matplotlib inline <define_variables>
data = get_data(224, bs, 'zeros' )
iWildCam 2019 - FGVC6
4,570,315
horizontal_flip = False rotation_augmentation = True brightness_augmentation = True shift_augmentation = True random_noise_augmentation = True include_unclean_data = True sample_image_index = 20 rotation_angles = [12] pixel_shifts = [12] NUM_EPOCHS = 80 BATCH_SIZE = 64<load_pretrained>
gc.collect() wd=1e-1 learn = cnn_learner(data, models.resnet34, metrics=error_rate, bn_final=True, wd=wd) learn.model_dir= '/kaggle/working/'
iWildCam 2019 - FGVC6
4,570,315
print("Contents of input/facial-keypoints-detection directory: ") !ls.. /input/facial-keypoints-detection/ print(" Extracting.zip dataset files to working directory...") !unzip -u.. /input/facial-keypoints-detection/test.zip !unzip -u.. /input/facial-keypoints-detection/training.zip print(" Current working directory:") !pwd print(" Contents of working directory:") !ls<load_from_csv>
data = get_data(352,bs) learn.data = data
iWildCam 2019 - FGVC6
4,570,315
%%time train_file = 'training.csv' test_file = 'test.csv' idlookup_file = '../input/facial-keypoints-detection/IdLookupTable.csv' train_data = pd.read_csv(train_file) test_data = pd.read_csv(test_file) idlookup_data = pd.read_csv(idlookup_file)<count_values>
learn.fit_one_cycle(2, max_lr=slice(1e-6,1e-4)) learn.save('352' )
iWildCam 2019 - FGVC6
4,570,315
print("Length of train data: {}".format(len(train_data))) print("Number of Images with missing pixel values: {}".format(len(train_data)- int(train_data.Image.apply(lambda x: len(x.split())).value_counts().values)) )<count_missing_values>
lr = 1e-3 learn.fit_one_cycle(8, slice(lr/100, lr))
iWildCam 2019 - FGVC6
4,570,315
train_data.isnull().sum()<categorify>
learn.save('stage-2-sz32' )
iWildCam 2019 - FGVC6
4,570,315
%%time def load_images(image_data): images = [] for idx, sample in image_data.iterrows() : image = np.array(sample['Image'].split(' '), dtype=int) image = np.reshape(image,(96,96,1)) images.append(image) images = np.array(images)/255. return images def load_keypoints(keypoint_data): keypoint_data = keypoint_data.drop('Image',axis = 1) keypoint_features = [] for idx, sample_keypoints in keypoint_data.iterrows() : keypoint_features.append(sample_keypoints) keypoint_features = np.array(keypoint_features, dtype = 'float') return keypoint_features clean_train_images = load_images(clean_train_data) print("Shape of clean_train_images: {}".format(np.shape(clean_train_images))) clean_train_keypoints = load_keypoints(clean_train_data) print("Shape of clean_train_keypoints: {}".format(np.shape(clean_train_keypoints))) test_images = load_images(test_data) print("Shape of test_images: {}".format(np.shape(test_images))) train_images = clean_train_images train_keypoints = clean_train_keypoints fig, axis = plt.subplots() plot_sample(clean_train_images[sample_image_index], clean_train_keypoints[sample_image_index], axis, "Sample image & keypoints") if include_unclean_data: unclean_train_images = load_images(unclean_train_data) print("Shape of unclean_train_images: {}".format(np.shape(unclean_train_images))) unclean_train_keypoints = load_keypoints(unclean_train_data) print("Shape of unclean_train_keypoints: {} ".format(np.shape(unclean_train_keypoints))) train_images = np.concatenate(( train_images, unclean_train_images)) train_keypoints = np.concatenate(( train_keypoints, unclean_train_keypoints))<concatenate>
interp = ClassificationInterpretation.from_learner(learn) losses,idxs = interp.top_losses() len(data.valid_ds)==len(losses)==len(idxs )
iWildCam 2019 - FGVC6
4,570,315
def left_right_flip(images, keypoints): flipped_keypoints = [] flipped_images = np.flip(images, axis=2) for idx, sample_keypoints in enumerate(keypoints): flipped_keypoints.append([96.-coor if idx%2==0 else coor for idx,coor in enumerate(sample_keypoints)]) return flipped_images, flipped_keypoints if horizontal_flip: flipped_train_images, flipped_train_keypoints = left_right_flip(clean_train_images, clean_train_keypoints) print("Shape of flipped_train_images: {}".format(np.shape(flipped_train_images))) print("Shape of flipped_train_keypoints: {}".format(np.shape(flipped_train_keypoints))) train_images = np.concatenate(( train_images, flipped_train_images)) train_keypoints = np.concatenate(( train_keypoints, flipped_train_keypoints)) fig, axis = plt.subplots() plot_sample(flipped_train_images[sample_image_index], flipped_train_keypoints[sample_image_index], axis, "Horizontally Flipped" )<normalization>
test_preds = learn.get_preds(DatasetType.Test) test_df['predicted'] = test_preds[0].argmax(dim=1 )
iWildCam 2019 - FGVC6
4,570,315
<categorify><EOS>
csv_path ='/kaggle/working/submission.csv' test_df.to_csv(csv_path, index=False )
iWildCam 2019 - FGVC6
3,480,019
<SOS> metric: MacroFScore Kaggle data source: iwildcam-2019-fgvc6<choose_model_class>
import os import json import numpy as np import pandas as pd import keras from keras import layers from keras.applications import DenseNet121 from keras.callbacks import Callback, ModelCheckpoint from keras.preprocessing.image import ImageDataGenerator from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.models import Sequential from keras.utils.vis_utils import plot_model from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
iWildCam 2019 - FGVC6
3,480,019
model = Sequential() model.add(Convolution2D(32,(3,3), padding='same', use_bias=False, input_shape=(96,96,1))) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(32,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(64,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(64,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(96,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(96,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(128,(3,3),padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(128,(3,3),padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(256,(3,3),padding='same',use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(256,(3,3),padding='same',use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(512,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(512,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dense(512,activation='relu')) model.add(Dropout(0.1)) model.add(Dense(30)) model.summary()<train_model>
x_train = np.load('../input/reducing-image-sizes-to-32x32/X_train.npy') x_test = np.load('../input/reducing-image-sizes-to-32x32/X_test.npy') y_train = np.load('../input/reducing-image-sizes-to-32x32/y_train.npy') print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255. x_test /= 255.
iWildCam 2019 - FGVC6
3,480,019
%%time if os.path.exists('../input/data-augmentation-for-facial-keypoint-detection/best_model.hdf5'): model = load_model('../input/data-augmentation-for-facial-keypoint-detection/best_model.hdf5') checkpointer = ModelCheckpoint(filepath = 'best_model.hdf5', monitor='val_mae', verbose=1, save_best_only=True, mode='min') model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae', 'acc']) history = model.fit(train_images, train_keypoints, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, validation_split=0.05, callbacks=[checkpointer])<train_model>
class Metrics(Callback): def on_train_begin(self, logs={}): self.val_f1s = [] self.val_recalls = [] self.val_precisions = [] def on_epoch_end(self, epoch, logs={}): X_val, y_val = self.validation_data[:2] y_pred = self.model.predict(X_val) y_pred_cat = keras.utils.to_categorical( y_pred.argmax(axis=1), num_classes=14 ) _val_f1 = f1_score(y_val, y_pred_cat, average='macro') _val_recall = recall_score(y_val, y_pred_cat, average='macro') _val_precision = precision_score(y_val, y_pred_cat, average='macro') self.val_f1s.append(_val_f1) self.val_recalls.append(_val_recall) self.val_precisions.append(_val_precision) print(( f"val_f1: {_val_f1:.4f}" f" — val_precision: {_val_precision:.4f}" f" — val_recall: {_val_recall:.4f}")) return f1_metrics = Metrics()
iWildCam 2019 - FGVC6
3,480,019
%%time checkpointer = ModelCheckpoint(filepath = 'best_model.hdf5', monitor='mae', verbose=1, save_best_only=True, mode='min') model.fit(train_images, train_keypoints, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, callbacks=[checkpointer] )<predict_on_test>
densenet = DenseNet121( weights='../input/densenet-keras/DenseNet-BC-121-32-no-top.h5', include_top=False, input_shape=(32,32,3) ) model = Sequential() model.add(densenet) model.add(layers.GlobalAveragePooling2D()) model.add(Dropout(0.5)) model.add(layers.Dense(14, activation='softmax')) plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True) model.summary()
iWildCam 2019 - FGVC6
3,480,019
%%time model = load_model('best_model.hdf5') test_preds = model.predict(test_images )<save_to_csv>
checkpoint = ModelCheckpoint( 'model.h5', monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto' )
iWildCam 2019 - FGVC6
3,480,019
feature_names = list(idlookup_data['FeatureName']) image_ids = list(idlookup_data['ImageId']-1) row_ids = list(idlookup_data['RowId']) feature_list = [] for feature in feature_names: feature_list.append(feature_names.index(feature)) predictions = [] for x,y in zip(image_ids, feature_list): predictions.append(test_preds[x][y]) row_ids = pd.Series(row_ids, name = 'RowId') locations = pd.Series(predictions, name = 'Location') locations = locations.clip(0.0,96.0) submission_result = pd.concat([row_ids,locations],axis = 1) submission_result.to_csv('submission.csv',index = False )<import_modules>
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) history = model.fit( x=x_train, y=y_train, batch_size=256, epochs=30, callbacks=[checkpoint, f1_metrics], validation_split=0.2 )
iWildCam 2019 - FGVC6
3,480,019
<load_from_csv><EOS>
model.load_weights('model.h5') y_test = model.predict(x_test) submission_df = pd.read_csv('../input/iwildcam-2019-fgvc6/sample_submission.csv') submission_df['Predicted'] = y_test.argmax(axis=1) print(submission_df.shape) submission_df.head() submission_df.to_csv('submission.csv', index=False)
iWildCam 2019 - FGVC6
3,391,563
<SOS> metric: MacroFScore Kaggle data source: iwildcam-2019-fgvc6<load_from_csv>
import warnings warnings.filterwarnings('ignore')
iWildCam 2019 - FGVC6
3,391,563
samplesubmission = pd.read_csv("/kaggle/input/SampleSubmission.csv") samplesubmission.head()<load_from_csv>
%%time train_df = pd.read_csv(os.path.join(PATH, 'train.csv')) test_df = pd.read_csv(os.path.join(PATH, 'test.csv'))
iWildCam 2019 - FGVC6
3,391,563
train = pd.read_csv("/kaggle/input/training/training.csv") train.head().T<count_missing_values>
x_train = np.load('../input/reducing-image-sizes-to-32x32/X_train.npy') x_test = np.load('../input/reducing-image-sizes-to-32x32/X_test.npy') y_train = np.load('../input/reducing-image-sizes-to-32x32/y_train.npy')
iWildCam 2019 - FGVC6
3,391,563
train.isnull().sum()<count_missing_values>
classes_wild = {0: 'empty', 1: 'deer', 2: 'moose', 3: 'squirrel', 4: 'rodent', 5: 'small_mammal', \ 6: 'elk', 7: 'pronghorn_antelope', 8: 'rabbit', 9: 'bighorn_sheep', 10: 'fox', 11: 'coyote', \ 12: 'black_bear', 13: 'raccoon', 14: 'skunk', 15: 'wolf', 16: 'bobcat', 17: 'cat',\ 18: 'dog', 19: 'opossum', 20: 'bison', 21: 'mountain_goat', 22: 'mountain_lion'}
iWildCam 2019 - FGVC6
3,391,563
train.isnull().sum()<correct_missing_values>
train_df['classes_wild'] = train_df['category_id'].apply(lambda cw: classes_wild[cw] )
iWildCam 2019 - FGVC6
3,391,563
train.fillna(method = 'ffill',inplace = True )<prepare_x_and_y>
train_image_files = list(os.listdir(os.path.join(PATH,'train_images'))) test_image_files = list(os.listdir(os.path.join(PATH,'test_images'))) print("Number of image files: train:{} test:{}".format(len(train_image_files), len(test_image_files)) )
iWildCam 2019 - FGVC6
3,391,563
X = train.Image.values del train['Image'] Y = train.values<concatenate>
%%time train_file_names = list(train_df['file_name']) print("Matching train image names: {}".format(len(set(train_file_names ).intersection(train_image_files))))
iWildCam 2019 - FGVC6
3,391,563
x = [] for i in tqdm(X): q = [int(j)for j in i.split() ] x.append(q) len(x )<split>
%%time test_file_names = list(test_df['file_name']) print("Matching test image names: {}".format(len(set(test_file_names ).intersection(test_image_files))))
iWildCam 2019 - FGVC6
3,391,563
x_train,x_test,y_train,y_test = tts(x,Y,random_state = 69,test_size = 0.1 )<import_modules>
cnt_classes_images = train_df.classes_wild.nunique() print("There are {} classes of images".format(cnt_classes_images)) pd.DataFrame(train_df.classes_wild.value_counts() ).transpose()
iWildCam 2019 - FGVC6
3,391,563
from keras.layers.advanced_activations import LeakyReLU from keras.models import Sequential, Model from keras.layers import Activation, Convolution2D, MaxPooling2D, BatchNormalization, Flatten, Dense, Dropout, Conv2D,MaxPool2D, ZeroPadding2D<choose_model_class>
try: train_df['date_time'] = pd.to_datetime(train_df['date_captured'], errors='coerce') train_df["year"] = train_df['date_time'].dt.year train_df["month"] = train_df['date_time'].dt.month train_df["day"] = train_df['date_time'].dt.day train_df["hour"] = train_df['date_time'].dt.hour train_df["minute"] = train_df['date_time'].dt.minute except Exception as ex: print("Exception: {}".format(ex))
iWildCam 2019 - FGVC6
3,391,563
model = Sequential() model.add(Convolution2D(32,(3,3), padding='same', use_bias=False, input_shape=(96,96,1))) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(32,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(64,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(64,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(96,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(96,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(128,(3,3),padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(128,(3,3),padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(256,(3,3),padding='same',use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(256,(3,3),padding='same',use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(512,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(512,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dense(512,activation='relu')) model.add(Dropout(0.1)) model.add(Dense(30)) model.summary()<compute_test_metric>
IMAGE_PATH = os.path.join(PATH,'train_images/') draw_category_images('classes_wild' )
iWildCam 2019 - FGVC6
3,391,563
<train_model>
x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255. x_test /= 255.
iWildCam 2019 - FGVC6
3,391,563
model.compile(optimizer = 'adam',loss = 'mean_squared_error', metrics = ['mae','acc']) model.fit(x_train,y_train,batch_size=256, epochs=50,validation_data=(x_test,y_test))<train_model>
class Metrics(Callback): def on_train_begin(self, logs={}): self.val_f1s = [] self.val_recalls = [] self.val_precisions = [] def on_epoch_end(self, epoch, logs={}): X_val, y_val = self.validation_data[:2] y_pred = self.model.predict(X_val) y_pred_cat = keras.utils.to_categorical( y_pred.argmax(axis=1), num_classes=14 ) _val_f1 = f1_score(y_val, y_pred_cat, average='macro') _val_recall = recall_score(y_val, y_pred_cat, average='macro') _val_precision = precision_score(y_val, y_pred_cat, average='macro') self.val_f1s.append(_val_f1) self.val_recalls.append(_val_recall) self.val_precisions.append(_val_precision) print(( f"val_f1: {_val_f1:.4f}" f" — val_precision: {_val_precision:.4f}" f" — val_recall: {_val_recall:.4f}")) return f1_metrics = Metrics()
iWildCam 2019 - FGVC6
3,391,563
model.compile(optimizer = 'adam',loss = 'mean_squared_error', metrics = ['mae']) model.fit(x,Y,batch_size=64, epochs=100) model.fit(x,Y,batch_size=128, epochs=50) model.fit(x,Y,batch_size=256, epochs=50 )<load_from_csv>
model_densenet = DenseNet121( weights='../input/densenet-keras/DenseNet-BC-121-32-no-top.h5', include_top=False, input_shape=(32,32,3) )
iWildCam 2019 - FGVC6
3,391,563
test = pd.read_csv("/kaggle/input/test/test.csv") test.head()<count_missing_values>
model = Sequential() model.add(model_densenet) model.add(layers.GlobalAveragePooling2D()) model.add(layers.Dense(cnt_classes_images, activation='softmax'))
iWildCam 2019 - FGVC6
3,391,563
test.isnull().sum()<count_missing_values>
model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] )
iWildCam 2019 - FGVC6
3,391,563
test.isnull().sum()<split>
checkpoint = ModelCheckpoint( 'model.h5', monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto' )
iWildCam 2019 - FGVC6
3,391,563
test = test.Image.values x_t = [] for i in tqdm(test): q = [int(j)for j in i.split() ] x_t.append(q) x_t = np.array(x_t) x_t = x_t.reshape(-1, 96,96,1) x_t = x_t/255.0 x_t.shape<predict_on_test>
BATCH_SIZE = 64 EPOCHS = 35 VALID_SPLIT = 0.1 history = model.fit( x=x_train, y=y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=[checkpoint, f1_metrics], validation_split=VALID_SPLIT )
iWildCam 2019 - FGVC6
3,391,563
pred = model.predict(x_t) pred.shape<define_variables>
model.load_weights('model.h5') y_test = model.predict(x_test )
iWildCam 2019 - FGVC6
3,391,563
lookid_list = list(lookid_data['FeatureName']) imageID = list(lookid_data['ImageId']-1) pre_list = list(pred )<define_variables>
submission_df = pd.read_csv(os.path.join(PATH,'sample_submission.csv')) submission_df['Predicted'] = y_test.argmax(axis=1) print(submission_df.shape) submission_df.head(3 )
iWildCam 2019 - FGVC6
3,391,563
<define_variables><EOS>
submission_df.to_csv("submission.csv", index=False )
iWildCam 2019 - FGVC6
10,768,563
model2.save('my_model2.h5') model2 = load_model('my_model2.h5' )<define_variables>
Facial Keypoints Detection
10,768,563
class FlippedImageDataGenerator(ImageDataGenerator): flip_indices = [ (0, 2),(1, 3), (4, 8),(5, 9),(6, 10),(7, 11), (12, 16),(13, 17),(14, 18),(15, 19), (22, 24),(23, 25), ] def next(self): X_batch, y_batch = super(FlippedImageDataGenerator, self).next() batch_size = X_batch.shape[0] indices = np.random.choice(batch_size, batch_size // 2, replace=False) X_batch[indices] = X_batch[indices, :, :, ::-1] if y_batch is not None: y_batch[indices, ::2] = y_batch[indices, ::2] * -1 for a, b in self.flip_indices: y_batch[indices, a], y_batch[indices, b] = ( y_batch[indices, b], y_batch[indices, a] ) return X_batch, y_batch<split>
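In the generator above, keypoints are scaled to [-1, 1] around the image centre, so a horizontal mirror negates every x coordinate (the even indices) and then swaps left/right landmark pairs via flip_indices. A tiny sketch of the same transform on a single keypoint vector (the values and flip_pairs here are illustrative, not from the original kernel):
import numpy as np

# One keypoint vector laid out as [x0, y0, x1, y1, ...], scaled to [-1, 1].
keypoints = np.array([-0.5, 0.1, 0.5, 0.1, 0.0, -0.2])
flip_pairs = [(0, 2), (1, 3)]          # hypothetical left/right index pairs

flipped = keypoints.copy()
flipped[::2] *= -1                     # mirroring the image negates x coordinates
for a, b in flip_pairs:
    flipped[a], flipped[b] = flipped[b], flipped[a]   # swap left/right landmarks
print(flipped)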
!ls ../input/facial-keypoints-detection/ print(" Extracting .zip dataset files to working directory...") !unzip -u ../input/facial-keypoints-detection/test.zip !unzip -u ../input/facial-keypoints-detection/training.zip print(" Current working directory:") !pwd print(" Contents of working directory:") !ls
Facial Keypoints Detection
10,768,563
X, y = load2d() X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42 )<train_on_grid>
%%time train_csv = 'training.csv' test_csv = 'test.csv' idlookup_file = '../input/facial-keypoints-detection/IdLookupTable.csv' train = pd.read_csv(train_csv) test = pd.read_csv(test_csv) idlookup_data = pd.read_csv(idlookup_file)
Facial Keypoints Detection
10,768,563
model3 = CNN() flipgen = FlippedImageDataGenerator() hist3 = model3.fit_generator(flipgen.flow(X_train, y_train), samples_per_epoch=X_train.shape[0], nb_epoch=300, validation_data=(X_val, y_val)) <load_pretrained>
train.isnull().sum()
Facial Keypoints Detection
10,768,563
model3.save('my_model3.h5') model3 = load_model('my_model3.h5' )<define_variables>
train.isnull().sum()
Facial Keypoints Detection
10,768,563
SPECIALIST_SETTINGS = [ dict( columns=( 'left_eye_center_x', 'left_eye_center_y', 'right_eye_center_x', 'right_eye_center_y', ), flip_indices=(( 0, 2),(1, 3)) , ), dict( columns=( 'nose_tip_x', 'nose_tip_y', ), flip_indices=() , ), dict( columns=( 'mouth_left_corner_x', 'mouth_left_corner_y', 'mouth_right_corner_x', 'mouth_right_corner_y', 'mouth_center_top_lip_x', 'mouth_center_top_lip_y', ), flip_indices=(( 0, 2),(1, 3)) , ), dict( columns=( 'mouth_center_bottom_lip_x', 'mouth_center_bottom_lip_y', ), flip_indices=() , ), dict( columns=( 'left_eye_inner_corner_x', 'left_eye_inner_corner_y', 'right_eye_inner_corner_x', 'right_eye_inner_corner_y', 'left_eye_outer_corner_x', 'left_eye_outer_corner_y', 'right_eye_outer_corner_x', 'right_eye_outer_corner_y', ), flip_indices=(( 0, 2),(1, 3),(4, 6),(5, 7)) , ), dict( columns=( 'left_eyebrow_inner_end_x', 'left_eyebrow_inner_end_y', 'right_eyebrow_inner_end_x', 'right_eyebrow_inner_end_y', 'left_eyebrow_outer_end_x', 'left_eyebrow_outer_end_y', 'right_eyebrow_outer_end_x', 'right_eyebrow_outer_end_y', ), flip_indices=(( 0, 2),(1, 3),(4, 6),(5, 7)) , ), ] <train_model>
train.fillna(method = 'ffill',inplace = True )
Facial Keypoints Detection
10,768,563
def fit_specialists(freeze=True, print_every=10, epochs=100, prop=0.1, name_transfer_model="my_model3.h5"): specialists = OrderedDict() for setting in SPECIALIST_SETTINGS: cols = setting['columns'] flip_indices = setting['flip_indices'] X, y = load2d(cols=cols) X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42) model4 = load_model(name_transfer_model) if freeze: for layer in model.layers: layer.trainable = False model4.layers.pop() model4.outputs = [model4.layers[-1].output] model4.layers[-1].outbound_nodes = [] model4.add(Dense(len(cols))) model4.compile(loss='mean_squared_error', optimizer="adam") flipgen = FlippedImageDataGenerator() flipgen.flip_indices = setting['flip_indices'] print(X_train.shape) print(y_train.shape) print(X_val.shape) print(y_val.shape) hist_final = model4.fit_generator(flipgen.flow(X_train, y_train), samples_per_epoch=X_train.shape[0], nb_epoch=epochs, validation_data=(X_val, y_val)) specialists[cols] = model4 return(specialists) <train_model>
train.isnull().sum()
Facial Keypoints Detection
10,768,563
%%time specialists1 = fit_specialists(freeze=True, print_every=10, epochs=100, name_transfer_model="my_model3.h5" )<predict_on_test>
train.isnull().sum()
Facial Keypoints Detection
10,768,563
X_test,_ = load2d(test=True) y_pred3 = model3.predict(X_test) landmark_nm = read_csv(os.path.expanduser(FTRAIN)).columns[:-1].values df_y_pred3 = DataFrame(y_pred3,columns = landmark_nm) def predict_specialist(specialists1,X_test): y_pred_s = [] for columns, value in specialists1.items() : smodel = value y_pred = smodel.predict(X_test) y_pred = DataFrame(y_pred,columns=columns) y_pred_s.append(y_pred) df_y_pred_s = concat(y_pred_s,axis=1) return(df_y_pred_s) df_y_pred_s = predict_specialist(specialists1,X_test) y_pred_s = df_y_pred_s.values<feature_engineering>
test.isnull().sum()
Facial Keypoints Detection
10,768,563
FIdLookup = '../input/IdLookupTable.csv' IdLookup = read_csv(os.path.expanduser(FIdLookup)) def prepare_submission(y_pred4,filename): ImageId = IdLookup["ImageId"] FeatureName = IdLookup["FeatureName"] RowId = IdLookup["RowId"] submit = [] for rowId,irow,landmark in zip(RowId,ImageId,FeatureName): submit.append([rowId,y_pred4[landmark].iloc[irow-1]]) submit = DataFrame(submit,columns=["RowId","Location"]) submit["Location"] = submit["Location"]*48 + 48 print(submit.shape) if filename == "model3": submit.to_csv("model3.csv",index=False) else: submit.to_csv("special.csv",index=False) prepare_submission(df_y_pred_s,"special") prepare_submission(df_y_pred3,"model3") <import_modules>
test.isnull().sum()
Facial Keypoints Detection
10,768,563
<create_dataframe>
import tensorflow.keras as keras from tensorflow.keras.models import Model, Sequential from tensorflow.keras.layers import Input,LeakyReLU, Conv2D,Flatten, BatchNormalization, Dense, Dropout, GlobalAveragePooling2D,MaxPool2D from tensorflow.keras import optimizers import tensorflow as tf from keras.utils import np_utils from keras import applications from keras.layers import concatenate import time from skimage.transform import resize
Facial Keypoints Detection
10,768,563
df_y_pred_s = df_y_pred_s[df_y_pred3.columns] df_compare = {} df_compare["difference"] =(( df_y_pred_s - df_y_pred3)**2 ).mean(axis=1) df_compare["RowId"] = range(df_y_pred_s.shape[0]) df_compare = DataFrame(df_compare) df_compare = df_compare.sort_values("difference",ascending=False )<prepare_x_and_y>
model = Sequential() model.add(Conv2D(32,(5,5), padding='same', use_bias=True, input_shape=(96,96,1))) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(32,(5,5), padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(32,(5,5), padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Conv2D(64,(5,5), padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(64,(5,5), padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(64,(5,5), padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Conv2D(96,(5,5), padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(96,(5,5), padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(96,(5,5), padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Conv2D(128,(5,5),padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(128,(5,5),padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Conv2D(256,(5,5),padding='same',use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(256,(5,5),padding='same',use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Conv2D(512,(5,5), padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(512,(5,5), padding='same', use_bias=True)) model.add(LeakyReLU(alpha=0.2)) model.add(BatchNormalization(momentum=0.8)) model.add(Flatten()) model.add(Dense(512)) model.add(LeakyReLU(alpha=0.2)) model.add(Dropout(0.1)) model.add(Dense(30)) model.summary()
Facial Keypoints Detection
10,768,563
def fit_specialists(freeze=True, print_every=10, epochs=100, prop=0.1, name_transfer_model="my_model2.h5"): specialists = OrderedDict() for setting in SPECIALIST_SETTINGS: cols = setting['columns'] flip_indices = setting['flip_indices'] X, y = load2d(cols=cols) X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42) model4 = load_model(name_transfer_model) if freeze: for layer in model.layers: layer.trainable = False model4.layers.pop() model4.outputs = [model4.layers[-1].output] model4.layers[-1].outbound_nodes = [] model4.add(Dense(len(cols))) model4.compile(loss='mean_squared_error', optimizer="adam") flipgen = FlippedImageDataGenerator() flipgen.flip_indices = setting['flip_indices'] print(X_train.shape) print(y_train.shape) print(X_val.shape) print(y_val.shape) hist_final = model4.fit_generator(flipgen.flow(X_train, y_train), samples_per_epoch=X_train.shape[0], nb_epoch=epochs, validation_data=(X_val, y_val)) specialists[cols] = model4 return(specialists) <train_model>
Facial Keypoints Detection
10,768,563
%%time specialists2 = fit_specialists(freeze=True, print_every=10, epochs=100, name_transfer_model="my_model2.h5" )<predict_on_test>
checkpointer = ModelCheckpoint(filepath = 'best_modelV8.hdf5', monitor='val_mae', verbose=1, save_best_only=True, mode='min') model.compile(optimizer="adam", loss='mean_squared_error', metrics=['mae', 'acc']) history = model.fit(train_images, train_keypoints, epochs=500, batch_size=256, validation_split=0.1, callbacks=[checkpointer])
Facial Keypoints Detection
10,768,563
X_test,_ = load2d(test=True) def predict_specialist(specialists2,X_test): y_pred_s = [] for columns, value in specialists2.items() : smodel = value y_pred = smodel.predict(X_test) y_pred = DataFrame(y_pred,columns=columns) y_pred_s.append(y_pred) df_y_pred_s = concat(y_pred_s,axis=1) return(df_y_pred_s) df_y_pred_s = predict_specialist(specialists2,X_test) y_pred_s = df_y_pred_s.values<predict_on_test>
model.save('best_modelV5x5_V1.hdf5')
Facial Keypoints Detection
10,768,563
y_pred2 = model2.predict(X_test) landmark_nm = read_csv(os.path.expanduser(FTRAIN)).columns[:-1].values df_y_pred2 = DataFrame(y_pred2,columns = landmark_nm )<feature_engineering>
%%time model = load_model('best_modelV5x5_V1.hdf5') test_preds = model.predict(test_images )
Facial Keypoints Detection
10,768,563
FIdLookup = '../input/IdLookupTable.csv' IdLookup = read_csv(os.path.expanduser(FIdLookup)) def prepare_submission(y_pred2,filename): ImageId = IdLookup["ImageId"] FeatureName = IdLookup["FeatureName"] RowId = IdLookup["RowId"] submit = [] for rowId,irow,landmark in zip(RowId,ImageId,FeatureName): submit.append([rowId,y_pred2[landmark].iloc[irow-1]]) submit = DataFrame(submit,columns=["RowId","Location"]) submit["Location"] = submit["Location"]*48 + 48 print(submit.shape) if filename == "model2": submit.to_csv("model2.csv",index=False) else: submit.to_csv("special_model2.csv",index=False) prepare_submission(df_y_pred_s,"special_model2") prepare_submission(df_y_pred2,"model2")<create_dataframe>
feature_names = list(idlookup_data['FeatureName']) image_ids = list(idlookup_data['ImageId']-1) row_ids = list(idlookup_data['RowId']) feature_list = [] for feature in feature_names: feature_list.append(feature_names.index(feature)) predictions = [] for x,y in zip(image_ids, feature_list): predictions.append(test_preds[x][y]) row_ids = pd.Series(row_ids, name = 'RowId') locations = pd.Series(predictions, name = 'Location') locations = locations.clip(0.0,96.0) submission_result = pd.concat([row_ids,locations],axis = 1) submission_result.to_csv('charlin_version_5X5.csv',index = False )
Facial Keypoints Detection
10,768,563
df_y_pred_s = df_y_pred_s[df_y_pred3.columns] df_compare = {} df_compare["difference"] =(( df_y_pred_s - df_y_pred2)**2 ).mean(axis=1) df_compare["RowId"] = range(df_y_pred_s.shape[0]) df_compare = DataFrame(df_compare) df_compare = df_compare.sort_values("difference",ascending=False )<prepare_x_and_y>
aug = ImageDataGenerator( rotation_range=20, zoom_range=0.15, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15, horizontal_flip=True, fill_mode="nearest" )
Facial Keypoints Detection
10,768,563
<import_modules>
trainX=np.swapaxes(trainX,0,1) trainX=np.swapaxes(trainX,1,2) trainX=np.swapaxes(trainX,2,3) trainX.shape
Facial Keypoints Detection
10,768,563
<load_from_csv><EOS>
testX=np.swapaxes(testX,0,1) testX=np.swapaxes(testX,1,2) testX=np.swapaxes(testX,2,3) testX.shape
Facial Keypoints Detection
3,593,820
<SOS> metric: RMSE Kaggle data source: facial-keypoints-detection<load_from_csv>
%reload_ext autoreload %autoreload 2 %matplotlib inline rn_seed=1 np.random.seed(rn_seed)
Facial Keypoints Detection
3,593,820
RegularSeason_Compact_Results = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WRegularSeasonCompactResults.csv') MSeasons = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WSeasons.csv') MTeams=pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WTeams.csv')<merge>
train_path = Path('/tmp/train') test_path = Path('/tmp/test' )
Facial Keypoints Detection
3,593,820
Tourney_Results_Compact=pd.merge(Tourney_Compact_Results, Tourney_Seeds, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Seed':'WinningSeed'},inplace=True) Tourney_Results_Compact=Tourney_Results_Compact.drop(['TeamID'],axis=1) Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, Tourney_Seeds, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Seed':'LoosingSeed'}, inplace=True) Tourney_Results_Compact=Tourney_Results_Compact.drop(['TeamID','NumOT','WLoc'],axis=1) Tourney_Results_Compact<drop_column>
root = Path('../input')
Facial Keypoints Detection
3,593,820
Tourney_Results_Compact=Tourney_Results_Compact.drop(['WScore','LScore'],axis=1) Tourney_Results_Compact.head()<data_type_conversions>
id_lookup = pd.read_csv(root/'IdLookupTable.csv') train_csv = pd.read_csv(root/'training/training.csv') test_csv = pd.read_csv(root/'test/test.csv' )
Facial Keypoints Detection
3,593,820
Tourney_Results_Compact['WinningSeed'] = Tourney_Results_Compact['WinningSeed'].str.extract('(\d+)', expand=True) Tourney_Results_Compact['LoosingSeed'] = Tourney_Results_Compact['LoosingSeed'].str.extract('(\d+)', expand=True) Tourney_Results_Compact.WinningSeed = pd.to_numeric(Tourney_Results_Compact.WinningSeed, errors='coerce') Tourney_Results_Compact.LoosingSeed = pd.to_numeric(Tourney_Results_Compact.LoosingSeed, errors='coerce' )<rename_columns>
for c in train_csv.columns: if(train_csv[c].dtype!='object'): train_csv[c]=train_csv[c].fillna(train_csv[c].median() )
Facial Keypoints Detection
3,593,820
season_winning_team = RegularSeason_Compact_Results[['Season', 'WTeamID', 'WScore']] season_losing_team = RegularSeason_Compact_Results[['Season', 'LTeamID', 'LScore']] season_winning_team.rename(columns={'WTeamID':'TeamID','WScore':'Score'}, inplace=True) season_losing_team.rename(columns={'LTeamID':'TeamID','LScore':'Score'}, inplace=True) RegularSeason_Compact_Results = pd.concat(( season_winning_team, season_losing_team)).reset_index(drop=True) RegularSeason_Compact_Results<groupby>
def save_str_img(strimg,w,h,flpath): px=255-np.array(strimg.split() ,dtype=int) if(len(px)==w*h and len(px)%w==0 and len(px)%h==0): cpx = list(px.reshape(w,h)) img = Image(Tensor([cpx,cpx,cpx])) img.save(flpath) return img else: raise Exception("Invalid height and width" )
Facial Keypoints Detection
3,593,820
RegularSeason_Compact_Results_Final = RegularSeason_Compact_Results.groupby(['Season', 'TeamID'])['Score'].sum().reset_index() RegularSeason_Compact_Results_Final<merge>
for index, train_row in train_csv.iterrows() : save_str_img(train_row.Image,96,96,train_path/(str(index)+'.jpg'))
Facial Keypoints Detection
3,593,820
Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, RegularSeason_Compact_Results_Final, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Score':'WScoreTotal'}, inplace=True) Tourney_Results_Compact<save_to_csv>
for index, test_row in test_csv.iterrows() : save_str_img(test_row.Image,96,96,test_path/(str(index)+'.jpg'))
Facial Keypoints Detection
3,593,820
Tourney_Results_Compact = Tourney_Results_Compact.drop('TeamID', axis=1) Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, RegularSeason_Compact_Results_Final, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Score':'LScoreTotal'}, inplace=True) Tourney_Results_Compact = Tourney_Results_Compact.drop('TeamID', axis=1) Tourney_Results_Compact=Tourney_Results_Compact[Tourney_Results_Compact['Season'] < 2016] Tourney_Results_Compact<drop_column>
def get_locs(flname): index = int(flname.name[:-4]) plist=[] coords=list(train_csv.loc[index]) for i in range(len(coords)//2): plist.append([coords[i*2+1],coords[i*2]]) return tensor(plist)
Facial Keypoints Detection
3,593,820
Tourney_Win_Results=Tourney_Results_Compact.drop(['Season','WTeamID','LTeamID','DayNum'],axis=1) Tourney_Win_Results<rename_columns>
data.show_batch(3,figsize=(6,6))
Facial Keypoints Detection
3,593,820
Tourney_Win_Results.rename(columns={'WinningSeed':'Seed1', 'LoosingSeed':'Seed2', 'WScoreTotal':'ScoreT1', 'LScoreTotal':'ScoreT2'}, inplace=True )<prepare_output>
def mloss(y_true, y_pred): y_true=y_true.view(-1,15,2) y_true[:,:,0]=y_true[:,:,0].clone() -y_pred[:,:,0] y_true[:,:,1]=y_true[:,:,1].clone() -y_pred[:,:,1] y_true[:,:,0]=y_true[:,:,0].clone() **2 y_true[:,:,1]=y_true[:,:,1].clone() **2 return y_true.sum(dim=2 ).sum(dim=1 ).sum()
Facial Keypoints Detection
3,593,820
tourney_lose_result = Tourney_Win_Results.copy() tourney_lose_result['Seed1'] = Tourney_Win_Results['Seed2'] tourney_lose_result['Seed2'] = Tourney_Win_Results['Seed1'] tourney_lose_result['ScoreT1'] = Tourney_Win_Results['ScoreT2'] tourney_lose_result['ScoreT2'] = Tourney_Win_Results['ScoreT1'] tourney_lose_result<feature_engineering>
learn = cnn_learner(data,models.resnet152,loss_func=mloss )
Facial Keypoints Detection
3,593,820
Tourney_Win_Results['Seed_diff'] = Tourney_Win_Results['Seed1'] - Tourney_Win_Results['Seed2'] Tourney_Win_Results['ScoreT_diff'] = Tourney_Win_Results['ScoreT1'] - Tourney_Win_Results['ScoreT2'] tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2'] tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']<save_to_csv>
learn.fit_one_cycle(10 )
Facial Keypoints Detection
3,593,820
Tourney_Win_Results['result'] = 1 tourney_lose_result['result'] = 0 tourney_result_Final = pd.concat(( Tourney_Win_Results, tourney_lose_result)).reset_index(drop=True) tourney_result_Final.to_csv('Tourneyvalidate.csv', index=False )<drop_column>
learn.save('s1' )
Facial Keypoints Detection
3,593,820
tourney_result_Final1 = tourney_result_Final[[ 'Seed1', 'Seed2', 'ScoreT1', 'ScoreT2', 'Seed_diff', 'ScoreT_diff', 'result']]<feature_engineering>
learn.load('s1');
Facial Keypoints Detection
3,593,820
tourney_result_Final1.loc[lambda x:(x['Seed1'].isin([14,15,16])) &(x['Seed2'].isin([1,2,3])) ,'result' ] = 0 <load_from_csv>
learn.lr_find()
Facial Keypoints Detection
3,593,820
test_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')<feature_engineering>
learn.fit_one_cycle(30,5e-5 )
Facial Keypoints Detection
3,593,820
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4])) test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9])) test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14])) test_df<merge>
def flp(npa): for i in range(npa.shape[0]): if(i%2==1): tmp=npa[i] npa[i]=npa[i-1] npa[i-1]=tmp return npa
Facial Keypoints Detection