kernel_id     int64          24.2k to 23.3M
prompt        stringlengths  8 to 1.85M
completion    stringlengths  1 to 182k
comp_name     stringlengths  5 to 57
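For orientation, here is a minimal sketch of how records with the schema above could be loaded and inspected. It assumes the rows are exported as a Parquet file readable by pandas; the file name used below is illustrative, not part of the source.

import pandas as pd

# Illustrative file name; point this at the actual export of the dataset.
df = pd.read_parquet('kernel_prompts.parquet',
                     columns=['kernel_id', 'prompt', 'completion', 'comp_name'])

print(df.dtypes)                              # kernel_id: int64, the rest: object (strings)
print(df['comp_name'].value_counts().head())  # e.g. 'Facial Keypoints Detection'

# Each prompt ends with an operation tag such as <load_from_csv>;
# the completion holds the code written for that operation.
row = df.iloc[0]
print(row['kernel_id'], row['comp_name'])
print(row['prompt'][:300])
print(row['completion'][:300])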
15,039,251
players_df = pd.read_csv(datadir/'MPlayers.csv') players_df<load_from_csv>
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'] )
Facial Keypoints Detection
15,039,251
team_coaches_df = pd.read_csv(stage1dir/'MTeamCoaches.csv') print('team_coaches_df', team_coaches_df.shape) team_coaches_df.iloc[80:85]<load_from_csv>
ckp_filepath = 'trained_models/model' model_checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=ckp_filepath, monitor='val_mae', mode='auto', save_best_only=True, save_weights_only=True) reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(factor=0.9, monitor='val_mae', mode='auto', cooldown=0, patience=5, verbose=1, min_lr=1e-5 )
Facial Keypoints Detection
15,039,251
conferences_df = pd.read_csv(stage1dir/'Conferences.csv') team_conferences_df = pd.read_csv(stage1dir/'MTeamConferences.csv' )<filter>
EPOCHS = 300 BATCH_SIZE = 256 history = model.fit(train_images, train_labels, validation_data=(valid_images, valid_labels), batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=[model_checkpoint, reduce_lr] )
Facial Keypoints Detection
15,039,251
team_conferences_df[team_conferences_df['TeamID'] == 1102]<load_from_csv>
df_test = pd.read_csv('../input/facial-keypoints-detection/test.zip') df_test.head(1)
Facial Keypoints Detection
15,039,251
conference_tourney_games_df = pd.read_csv(stage1dir/'MConferenceTourneyGames.csv') conference_tourney_games_df<load_from_csv>
test_images = np.array(df_test['Image'].str.split().tolist() , dtype='int' ).reshape(-1, 96, 96, 1 )
Facial Keypoints Detection
15,039,251
secondary_tourney_teams_df = pd.read_csv(stage1dir/'MSecondaryTourneyTeams.csv') secondary_tourney_teams_df<load_from_csv>
normalized_test_images = test_images / 255.
Facial Keypoints Detection
15,039,251
secondary_tourney_results_df = pd.read_csv(stage1dir/'MSecondaryTourneyCompactResults.csv') secondary_tourney_results_df<load_from_csv>
model.load_weights(ckp_filepath) keypoints_predictions = model.predict(normalized_test_images, batch_size=BATCH_SIZE )
Facial Keypoints Detection
15,039,251
<load_from_csv>
idx = np.random.choice(16, 16) show_examples(test_images[idx], keypoints_predictions[idx] )
Facial Keypoints Detection
15,039,251
tourney_slots_df = pd.read_csv(stage1dir/'MNCAATourneySlots.csv') tourney_seed_round_slots_df = pd.read_csv(stage1dir/'MNCAATourneySeedRoundSlots.csv' )<filter>
lookup = pd.read_csv('../input/facial-keypoints-detection/IdLookupTable.csv') lookup.head()
Facial Keypoints Detection
15,039,251
<filter><EOS>
df_test_predictions = pd.DataFrame(keypoints_predictions, columns=target_cols) df_test_predictions.index += 1 lookup['Location'] = lookup.set_index(['ImageId', 'FeatureName'] ).index.map(df_test_predictions.stack() ).values timestamp = pd.to_datetime('today' ).floor('15min') lookup[['RowId', 'Location']].to_csv(f'predictions_{timestamp}.csv', index=False )
Facial Keypoints Detection
8,310,191
<SOS> metric: RMSE Kaggle data source: facial-keypoints-detection<feature_engineering>
%matplotlib inline
Facial Keypoints Detection
8,310,191
for key, row in tournament_results2015_df.iterrows() : if row['WTeamID'] < row['LTeamID']: id_name = str(row['Season'])+ '_' + str(row['WTeamID'])+ '_' + str(row['LTeamID']) sample_submission.loc[sample_submission['ID'] == id_name, 'Pred'] = 1.0 else: id_name = str(row['Season'])+ '_' + str(row['LTeamID'])+ '_' + str(row['WTeamID']) sample_submission.loc[sample_submission['ID'] == id_name, 'Pred'] = 0.0<save_to_csv>
def load_data(dir_path, filename): df = pd.read_csv(os.path.join(dir_path, filename)) if filename not in ['IdLookupTable.csv','SampleSubmission.csv']: df['Image'] = df['Image'].apply(lambda img: np.fromstring(img, sep=' ')) return df
Facial Keypoints Detection
8,310,191
sample_submission.to_csv('submission.csv', index=False )<define_variables>
def summary(df, test=False): dataname = 'test' if test else 'train' print("Length of %s data %d.. " %(dataname, len(df))) print("Count values for each variable: ") print(df.count(), ' ') print("How many variables have missing values? ") print(df.isnull().any().value_counts()) print() print("Percentage of missing values for each variable: ") summary_list = [100 - df[c].count()/len(df)*100 for c in df.columns] var_list = [var for var in df.columns] for i in range(len(summary_list)): print("{} : {}".format(var_list[i], np.round(summary_list[i], 2))) print()
Facial Keypoints Detection
8,310,191
YEAR = 2021 STAGE = 2 NCAAM = True NCAAW = False<define_variables>
def get_mean_cols(df): df_mean_cols = df[df.columns[:-1]].mean(axis = 0, skipna = True ).reset_index() mean_cols = df_mean_cols[0].mean(axis = 0, skipna = True) return round(mean_cols )
Facial Keypoints Detection
8,310,191
if NCAAM: DATA_DIR = f'../input/ncaam-march-mania-2021/MDataFiles_Stage{STAGE}/' if NCAAW: DATA_DIR = f'../input/ncaaw-march-mania-2021/WDataFiles_Stage{STAGE}/'<import_modules>
def scale_data(df, test=False, random_state=42): img = np.vstack(df['Image'].values)/ 255. img = img.astype(np.float32) mean_cols = get_mean_cols(df) if not test: keypoints = df[df.columns[:-1]].values keypoints =(keypoints - mean_cols)/ mean_cols keypoints = keypoints.astype(np.float32) else: keypoints = None return img, keypoints, mean_cols
Facial Keypoints Detection
8,310,191
import pandas as pd import numpy as np from sklearn.linear_model import LogisticRegression import matplotlib.pyplot as plt from sklearn.utils import shuffle from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold import lightgbm as lgb import xgboost as xgb from xgboost import XGBClassifier import gc<load_from_csv>
def _random_indices(inputs, ratio, random_state=1234): np.random.seed(random_state); actual_batchsize = inputs.shape[0] size = int(actual_batchsize * ratio) indices = np.random.choice(actual_batchsize, size, replace=False) return indices
Facial Keypoints Detection
8,310,191
if YEAR == 2020: tourney_result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyCompactResults.csv') tourney_seed = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv') elif YEAR == 2021: tourney_result = pd.read_csv(DATA_DIR + 'MNCAATourneyCompactResults.csv') tourney_seed = pd.read_csv(DATA_DIR + 'MNCAATourneySeeds.csv')<drop_column>
def rotate(y, inputs, targets, rotate_ratio, angle= None, right_left = 0): if angle is None: angle = np.random.randint(10) if right_left != 0: angle = 360 - angle for i in range(inputs.shape[0]): inputs[i, :, :, 0] = sk.rotate(inputs[i, :, :, 0], angle) angle = np.radians(angle) indices = np.arange(targets.shape[0]) R = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) targets = targets.reshape(len(targets), y.shape[1] // 2, 2) targets[indices] = np.dot(targets[indices], R) targets = targets.reshape(len(targets), y.shape[1]) return inputs, targets
Facial Keypoints Detection
8,310,191
tourney_result = tourney_result.drop(['DayNum', 'WScore', 'LScore', 'WLoc', 'NumOT'], axis=1) tourney_result<merge>
def flipp(inputs, targets, flip_ratio, flip_indices= None, random_seed=123): if flip_indices is None: flip_indices = [(0, 2),(1, 3),(4, 8),(5, 9),(6, 10),(7, 11), (12, 16),(13, 17),(14, 18),(15, 19),(22, 24), (23, 25)] for i in range(inputs.shape[0]): inputs[i, :, :, :] = inputs[i, :, ::-1, :] indices = np.arange(inputs.shape[0]) targets[indices, ::2] = targets[indices, ::2] * -1 for a, b in flip_indices: targets[indices, a], targets[indices, b] = targets[indices, b], targets[indices, a] return inputs, targets
Facial Keypoints Detection
8,310,191
tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Seed':'WSeed'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Seed':'LSeed'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result<categorify>
def shift_image(X,y,prop=0.1): X = X.reshape(-1,96,96) y = y.reshape(-1,30) for i in range(X.shape[0]): x_ = X[i] y_ = y[i] X[i],y[i] = shift_single_image(x_,y_,prop=prop) return(X,y )
Facial Keypoints Detection
8,310,191
def get_seed(x): return int(x[1:3]) tourney_result['WSeed'] = tourney_result['WSeed'].map(lambda x: get_seed(x)) tourney_result['LSeed'] = tourney_result['LSeed'].map(lambda x: get_seed(x)) tourney_result<load_from_csv>
def add_noise(inputs, noise_ratio=0.001): noisy_img = np.zeros(inputs.shape) for i in range(inputs.shape[0]): noise = np.random.randn(96,96,1) noisy_img[i] = inputs[i] + noise_ratio*noise return noisy_img
Facial Keypoints Detection
8,310,191
if YEAR == 2020: season_result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonCompactResults.csv') elif YEAR == 2021: season_result = pd.read_csv(DATA_DIR + 'MRegularSeasonCompactResults.csv')<concatenate>
def split_train_validation(X, y, size=0.3, random_state = 69): X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=size, random_state=random_state) print("Splitting data into train {} and validation {}".format(X_train.shape, X_validation.shape)) return X_train, X_validation, y_train, y_validation
Facial Keypoints Detection
8,310,191
season_win_result = season_result[['Season', 'WTeamID', 'WScore']] season_lose_result = season_result[['Season', 'LTeamID', 'LScore']] season_win_result.rename(columns={'WTeamID':'TeamID', 'WScore':'Score'}, inplace=True) season_lose_result.rename(columns={'LTeamID':'TeamID', 'LScore':'Score'}, inplace=True) season_result = pd.concat(( season_win_result, season_lose_result)).reset_index(drop=True) season_result<groupby>
train_data = load_data(dir_path, 'training.zip') test_data = load_data(dir_path, 'test.zip') idlookup = load_data(dir_path, 'IdLookupTable.csv') sample_submission = load_data(dir_path,'SampleSubmission.csv' )
Facial Keypoints Detection
8,310,191
season_score = season_result.groupby(['Season', 'TeamID'])['Score'].sum().reset_index() season_score<merge>
summary(test_data, test=True )
Facial Keypoints Detection
8,310,191
tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Score':'WScoreT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Score':'LScoreT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result<drop_column>
train_data_copy = train_data.copy() test_data_copy = test_data.copy()
Facial Keypoints Detection
8,310,191
tourney_win_result = tourney_result.drop(['Season', 'WTeamID', 'LTeamID'], axis=1) tourney_win_result.rename(columns={'WSeed':'Seed1', 'LSeed':'Seed2', 'WScoreT':'ScoreT1', 'LScoreT':'ScoreT2'}, inplace=True) tourney_win_result<prepare_output>
print("Missing values for the train set where simply NaNs were dropped: ") print(train_dropna.isnull().any().value_counts()) print() print("Missing values for the train set where NaNs filled using the forward technique: ") print(train_fill_nan.isnull().any().value_counts())
Facial Keypoints Detection
8,310,191
tourney_lose_result = tourney_win_result.copy() tourney_lose_result['Seed1'] = tourney_win_result['Seed2'] tourney_lose_result['Seed2'] = tourney_win_result['Seed1'] tourney_lose_result['ScoreT1'] = tourney_win_result['ScoreT2'] tourney_lose_result['ScoreT2'] = tourney_win_result['ScoreT1'] tourney_lose_result<feature_engineering>
X_train_dropna, y_train_dropna, mean_cols = scale_data(train_dropna) X_test_dropna, y_test_dropna, _ = scale_data(train_dropna, test=True) X_train_fill_nan, y_train_fill_nan, mean_cols = scale_data(train_fill_nan) X_test_fill_nan, y_test_fill_nan, _ = scale_data(train_fill_nan, test=True )
Facial Keypoints Detection
8,310,191
tourney_win_result['Seed_diff'] = tourney_win_result['Seed1'] - tourney_win_result['Seed2'] tourney_win_result['ScoreT_diff'] = tourney_win_result['ScoreT1'] - tourney_win_result['ScoreT2'] tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2'] tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']<concatenate>
flip_ratio = 0.8 rotate_ratio = 0.8 contrast_ratio = 1.2 random_seed = 342 angle = 9 use_flip_transf = True use_rotation_transf = False use_brightness_transf = False
Facial Keypoints Detection
8,310,191
tourney_win_result['result'] = 1 tourney_lose_result['result'] = 0 tourney_result = pd.concat(( tourney_win_result, tourney_lose_result)).reset_index(drop=True) tourney_result<load_from_csv>
aug_x_train = X_train_dropna.copy() aug_y_train = y_train_dropna.copy()
Facial Keypoints Detection
8,310,191
if YEAR == 2020: test_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') elif YEAR == 2021: test_df = pd.read_csv(DATA_DIR + f'MSampleSubmissionStage{STAGE}.csv')<feature_engineering>
if use_flip_transf: flipped_img, flipped_kepoints = flipp(aug_x_train, aug_y_train, flip_ratio, None, random_seed) print("Shape of flipped images {} and keypoints {}".format(flipped_img.shape, flipped_kepoints.shape))
Facial Keypoints Detection
8,310,191
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4])) test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9])) test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14])) test_df<merge>
aug_x_train = X_train_dropna.copy() aug_y_train = y_train_dropna.copy() use_rotation_transf = True if use_rotation_transf: rotated_img_l, rotated_keypoints_l = rotate(y_train_dropna, aug_x_train, aug_y_train, rotate_ratio, 9, 0 )
Facial Keypoints Detection
8,310,191
test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df<feature_engineering>
aug_x_train = X_train_dropna.copy() aug_y_train = y_train_dropna.copy() if use_rotation_transf: rotated_img_r, rotated_keypoints_r = rotate(y_train_dropna, aug_x_train, aug_y_train, rotate_ratio, 9, 1) print("Shape of rotated images {} and keypoints {}".format(rotated_img_r.shape, rotated_keypoints_r.shape))
Facial Keypoints Detection
8,310,191
test_df['Seed1'] = test_df['Seed1'].map(lambda x: get_seed(x)) test_df['Seed2'] = test_df['Seed2'].map(lambda x: get_seed(x)) test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2'] test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2'] test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1) test_df<prepare_x_and_y>
aug_x_train = X_train_dropna.copy() aug_y_train = y_train_dropna.copy() use_brightness_transf = True if use_brightness_transf: inc_brightness_images = brightness(aug_x_train) print("Shape of brightened images {} ".format(inc_brightness_images.shape))
Facial Keypoints Detection
8,310,191
X = tourney_result.drop('result', axis=1) y = tourney_result.result<init_hyperparams>
aug_x_train = X_train_dropna.copy() aug_y_train = y_train_dropna.copy()
Facial Keypoints Detection
8,310,191
params_lgb = {'num_leaves': 127, 'min_data_in_leaf': 10, 'objective': 'binary', 'max_depth': -1, 'learning_rate': 0.01, "boosting_type": "gbdt", "bagging_seed": 11, "metric": 'logloss', "verbosity": 0 } params_xgb = {'colsample_bytree': 0.8, 'learning_rate': 0.0003, 'max_depth': 31, 'subsample': 1, 'objective':'binary:logistic', 'eval_metric':'logloss', 'min_child_weight':3, 'gamma':0.25, 'n_estimators':5000, 'verbosity':0 }<split>
shifted_img, shifted_keypoints = shift_image(aug_x_train, aug_y_train, prop=0.1) shifted_img = shifted_img[:,:,:,np.newaxis] print("Shape of shifted images {} ".format(shifted_img.shape))
Facial Keypoints Detection
8,310,191
NFOLDS = 5 folds = KFold(n_splits=NFOLDS) columns = X.columns splits = folds.split(X, y) y_preds_lgb = np.zeros(test_df.shape[0]) y_oof_lgb = np.zeros(X.shape[0]) for fold_n,(train_index, valid_index)in enumerate(splits): X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] dtrain = lgb.Dataset(X_train, label=y_train) dvalid = lgb.Dataset(X_valid, label=y_valid) clf = lgb.train(params_lgb, dtrain, 10000, valid_sets = [dtrain, dvalid], verbose_eval=200) y_pred_valid = clf.predict(X_valid) y_oof_lgb[valid_index] = y_pred_valid y_preds_lgb += clf.predict(test_df)/ NFOLDS del X_train, X_valid, y_train, y_valid gc.collect()<split>
aug_x_train = X_train_dropna.copy() noisy_img = add_noise(aug_x_train) print("Shape of noisy images {} ".format(noisy_img.shape))
Facial Keypoints Detection
8,310,191
NFOLDS = 10 folds = KFold(n_splits=NFOLDS) columns = X.columns splits = folds.split(X, y) y_preds_xgb = np.zeros(test_df.shape[0]) y_oof_xgb = np.zeros(X.shape[0]) for fold_n,(train_index, valid_index)in enumerate(splits): X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] train_set = xgb.DMatrix(X_train, y_train) val_set = xgb.DMatrix(X_valid, y_valid) test_set = xgb.DMatrix(test_df) clf = xgb.train(params_xgb, train_set,num_boost_round=5000, evals=[(train_set, 'train'),(val_set, 'val')], early_stopping_rounds=100, verbose_eval=100) y_preds_xgb += clf.predict(test_set)/ NFOLDS del X_train, X_valid, y_train, y_valid gc.collect()<load_from_csv>
aug_x_train_ffill = X_train_fill_nan.copy().reshape(( -1, 96,96,1)) aug_y_train_ffill = y_train_fill_nan.copy() aug_x_train = X_train_dropna.copy().reshape(( -1, 96,96,1)) aug_y_train = y_train_dropna.copy() aug_x_train = np.concatenate(( aug_x_train, flipped_img, rotated_img_r, rotated_img_l, inc_brightness_images, shifted_img, noisy_img)) aug_y_train = np.concatenate(( aug_y_train, flipped_kepoints, rotated_keypoints_r, rotated_keypoints_l, aug_y_train, shifted_keypoints, aug_y_train)) print("Number of images in the new train dataset using data augmentation :{} {} ".format(aug_x_train.shape, aug_y_train.shape))
Facial Keypoints Detection
8,310,191
if YEAR == 2020: submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') elif YEAR == 2021: submission_df = pd.read_csv(DATA_DIR + f'MSampleSubmissionStage{STAGE}.csv') submission_df['Pred'] = 0.9*y_preds_lgb + 0.1*y_preds_xgb submission_df<save_to_csv>
x_train_dna, x_validation_dna, y_train_dna, y_validation_dna = split_train_validation(X_train_dropna, y_train_dropna) x_train_ffill, x_validation_ffill, y_train_ffill, y_validation_ffill = split_train_validation(X_train_fill_nan, y_train_fill_nan) x_train_da, x_validation_da, y_train_da, y_validation_da = split_train_validation(aug_x_train, aug_y_train, 0.1 )
Facial Keypoints Detection
8,310,191
submission_df.to_csv('submission.csv', index=False )<import_modules>
Facial Keypoints Detection
8,310,191
import pandas as pd import numpy as np from sklearn.linear_model import LogisticRegression import matplotlib.pyplot as plt from sklearn.utils import shuffle from sklearn.model_selection import GridSearchCV<load_from_csv>
model_06_01 = Sequential() model_06_01.add(Convolution2D(filters=64, kernel_size=(3,3), padding='same', input_shape=(96,96,1))) model_06_01.add(Activation('relu')) model_06_01.add(Dropout(0.1)) model_06_01.add(SeparableConv2D(filters=256, kernel_size=(3,3), padding='same', use_bias=False)) model_06_01.add(Activation('relu')) model_06_01.add(MaxPooling2D(pool_size=(2, 2))) model_06_01.add(Dropout(0.1)) model_06_01.add(SeparableConv2D(filters=256, kernel_size=(3,3), padding='same', use_bias=False)) model_06_01.add(Activation('relu')) model_06_01.add(BatchNormalization()) model_06_01.add(MaxPooling2D(pool_size=(2, 2))) model_06_01.add(Dropout(0.25)) model_06_01.add(SeparableConv2D(filters=256, kernel_size=(3,3), padding='same', use_bias=False)) model_06_01.add(Activation('relu')) model_06_01.add(MaxPooling2D(pool_size=(2, 2))) model_06_01.add(Dropout(0.25)) model_06_01.add(Flatten()) model_06_01.add(Dense(512,activation='relu')) model_06_01.add(Dropout(0.5)) model_06_01.add(Dense(30)) model_06_01.summary() model_06_01.compile(optimizer = 'adam',loss = 'mean_squared_error', metrics=['mae', 'acc'] )
Facial Keypoints Detection
8,310,191
tourney_result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyCompactResults.csv') tourney_seed = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv')<drop_column>
callbacks = [ EarlyStopping(monitor='val_loss', patience=15, mode='min',restore_best_weights=True, verbose=1), ModelCheckpoint(filepath = 'best_model_06_01.hdf5', monitor='val_mae', verbose=1, save_best_only=True, mode='min') ] hist_06_01 = model_06_01.fit(x_train_da, y_train_da, epochs= 80, batch_size=128, validation_data=(x_validation_da, y_validation_da), callbacks=callbacks, verbose=1 )
Facial Keypoints Detection
8,310,191
tourney_result = tourney_result.drop(['DayNum', 'WScore', 'LScore', 'WLoc', 'NumOT'], axis=1) tourney_result<merge>
Facial Keypoints Detection
8,310,191
tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Seed':'WSeed'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Seed':'LSeed'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result<categorify>
Facial Keypoints Detection
8,310,191
def get_seed(x): return int(x[1:3]) tourney_result['WSeed'] = tourney_result['WSeed'].map(lambda x: get_seed(x)) tourney_result['LSeed'] = tourney_result['LSeed'].map(lambda x: get_seed(x)) tourney_result<load_from_csv>
test_,_, _ = scale_data(test_data_copy, True) test_img = reshape_data(test_ )
Facial Keypoints Detection
8,310,191
season_result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonCompactResults.csv')<concatenate>
best_model = load_model('best_model_06_01.hdf5') pred = best_model.predict(test_img )
Facial Keypoints Detection
8,310,191
season_win_result = season_result[['Season', 'WTeamID', 'WScore']] season_lose_result = season_result[['Season', 'LTeamID', 'LScore']] season_win_result.rename(columns={'WTeamID':'TeamID', 'WScore':'Score'}, inplace=True) season_lose_result.rename(columns={'LTeamID':'TeamID', 'LScore':'Score'}, inplace=True) season_result = pd.concat(( season_win_result, season_lose_result)).reset_index(drop=True) season_result<groupby>
feature_name = list(idlookup['FeatureName']) image_id = list(idlookup['ImageId']-1) row_id = list(idlookup['RowId']) feature_list = [] for feature in feature_name: feature_list.append(feature_name.index(feature)) predictions = [] for x,y in zip(image_id, feature_list): predictions.append(pred[x][y]) row_id = pd.Series(row_id, name = 'RowId') locations = pd.Series(predictions, name = 'Location') locations = locations*mean_cols +mean_cols submission_result = pd.concat([row_id,locations],axis = 1) submission_result.to_csv('best_perf_15_1600.csv',index = False )
Facial Keypoints Detection
3,331,229
season_score = season_result.groupby(['Season', 'TeamID'])['Score'].sum().reset_index() season_score<merge>
train = pd.read_csv('../input/training/training.csv')
Facial Keypoints Detection
3,331,229
tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Score':'WScoreT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Score':'LScoreT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result<drop_column>
print('size of training data {}'.format(len(train))) print('Missing value col ') print(train.isnull().any().value_counts()) train.isnull().sum().sort_values(ascending=False)
Facial Keypoints Detection
3,331,229
tourney_win_result = tourney_result.drop(['Season', 'WTeamID', 'LTeamID'], axis=1) tourney_win_result.rename(columns={'WSeed':'Seed1', 'LSeed':'Seed2', 'WScoreT':'ScoreT1', 'LScoreT':'ScoreT2'}, inplace=True) tourney_win_result<prepare_output>
train.fillna(method='ffill',inplace=True) train.isnull().any().value_counts()
Facial Keypoints Detection
3,331,229
tourney_lose_result = tourney_win_result.copy() tourney_lose_result['Seed1'] = tourney_win_result['Seed2'] tourney_lose_result['Seed2'] = tourney_win_result['Seed1'] tourney_lose_result['ScoreT1'] = tourney_win_result['ScoreT2'] tourney_lose_result['ScoreT2'] = tourney_win_result['ScoreT1'] tourney_lose_result<feature_engineering>
image_list=[] for i in train['Image']: i=i.split(' ') image_list.append(i) len(image_list)
Facial Keypoints Detection
3,331,229
tourney_win_result['Seed_diff'] = tourney_win_result['Seed1'] - tourney_win_result['Seed2'] tourney_win_result['ScoreT_diff'] = tourney_win_result['ScoreT1'] - tourney_win_result['ScoreT2'] tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2'] tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']<concatenate>
image_list = np.array(object=image_list,dtype=float )
Facial Keypoints Detection
3,331,229
tourney_win_result['result'] = 1 tourney_lose_result['result'] = 0 tourney_result = pd.concat(( tourney_win_result, tourney_lose_result)).reset_index(drop=True) tourney_result<load_from_csv>
y_train=train.drop(labels='Image',axis=1) y_train.shape
Facial Keypoints Detection
3,331,229
test_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv')<feature_engineering>
X_train=X_train/255 X_train[1]
Facial Keypoints Detection
3,331,229
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4])) test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9])) test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14])) test_df<merge>
model= tf.keras.models.Sequential( layers=[ tf.keras.layers.Conv2D(filters=32,kernel_size=(3,3),activation=tf.nn.relu,input_shape=(96,96,1)) , tf.keras.layers.MaxPool2D(2,2), tf.keras.layers.Conv2D(filters=32,kernel_size=(3,3),activation=tf.nn.relu,input_shape=(96,96,1)) , tf.keras.layers.MaxPool2D(2,2), tf.keras.layers.Conv2D(filters=64,kernel_size=(3,3),activation=tf.nn.relu,input_shape=(96,96,1)) , tf.keras.layers.MaxPool2D(2,2), tf.keras.layers.Flatten() , tf.keras.layers.Dense(units=526,activation='relu'), tf.keras.layers.Dense(units=526,activation='relu'), tf.keras.layers.Dropout(0.3), tf.keras.layers.Dense(units=30,activation='relu') ] )
Facial Keypoints Detection
3,331,229
test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df<feature_engineering>
model.compile(optimizer='adam', loss='mse', metrics=['acc'] )
Facial Keypoints Detection
3,331,229
test_df['Seed1'] = test_df['Seed1'].map(lambda x: get_seed(x)) test_df['Seed2'] = test_df['Seed2'].map(lambda x: get_seed(x)) test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2'] test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2'] test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1) test_df<prepare_x_and_y>
hist=model.fit(x=X_train,y=y_train,batch_size=128,epochs=200,verbose=2,validation_split=0.2) hist
Facial Keypoints Detection
3,331,229
X = tourney_result.drop('result', axis=1) y = tourney_result.result<init_hyperparams>
from sklearn.metrics import r2_score
Facial Keypoints Detection
3,331,229
params = {'num_leaves': 400, 'min_child_weight': 0.034, 'feature_fraction': 0.379, 'bagging_fraction': 0.418, 'min_data_in_leaf': 106, 'objective': 'binary', 'max_depth': 50, 'learning_rate': 0.0068, "boosting_type": "gbdt", "bagging_seed": 11, "metric": 'logloss', "verbosity": -1, 'reg_alpha': 0.3899, 'reg_lambda': 0.648, 'random_state': 47, }<prepare_x_and_y>
y_pred =model.predict(X_train) score = r2_score(y_train,y_pred) score
Facial Keypoints Detection
3,331,229
NFOLDS = 50 folds = KFold(n_splits=NFOLDS) columns = X.columns splits = folds.split(X, y) y_preds = np.zeros(test_df.shape[0]) y_oof = np.zeros(X.shape[0]) feature_importances = pd.DataFrame() feature_importances['feature'] = columns for fold_n,(train_index, valid_index)in enumerate(splits): print('Fold:',fold_n+1) X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] dtrain = lgb.Dataset(X_train, label=y_train) dvalid = lgb.Dataset(X_valid, label=y_valid) clf = lgb.train(params, dtrain, 10000, valid_sets = [dtrain, dvalid], verbose_eval=200) feature_importances[f'fold_{fold_n + 1}'] = clf.feature_importance() y_pred_valid = clf.predict(X_valid) y_oof[valid_index] = y_pred_valid y_preds += clf.predict(test_df)/ NFOLDS del X_train, X_valid, y_train, y_valid gc.collect()<load_from_csv>
test = pd.read_csv('../input/test/test.csv')
Facial Keypoints Detection
3,331,229
submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') submission_df['Pred'] = y_preds submission_df<save_to_csv>
y=np.arange(1,501 )
Facial Keypoints Detection
3,331,229
submission_df.to_csv('submission.csv', index=False )<save_to_csv>
image_list=[] for i in test['Image']: i=i.split(' ') image_list.append(i) len(image_list )
Facial Keypoints Detection
3,331,229
feature_importances['average'] = feature_importances[[f'fold_{fold_n + 1}' for fold_n in range(folds.n_splits)]].mean(axis=1) feature_importances.to_csv('feature_importances.csv') plt.figure(figsize=(8, 8)) sns.barplot(data=feature_importances.sort_values(by='average', ascending=False ).head(50), x='average', y='feature'); plt.title('50 TOP feature importance over {} folds average'.format(folds.n_splits)) ;<import_modules>
image_list=np.array(image_list,dtype=float) images=image_list.reshape(-1,96,96,1) X_test =images/255.0
Facial Keypoints Detection
3,331,229
import os import json import numpy as np import pandas as pd import keras from keras import layers from keras.applications import DenseNet121 from keras.callbacks import Callback, ModelCheckpoint from keras.preprocessing.image import ImageDataGenerator from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.models import Sequential from keras.utils.vis_utils import plot_model from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score<data_type_conversions>
predicted_value =model.predict(X_test )
Facial Keypoints Detection
3,331,229
x_train = np.load('../input/reducing-image-sizes-to-32x32/X_train.npy') x_test = np.load('../input/reducing-image-sizes-to-32x32/X_test.npy') y_train = np.load('../input/reducing-image-sizes-to-32x32/y_train.npy') print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255. x_test /= 255.<compute_train_metric>
pv =pd.DataFrame(data=predicted_value) img_show(image_list,pv )
Facial Keypoints Detection
3,331,229
class Metrics(Callback): def on_train_begin(self, logs={}): self.val_f1s = [] self.val_recalls = [] self.val_precisions = [] def on_epoch_end(self, epoch, logs={}): X_val, y_val = self.validation_data[:2] y_pred = self.model.predict(X_val) y_pred_cat = keras.utils.to_categorical( y_pred.argmax(axis=1), num_classes=14 ) _val_f1 = f1_score(y_val, y_pred_cat, average='macro') _val_recall = recall_score(y_val, y_pred_cat, average='macro') _val_precision = precision_score(y_val, y_pred_cat, average='macro') self.val_f1s.append(_val_f1) self.val_recalls.append(_val_recall) self.val_precisions.append(_val_precision) print(( f"val_f1: {_val_f1:.4f}" f" — val_precision: {_val_precision:.4f}" f" — val_recall: {_val_recall:.4f}")) return f1_metrics = Metrics()<choose_model_class>
pred = model.predict(X_test) lookid_data = pd.read_csv('../input/IdLookupTable.csv') lookid_list = list(lookid_data['FeatureName']) imageID = list(lookid_data['ImageId']-1) pre_list = list(pred) rowid = lookid_data['RowId'] rowid=list(rowid) feature = [] for f in list(lookid_data['FeatureName']): feature.append(lookid_list.index(f)) preded = [] for x,y in zip(imageID,feature): preded.append(pre_list[x][y]) rowid = pd.Series(rowid,name = 'RowId') loc = pd.Series(preded,name = 'Location') submission = pd.concat([rowid,loc],axis = 1) submission.to_csv('submision.csv',index = False)
Facial Keypoints Detection
3,331,229
<choose_model_class><EOS>
df=pd.read_csv('submision.csv' )
Facial Keypoints Detection
7,630,243
<SOS> metric: RMSE Kaggle data source: facial-keypoints-detection<train_model>
%matplotlib inline
Facial Keypoints Detection
7,630,243
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) history = model.fit( x=x_train, y=y_train, batch_size=256, epochs=30, callbacks=[checkpoint, f1_metrics], validation_split=0.2 )<save_to_csv>
horizontal_flip = False rotation_augmentation = True brightness_augmentation = True shift_augmentation = True random_noise_augmentation = True include_unclean_data = True sample_image_index = 20 rotation_angles = [12] pixel_shifts = [12] NUM_EPOCHS = 80 BATCH_SIZE = 64
Facial Keypoints Detection
7,630,243
model.load_weights('model.h5') y_test = model.predict(x_test) submission_df = pd.read_csv('../input/iwildcam-2019-fgvc6/sample_submission.csv') submission_df['Predicted'] = y_test.argmax(axis=1) print(submission_df.shape) submission_df.head() submission_df.to_csv('submission.csv',index=False)<set_options>
print("Contents of input/facial-keypoints-detection directory: ") !ls.. /input/facial-keypoints-detection/ print(" Extracting.zip dataset files to working directory...") !unzip -u.. /input/facial-keypoints-detection/test.zip !unzip -u.. /input/facial-keypoints-detection/training.zip print(" Current working directory:") !pwd print(" Contents of working directory:") !ls
Facial Keypoints Detection
7,630,243
warnings.filterwarnings('ignore' )<load_from_csv>
%%time train_file = 'training.csv' test_file = 'test.csv' idlookup_file = '../input/facial-keypoints-detection/IdLookupTable.csv' train_data = pd.read_csv(train_file) test_data = pd.read_csv(test_file) idlookup_data = pd.read_csv(idlookup_file)
Facial Keypoints Detection
7,630,243
%%time train_df = pd.read_csv(os.path.join(PATH, 'train.csv')) test_df = pd.read_csv(os.path.join(PATH, 'test.csv'))<prepare_x_and_y>
print("Length of train data: {}".format(len(train_data))) print("Number of Images with missing pixel values: {}".format(len(train_data)- int(train_data.Image.apply(lambda x: len(x.split())).value_counts().values)) )
Facial Keypoints Detection
7,630,243
x_train = np.load('../input/reducing-image-sizes-to-32x32/X_train.npy') x_test = np.load('../input/reducing-image-sizes-to-32x32/X_test.npy') y_train = np.load('../input/reducing-image-sizes-to-32x32/y_train.npy')<define_variables>
train_data.isnull().sum()
Facial Keypoints Detection
7,630,243
classes_wild = {0: 'empty', 1: 'deer', 2: 'moose', 3: 'squirrel', 4: 'rodent', 5: 'small_mammal', \ 6: 'elk', 7: 'pronghorn_antelope', 8: 'rabbit', 9: 'bighorn_sheep', 10: 'fox', 11: 'coyote', \ 12: 'black_bear', 13: 'raccoon', 14: 'skunk', 15: 'wolf', 16: 'bobcat', 17: 'cat',\ 18: 'dog', 19: 'opossum', 20: 'bison', 21: 'mountain_goat', 22: 'mountain_lion'}<feature_engineering>
%%time def load_images(image_data): images = [] for idx, sample in image_data.iterrows() : image = np.array(sample['Image'].split(' '), dtype=int) image = np.reshape(image,(96,96,1)) images.append(image) images = np.array(images)/255. return images def load_keypoints(keypoint_data): keypoint_data = keypoint_data.drop('Image',axis = 1) keypoint_features = [] for idx, sample_keypoints in keypoint_data.iterrows() : keypoint_features.append(sample_keypoints) keypoint_features = np.array(keypoint_features, dtype = 'float') return keypoint_features clean_train_images = load_images(clean_train_data) print("Shape of clean_train_images: {}".format(np.shape(clean_train_images))) clean_train_keypoints = load_keypoints(clean_train_data) print("Shape of clean_train_keypoints: {}".format(np.shape(clean_train_keypoints))) test_images = load_images(test_data) print("Shape of test_images: {}".format(np.shape(test_images))) train_images = clean_train_images train_keypoints = clean_train_keypoints fig, axis = plt.subplots() plot_sample(clean_train_images[sample_image_index], clean_train_keypoints[sample_image_index], axis, "Sample image & keypoints") if include_unclean_data: unclean_train_images = load_images(unclean_train_data) print("Shape of unclean_train_images: {}".format(np.shape(unclean_train_images))) unclean_train_keypoints = load_keypoints(unclean_train_data) print("Shape of unclean_train_keypoints: {} ".format(np.shape(unclean_train_keypoints))) train_images = np.concatenate(( train_images, unclean_train_images)) train_keypoints = np.concatenate(( train_keypoints, unclean_train_keypoints))
Facial Keypoints Detection
7,630,243
train_df['classes_wild'] = train_df['category_id'].apply(lambda cw: classes_wild[cw] )<define_variables>
def left_right_flip(images, keypoints): flipped_keypoints = [] flipped_images = np.flip(images, axis=2) for idx, sample_keypoints in enumerate(keypoints): flipped_keypoints.append([96.-coor if idx%2==0 else coor for idx,coor in enumerate(sample_keypoints)]) return flipped_images, flipped_keypoints if horizontal_flip: flipped_train_images, flipped_train_keypoints = left_right_flip(clean_train_images, clean_train_keypoints) print("Shape of flipped_train_images: {}".format(np.shape(flipped_train_images))) print("Shape of flipped_train_keypoints: {}".format(np.shape(flipped_train_keypoints))) train_images = np.concatenate(( train_images, flipped_train_images)) train_keypoints = np.concatenate(( train_keypoints, flipped_train_keypoints)) fig, axis = plt.subplots() plot_sample(flipped_train_images[sample_image_index], flipped_train_keypoints[sample_image_index], axis, "Horizontally Flipped" )
Facial Keypoints Detection
7,630,243
train_image_files = list(os.listdir(os.path.join(PATH,'train_images'))) test_image_files = list(os.listdir(os.path.join(PATH,'test_images'))) print("Number of image files: train:{} test:{}".format(len(train_image_files), len(test_image_files)) )<train_model>
def rotate_augmentation(images, keypoints): rotated_images = [] rotated_keypoints = [] print("Augmenting for angles(in degrees): ") for angle in rotation_angles: for angle in [angle,-angle]: print(f'{angle}', end=' ') M = cv2.getRotationMatrix2D(( 48,48), angle, 1.0) angle_rad = -angle*pi/180. for image in images: rotated_image = cv2.warpAffine(image, M,(96,96), flags=cv2.INTER_CUBIC) rotated_images.append(rotated_image) for keypoint in keypoints: rotated_keypoint = keypoint - 48. for idx in range(0,len(rotated_keypoint),2): rotated_keypoint[idx] = rotated_keypoint[idx]*cos(angle_rad)-rotated_keypoint[idx+1]*sin(angle_rad) rotated_keypoint[idx+1] = rotated_keypoint[idx]*sin(angle_rad)+rotated_keypoint[idx+1]*cos(angle_rad) rotated_keypoint += 48. rotated_keypoints.append(rotated_keypoint) return np.reshape(rotated_images,(-1,96,96,1)) , rotated_keypoints if rotation_augmentation: rotated_train_images, rotated_train_keypoints = rotate_augmentation(clean_train_images, clean_train_keypoints) print(" Shape of rotated_train_images: {}".format(np.shape(rotated_train_images))) print("Shape of rotated_train_keypoints: {} ".format(np.shape(rotated_train_keypoints))) train_images = np.concatenate(( train_images, rotated_train_images)) train_keypoints = np.concatenate(( train_keypoints, rotated_train_keypoints)) fig, axis = plt.subplots() plot_sample(rotated_train_images[sample_image_index], rotated_train_keypoints[sample_image_index], axis, "Rotation Augmentation" )
Facial Keypoints Detection
7,630,243
%%time train_file_names = list(train_df['file_name']) print("Matching train image names: {}".format(len(set(train_file_names ).intersection(train_image_files))))<train_model>
def alter_brightness(images, keypoints): altered_brightness_images = [] inc_brightness_images = np.clip(images*1.2, 0.0, 1.0) dec_brightness_images = np.clip(images*0.6, 0.0, 1.0) altered_brightness_images.extend(inc_brightness_images) altered_brightness_images.extend(dec_brightness_images) return altered_brightness_images, np.concatenate(( keypoints, keypoints)) if brightness_augmentation: altered_brightness_train_images, altered_brightness_train_keypoints = alter_brightness(clean_train_images, clean_train_keypoints) print(f"Shape of altered_brightness_train_images: {np.shape(altered_brightness_train_images)}") print(f"Shape of altered_brightness_train_keypoints: {np.shape(altered_brightness_train_keypoints)}") train_images = np.concatenate(( train_images, altered_brightness_train_images)) train_keypoints = np.concatenate(( train_keypoints, altered_brightness_train_keypoints)) fig, axis = plt.subplots() plot_sample(altered_brightness_train_images[sample_image_index], altered_brightness_train_keypoints[sample_image_index], axis, "Increased Brightness") fig, axis = plt.subplots() plot_sample(altered_brightness_train_images[len(altered_brightness_train_images)//2+sample_image_index], altered_brightness_train_keypoints[len(altered_brightness_train_images)//2+sample_image_index], axis, "Decreased Brightness" )
Facial Keypoints Detection
7,630,243
%%time test_file_names = list(test_df['file_name']) print("Matching test image names: {}".format(len(set(test_file_names ).intersection(test_image_files))))<count_unique_values>
model = Sequential() model.add(Convolution2D(32,(3,3), padding='same', use_bias=False, input_shape=(96,96,1))) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(32,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(64,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(64,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(96,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(96,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(128,(3,3),padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(128,(3,3),padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(256,(3,3),padding='same',use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(256,(3,3),padding='same',use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Convolution2D(512,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Convolution2D(512,(3,3), padding='same', use_bias=False)) model.add(LeakyReLU(alpha = 0.1)) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dense(512,activation='relu')) model.add(Dropout(0.1)) model.add(Dense(30)) model.summary()
Facial Keypoints Detection
7,630,243
cnt_classes_images = train_df.classes_wild.nunique() print("There are {} classes of images".format(cnt_classes_images)) pd.DataFrame(train_df.classes_wild.value_counts() ).transpose()<feature_engineering>
%%time if os.path.exists('../input/data-augmentation-for-facial-keypoint-detection/best_model.hdf5'): model = load_model('../input/data-augmentation-for-facial-keypoint-detection/best_model.hdf5') checkpointer = ModelCheckpoint(filepath = 'best_model.hdf5', monitor='val_mae', verbose=1, save_best_only=True, mode='min') model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae', 'acc']) history = model.fit(train_images, train_keypoints, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, validation_split=0.05, callbacks=[checkpointer])
Facial Keypoints Detection
7,630,243
try: train_df['date_time'] = pd.to_datetime(train_df['date_captured'], errors='coerce') train_df["year"] = train_df['date_time'].dt.year train_df["month"] = train_df['date_time'].dt.month train_df["day"] = train_df['date_time'].dt.day train_df["hour"] = train_df['date_time'].dt.hour train_df["minute"] = train_df['date_time'].dt.minute except Exception as ex: print("Exception:".format(ex))<define_variables>
%%time checkpointer = ModelCheckpoint(filepath = 'best_model.hdf5', monitor='mae', verbose=1, save_best_only=True, mode='min') model.fit(train_images, train_keypoints, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, callbacks=[checkpointer] )
Facial Keypoints Detection
7,630,243
IMAGE_PATH = os.path.join(PATH,'train_images/') draw_category_images('classes_wild' )<data_type_conversions>
%%time model = load_model('best_model.hdf5') test_preds = model.predict(test_images )
Facial Keypoints Detection
7,630,243
x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255. x_test /= 255.<compute_train_metric>
feature_names = list(idlookup_data['FeatureName']) image_ids = list(idlookup_data['ImageId']-1) row_ids = list(idlookup_data['RowId']) feature_list = [] for feature in feature_names: feature_list.append(feature_names.index(feature)) predictions = [] for x,y in zip(image_ids, feature_list): predictions.append(test_preds[x][y]) row_ids = pd.Series(row_ids, name = 'RowId') locations = pd.Series(predictions, name = 'Location') locations = locations.clip(0.0,96.0) submission_result = pd.concat([row_ids,locations],axis = 1) submission_result.to_csv('submission.csv',index = False )
Facial Keypoints Detection
5,157,700
train_data.isnull().sum()<count_missing_values>
batch_size = 32
empty
5,157,700
test_data.isnull().sum()<count_values>
start = time()
empty
5,157,700
train_data['IsBadBuy'].value_counts()<count_values>
def seed_everything(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything()
empty
5,157,700
train_data['Model'].value_counts()<drop_column>
class DataGenerator(Dataset): def __init__(self, directory, transform=None, n_samples=np.inf): self.directory = directory self.transform = transform self.n_samples = n_samples self.samples = self._load_subfolders_images(directory) if len(self.samples)== 0: raise RuntimeError("Found 0 files in subfolders of: {}".format(directory)) def _load_subfolders_images(self, root): IMG_EXTENSIONS =( '.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp') def is_valid_file(x): return torchvision.datasets.folder.has_file_allowed_extension(x, IMG_EXTENSIONS) required_transforms = torchvision.transforms.Compose([ torchvision.transforms.Resize(64), torchvision.transforms.CenterCrop(64), ]) imgs = [] paths = [] for root, _, fnames in sorted(os.walk(root)) : for fname in sorted(fnames)[:min(self.n_samples, 999999999999999)]: path = os.path.join(root, fname) paths.append(path) for path in paths: if is_valid_file(path): img = dset.folder.default_loader(path) annotation_basename = os.path.splitext(os.path.basename(path)) [0] annotation_dirname = next( dirname for dirname in os.listdir('.. /input/annotation/Annotation/')if dirname.startswith(annotation_basename.split('_')[0])) annotation_filename = os.path.join('.. /input/annotation/Annotation/', annotation_dirname, annotation_basename) tree = ET.parse(annotation_filename) root = tree.getroot() objects = root.findall('object') for o in objects: bndbox = o.find('bndbox') xmin = int(bndbox.find('xmin' ).text) ymin = int(bndbox.find('ymin' ).text) xmax = int(bndbox.find('xmax' ).text) ymax = int(bndbox.find('ymax' ).text) w = np.min(( xmax - xmin, ymax - ymin)) bbox =(xmin, ymin, xmin+w, ymin+w) object_img = required_transforms(img.crop(bbox)) imgs.append(object_img) return imgs def __getitem__(self, index): sample = self.samples[index] if self.transform is not None: sample = self.transform(sample) return np.asarray(sample) def __len__(self): return len(self.samples)
empty
5,157,700
train_data.drop('Model',axis=1,inplace=True) test_data.drop("Model",axis=1,inplace=True )<count_values>
%%time database = '../input/all-dogs/all-dogs/' transform = transforms.Compose([transforms.RandomHorizontalFlip(p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5),(0.5, 0.5, 0.5)) ]) train_data = DataGenerator(database, transform=transform,n_samples=25000) train_loader = torch.utils.data.DataLoader(train_data, shuffle=True,batch_size=batch_size, num_workers = 4)
empty
5,157,700
train_data['Trim'].value_counts()<drop_column>
class PixelwiseNorm(nn.Module): def __init__(self): super(PixelwiseNorm, self ).__init__() def forward(self, x, alpha=1e-8): y = x.pow(2.).mean(dim=1, keepdim=True ).add(alpha ).sqrt() y = x / y return y
empty
5,157,700
train_data.drop('Trim',inplace=True,axis=1) test_data.drop('Trim',inplace=True,axis=1 )<count_values>
class MinibatchStdDev(th.nn.Module): def __init__(self): super(MinibatchStdDev, self ).__init__() def forward(self, x, alpha=1e-8): batch_size, _, height, width = x.shape y = x - x.mean(dim=0, keepdim=True) y = th.sqrt(y.pow(2.).mean(dim=0, keepdim=False)+ alpha) y = y.mean().view(1, 1, 1, 1) y = y.repeat(batch_size,1, height, width) y = th.cat([x, y], 1) return y
empty
5,157,700
<drop_column><EOS>
class Generator(nn.Module): def __init__(self, nz, nfeats, nchannels): super(Generator, self ).__init__() self.conv1 = spectral_norm(nn.ConvTranspose2d(nz, nfeats * 8, 4, 1, 0, bias=False)) self.conv2 = spectral_norm(nn.ConvTranspose2d(nfeats * 8, nfeats * 8, 4, 2, 1, bias=False)) self.conv3 = spectral_norm(nn.ConvTranspose2d(nfeats * 8, nfeats * 4, 4, 2, 1, bias=False)) self.conv4 = spectral_norm(nn.ConvTranspose2d(nfeats * 4, nfeats * 2, 4, 2, 1, bias=False)) self.conv5 = spectral_norm(nn.ConvTranspose2d(nfeats * 2, nfeats, 4, 2, 1, bias=False)) self.conv6 = spectral_norm(nn.ConvTranspose2d(nfeats, nchannels, 3, 1, 1, bias=False)) self.pixnorm = PixelwiseNorm() def forward(self, x): x = F.leaky_relu(self.conv1(x)) x = self.conv2(x) x = F.leaky_relu(self.pixnorm(x)) x = self.conv3(x) x = F.leaky_relu(self.pixnorm(x)) x = self.conv4(x) x = F.leaky_relu(self.pixnorm(x)) x = self.conv5(x) x = F.leaky_relu(self.pixnorm(x)) x = torch.tanh(self.conv6(x)) return x class Discriminator(nn.Module): def __init__(self, nchannels, nfeats): super(Discriminator, self ).__init__() self.conv1 = nn.Conv2d(nchannels, nfeats, 4, 2, 1, bias=False) self.conv2 = spectral_norm(nn.Conv2d(nfeats, nfeats * 2, 4, 2, 1, bias=False)) self.bn2 = nn.BatchNorm2d(nfeats * 2) self.conv3 = spectral_norm(nn.Conv2d(nfeats * 2, nfeats * 4, 4, 2, 1, bias=False)) self.bn3 = nn.BatchNorm2d(nfeats * 4) self.conv4 = spectral_norm(nn.Conv2d(nfeats * 4, nfeats * 8, 4, 2, 1, bias=False)) self.bn4 = nn.MaxPool2d(2) self.batch_discriminator = MinibatchStdDev() self.pixnorm = PixelwiseNorm() self.conv5 = spectral_norm(nn.Conv2d(nfeats * 8 +1, 1, 2, 1, 0, bias=False)) def forward(self, x): x = F.leaky_relu(self.conv1(x), 0.2) x = F.leaky_relu(self.bn2(self.conv2(x)) , 0.2) x = F.leaky_relu(self.bn3(self.conv3(x)) , 0.2) x = F.leaky_relu(self.bn4(self.conv4(x)) , 0.2) x = self.batch_discriminator(x) x = torch.sigmoid(self.conv5(x)) return x.view(-1, 1) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") lr = 0.0002 lr_d = 0.0002 beta1 = 0.5 epochs = 1600 netG = Generator(100, 32, 3 ).to(device) netD = Discriminator(3, 48 ).to(device) criterion = nn.BCELoss() optimizerD = optim.Adam(netD.parameters() , lr=lr, betas=(beta1, 0.999)) optimizerG = optim.Adam(netG.parameters() , lr=lr_d, betas=(beta1, 0.999)) lr_schedulerG = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizerG, T_0=epochs//200, eta_min=0.00005) lr_schedulerD = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizerD, T_0=epochs//200, eta_min=0.00005) nz = 100 fixed_noise = torch.randn(25, nz, 1, 1, device=device) real_label = 0.7 fake_label = 0.0 batch_size = train_loader.batch_size step = 0 for epoch in range(epochs): for ii,(real_images)in tqdm(enumerate(train_loader), total=len(train_loader)) : end = time() if(end -start)> 15000 : break netD.zero_grad() real_images = real_images.to(device) batch_size = real_images.size(0) labels = torch.full(( batch_size, 1), real_label, device=device)+ np.random.uniform(-0.1, 0.1) output = netD(real_images) errD_real = criterion(output, labels) errD_real.backward() D_x = output.mean().item() noise = torch.randn(batch_size, nz, 1, 1, device=device) fake = netG(noise) labels.fill_(fake_label)+ np.random.uniform(0, 0.2) output = netD(fake.detach()) errD_fake = criterion(output, labels) errD_fake.backward() D_G_z1 = output.mean().item() errD = errD_real + errD_fake optimizerD.step() if step % 5 != 0: netG.zero_grad() labels.fill_(real_label) output = netD(fake) errG = criterion(output, labels) errG.backward() 
D_G_z2 = output.mean().item() optimizerG.step() if step % 5 == 0: netG.zero_grad() labels.fill_(fake_label) output = netD(fake) errG = criterion(output, labels) errG.backward() D_G_z2 = output.mean().item() optimizerG.step() if step % 500 == 0: print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)) : %.4f / %.4f' %(epoch + 1, epochs, ii, len(train_loader), errD.item() , errG.item() , D_x, D_G_z1, D_G_z2)) valid_image = netG(fixed_noise) step += 1 lr_schedulerG.step(epoch) lr_schedulerD.step(epoch) if epoch % 200 == 0: show_generated_img() def truncated_normal(size, threshold=1): values = truncnorm.rvs(-threshold, threshold, size=size) return values if not os.path.exists('.. /output_images'): os.mkdir('.. /output_images') im_batch_size = 100 n_images=10000 for i_batch in range(0, n_images, im_batch_size): z = truncated_normal(( im_batch_size, 100, 1, 1), threshold=1) gen_z = torch.from_numpy(z ).float().to(device) gen_images = netG(gen_z) images = gen_images.to("cpu" ).clone().detach() images = images.numpy().transpose(0, 2, 3, 1) for i_image in range(gen_images.size(0)) : save_image(( gen_images[i_image, :, :, :] +1.0)/2.0, os.path.join('.. /output_images', f'image_{i_batch+i_image:05d}.png')) shutil.make_archive('images', 'zip', '.. /output_images' )
empty
10,493,342
<SOS> metric: auc Kaggle data source: summer-analytics-2020-capstone-project<count_values>
%matplotlib inline plt.style.use('seaborn-whitegrid') warnings.filterwarnings('ignore' )
Summer Analytics 2020 Capstone Project
10,493,342
train_data['Color'].value_counts()<count_values>
df=pd.read_csv('../input/summeranalytics2020/train.csv') df.shape
Summer Analytics 2020 Capstone Project
10,493,342
test_data['Color'].value_counts()<data_type_conversions>
pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) df.head()
Summer Analytics 2020 Capstone Project
10,493,342
train_data['Color'].fillna(value='Color_Unknown',inplace=True) test_data['Color'].fillna(value='Color_Unknown',inplace=True )<count_missing_values>
test_df=pd.read_csv('../input/summeranalytics2020/test.csv') print(test_df.shape)
Summer Analytics 2020 Capstone Project
10,493,342
print("Number of null values in Color column "+str(train_data['Color'].isnull().sum())) print("Number of null values in Color column "+str(test_data['Color'].isnull().sum()))<count_values>
pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) test_df.head()
Summer Analytics 2020 Capstone Project
10,493,342
train_data['Transmission'].value_counts()<count_values>
df_combine=pd.concat([df,test_df],axis=0, sort = False,ignore_index = True) df_combine.shape
Summer Analytics 2020 Capstone Project
10,493,342
test_data['Transmission'].value_counts()<filter>
ordinal_catg_col = ["Education", "EnvironmentSatisfaction", "JobInvolvement","JobSatisfaction", "PerformanceRating","StockOptionLevel","CommunicationSkill", "Behaviour" ] print(ordinal_catg_col )
Summer Analytics 2020 Capstone Project
10,493,342
train_data[train_data['Transmission']=='Manual']<rename_columns>
df_BusinessTravel_one_hot = pd.get_dummies(df_combine['BusinessTravel'], prefix='BusinessTravel') df_Department_one_hot = pd.get_dummies(df_combine['Department'], prefix='Department') df_EducationField_one_hot = pd.get_dummies(df_combine['EducationField'], prefix='EducationField') df_Gender_one_hot = pd.get_dummies(df_combine['Gender'], prefix='Gender',drop_first=True) df_JobRole_one_hot = pd.get_dummies(df_combine['JobRole'], prefix='JobRole') df_MaritalStatus_one_hot = pd.get_dummies(df_combine['MaritalStatus'], prefix='MaritalStatus') df_OverTime_one_hot = pd.get_dummies(df_combine['OverTime'], prefix='OverTime',drop_first=True )
Summer Analytics 2020 Capstone Project
10,493,342
train_data['Transmission'].replace("Manual","MANUAL",inplace=True )<count_values>
df_nominal_catg = pd.concat([df_BusinessTravel_one_hot, df_Department_one_hot, df_EducationField_one_hot, df_Gender_one_hot, df_JobRole_one_hot, df_MaritalStatus_one_hot, df_OverTime_one_hot], axis=1) print(df_nominal_catg.shape) df_nominal_catg.head()
Summer Analytics 2020 Capstone Project
10,493,342
train_data['Transmission'].value_counts()<data_type_conversions>
final_catg_col = nominal_catg_col + ordinal_catg_col + ["Attrition"] df_numeric = df_combine.drop(final_catg_col , axis=1) df_numeric.shape
Summer Analytics 2020 Capstone Project
10,493,342
train_data['Transmission'].fillna(value="Transmission_unk",inplace=True) test_data['Transmission'].fillna(value="Transmission_unk",inplace=True )<count_values>
numeric_col = list(df_numeric.columns) scaler = MinMaxScaler(feature_range=(0, 1)) df_numeric = scaler.fit_transform(df_numeric) df_numeric = pd.DataFrame(df_numeric, columns= numeric_col )
Summer Analytics 2020 Capstone Project
10,493,342
train_data['WheelTypeID'].value_counts()<drop_column>
df_pre_process = pd.concat([df_numeric, df_ordinal_catg.reset_index(drop=True), df_nominal_catg.reset_index(drop=True)],axis=1,sort = False) print(df_pre_process.shape) df_pre_process.head()
Summer Analytics 2020 Capstone Project