Google Cloud & NCAA® ML Competition 2020-NCAAW
seeds = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MNCAATourneySeeds.csv') seeds = pd.concat( [seeds.query('Season == @season and TeamID in @teams')for season, teams in tourney_teams.items() ]) seeds = seeds.set_index(['Season', 'TeamID'] ).sort_index() seeds['Region'] = seeds['Seed'].str[0] seeds['Number'] = seeds['Seed'].str[1:3].astype(int) del seeds['Seed']<load_from_csv>
test_df['Seed1'] = test_df['Seed1'].map(lambda x: get_seed(x)) test_df['Seed2'] = test_df['Seed2'].map(lambda x: get_seed(x)) test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2'] test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2'] test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1) test_df
regular = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MRegularSeasonDetailedResults.csv') regular = regular.drop(columns=['DayNum', 'LTeamID']) regular = pd.concat( [regular.query('Season == @season and WTeamID in @teams')for season, teams in tourney_teams.items() ]) regular = regular.groupby(['Season', 'WTeamID'] ).sum() regular = regular.rename_axis(index=['Season', 'TeamID'] )<concatenate>
X = train_df.drop('result', axis=1) y = train_df.result
ctcsr = pd.concat([coaches, teams, conferences, seeds, regular], axis=1 )<load_from_csv>
params_lgb = {'num_leaves': lgb_num_leaves_max, 'min_data_in_leaf': lgb_in_leaf, 'objective': 'binary', 'max_depth': -1, 'learning_rate': lgb_lr, "boosting_type": "gbdt", "bagging_seed": lgb_bagging, "metric": 'binary_logloss', "verbosity": -1, 'random_state': 42, }
result = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MNCAATourneyCompactResults.csv') result = result[result['Season'] >= 2015].set_index(['Season', 'WTeamID', 'LTeamID'] )<concatenate>
NFOLDS = 5 folds = KFold(n_splits=NFOLDS) columns = X.columns splits = folds.split(X, y) y_preds_lgb = np.zeros(test_df.shape[0]) y_train_lgb = np.zeros(X.shape[0]) y_oof = np.zeros(X.shape[0]) feature_importances = pd.DataFrame() feature_importances['feature'] = columns for fold_n,(train_index, valid_index)in enumerate(splits): print('Fold:',fold_n+1) X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] dtrain = lgb.Dataset(X_train, label=y_train) dvalid = lgb.Dataset(X_valid, label=y_valid) clf = lgb.train(params_lgb, dtrain, 10000, valid_sets = [dtrain, dvalid], verbose_eval=200) feature_importances[f'fold_{fold_n + 1}'] = clf.feature_importance() y_pred_valid = clf.predict(X_valid) y_oof[valid_index] = y_pred_valid y_train_lgb += clf.predict(X)/ NFOLDS y_preds_lgb += clf.predict(test_df)/ NFOLDS del X_train, X_valid, y_train, y_valid gc.collect()
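Since the competition metric is log loss, it can help to score the out-of-fold predictions produced by the loop above; a minimal sketch added here (not part of the original notebook), assuming y and y_oof are still in scope:
from sklearn.metrics import log_loss
print('LightGBM OOF log loss:', log_loss(y, y_oof))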
merged_teams = pd.concat( [ctcsr.loc[[(season, wteam),(season, lteam)], :] for season, wteam, lteam, in result.index]) team1 = merged_teams.iloc[::2, :].reset_index('TeamID') team2 = merged_teams.iloc[1::2, :].reset_index('TeamID') merged_teams = pd.concat([ pd.concat([team1.add_suffix('1'), team2.add_suffix('2')], axis=1 ).assign(Res=1), pd.concat([team2.add_suffix('1'), team1.add_suffix('2')], axis=1 ).assign(Res=0), ] ).reset_index().set_index(['Season', 'TeamID1', 'TeamID2'] ).sort_index()<normalization>
params_xgb = {'max_depth': xgb_max_depth, 'objective': 'binary:logistic', 'min_child_weight': xgb_min_child_weight, 'learning_rate': xgb_lr, 'eta' : 0.3, 'subsample': 0.8, 'eval_metric': 'logloss', 'colsample_bylevel': 1 }
x_columns = merged_teams.columns[merged_teams.columns != 'Res'] X = merged_teams[x_columns] columns_number = X.select_dtypes(include='number' ).columns X.loc[:, columns_number] = MinMaxScaler().fit_transform(X[columns_number]) X = pd.get_dummies(X, columns=x_columns[X.dtypes == 'object']) X<prepare_x_and_y>
NFOLDS = 5 folds = KFold(n_splits=NFOLDS) columns = X.columns splits = folds.split(X, y) y_preds_xgb = np.zeros(test_df.shape[0]) y_train_xgb = np.zeros(X.shape[0]) y_oof_xgb = np.zeros(X.shape[0]) train_df_set = xgb.DMatrix(X) test_set = xgb.DMatrix(test_df) for fold_n,(train_index, valid_index)in enumerate(splits): print('Fold:',fold_n+1) X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] train_set = xgb.DMatrix(X_train, y_train) val_set = xgb.DMatrix(X_valid, y_valid) clf = xgb.train(params_xgb, train_set, num_boost_round=xgb_num_boost_round_max, evals=[(train_set, 'train'),(val_set, 'val')], verbose_eval=100) y_train_xgb += clf.predict(train_df_set)/ NFOLDS y_preds_xgb += clf.predict(test_set)/ NFOLDS del X_train, X_valid, y_train, y_valid gc.collect()
y = merged_teams['Res'] y<choose_model_class>
%%time scaler = StandardScaler() train_log = pd.DataFrame( scaler.fit_transform(X), columns=X.columns, index=X.index ) test_log = pd.DataFrame( scaler.transform(test_df), columns=test_df.columns, index=test_df.index )
clfs = {} clfs['SVC'] = { 'instance': SVC(probability=True), 'params': [ {'kernel': ['linear'], 'C': [0.01, 0.05, 0.1, 0.5, 1]}, {'kernel': ['rbf'], 'C': [1, 10, 50, 100, 250], 'gamma': [0.1, 0.2, 0.3]} ] } clfs['RandomForestClassifier'] = { 'instance': RandomForestClassifier(n_jobs=-1), 'params': { 'n_estimators': [25, 50, 100], 'criterion': ['gini', 'entropy'], 'max_depth': [10, 25, 50, None] } } clfs['LogisticRegression'] = { 'instance': LogisticRegression(max_iter=200, n_jobs=-1), 'params': [ {'penalty': ['l2'], 'C': [0.1, 0.5, 1, 5, 10]}, {'penalty': ['l1'], 'solver': ['liblinear', 'saga'], 'C': [0.1, 0.5, 1, 5, 10]}, {'penalty': ['elasticnet'], 'solver': ['saga'], 'C': [0.1, 0.5, 1, 5, 10], 'l1_ratio': [0.1, 0.3, 0.5, 0.7, 0.9]} ] }<train_on_grid>
logreg = LogisticRegression() logreg.fit(train_log, y) coeff_logreg = pd.DataFrame(train_log.columns) coeff_logreg.columns = ['feature'] coeff_logreg["score_logreg"] = pd.Series(logreg.coef_[0]) coeff_logreg.sort_values(by='score_logreg', ascending=False )
for clf_name, clf in clfs.items() : print('<{}>'.format(clf_name)) print(' training...') gs = GridSearchCV(clf['instance'], param_grid=clf['params'], cv=5, n_jobs=-1) gs.fit(X, y) print(' best_score: {:.3f}'.format(gs.best_score_)) print(' best_params: {}'.format(gs.best_params_)) clfs[clf_name]['best_params'] = gs.best_params_<train_model>
eli5.show_weights(logreg )
for clf_name, clf in clfs.items() : clf['best_estimator'] = clf['instance'].set_params(**clf['best_params'] ).fit(X, y )<find_best_params>
y_logreg_train = logreg.predict(train_log) y_logreg_pred = logreg.predict(test_log )
[clf['best_estimator'] for clf in clfs.values() ]<train_on_grid>
def plot_cm(y_true, y_pred, title, figsize=(7,6)) : y_pred = y_pred.round().astype(int) cm = confusion_matrix(y_true, y_pred, labels=np.unique(y_true)) cm_sum = np.sum(cm, axis=1, keepdims=True) cm_perc = cm / cm_sum.astype(float)* 100 annot = np.empty_like(cm ).astype(str) nrows, ncols = cm.shape for i in range(nrows): for j in range(ncols): c = cm[i, j] p = cm_perc[i, j] if i == j: s = cm_sum[i] annot[i, j] = '%.1f%% %d/%d' %(p, c, s) elif c == 0: annot[i, j] = '' else: annot[i, j] = '%.1f%% %d' %(p, c) cm = pd.DataFrame(cm, index=np.unique(y_true), columns=np.unique(y_true)) cm.index.name = 'Actual' cm.columns.name = 'Predicted' fig, ax = plt.subplots(figsize=figsize) plt.title(title) sns.heatmap(cm, cmap= "YlGnBu", annot=annot, fmt='', ax=ax )
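A minimal usage sketch for the plot_cm helper above, assuming the training labels y and the LightGBM out-of-fold predictions y_oof from the earlier cross-validation loop are still in scope (the call itself is illustrative, not from the original notebook):
plot_cm(y, y_oof, 'LightGBM out-of-fold confusion matrix')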
vote = VotingClassifier( estimators=[(clf_name, clf['best_estimator'])for clf_name, clf in clfs.items() ], voting='soft', n_jobs=-1 ) vote.fit(X, y) clfs['Vote'] = {} clfs['Vote']['best_estimator'] = vote<compute_test_metric>
y_preds = w_lgb*y_preds_lgb + w_xgb*y_preds_xgb + w_logreg*y_logreg_pred
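The blend weights w_lgb, w_xgb and w_logreg used above are defined in a cell that does not appear in this dump; a hedged sketch with purely illustrative values (any non-negative weights summing to 1 would do):
w_lgb, w_xgb, w_logreg = 0.5, 0.3, 0.2  # assumed example values, not the notebook's actual weights
y_preds = w_lgb * y_preds_lgb + w_xgb * y_preds_xgb + w_logreg * y_logreg_pred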
for clf_name, clf in clfs.items() : score = accuracy_score(y, clf['best_estimator'].predict(X)) print(clf_name, score )<predict_on_test>
sub['Pred'] = y_preds sub.head()
predict = pd.DataFrame( { **{'pp_' + clf_name: clf['best_estimator'].predict_proba(X)[:, 1] for clf_name, clf in clfs.items() }, **{'p_' + clf_name: clf['best_estimator'].predict(X)for clf_name, clf in clfs.items() } }, index=X.index) predict<feature_engineering>
sub.to_csv('submission.csv', index=False )
for column in predict.columns: sub[column] = 0.5 mask = [idx for idx in sub.index if idx in X.index] sub.loc[mask, predict.columns] = predict.loc[mask, predict.columns] sub<save_to_csv>
sub.to_csv('submission.csv', index=False )
for column in predict.columns: sub[['ID', column]].rename(columns={column: 'pred'} ).to_csv('{}.csv'.format(column), index=False )<import_modules>
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV from sklearn import preprocessing import lightgbm as lgb import optuna import glob
import os from pathlib import Path import numpy as np import pandas as pd from sklearn.model_selection import cross_validate, GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.metrics import accuracy_score<define_variables>
path = '../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/' Files = 'WDataFiles_Stage1/'
FILEDIR = Path('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament' )<load_from_csv>
TourneyCompactResults = pd.read_csv(path + Files + 'WNCAATourneyCompactResults.csv') GameCities = pd.read_csv(path+Files+'WGameCities.csv') Seasons = pd.read_csv(path+Files+'WSeasons.csv') TourneySeeds = pd.read_csv(path+Files+'WNCAATourneySeeds.csv') RegularSeasonCompactResults = pd.read_csv(path+Files+'WRegularSeasonCompactResults.csv' )
sub = pd.read_csv(FILEDIR / 'MSampleSubmissionStage1_2020.csv') id_splited = sub['ID'].str.split('_', expand=True ).astype(int ).rename(columns={0: 'Season', 1: 'Team1', 2: 'Team2'}) sub = pd.concat([sub, id_splited], axis=1 ).set_index(['Season', 'Team1', 'Team2'] ).sort_index()<count_duplicates>
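Each ID in the sample submission has the form Season_Team1_Team2, which is what the string split above relies on; a quick sanity check added here (not part of the original notebook), assuming sub has just been built:
assert list(sub.index.names) == ['Season', 'Team1', 'Team2']
sub.head()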
test= pd.read_csv(path +'WSampleSubmissionStage1_2020.csv' )
tourney_teams = {} tourney_teams_all = set() for season in sub.index.get_level_values('Season' ).drop_duplicates() : tourney_teams[season] = set() tourney_teams[season].update(sub.loc[season].index.get_level_values('Team1')) tourney_teams[season].update(sub.loc[season].index.get_level_values('Team2')) tourney_teams_all.update(tourney_teams[season]) {k: len(v)for k, v in tourney_teams.items() }<load_from_csv>
TourneySeeds['Seed'] = TourneySeeds['Seed'].apply(lambda x: int(x[1:3])) print(TourneySeeds.shape) TourneySeeds.head()
conferences = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeamConferences.csv') conferences = pd.concat( [conferences.query('Season == @season and TeamID in @teams')for season, teams in tourney_teams.items() ]) conferences = conferences.set_index(['Season', 'TeamID'] ).sort_index()<load_from_csv>
train = train.replace({'H':0,'A':1,'N':2}) train.head()
coaches = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeamCoaches.csv') coaches = pd.concat( [coaches.query('Season == @season and TeamID in @team')for season, team in tourney_teams.items() ]) coaches = coaches[coaches['LastDayNum'] == 154].set_index(['Season', 'TeamID'] ).sort_index() [['CoachName']]<load_from_csv>
le = preprocessing.LabelEncoder() for column in ['CRType']: le.fit(GameCities[column]) GameCities[column] = le.transform(GameCities[column]) GameCities.head()
teams = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeams.csv', usecols=['TeamID', 'FirstD1Season']) teams['FirstD1Season'] = 2020 - teams['FirstD1Season'] teams = pd.concat( [teams.query('TeamID in @team' ).assign(Season=season)for season, team in tourney_teams.items() ]) teams = teams.set_index(['Season', 'TeamID'] ).sort_index()<load_from_csv>
le = preprocessing.LabelEncoder() for column in ['RegionW','RegionX','RegionY','RegionZ']: le.fit(Seasons[column]) Seasons[column] = le.transform(Seasons[column]) day_zero = Seasons['DayZero'].str.split('/', expand=True) Seasons['ZeroMonth'] = day_zero[0].astype(int) Seasons['ZeroDay'] = day_zero[1].astype(int) Seasons['ZeroYear'] = day_zero[2].astype(int) Seasons = Seasons.drop('DayZero',axis=1) Seasons.head()
seeds = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MNCAATourneySeeds.csv') seeds = pd.concat( [seeds.query('Season == @season and TeamID in @teams')for season, teams in tourney_teams.items() ]) seeds = seeds.set_index(['Season', 'TeamID'] ).sort_index() seeds['Region'] = seeds['Seed'].str[0] seeds['Number'] = seeds['Seed'].str[1:3].astype(int) del seeds['Seed']<load_from_csv>
train = train.merge(Seasons, how='left',on=['Season']) train.head()
regular = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MRegularSeasonDetailedResults.csv') regular = regular.drop(columns=['DayNum', 'LTeamID']) regular = pd.concat( [regular.query('Season == @season and WTeamID in @teams')for season, teams in tourney_teams.items() ]) regular = regular.groupby(['Season', 'WTeamID'] ).sum() regular = regular.rename_axis(index=['Season', 'TeamID'] )<concatenate>
train = train.merge(TourneySeeds, how='left', left_on=['Season', 'WTeamID'], right_on=['Season','TeamID']) train = train.drop('TeamID',axis=1) train = train.rename(columns={'Seed': 'WSeed'}) train = train.merge(TourneySeeds, how='left', left_on=['Season', 'LTeamID'], right_on=['Season','TeamID']) train = train.drop('TeamID',axis=1) train = train.rename(columns={'Seed': 'LSeed'}) train.head()
ctcsr = pd.concat([coaches, teams, conferences, seeds, regular], axis=1 )<load_from_csv>
test = test.drop(['Pred'], axis=1) test['Season'] = test['ID'].apply(lambda x: int(x.split('_')[0])) test['WTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[1])) test['LTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[2])) test.head()
result = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MNCAATourneyCompactResults.csv') result = result[result['Season'] >= 2015].set_index(['Season', 'WTeamID', 'LTeamID'] )<concatenate>
test = test.merge(TourneyCompactResults,how='left',on=['Season','WTeamID','LTeamID'] )
merged_teams = pd.concat( [ctcsr.loc[[(season, wteam),(season, lteam)], :] for season, wteam, lteam, in result.index]) team1 = merged_teams.iloc[::2, :].reset_index('TeamID') team2 = merged_teams.iloc[1::2, :].reset_index('TeamID') merged_teams = pd.concat([ pd.concat([team1.add_suffix('1'), team2.add_suffix('2')], axis=1 ).assign(Res=1), pd.concat([team2.add_suffix('1'), team1.add_suffix('2')], axis=1 ).assign(Res=0), ] ).reset_index().set_index(['Season', 'TeamID1', 'TeamID2'] ).sort_index()<categorify>
test = test.replace({'H':0,'A':1,'N':2}) test = test.merge(Seasons, how='left',on=['Season']) test = test.merge(TourneySeeds, how='left', left_on=['Season', 'WTeamID'], right_on=['Season','TeamID']) test = test.drop('TeamID',axis=1) test = test.rename(columns={'Seed': 'WSeed'}) test = test.merge(TourneySeeds, how='left', left_on=['Season', 'LTeamID'], right_on=['Season','TeamID']) test = test.drop('TeamID',axis=1) test = test.rename(columns={'Seed': 'LSeed'})
x_columns = merged_teams.columns[merged_teams.columns != 'Res'] X = merged_teams[x_columns] columns_number = X.select_dtypes(include='number' ).columns X = pd.get_dummies(X, columns=x_columns[X.dtypes == 'object']) X<prepare_x_and_y>
test.merge(test,how='left',on=['ID','Season','WTeamID','LTeamID'] )
y = merged_teams['Res'] y<train_on_grid>
not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist() ] print(not_exist_in_test) train = train.drop(not_exist_in_test, axis=1) train.head()
params = { 'n_estimators': [25, 50, 100], 'criterion': ['gini', 'entropy'], 'max_depth': [10, 25, 50, None] } gs = GridSearchCV(RandomForestClassifier(n_jobs=-1, random_state=0), param_grid=params, cv=8, n_jobs=-1) gs.fit(X, y )<train_model>
team_win_score = RegularSeasonCompactResults.groupby(['Season', 'WTeamID'] ).agg({'WScore':['sum', 'count', 'var']} ).reset_index() team_win_score.columns = [' '.join(col ).strip() for col in team_win_score.columns.values] team_loss_score = RegularSeasonCompactResults.groupby(['Season', 'LTeamID'] ).agg({'LScore':['sum', 'count', 'var']} ).reset_index() team_loss_score.columns = [' '.join(col ).strip() for col in team_loss_score.columns.values] print(team_win_score.shape) team_win_score.head()
clf = RandomForestClassifier(n_jobs=-1, random_state=0, **gs.best_params_) clf.fit(X, y) accuracy_score(y, clf.predict(X))<predict_on_test>
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID']) train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID']) train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID']) train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID']) train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True) train.head()
predict = pd.DataFrame(clf.predict_proba(X), index=X.index, columns=clf.classes_) predict<save_to_csv>
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID']) test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID']) test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID']) test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID']) test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True) test.head()
sub.to_csv('RandomForest.csv', index=False )<set_options>
def preprocess(df): df['x_score'] = df['WScore sum_x'] + df['LScore sum_y'] df['y_score'] = df['WScore sum_y'] + df['LScore sum_x'] df['x_count'] = df['WScore count_x'] + df['LScore count_y'] df['y_count'] = df['WScore count_y'] + df['LScore count_x'] df['x_var'] = df['WScore var_x'] + df['LScore var_y'] df['y_var'] = df['WScore var_y'] + df['LScore var_x'] return df train = preprocess(train) test = preprocess(test) test.shape
%reload_ext autoreload %autoreload 2 %matplotlib inline<import_modules>
train_win = train.copy() train_los = train.copy() train_win = train_win[['WSeed', 'LSeed', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']] train_los = train_los[['LSeed', 'WSeed', 'y_score', 'x_score', 'x_count', 'y_count', 'x_var', 'y_var']] train_win.columns = ['Seed_1', 'Seed_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2'] train_los.columns = ['Seed_1', 'Seed_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2'] test = test[['ID', 'WSeed', 'LSeed', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']] test.columns = ['ID', 'Seed_1', 'Seed_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
from fastai import * from fastai.vision import * import pandas as pd<set_options>
def feature_engineering(df): df['Seed_diff'] = df['Seed_1'] - df['Seed_2'] df['Score_diff'] = df['Score_1'] - df['Score_2'] df['Count_diff'] = df['Count_1'] - df['Count_2'] df['Var_diff'] = df['Var_1'] - df['Var_2'] df['Mean_score1'] = df['Score_1'] / df['Count_1'] df['Mean_score2'] = df['Score_2'] / df['Count_2'] df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2'] df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1'] df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2'] return df train_win = feature_engineering(train_win) train_los = feature_engineering(train_los) test = feature_engineering(test) test.shape
print('Make sure cuda is installed:', torch.cuda.is_available()) print('Make sure cudnn is enabled:', torch.backends.cudnn.enabled )<define_variables>
train_win["result"] = 1 print(train_win.shape) train_win.head()
path = Path('../input' )<load_from_csv>
data = pd.concat(( train_win, train_los)).reset_index(drop=True) print(data.shape) data.head()
train_df = pd.read_csv(path/'train.csv') train_df = pd.concat([train_df['id'],train_df['category_id']],axis=1,keys=['id','category_id']) train_df.head()<load_from_csv>
test = test.drop(['ID'],axis=1) test.head()
test_df = pd.read_csv(path/'test.csv') test_df = pd.DataFrame(test_df['id']) test_df['predicted'] = 0 test_df.head()<define_variables>
from sklearn.linear_model import LogisticRegression import matplotlib.pyplot as plt from sklearn.utils import shuffle from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold import lightgbm as lgb import xgboost as xgb from xgboost import XGBClassifier
data.show_batch()<choose_model_class>
y_train=data['result'] X_train=data.drop(columns='result' )
learn = cnn_learner(data, base_arch=models.densenet121, metrics=[FBeta() ,accuracy], wd=1e-5 ).mixup()<train_model>
params_lgb = {'num_leaves': 400, 'min_child_weight': 0.034, 'feature_fraction': 0.379, 'bagging_fraction': 0.418, 'min_data_in_leaf': 106, 'objective': 'binary', 'max_depth': 50, 'learning_rate': 0.0068, "boosting_type": "gbdt", "bagging_seed": 11, "metric": 'binary_logloss', "verbosity": -1, 'reg_alpha': 0.3899, 'reg_lambda': 0.648, 'random_state': 47, } params_xgb = {'colsample_bytree': 0.8, 'learning_rate': 0.0004, 'max_depth': 31, 'subsample': 1, 'objective':'binary:logistic', 'eval_metric':'logloss', 'min_child_weight':3, 'gamma':0.25, 'n_estimators':5000 }
lr = 2e-2 learn.fit_one_cycle(2, slice(lr))<save_model>
NFOLDS = 10 folds = KFold(n_splits=NFOLDS) columns = X_train.columns splits = folds.split(X_train, y_train )
learn.save('stage-1-sz32' )<train_model>
y_preds_lgb = np.zeros(test.shape[0]) y_oof_lgb = np.zeros(X_train.shape[0])
lr = 1e-3 learn.fit_one_cycle(4, slice(lr/100, lr))<save_model>
for fold_n,(train_index, valid_index)in enumerate(splits): print('Fold:',fold_n+1) X_train1, X_valid1 = X_train[columns].iloc[train_index], X_train[columns].iloc[valid_index] y_train1, y_valid1 = y_train.iloc[train_index], y_train.iloc[valid_index] dtrain = lgb.Dataset(X_train1, label=y_train1) dvalid = lgb.Dataset(X_valid1, label=y_valid1) clf = lgb.train(params_lgb, dtrain, 10000, valid_sets = [dtrain, dvalid], verbose_eval=200) y_pred_valid = clf.predict(X_valid1) y_oof_lgb[valid_index] = y_pred_valid y_preds_lgb += clf.predict(test)/ NFOLDS
learn.save('stage-2-sz32' )<find_best_params>
submission_df = pd.read_csv(path + 'WSampleSubmissionStage1_2020.csv') submission_df['Pred'] = y_preds_lgb submission_df
interp = ClassificationInterpretation.from_learner(learn) losses,idxs = interp.top_losses() len(data.valid_ds)==len(losses)==len(idxs )<feature_engineering>
test= pd.read_csv(path +'WSampleSubmissionStage1_2020.csv') test.shape
test_preds = learn.TTA(DatasetType.Test) test_df['predicted'] = test_preds[0].argmax(dim=1 )<save_to_csv>
submission_df.to_csv('submission.csv', index=False )
test_df.to_csv('submission.csv', index=False )<load_from_csv>
pd.set_option('display.max_columns', None) plt.style.use('fivethirtyeight') %matplotlib inline py.init_notebook_mode(connected=True) warnings.filterwarnings('ignore') print("Libraries imported!" )
print(check_output(["cp", "../input/sampleSubmission.csv", "sub.csv"] ).decode("utf8"))<load_from_csv>
class BaseModel(object): def __init__(self, train_df, test_df, target, features, categoricals=[], n_splits=3, cv_method="KFold", group=None, task="regression", parameter_tuning=False, scaler=None, verbose=True): self.train_df = train_df self.test_df = test_df self.target = target self.features = features self.n_splits = n_splits self.categoricals = categoricals self.cv_method = cv_method self.group = group self.task = task self.parameter_tuning = parameter_tuning self.scaler = scaler self.cv = self.get_cv() self.verbose = verbose self.params = self.get_params() self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit() def train_model(self, train_set, val_set): raise NotImplementedError def get_params(self): raise NotImplementedError def convert_dataset(self, x_train, y_train, x_val, y_val): raise NotImplementedError def convert_x(self, x): return x def calc_metric(self, y_true, y_pred): if self.task == "classification": return log_loss(y_true, y_pred) elif self.task == "regression": return np.sqrt(mean_squared_error(y_true, y_pred)) def get_cv(self): if self.cv_method == "KFold": cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df) elif self.cv_method == "StratifiedKFold": cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target]) elif self.cv_method == "TimeSeriesSplit": cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits) return cv.split(self.train_df) elif self.cv_method == "GroupKFold": cv = GroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) elif self.cv_method == "StratifiedGroupKFold": cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) def fit(self): oof_pred = np.zeros(( self.train_df.shape[0],)) y_vals = np.zeros(( self.train_df.shape[0],)) y_pred = np.zeros(( self.test_df.shape[0],)) if self.group is not None: if self.group in self.features: self.features.remove(self.group) if self.group in self.categoricals: self.categoricals.remove(self.group) fi = np.zeros(( self.n_splits, len(self.features))) if self.scaler is not None: numerical_features = [f for f in self.features if f not in self.categoricals] self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median()) self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median()) self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0]) self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0]) if self.scaler == "MinMax": scaler = MinMaxScaler() elif self.scaler == "Standard": scaler = StandardScaler() df = pd.concat([self.train_df[numerical_features], self.test_df[numerical_features]], ignore_index=True) scaler.fit(df[numerical_features]) x_test = self.test_df.copy() x_test[numerical_features] = scaler.transform(x_test[numerical_features]) x_test = [np.absolute(x_test[i])for i in self.categoricals] + [x_test[numerical_features]] else: x_test = self.test_df[self.features] for fold,(train_idx, val_idx)in enumerate(self.cv): x_train, x_val = self.train_df.loc[train_idx, self.features], self.train_df.loc[val_idx, self.features] y_train, y_val = 
self.train_df.loc[train_idx, self.target], self.train_df.loc[val_idx, self.target] if self.scaler is not None: x_train[numerical_features] = scaler.transform(x_train[numerical_features]) x_val[numerical_features] = scaler.transform(x_val[numerical_features]) x_train = [np.absolute(x_train[i])for i in self.categoricals] + [x_train[numerical_features]] x_val = [np.absolute(x_val[i])for i in self.categoricals] + [x_val[numerical_features]] train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val) model, importance = self.train_model(train_set, val_set) fi[fold, :] = importance conv_x_val = self.convert_x(x_val) y_vals[val_idx] = y_val oof_pred[val_idx] = model.predict(conv_x_val ).reshape(oof_pred[val_idx].shape) x_test = self.convert_x(x_test) y_pred += model.predict(x_test ).reshape(y_pred.shape)/ self.n_splits print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_val, oof_pred[val_idx]))) fi_df = pd.DataFrame() for n in np.arange(self.n_splits): tmp = pd.DataFrame() tmp["features"] = self.features tmp["importance"] = fi[n, :] tmp["fold"] = n fi_df = pd.concat([fi_df, tmp], ignore_index=True) gfi = fi_df[["features", "importance"]].groupby(["features"] ).mean().reset_index() fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=('', '_mean')) loss_score = self.calc_metric(self.train_df[self.target], oof_pred) if self.verbose: print('Our oof loss score is: ', loss_score) return y_pred, loss_score, model, oof_pred, y_vals, fi_df def plot_feature_importance(self, rank_range=[1, 50]): fig, ax = plt.subplots(1, 1, figsize=(10, 20)) sorted_df = self.fi_df.sort_values(by = "importance_mean", ascending=False ).reset_index().iloc[self.n_splits *(rank_range[0]-1): self.n_splits * rank_range[1]] sns.barplot(data=sorted_df, x ="importance", y ="features", orient='h') ax.set_xlabel("feature importance") ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) return sorted_df
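The BaseModel class above relies on several imports that are not shown in this dump; a hedged sketch of the symbols its code paths reference (note that scikit-learn's GroupKFold does not accept shuffle or random_state, and the StratifiedGroupKFold branch needs scikit-learn >= 1.0, so those branches would need adjusting before use):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import KFold, StratifiedKFold, TimeSeriesSplit, GroupKFold, train_test_split
from sklearn.metrics import log_loss, mean_squared_error
from sklearn.preprocessing import MinMaxScaler, StandardScaler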
train_data = pd.read_csv('/kaggle/input/DontGetKicked/training.csv') train_data.head()<load_from_csv>
class LgbModel(BaseModel): def train_model(self, train_set, val_set): verbosity = 100 if self.verbose else 0 model = lgb.train(self.params, train_set, num_boost_round = 5000, valid_sets=[train_set, val_set], verbose_eval=verbosity) fi = model.feature_importance(importance_type="gain") return model, fi def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals) val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals) return train_set, val_set def get_params(self): params = { 'num_leaves': 127, 'min_data_in_leaf': 50, 'max_depth': -1, 'learning_rate': 0.005, "boosting_type": "gbdt", "bagging_seed": 11, "verbosity": -1, 'random_state': 42, } if self.task == "regression": params["objective"] = "regression" params["metric"] = "rmse" elif self.task == "classification": params["objective"] = "binary" params["metric"] = "binary_logloss" if self.parameter_tuning == True: def objective(trial): train_x, test_x, train_y, test_y = train_test_split(self.train_df[self.features], self.train_df[self.target], test_size=0.3, random_state=42) dtrain = lgb.Dataset(train_x, train_y, categorical_feature=self.categoricals) dtest = lgb.Dataset(test_x, test_y, categorical_feature=self.categoricals) hyperparams = {'num_leaves': trial.suggest_int('num_leaves', 24, 1024), 'boosting_type': 'gbdt', 'objective': params["objective"], 'metric': params["metric"], 'max_depth': trial.suggest_int('max_depth', 4, 16), 'min_child_weight': trial.suggest_int('min_child_weight', 1, 20), 'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 100), 'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0), 'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0), 'early_stopping_rounds': 100 } model = lgb.train(hyperparams, dtrain, valid_sets=dtest, verbose_eval=500) pred = model.predict(test_x) if self.task == "classification": return log_loss(test_y, pred) elif self.task == "regression": return np.sqrt(mean_squared_error(test_y, pred)) study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=50) print('Number of finished trials: {}'.format(len(study.trials))) print('Best trial:') trial = study.best_trial print(' Value: {}'.format(trial.value)) print(' Params: ') for key, value in trial.params.items() : print(' {}: {}'.format(key, value)) params = trial.params params["learning_rate"] = 0.001 plot_optimization_history(study) return params
test_data = pd.read_csv('/kaggle/input/DontGetKicked/test.csv') test_data.head()<count_missing_values>
data_dict = {} for i in glob.glob('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/*'): name = i.split('/')[-1].split('.')[0] if name != 'WTeamSpellings': data_dict[name] = pd.read_csv(i) else: data_dict[name] = pd.read_csv(i, encoding='cp1252' )
train_data.isnull().sum()<count_values>
data_dict['WNCAATourneySeeds']['Seed'] = data_dict['WNCAATourneySeeds']['Seed'].apply(lambda x: int(x[1:3])) data_dict['WNCAATourneySeeds'].head()
train_data['IsBadBuy'].value_counts()<count_values>
test = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv') print(test.shape) test.head()
train_data['Model'].value_counts()<drop_column>
test = test.drop(['Pred'], axis=1) test['Season'] = test['ID'].apply(lambda x: int(x.split('_')[0])) test['WTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[1])) test['LTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[2])) test.head()
train_data.drop('Model',axis = 1,inplace = True) test_data.drop('Model',axis = 1,inplace = True) <count_values>
gameCities = pd.merge(data_dict['WGameCities'], data_dict['Cities'], how='left', on=['CityID']) cols_to_use = gameCities.columns.difference(train.columns ).tolist() + ["Season", "WTeamID", "LTeamID"] train = train.merge(gameCities[cols_to_use], how="left", on=["Season", "WTeamID", "LTeamID"]) train.head() cols_to_use = data_dict["WSeasons"].columns.difference(train.columns ).tolist() + ["Season"] train = train.merge(data_dict["WSeasons"][cols_to_use], how="left", on=["Season"]) train.head() cols_to_use = data_dict["WTeams"].columns.difference(train.columns ).tolist() train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["WTeamID"], right_on=["TeamID"]) train.drop(['TeamID'], axis=1, inplace=True) train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L')) train.drop(['TeamID'], axis=1, inplace=True) print(train.shape) train.head()
train_data['Trim'].value_counts()<drop_column>
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(train.columns ).tolist() + ['Season'] train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID']) train.drop(['TeamID'], axis=1, inplace=True) train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L')) train.drop(['TeamID'], axis=1, inplace=True) print(train.shape) train.head()
train_data.drop('Trim',axis = 1,inplace = True) test_data.drop('Trim',axis = 1,inplace = True )<count_values>
cols_to_use = gameCities.columns.difference(test.columns ).tolist() + ["Season", "WTeamID", "LTeamID"] test = test.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]), how="left", on=["Season", "WTeamID", "LTeamID"]) del gameCities gc.collect() test.head() cols_to_use = data_dict["WSeasons"].columns.difference(test.columns ).tolist() + ["Season"] test = test.merge(data_dict["WSeasons"][cols_to_use].drop_duplicates(subset=["Season"]), how="left", on=["Season"]) test.head() cols_to_use = data_dict["WTeams"].columns.difference(test.columns ).tolist() test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["WTeamID"], right_on=["TeamID"]) test.drop(['TeamID'], axis=1, inplace=True) test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L')) test.drop(['TeamID'], axis=1, inplace=True) test.head() cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(test.columns ).tolist() + ['Season'] test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID']) test.drop(['TeamID'], axis=1, inplace=True) test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L')) test.drop(['TeamID'], axis=1, inplace=True) print(test.shape) test.head()
train_data['SubModel'].value_counts()<drop_column>
not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist() ] print(not_exist_in_test) train = train.drop(not_exist_in_test, axis=1) train.head()
train_data.drop('SubModel',axis = 1,inplace = True) test_data.drop('SubModel',axis = 1,inplace = True )<count_values>
team_win_score = regularSeason.groupby(['Season', 'WTeamID'] ).agg({'WScore':['sum', 'count', 'var']} ).reset_index() team_win_score.columns = [' '.join(col ).strip() for col in team_win_score.columns.values] team_loss_score = regularSeason.groupby(['Season', 'LTeamID'] ).agg({'LScore':['sum', 'count', 'var']} ).reset_index() team_loss_score.columns = [' '.join(col ).strip() for col in team_loss_score.columns.values] del regularSeason gc.collect()
train_data['Color'].value_counts()<data_type_conversions>
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID']) train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID']) train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID']) train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID']) train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True) train.head()
train_data['Color'].fillna(value = 'Color_Unknown',inplace=True) test_data['Color'].fillna(value= 'Color_Unknown',inplace =True )<count_values>
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID']) test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID']) test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID']) test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID']) test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True) test.head()
train_data['Transmission'].value_counts()<rename_columns>
def preprocess(df): df['x_score'] = df['WScore sum_x'] + df['LScore sum_y'] df['y_score'] = df['WScore sum_y'] + df['LScore sum_x'] df['x_count'] = df['WScore count_x'] + df['LScore count_y'] df['y_count'] = df['WScore count_y'] + df['LScore count_x'] df['x_var'] = df['WScore var_x'] + df['LScore var_y'] df['y_var'] = df['WScore var_y'] + df['LScore var_x'] return df train = preprocess(train) test = preprocess(test )
train_data['Transmission'].replace('Manual','MANUAL',inplace=True )<count_values>
train_win = train.copy() train_los = train.copy() train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']] train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W', 'y_score', 'x_score', 'x_count', 'y_count', 'x_var', 'y_var']] train_win.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2'] train_los.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2'] test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']] test.columns = ['ID', 'Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
train_data["Transmission"].value_counts()<count_values>
def feature_engineering(df): df['Seed_diff'] = df['Seed_1'] - df['Seed_2'] df['Score_diff'] = df['Score_1'] - df['Score_2'] df['Count_diff'] = df['Count_1'] - df['Count_2'] df['Var_diff'] = df['Var_1'] - df['Var_2'] df['Mean_score1'] = df['Score_1'] / df['Count_1'] df['Mean_score2'] = df['Score_2'] / df['Count_2'] df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2'] df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1'] df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2'] return df train_win = feature_engineering(train_win) train_los = feature_engineering(train_los) test = feature_engineering(test )
train_data['WheelTypeID'].value_counts()<drop_column>
data = pd.concat(( train_win, train_los)).reset_index(drop=True) print(data.shape) data.head()
train_data.drop('WheelTypeID',axis = 1,inplace = True) test_data.drop('WheelTypeID',axis = 1,inplace = True )<count_values>
categoricals = ["TeamName_1", "TeamName_2"] for c in categoricals: le = LabelEncoder() data[c] = data[c].fillna("NaN") data[c] = le.fit_transform(data[c]) test[c] = le.transform(test[c]) data.head()
train_data['WheelType'].value_counts()<data_type_conversions>
target = 'result' features = data.columns.values.tolist() features.remove(target )
train_data['WheelType'].fillna(value = 'WheelType_unk',inplace=True) test_data['WheelType'].fillna(value= 'WheelType_unk',inplace =True )<count_values>
lgbm = LgbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )
train_data['Nationality'].value_counts()<data_type_conversions>
submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv') submission_df['Pred'] = lgbm.y_pred submission_df
submission_df.to_csv('submission.csv', index=False )
metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-womens-tournament<data_type_conversions>
pd.set_option('display.max_columns', None) plt.style.use('fivethirtyeight') %matplotlib inline py.init_notebook_mode(connected=True) warnings.filterwarnings('ignore') print("Libraries imported!" )
train_data['Size'].fillna(value = 'Size_unk',inplace=True) test_data['Size'].fillna(value= 'Size_unk',inplace =True )<count_values>
class BaseModel(object): def __init__(self, train_df, test_df, target, features, categoricals=[], n_splits=3, cv_method="KFold", group=None, task="regression", parameter_tuning=False, scaler=None, verbose=True): self.train_df = train_df self.test_df = test_df self.target = target self.features = features self.n_splits = n_splits self.categoricals = categoricals self.cv_method = cv_method self.group = group self.task = task self.parameter_tuning = parameter_tuning self.scaler = scaler self.cv = self.get_cv() self.verbose = verbose self.params = self.get_params() self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit() def train_model(self, train_set, val_set): raise NotImplementedError def get_params(self): raise NotImplementedError def convert_dataset(self, x_train, y_train, x_val, y_val): raise NotImplementedError def convert_x(self, x): return x def calc_metric(self, y_true, y_pred): if self.task == "classification": return log_loss(y_true, y_pred) elif self.task == "regression": return np.sqrt(mean_squared_error(y_true, y_pred)) def get_cv(self): if self.cv_method == "KFold": cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df) elif self.cv_method == "StratifiedKFold": cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target]) elif self.cv_method == "TimeSeriesSplit": cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits) return cv.split(self.train_df) elif self.cv_method == "GroupKFold": cv = GroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) elif self.cv_method == "StratifiedGroupKFold": cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) def fit(self): oof_pred = np.zeros(( self.train_df.shape[0],)) y_vals = np.zeros(( self.train_df.shape[0],)) y_pred = np.zeros(( self.test_df.shape[0],)) if self.group is not None: if self.group in self.features: self.features.remove(self.group) if self.group in self.categoricals: self.categoricals.remove(self.group) fi = np.zeros(( self.n_splits, len(self.features))) if self.scaler is not None: numerical_features = [f for f in self.features if f not in self.categoricals] self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median()) self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median()) self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0]) self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0]) if self.scaler == "MinMax": scaler = MinMaxScaler() elif self.scaler == "Standard": scaler = StandardScaler() df = pd.concat([self.train_df[numerical_features], self.test_df[numerical_features]], ignore_index=True) scaler.fit(df[numerical_features]) x_test = self.test_df.copy() x_test[numerical_features] = scaler.transform(x_test[numerical_features]) x_test = [np.absolute(x_test[i])for i in self.categoricals] + [x_test[numerical_features]] else: x_test = self.test_df[self.features] for fold,(train_idx, val_idx)in enumerate(self.cv): x_train, x_val = self.train_df.loc[train_idx, self.features], self.train_df.loc[val_idx, self.features] y_train, y_val = 
self.train_df.loc[train_idx, self.target], self.train_df.loc[val_idx, self.target] if self.scaler is not None: x_train[numerical_features] = scaler.transform(x_train[numerical_features]) x_val[numerical_features] = scaler.transform(x_val[numerical_features]) x_train = [np.absolute(x_train[i])for i in self.categoricals] + [x_train[numerical_features]] x_val = [np.absolute(x_val[i])for i in self.categoricals] + [x_val[numerical_features]] train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val) model, importance = self.train_model(train_set, val_set) fi[fold, :] = importance conv_x_val = self.convert_x(x_val) y_vals[val_idx] = y_val oof_pred[val_idx] = model.predict(conv_x_val ).reshape(oof_pred[val_idx].shape) x_test = self.convert_x(x_test) y_pred += model.predict(x_test ).reshape(y_pred.shape)/ self.n_splits print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_val, oof_pred[val_idx]))) fi_df = pd.DataFrame() for n in np.arange(self.n_splits): tmp = pd.DataFrame() tmp["features"] = self.features tmp["importance"] = fi[n, :] tmp["fold"] = n fi_df = pd.concat([fi_df, tmp], ignore_index=True) gfi = fi_df[["features", "importance"]].groupby(["features"] ).mean().reset_index() fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=('', '_mean')) loss_score = self.calc_metric(self.train_df[self.target], oof_pred) if self.verbose: print('Our oof loss score is: ', loss_score) return y_pred, loss_score, model, oof_pred, y_vals, fi_df def plot_feature_importance(self, rank_range=[1, 50]): fig, ax = plt.subplots(1, 1, figsize=(10, 20)) sorted_df = self.fi_df.sort_values(by = "importance_mean", ascending=False ).reset_index().iloc[self.n_splits *(rank_range[0]-1): self.n_splits * rank_range[1]] sns.barplot(data=sorted_df, x ="importance", y ="features", orient='h') ax.set_xlabel("feature importance") ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) return sorted_df
train_data['TopThreeAmericanName'].value_counts()<data_type_conversions>
class LgbModel(BaseModel): def train_model(self, train_set, val_set): verbosity = 100 if self.verbose else 0 model = lgb.train(self.params, train_set, num_boost_round = 5000, valid_sets=[train_set, val_set], verbose_eval=verbosity) fi = model.feature_importance(importance_type="gain") return model, fi def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals) val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals) return train_set, val_set def get_params(self): params = { 'num_leaves': 127, 'min_data_in_leaf': 50, 'max_depth': -1, 'learning_rate': 0.005, "boosting_type": "gbdt", "bagging_seed": 11, "verbosity": -1, 'random_state': 42, } if self.task == "regression": params["objective"] = "regression" params["metric"] = "rmse" elif self.task == "classification": params["objective"] = "binary" params["metric"] = "binary_logloss" if self.parameter_tuning == True: def objective(trial): train_x, test_x, train_y, test_y = train_test_split(self.train_df[self.features], self.train_df[self.target], test_size=0.3, random_state=42) dtrain = lgb.Dataset(train_x, train_y, categorical_feature=self.categoricals) dtest = lgb.Dataset(test_x, test_y, categorical_feature=self.categoricals) hyperparams = {'num_leaves': trial.suggest_int('num_leaves', 24, 1024), 'boosting_type': 'gbdt', 'objective': params["objective"], 'metric': params["metric"], 'max_depth': trial.suggest_int('max_depth', 4, 16), 'min_child_weight': trial.suggest_int('min_child_weight', 1, 20), 'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 100), 'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0), 'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0), 'early_stopping_rounds': 100 } model = lgb.train(hyperparams, dtrain, valid_sets=dtest, verbose_eval=500) pred = model.predict(test_x) if self.task == "classification": return log_loss(test_y, pred) elif self.task == "regression": return np.sqrt(mean_squared_error(test_y, pred)) study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=50) print('Number of finished trials: {}'.format(len(study.trials))) print('Best trial:') trial = study.best_trial print(' Value: {}'.format(trial.value)) print(' Params: ') for key, value in trial.params.items() : print(' {}: {}'.format(key, value)) params = trial.params params["learning_rate"] = 0.001 plot_optimization_history(study) return params
train_data['TopThreeAmericanName'].fillna(value = 'TopThreeAmericanName_unk',inplace=True) test_data['TopThreeAmericanName'].fillna(value= 'TopThreeAmericanName_unk',inplace =True )<count_values>
class CatbModel(BaseModel): def train_model(self, train_set, val_set): verbosity = 100 if self.verbose else 0 if self.task == "regression": model = CatBoostRegressor(**self.params) elif self.task == "classification": model = CatBoostClassifier(**self.params) model.fit(train_set['X'], train_set['y'], eval_set=(val_set['X'], val_set['y']), verbose=verbosity, cat_features=self.categoricals) return model, model.get_feature_importance() def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = {'X': x_train, 'y': y_train} val_set = {'X': x_val, 'y': y_val} return train_set, val_set def get_params(self): params = { 'task_type': "CPU", 'learning_rate': 0.01, 'iterations': 1000, 'random_seed': 42, 'use_best_model': True } if self.task == "regression": params["loss_function"] = "RMSE" elif self.task == "classification": params["loss_function"] = "Logloss" return params
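The CatbModel wrapper above assumes the CatBoost estimators have been imported elsewhere in the notebook; for completeness, the import it relies on:
from catboost import CatBoostRegressor, CatBoostClassifier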
train_data['PRIMEUNIT'].value_counts()<data_type_conversions>
class Mish(Layer): def __init__(self, **kwargs): super(Mish, self ).__init__(**kwargs) def build(self, input_shape): super(Mish, self ).build(input_shape) def call(self, x): return x * K.tanh(K.softplus(x)) def compute_output_shape(self, input_shape): return input_shape class LayerNormalization(keras.layers.Layer): def __init__(self, center=True, scale=True, epsilon=None, gamma_initializer='ones', beta_initializer='zeros', gamma_regularizer=None, beta_regularizer=None, gamma_constraint=None, beta_constraint=None, **kwargs): super(LayerNormalization, self ).__init__(**kwargs) self.supports_masking = True self.center = center self.scale = scale if epsilon is None: epsilon = K.epsilon() * K.epsilon() self.epsilon = epsilon self.gamma_initializer = keras.initializers.get(gamma_initializer) self.beta_initializer = keras.initializers.get(beta_initializer) self.gamma_regularizer = keras.regularizers.get(gamma_regularizer) self.beta_regularizer = keras.regularizers.get(beta_regularizer) self.gamma_constraint = keras.constraints.get(gamma_constraint) self.beta_constraint = keras.constraints.get(beta_constraint) self.gamma, self.beta = None, None def get_config(self): config = { 'center': self.center, 'scale': self.scale, 'epsilon': self.epsilon, 'gamma_initializer': keras.initializers.serialize(self.gamma_initializer), 'beta_initializer': keras.initializers.serialize(self.beta_initializer), 'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer), 'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer), 'gamma_constraint': keras.constraints.serialize(self.gamma_constraint), 'beta_constraint': keras.constraints.serialize(self.beta_constraint), } base_config = super(LayerNormalization, self ).get_config() return dict(list(base_config.items())+ list(config.items())) def compute_output_shape(self, input_shape): return input_shape def compute_mask(self, inputs, input_mask=None): return input_mask def build(self, input_shape): shape = input_shape[-1:] if self.scale: self.gamma = self.add_weight( shape=shape, initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, name='gamma', ) if self.center: self.beta = self.add_weight( shape=shape, initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, name='beta', ) super(LayerNormalization, self ).build(input_shape) def call(self, inputs, training=None): mean = K.mean(inputs, axis=-1, keepdims=True) variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True) std = K.sqrt(variance + self.epsilon) outputs =(inputs - mean)/ std if self.scale: outputs *= self.gamma if self.center: outputs += self.beta return outputs
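For reference, a standalone numpy sketch of what the Mish layer above computes (the function name and example values here are illustrative, not from the notebook):
import numpy as np

def mish(x):
    # Mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x))), matching the layer's call method
    return x * np.tanh(np.log1p(np.exp(x)))

print(mish(np.array([-1.0, 0.0, 1.0])))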
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
train_data['PRIMEUNIT'].fillna(value = 'PRIMEUNIT_unk',inplace=True) test_data['PRIMEUNIT'].fillna(value= 'PRIMEUNIT_unk',inplace =True )<count_values>
class NeuralNetworkModel(BaseModel): def train_model(self, train_set, val_set): inputs = [] embeddings = [] embedding_out_dim = self.params['embedding_out_dim'] n_neuron = self.params['hidden_units'] for i in self.categoricals: input_ = Input(shape=(1,)) embedding = Embedding(int(np.absolute(self.train_df[i] ).max() + 1), embedding_out_dim, input_length=1 )(input_) embedding = Reshape(target_shape=(embedding_out_dim,))(embedding) inputs.append(input_) embeddings.append(embedding) input_numeric = Input(shape=(len(self.features)- len(self.categoricals),)) embedding_numeric = Dense(n_neuron )(input_numeric) embedding_numeric = Mish()(embedding_numeric) inputs.append(input_numeric) embeddings.append(embedding_numeric) x = Concatenate()(embeddings) for i in np.arange(self.params['hidden_layers'] - 1): x = Dense(n_neuron //(2 *(i+1)) )(x) x = Mish()(x) x = Dropout(self.params['hidden_dropout'] )(x) x = LayerNormalization()(x) if self.task == "regression": out = Dense(1, activation="linear", name = "out" )(x) loss = "mse" elif self.task == "classification": out = Dense(1, activation='sigmoid', name = 'out' )(x) loss = "binary_crossentropy" model = Model(inputs=inputs, outputs=out) model.compile(loss=loss, optimizer=Adam(lr=1e-04, beta_1=0.9, beta_2=0.999, decay=1e-04)) er = EarlyStopping(patience=10, min_delta=1e-4, restore_best_weights=True, monitor='val_loss') ReduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min') model.fit(train_set['X'], train_set['y'], callbacks=[er, ReduceLR], epochs=self.params['epochs'], batch_size=self.params['batch_size'], validation_data=[val_set['X'], val_set['y']]) fi = np.zeros(len(self.features)) return model, fi def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = {'X': x_train, 'y': y_train} val_set = {'X': x_val, 'y': y_val} return train_set, val_set def get_params(self): params = { 'input_dropout': 0.0, 'hidden_layers': 2, 'hidden_units': 128, 'embedding_out_dim': 4, 'hidden_activation': 'relu', 'hidden_dropout': 0.05, 'batch_norm': 'before_act', 'optimizer': {'type': 'adam', 'lr': 0.001}, 'batch_size': 128, 'epochs': 80 } return params
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
train_data['AUCGUART'].value_counts()<data_type_conversions>
data_dict = {}
for i in glob.glob('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/*'):
    name = i.split('/')[-1].split('.')[0]
    if name != 'WTeamSpellings':
        data_dict[name] = pd.read_csv(i)
    else:
        data_dict[name] = pd.read_csv(i, encoding='cp1252')
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
train_data['AUCGUART'].fillna(value = 'AUCGUART_unk',inplace=True) test_data['AUCGUART'].fillna(value= 'AUCGUART_unk',inplace =True )<drop_column>
data_dict['WNCAATourneySeeds']['Seed'] = data_dict['WNCAATourneySeeds']['Seed'].apply(lambda x: int(x[1:3]))
data_dict['WNCAATourneySeeds'].head()  # `fname` was undefined here; preview the frame that was just modified
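# Editor's note: tiny illustration (not in the original) of the seed parsing above:
# x[1:3] drops the region letter and keeps the two-digit seed number as an int.
for s in ["W01", "X16", "Z09"]:   # example strings in the WNCAATourneySeeds 'Seed' format
    print(s, "->", int(s[1:3]))   # W01 -> 1, X16 -> 16, Z09 -> 9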
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
train_data.drop(['MMRAcquisitionAuctionAveragePrice','MMRAcquisitionAuctionCleanPrice','MMRAcquisitionRetailAveragePrice','MMRAcquisitonRetailCleanPrice','MMRCurrentAuctionAveragePrice','MMRCurrentAuctionCleanPrice','MMRCurrentRetailAveragePrice','MMRCurrentRetailCleanPrice' ],axis = 1,inplace = True) test_data.drop(['MMRAcquisitionAuctionAveragePrice','MMRAcquisitionAuctionCleanPrice','MMRAcquisitionRetailAveragePrice','MMRAcquisitonRetailCleanPrice','MMRCurrentAuctionAveragePrice','MMRCurrentAuctionCleanPrice','MMRCurrentRetailAveragePrice','MMRCurrentRetailCleanPrice' ],axis = 1,inplace = True )<drop_column>
test = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
print(test.shape)
test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
train_data.drop('PurchDate',axis = 1,inplace = True) test_data.drop('PurchDate',axis = 1,inplace = True )<drop_column>
test = test.drop(['Pred'], axis=1) test['Season'] = test['ID'].apply(lambda x: int(x.split('_')[0])) test['WTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[1])) test['LTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[2])) test.head()
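# Editor's note: quick check (not in the original) of the ID split above; submission IDs
# have the form "Season_TeamID1_TeamID2". The ID below is a made-up example in that format.
example_id = "2015_3104_3112"
season, team1, team2 = (int(p) for p in example_id.split("_"))
print(season, team1, team2)   # 2015 3104 3112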
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
non_categorical = train_data.drop(['RefId','IsBadBuy'],axis=1 ).columns[train_data.drop(['RefId','IsBadBuy'],axis=1 ).dtypes!='object']<feature_engineering>
gameCities = pd.merge(data_dict['WGameCities'], data_dict['Cities'], how='left', on=['CityID']) cols_to_use = gameCities.columns.difference(train.columns ).tolist() + ["Season", "WTeamID", "LTeamID"] train = train.merge(gameCities[cols_to_use], how="left", on=["Season", "WTeamID", "LTeamID"]) train.head() cols_to_use = data_dict["WSeasons"].columns.difference(train.columns ).tolist() + ["Season"] train = train.merge(data_dict["WSeasons"][cols_to_use], how="left", on=["Season"]) train.head() cols_to_use = data_dict["WTeams"].columns.difference(train.columns ).tolist() train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["WTeamID"], right_on=["TeamID"]) train.drop(['TeamID'], axis=1, inplace=True) train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L')) train.drop(['TeamID'], axis=1, inplace=True) print(train.shape) train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
for i in non_categorical:
    maximum = np.max(train_data[i])
    train_data[i] /= maximum
    # use the training maximum for the test column as well, so both sets share one scale
    # (the original divided the test column by its own maximum, putting the sets on different scales)
    test_data[i] /= maximum<drop_column>
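# Editor's note: related pattern (not in the original): sklearn's MinMaxScaler fitted on the
# training rows only, then applied to the test rows, keeps both sets on one scale (it also
# subtracts the column minimum, unlike the divide-by-max above). `non_categorical` is the
# column list computed earlier in this kernel.
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
train_data[non_categorical] = scaler.fit_transform(train_data[non_categorical])
test_data[non_categorical] = scaler.transform(test_data[non_categorical])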
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(train.columns ).tolist() + ['Season'] train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID']) train.drop(['TeamID'], axis=1, inplace=True) train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L')) train.drop(['TeamID'], axis=1, inplace=True) print(train.shape) train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
categorical = train_data.drop(['RefId','IsBadBuy'],axis=1 ).columns[train_data.drop(['RefId','IsBadBuy'],axis=1 ).dtypes=='object']<categorify>
cols_to_use = gameCities.columns.difference(test.columns ).tolist() + ["Season", "WTeamID", "LTeamID"] test = test.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]), how="left", on=["Season", "WTeamID", "LTeamID"]) del gameCities gc.collect() test.head() cols_to_use = data_dict["WSeasons"].columns.difference(test.columns ).tolist() + ["Season"] test = test.merge(data_dict["WSeasons"][cols_to_use].drop_duplicates(subset=["Season"]), how="left", on=["Season"]) test.head() cols_to_use = data_dict["WTeams"].columns.difference(test.columns ).tolist() test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["WTeamID"], right_on=["TeamID"]) test.drop(['TeamID'], axis=1, inplace=True) test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L')) test.drop(['TeamID'], axis=1, inplace=True) test.head() cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(test.columns ).tolist() + ['Season'] test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID']) test.drop(['TeamID'], axis=1, inplace=True) test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L')) test.drop(['TeamID'], axis=1, inplace=True) print(test.shape) test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
pd.get_dummies(train_data[categorical[0]] )<categorify>
not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist() ] print(not_exist_in_test) train = train.drop(not_exist_in_test, axis=1) train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
for i in categorical:
    dummies = pd.get_dummies(train_data[i])
    dummies.columns = str(i) + '_' + dummies.columns
    train_data = pd.concat([train_data, dummies], axis=1)
    train_data.drop(i, inplace=True, axis=1)
    dummies = pd.get_dummies(test_data[i])
    dummies.columns = str(i) + '_' + dummies.columns
    test_data = pd.concat([test_data, dummies], axis=1)
    test_data.drop(i, inplace=True, axis=1)<feature_engineering>
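# Editor's note: sketch of a one-step alternative (not in the original) to the manual
# zero-filling loops used later in this kernel: reindex the dummy-encoded test features to
# the training feature columns, filling categories absent from test with 0 and dropping
# test-only dummy columns instead of adding them to train.
feature_cols = train_data.drop(['RefId', 'IsBadBuy'], axis=1).columns
test_features = test_data.drop('RefId', axis=1).reindex(columns=feature_cols, fill_value=0)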
team_win_score = regularSeason.groupby(['Season', 'WTeamID'] ).agg({'WScore':['sum', 'count', 'var']} ).reset_index() team_win_score.columns = [' '.join(col ).strip() for col in team_win_score.columns.values] team_loss_score = regularSeason.groupby(['Season', 'LTeamID'] ).agg({'LScore':['sum', 'count', 'var']} ).reset_index() team_loss_score.columns = [' '.join(col ).strip() for col in team_loss_score.columns.values] del regularSeason gc.collect()
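# Editor's note: illustration (not in the original) of the column flattening used above.
# groupby().agg() with a dict of aggregation lists returns MultiIndex columns, which
# ' '.join(col).strip() collapses into flat names like 'WScore sum'.
import pandas as pd

demo = pd.DataFrame({'Season': [2015, 2015, 2015],
                     'WTeamID': [3104, 3104, 3112],
                     'WScore': [70, 65, 80]})
agg = demo.groupby(['Season', 'WTeamID']).agg({'WScore': ['sum', 'count', 'var']}).reset_index()
agg.columns = [' '.join(col).strip() for col in agg.columns.values]
print(agg.columns.tolist())   # ['Season', 'WTeamID', 'WScore sum', 'WScore count', 'WScore var']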
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
for i in train_data.drop('IsBadBuy', axis=1).columns:
    if i not in test_data.columns:
        test_data[i] = np.zeros(len(test_data))<feature_engineering>
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID']) train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID']) train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID']) train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID']) train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True) train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
for i in test_data.columns:
    if i not in train_data.columns:
        train_data[i] = np.zeros(len(train_data))<drop_column>
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID']) test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID']) test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID']) test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID']) test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True) test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
test_data = test_data[train_data.drop('IsBadBuy',axis=1 ).columns]<prepare_x_and_y>
def preprocess(df):
    # combine each team's winning-game and losing-game aggregates:
    # 'x' columns describe the first (winning) team, 'y' columns the second (losing) team
    df['x_score'] = df['WScore sum_x'] + df['LScore sum_y']
    df['y_score'] = df['WScore sum_y'] + df['LScore sum_x']
    df['x_count'] = df['WScore count_x'] + df['LScore count_y']
    df['y_count'] = df['WScore count_y'] + df['LScore count_x']   # original added 'WScore count_x' here, breaking the x/y symmetry
    df['x_var'] = df['WScore var_x'] + df['LScore var_y']         # original added 'LScore count_y' (a count) to a variance
    df['y_var'] = df['WScore var_y'] + df['LScore var_x']         # original added 'WScore var_x' here
    return df

train = preprocess(train)
test = preprocess(test)
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
X = train_data.drop(['RefId','IsBadBuy'],axis=1) y = train_data['IsBadBuy']<split>
train_win = train.copy() train_los = train.copy() train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']] train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W', 'y_score', 'x_score', 'x_count', 'y_count', 'x_var', 'y_var']] train_win.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2'] train_los.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2'] test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']] test.columns = ['ID', 'Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
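# Editor's note: the 'result' target used later in this kernel is not assigned anywhere in
# this excerpt; presumably the original notebook labels the winner-ordered rows 1 and the
# swapped (loser-ordered) rows 0 before the concat, giving a balanced binary target.
# A minimal sketch of that assumed step:
train_win['result'] = 1
train_los['result'] = 0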
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
X_train,X_test,y_train,y_test = train_test_split(X,y,random_state=42 )<import_modules>
def feature_engineering(df):
    df['Seed_diff'] = df['Seed_1'] - df['Seed_2']
    df['Score_diff'] = df['Score_1'] - df['Score_2']
    df['Count_diff'] = df['Count_1'] - df['Count_2']
    df['Var_diff'] = df['Var_1'] - df['Var_2']
    df['Mean_score1'] = df['Score_1'] / df['Count_1']
    df['Mean_score2'] = df['Score_2'] / df['Count_2']
    df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2']
    df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1']
    df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2']
    return df

train_win = feature_engineering(train_win)
train_los = feature_engineering(train_los)
test = feature_engineering(test)
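# Editor's note: aside (not in the original) on the Fano factor used above: the
# variance-to-mean ratio of a team's scoring, a simple dispersion measure -- higher values
# mean streakier scoring relative to the team's average. Hypothetical numbers:
mean_score, var_score = 70.0, 140.0
print(var_score / mean_score)   # 2.0 -> more dispersed than a Poisson-like baseline (Fano = 1)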
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
from sklearn.neighbors import KNeighborsClassifier<import_modules>
data = pd.concat(( train_win, train_los)).reset_index(drop=True) print(data.shape) data.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
from sklearn.neighbors import KNeighborsClassifier<import_modules>
categoricals = ["TeamName_1", "TeamName_2"]
for c in categoricals:
    le = LabelEncoder()
    data[c] = data[c].fillna("NaN")
    data[c] = le.fit_transform(data[c])
    test[c] = le.transform(test[c])
data.head()
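# Editor's note: defensive variant (not in the original). LabelEncoder.transform raises a
# ValueError on labels unseen during fit, so the cell above only works because every
# tournament team name in `test` also occurs in `data`; fitting on the union of both frames
# avoids that failure mode.
from sklearn.preprocessing import LabelEncoder
import pandas as pd

for c in ["TeamName_1", "TeamName_2"]:
    le = LabelEncoder()
    all_values = pd.concat([data[c], test[c]]).fillna("NaN").astype(str)
    le.fit(all_values)
    data[c] = le.transform(data[c].fillna("NaN").astype(str))
    test[c] = le.transform(test[c].fillna("NaN").astype(str))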
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
from sklearn.neighbors import KNeighborsClassifier<train_model>
target = 'result' features = data.columns.values.tolist() features.remove(target )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
KNN = KNeighborsClassifier(n_neighbors=11) KNN.fit(X_train,y_train )<compute_test_metric>
nn = NeuralNetworkModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler="MinMax", verbose=True )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
KNN.score(X_test,y_test )<predict_on_test>
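# Editor's note: aside (not in the original). .score() reports plain accuracy; if a
# probability-based metric such as log loss is wanted instead (the metric for this dataset
# is not shown in this excerpt), the probabilistic evaluation of the same fitted model is:
from sklearn.metrics import log_loss

proba = KNN.predict_proba(X_test)[:, 1]
print(log_loss(y_test, proba))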
lgbm = LgbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
predict = KNN.predict(test_data.drop('RefId',axis=1))<prepare_output>
catb = CatbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
Submissions = pd.DataFrame(predict,columns=['IsBadBuy']) Submissions.head()<prepare_output>
submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
submission_df['Pred'] = 0.7 * lgbm.y_pred + 0.2 * catb.y_pred + 0.1 * nn.y_pred
submission_df
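# Editor's note: the 0.7 / 0.2 / 0.1 blend above appears to be hand-picked. Standalone
# sketch (not from the kernel) of grid-searching blend weights against log loss; the arrays
# below are synthetic stand-ins for the three models' out-of-fold predictions.
import numpy as np
from sklearn.metrics import log_loss

rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, size=1000)
oof_a, oof_b, oof_c = (np.clip(y_true * 0.6 + rng.random(1000) * 0.4, 1e-6, 1 - 1e-6)
                       for _ in range(3))

grid = [(w1, w2, round(1.0 - w1 - w2, 2))
        for w1 in np.arange(0.0, 1.01, 0.1)
        for w2 in np.arange(0.0, 1.01 - w1, 0.1)]
best = min(grid, key=lambda w: log_loss(y_true, w[0] * oof_a + w[1] * oof_b + w[2] * oof_c))
print(best)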
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,079,138
<save_to_csv><EOS>
submission_df.to_csv('submission.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
<SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-womens-tournament<load_from_csv>
pd.set_option('max_columns', None)
plt.style.use('fivethirtyeight')
%matplotlib inline
py.init_notebook_mode(connected=True)
warnings.filterwarnings('ignore')
print("Libraries imported!")
Google Cloud & NCAA® ML Competition 2020-NCAAW