Dataset columns: kernel_id (int64, values 24.2k to 23.3M), prompt (string, lengths 8 to 1.85M), completion (string, lengths 1 to 182k), comp_name (string, lengths 5 to 57).
comp_name: Google Cloud & NCAA® ML Competition 2020-NCAAM
kernel_id: 7,985,375
dtest = xgb.DMatrix(test_x) my_submission = pd.DataFrame() my_submission["Id"] = test_ID my_submission["SalePrice"] = np.exp(bst.predict(dtest)) my_submission.to_csv('submission.csv', index=False )<load_from_csv>
season_win_result = season_result[['Season', 'WTeamID', 'WScore']] season_lose_result = season_result[['Season', 'LTeamID', 'LScore']] season_win_result.rename(columns={'WTeamID':'TeamID', 'WScore':'Score'}, inplace=True) season_lose_result.rename(columns={'LTeamID':'TeamID', 'LScore':'Score'}, inplace=True) season_result = pd.concat(( season_win_result, season_lose_result)).reset_index(drop=True) season_result
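A minimal sketch of the reshape this cell performs, on hypothetical toy data (column names mirror the Kaggle files; the frame itself is invented for illustration):
import pandas as pd

# One row per game, winner and loser side by side.
games = pd.DataFrame({'Season': [2019, 2019],
                      'WTeamID': [1101, 1102], 'WScore': [70, 65],
                      'LTeamID': [1103, 1101], 'LScore': [60, 63]})
# Stack winner rows and loser rows into one long table of (Season, TeamID, Score),
# so every team appearance becomes its own row.
wins = games[['Season', 'WTeamID', 'WScore']].rename(columns={'WTeamID': 'TeamID', 'WScore': 'Score'})
losses = games[['Season', 'LTeamID', 'LScore']].rename(columns={'LTeamID': 'TeamID', 'LScore': 'Score'})
print(pd.concat([wins, losses]).reset_index(drop=True))  # four rows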
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") test_data = pd.read_csv("/kaggle/input/titanic/test.csv" )<feature_engineering>
season_score = season_result.groupby(['Season', 'TeamID'])['Score'].sum().reset_index() season_score
frames = [train_data, test_data] full_data = pd.concat(frames) master_data = full_data print(train_data.shape) print(test_data.shape) print(full_data.shape) full_data = full_data.drop(['PassengerId','Survived','Ticket','Cabin'], axis=1) print(full_data.shape) full_data['Name'] = full_data['Name'].str.rsplit(',').str[-1] full_data['Name'] = full_data['Name'].str.split('.').str[0] full_data['Name'] = full_data['Name'].replace(['Lady','the Countess','Capt','Col','Dona','Don','Dr','Major','','Rev','Sir','Jonkheer'], 'Rare', regex=True) full_data['Name'] = full_data['Name'].replace('Mlle', 'Miss', regex=True) full_data['Name'] = full_data['Name'].replace('Ms', 'Miss', regex=True) full_data['Name'] = full_data['Name'].replace('Mme', 'Mrs', regex=True) pd.crosstab(full_data['Name'], full_data['Sex']) full_data.dropna(subset=["Age"], inplace=True) print(np.where(pd.isnull(full_data))) full_data.dropna(how='any', inplace=True) print(np.where(pd.isnull(full_data))) y = full_data['Age'] X = full_data.drop(['Age'], axis=1) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=14) numerical_features = ['SibSp','Parch','Fare'] categorical_features = ['Sex','Embarked','Pclass','Name'] numerical_pipeline = make_pipeline(MinMaxScaler()) categorical_pipeline = make_pipeline(OneHotEncoder()) preprocessor = make_column_transformer((numerical_pipeline, numerical_features), (categorical_pipeline, categorical_features)) model = make_pipeline(preprocessor, GradientBoostingRegressor()) model.fit(X_train, y_train) params = {'gradientboostingregressor__n_estimators': [2000], 'gradientboostingregressor__learning_rate': [0.01]} grid = GridSearchCV(model, param_grid=params, cv=5) grid.fit(X_train, y_train) print(grid.best_params_) print('train score :', grid.best_score_) best_model = grid.best_estimator_ print('test score :', best_model.score(X_test, y_test)) train_data.dropna(subset=["Fare","Embarked"], inplace=True) train_data = train_data.drop(['Ticket','Cabin'], axis=1) print(train_data.shape) train_data['Name'] = train_data['Name'].str.rsplit(',').str[-1] train_data['Name'] = train_data['Name'].str.split('.').str[0] train_data['Name'] = train_data['Name'].replace(['Lady','the Countess','Capt','Col','Dona','Don','Dr','Major','','Rev','Sir','Jonkheer'], 'Rare', regex=True) train_data['Name'] = train_data['Name'].replace('Mlle', 'Miss', regex=True) train_data['Name'] = train_data['Name'].replace('Ms', 'Miss', regex=True) train_data['Name'] = train_data['Name'].replace('Mme', 'Mrs', regex=True) train_data train_data['age_predicted'] = best_model.predict(train_data) train_data['Age'].fillna(train_data['age_predicted'], inplace=True) train_data = train_data.drop(['age_predicted'], axis=1) train_data train_data['family_size'] = train_data['SibSp'] + train_data['Parch'] + 1 train_data split = [0, 3, 14, 24, 34, 44, 54, 64] names = ['0', '1', '2', '3', '4', '5', '6', '7'] d = dict(enumerate(names, 1)) train_data['Age'] = np.vectorize(d.get)(np.digitize(train_data['Age'], split)) train_data master_y = train_data['Survived'] master_X = train_data.drop(['Survived'], axis=1) print('master_y', master_y.shape) print('master_X', master_X.shape) master_X_train, master_X_test, master_y_train, master_y_test = train_test_split(master_X, master_y, test_size=0.2, random_state=14) master_numerical_features = ['SibSp','Parch','Fare'] master_categorical_features = ['Sex','Embarked','Pclass','Name','family_size'] master_numerical_pipeline = make_pipeline(MinMaxScaler()) master_categorical_pipeline = make_pipeline(OneHotEncoder()) master_preprocessor = make_column_transformer((master_numerical_pipeline, master_numerical_features), (master_categorical_pipeline, master_categorical_features)) master_model = make_pipeline(master_preprocessor, GradientBoostingClassifier()) master_model.fit(master_X_train, master_y_train) master_params = {'gradientboostingclassifier__n_estimators': [1000], 'gradientboostingclassifier__learning_rate': [0.01]} master_grid = GridSearchCV(master_model, param_grid=master_params, cv=5) master_grid.fit(master_X_train, master_y_train) print(master_grid.best_params_) print('Master train score :', master_grid.best_score_) master_best_model = master_grid.best_estimator_ print('Master test score :', master_best_model.score(master_X_test, master_y_test)) test_data = test_data.drop(['Ticket','Cabin'], axis=1) test_data['Fare'].fillna(value=test_data['Fare'].median(), inplace=True) test_data['Name'] = test_data['Name'].str.rsplit(',').str[-1] test_data['Name'] = test_data['Name'].str.split('.').str[0] test_data['Name'] = test_data['Name'].replace(['Lady','the Countess','Capt','Col','Dona','Don','Dr','Major','','Rev','Sir','Jonkheer'], 'Rare', regex=True) test_data['Name'] = test_data['Name'].replace('Mlle', 'Miss', regex=True) test_data['Name'] = test_data['Name'].replace('Ms', 'Miss', regex=True) test_data['Name'] = test_data['Name'].replace('Mme', 'Mrs', regex=True) test_data['age_predicted'] = best_model.predict(test_data) test_data['Age'].fillna(test_data['age_predicted'], inplace=True) test_data = test_data.drop(['age_predicted'], axis=1) split = [0, 3, 14, 24, 34, 44, 54, 64] names = ['0', '1', '2', '3', '4', '5', '6', '7'] d = dict(enumerate(names, 1)) test_data['Age'] = np.vectorize(d.get)(np.digitize(test_data['Age'], split)) test_data test_data['family_size'] = test_data['SibSp'] + test_data['Parch'] + 1 test_data predictions = master_best_model.predict(test_data).astype(int) predictions output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!") <load_from_csv>
tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Score':'WScoreT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Score':'LScoreT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result
train = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv") test = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv") train.columns<prepare_x_and_y>
tourney_win_result = tourney_result.drop(['Season', 'WTeamID', 'LTeamID'], axis=1) tourney_win_result.rename(columns={'WSeed':'Seed1', 'LSeed':'Seed2', 'WScoreT':'ScoreT1', 'LScoreT':'ScoreT2'}, inplace=True) tourney_win_result
train_x = train.drop("SalePrice",axis=1) train_y = train["SalePrice"] all_data = pd.concat([train_x,test],axis=0,sort=True) train_ID = train['Id'] test_ID = test['Id'] all_data.drop("Id", axis = 1, inplace = True) print("train_x: "+str(train_x.shape)) print("train_y: "+str(train_y.shape)) print("test: "+str(test.shape)) print("all_data: "+str(all_data.shape))<sort_values>
tourney_lose_result = tourney_win_result.copy() tourney_lose_result['Seed1'] = tourney_win_result['Seed2'] tourney_lose_result['Seed2'] = tourney_win_result['Seed1'] tourney_lose_result['ScoreT1'] = tourney_win_result['ScoreT2'] tourney_lose_result['ScoreT2'] = tourney_win_result['ScoreT1'] tourney_lose_result
all_data_na = all_data.isnull().sum()[all_data.isnull().sum() > 0].sort_values(ascending=False) all_data_na<sort_values>
tourney_win_result['Seed_diff'] = tourney_win_result['Seed1'] - tourney_win_result['Seed2'] tourney_win_result['ScoreT_diff'] = tourney_win_result['ScoreT1'] - tourney_win_result['ScoreT2'] tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2'] tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']
na_col_list = all_data.isnull().sum()[all_data.isnull().sum() > 0].index.tolist() all_data[na_col_list].dtypes.sort_values()<data_type_conversions>
tourney_win_result['result'] = 1 tourney_lose_result['result'] = 0 tourney_result = pd.concat(( tourney_win_result, tourney_lose_result)).reset_index(drop=True) tourney_result
all_data['LotFrontage'] = all_data.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median())) float_list = all_data[na_col_list].dtypes[all_data[na_col_list].dtypes == "float64"].index.tolist() obj_list = all_data[na_col_list].dtypes[all_data[na_col_list].dtypes == "object"].index.tolist() all_data[float_list] = all_data[float_list].fillna(0) all_data[obj_list] = all_data[obj_list].fillna("None") all_data.isnull().sum()[all_data.isnull().sum() > 0]<data_type_conversions>
test_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv')
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str) all_data['YrSold'] = all_data['YrSold'].astype(str) all_data['MoSold'] = all_data['MoSold'].astype(str )<filter>
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4])) test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9])) test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14])) test_df
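The slicing above assumes the sample-submission ID format 'SSSS_XXXX_YYYY' (season and the two team IDs); a quick sanity check on one hand-made ID:
sample_id = '2020_1101_1102'  # hypothetical ID in the documented format
season, team1, team2 = int(sample_id[:4]), int(sample_id[5:9]), int(sample_id[10:14])
print(season, team1, team2)  # 2020 1101 1102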
skewed_feats_over = skewed_feats[abs(skewed_feats)> 0.5].index for i in skewed_feats_over: print(min(all_data[i]))<feature_engineering>
test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df
all_data["FeetPerRoom"] = all_data["TotalSF"]/all_data["TotRmsAbvGrd"] all_data['YearBuiltAndRemod']=all_data['YearBuilt']+all_data['YearRemodAdd'] all_data['Total_Bathrooms'] =(all_data['FullBath'] +(0.5 * all_data['HalfBath'])+ all_data['BsmtFullBath'] +(0.5 * all_data['BsmtHalfBath'])) all_data['Total_porch_sf'] =(all_data['OpenPorchSF'] + all_data['3SsnPorch'] + all_data['EnclosedPorch'] + all_data['ScreenPorch'] + all_data['WoodDeckSF']) all_data['haspool'] = all_data['PoolArea'].apply(lambda x: 1 if x > 0 else 0) all_data['has2ndfloor'] = all_data['2ndFlrSF'].apply(lambda x: 1 if x > 0 else 0) all_data['hasgarage'] = all_data['GarageArea'].apply(lambda x: 1 if x > 0 else 0) all_data['hasbsmt'] = all_data['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0) all_data['hasfireplace'] = all_data['Fireplaces'].apply(lambda x: 1 if x > 0 else 0 )<count_values>
test_df['Seed1'] = test_df['Seed1'].map(lambda x: get_seed(x)) test_df['Seed2'] = test_df['Seed2'].map(lambda x: get_seed(x)) test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2'] test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2'] test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1) test_df
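get_seed is defined in an earlier cell that is not part of this dump; a hypothetical reconstruction, consistent with the int(x[1:3]) and str.extract('(\d+)') variants the other kernels below use on the same seed strings:
def get_seed(seed):
    # Hypothetical helper: drop the region letter of a seed such as 'W01'
    # (or 'W11a' for play-in teams) and keep the number, e.g. 'W01' -> 1.
    return int(seed[1:3])

print(get_seed('W01'))  # 1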
all_data.dtypes.value_counts()<categorify>
X = tourney_result.drop('result', axis=1) y = tourney_result.result
all_data = pd.get_dummies(all_data,columns=cal_list) all_data.shape<import_modules>
params = {'num_leaves': 470, 'min_child_weight': 0.034, 'feature_fraction': 0.379, 'bagging_fraction': 0.418, 'min_data_in_leaf': 106, 'objective': 'binary', 'max_depth': -1, 'learning_rate': 0.01, "boosting_type": "gbdt", "bagging_seed": 11, "metric": 'logloss', "verbosity": -1, 'reg_alpha': 0.3899, 'reg_lambda': 0.648, 'random_state': 47, }
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.model_selection import KFold, cross_val_score, train_test_split from sklearn.metrics import mean_squared_error import xgboost as xgb from sklearn.model_selection import GridSearchCV import lightgbm as lgb<split>
NFOLDS = 5 folds = KFold(n_splits=NFOLDS) columns = X.columns splits = folds.split(X, y) y_preds = np.zeros(test_df.shape[0]) y_oof = np.zeros(X.shape[0]) feature_importances = pd.DataFrame() feature_importances['feature'] = columns for fold_n, (train_index, valid_index) in enumerate(splits): print('Fold:', fold_n+1) X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] dtrain = lgb.Dataset(X_train, label=y_train) dvalid = lgb.Dataset(X_valid, label=y_valid) clf = lgb.train(params, dtrain, 10000, valid_sets=[dtrain, dvalid], verbose_eval=200) feature_importances[f'fold_{fold_n + 1}'] = clf.feature_importance() y_pred_valid = clf.predict(X_valid) y_oof[valid_index] = y_pred_valid y_preds += clf.predict(test_df) / NFOLDS del X_train, X_valid, y_train, y_valid gc.collect()
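Because y_oof accumulates an out-of-fold prediction for every training row, the cross-validated competition metric can be checked directly afterwards; a minimal sketch, assuming scikit-learn is available:
from sklearn.metrics import log_loss

print('OOF log loss:', log_loss(y, y_oof))  # lower is better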
train_x, valid_x, train_y, valid_y = train_test_split( train_x, train_y, test_size=0.3, random_state=0) <train_model>
submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') submission_df['Pred'] = y_preds submission_df
dtrain = xgb.DMatrix(train_x, label=train_y) dvalid = xgb.DMatrix(valid_x,label=valid_y) num_round = 5000 evallist = [(dvalid, 'eval'),(dtrain, 'train')] evals_result = {} param = { 'max_depth': 3, 'eta': 0.01, 'objective': 'reg:squarederror', } bst = xgb.train( param, dtrain, num_round, evallist, evals_result=evals_result, early_stopping_rounds=1000 )<save_to_csv>
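With early_stopping_rounds set, the 2020-era XGBoost Booster records the best round; a sketch, assuming the pre-1.6 API these kernels were written against:
# best_iteration / best_ntree_limit are set on the Booster when early stopping triggers.
print('best iteration:', bst.best_iteration)
valid_preds = bst.predict(dvalid, ntree_limit=bst.best_ntree_limit)  # use only the best trees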
submission_df.to_csv('submission.csv', index=False )
<set_options><EOS>
feature_importances['average'] = feature_importances[[f'fold_{fold_n + 1}' for fold_n in range(folds.n_splits)]].mean(axis=1) feature_importances.to_csv('feature_importances.csv') plt.figure(figsize=(10, 6)) sns.barplot(data=feature_importances.sort_values(by='average', ascending=False).head(7), x='average', y='feature') plt.title('Top 7 feature importances averaged over {} folds'.format(folds.n_splits))
kernel_id: 8,327,652
<SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-mens-tournament<load_from_csv>
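The metric named here is binary log loss; a minimal self-contained sketch of its definition, using only NumPy:
import numpy as np

def binary_log_loss(y_true, p):
    # Mean of -[y*log(p) + (1-y)*log(1-p)] over all samples.
    p = np.clip(p, 1e-15, 1 - 1e-15)  # guard against log(0)
    return float(np.mean(-(y_true * np.log(p) + (1 - y_true) * np.log(1 - p))))

print(binary_log_loss(np.array([1, 0]), np.array([0.9, 0.2])))  # ~0.164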
import os from pathlib import Path import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import cross_validate, GridSearchCV from sklearn.pipeline import make_pipeline from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score
train = pd.read_csv("/kaggle/input/titanic/train.csv", index_col=0) test = pd.read_csv("/kaggle/input/titanic/test.csv", index_col=0 )<prepare_x_and_y>
FILEDIR = Path('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament' )
Y = train["Survived"].values<drop_column>
sub = pd.read_csv(FILEDIR / 'MSampleSubmissionStage1_2020.csv', usecols=['ID']) id_splited = sub['ID'].str.split('_', expand=True ).astype(int ).rename(columns={0: 'Season', 1: 'Team1', 2: 'Team2'}) sub = pd.concat([sub, id_splited], axis=1 ).set_index(['Season', 'Team1', 'Team2'] ).sort_index()
train = train.drop(columns=["Name", "Ticket", "Cabin"] )<categorify>
tourney_teams = {} tourney_teams_all = set() for season in sub.index.get_level_values('Season').drop_duplicates(): tourney_teams[season] = set() tourney_teams[season].update(sub.loc[season].index.get_level_values('Team1')) tourney_teams[season].update(sub.loc[season].index.get_level_values('Team2')) tourney_teams_all.update(tourney_teams[season]) {k: len(v) for k, v in tourney_teams.items()}
one_hot_sex = OneHotEncoder(sparse=False) sex_encoded = one_hot_sex.fit_transform(train["Sex"].values.reshape(-1, 1)) X_categorical = sex_encoded<categorify>
conferences = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeamConferences.csv') conferences = pd.concat([conferences.query('Season == @season and TeamID in @teams') for season, teams in tourney_teams.items()]) conferences = conferences.set_index(['Season', 'TeamID']).sort_index()
train["Pclass"].describe() train["Pclass"].hist() train["Pclass"].isnull().sum() one_hot_pclass = OneHotEncoder(sparse=False) pclass_encoded = one_hot_pclass.fit_transform(train["Pclass"].values.reshape(-1, 1)) X_categorical = np.hstack(( X_categorical, pclass_encoded))<categorify>
coaches = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeamCoaches.csv') coaches = pd.concat([coaches.query('Season == @season and TeamID in @team') for season, team in tourney_teams.items()]) coaches = coaches[coaches['LastDayNum'] == 154].set_index(['Season', 'TeamID']).sort_index()[['CoachName']]
train["Embarked"].isnull().sum() train["Embarked"].loc[train["Embarked"].isnull() ] = train["Embarked"].value_counts().idxmax() train["Embarked"].isnull().sum() train["Embarked"].hist() one_hot_embarked = OneHotEncoder(sparse=False) embarked_encoded = one_hot_embarked.fit_transform(train["Embarked"].values.reshape(-1, 1)) X_categorical = np.hstack(( X_categorical, embarked_encoded))<normalization>
teams = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeams.csv', usecols=['TeamID', 'FirstD1Season']) teams['FirstD1Season'] = 2020 - teams['FirstD1Season'] teams = pd.concat([teams.query('TeamID in @team').assign(Season=season) for season, team in tourney_teams.items()]) teams = teams.set_index(['Season', 'TeamID']).sort_index()
scaler = StandardScaler() X_numerical = scaler.fit_transform(X_numerical) X = np.hstack(( X_numerical, X_categorical))<train_model>
seeds = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MNCAATourneySeeds.csv') seeds = pd.concat([seeds.query('Season == @season and TeamID in @teams') for season, teams in tourney_teams.items()]) seeds = seeds.set_index(['Season', 'TeamID']).sort_index() seeds['Region'] = seeds['Seed'].str[0] seeds['Number'] = seeds['Seed'].str[1:3].astype(int) del seeds['Seed']
kf = KFold(20, shuffle=True, random_state=41) classifier = GradientBoostingClassifier() classifier_accuracy = [] for train_index, test_index in kf.split(train): train_X, train_Y = X[train_index], Y[train_index] test_X, test_Y = X[test_index], Y[test_index] classifier.fit(train_X, train_Y) predictions = classifier.predict(test_X) accuracy = accuracy_score(test_Y, predictions) classifier_accuracy.append(accuracy) print(sum(classifier_accuracy)/len(classifier_accuracy))<categorify>
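The hand-rolled fold loop above is equivalent to scikit-learn's one-liner; a sketch assuming the same X, Y, splitter, and classifier:
from sklearn.model_selection import cross_val_score

scores = cross_val_score(GradientBoostingClassifier(), X, Y,
                         cv=KFold(20, shuffle=True, random_state=41),
                         scoring='accuracy')
print(scores.mean())  # same folds, so it should match the printed average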
regular = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MRegularSeasonDetailedResults.csv') regular = regular.drop(columns=['DayNum', 'LTeamID']) regular = pd.concat([regular.query('Season == @season and WTeamID in @teams') for season, teams in tourney_teams.items()]) regular = regular.groupby(['Season', 'WTeamID']).sum() regular = regular.rename_axis(index=['Season', 'TeamID'])
test_sex_encoded = one_hot_sex.transform(test["Sex"].values.reshape(-1, 1)) test["Age"].loc[test["Age"].isnull() ] = mean_age test_pclass_encoded = one_hot_pclass.transform(test["Pclass"].values.reshape(-1, 1)) test["Embarked"].loc[test["Embarked"].isnull() ] = train["Embarked"].value_counts().idxmax() test_embarked_encoded = one_hot_embarked.transform(test["Embarked"].values.reshape(-1, 1)) test["Fare"].loc[test["Fare"].isnull() ] = train["Fare"].mean() test_X_categorical = np.hstack(( test_sex_encoded, test_pclass_encoded, test_embarked_encoded)) test_X_numerical = np.hstack(( test["Age"].values.reshape(-1, 1), test["SibSp"].values.reshape(-1, 1), test["Parch"].values.reshape(-1, 1), test["Fare"].values.reshape(-1, 1))) test_X_numerical = scaler.transform(test_X_numerical) test_X = np.hstack(( test_X_numerical, test_X_categorical))<predict_on_test>
ctcsr = pd.concat([coaches, teams, conferences, seeds, regular], axis=1 )
classifier.fit(train_X, train_Y) predictions = classifier.predict(test_X )<save_to_csv>
result = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MNCAATourneyCompactResults.csv') result = result[result['Season'] >= 2015].set_index(['Season', 'WTeamID', 'LTeamID'] )
data = [] for (passenger_id, passenger_data), prediction in zip(test.iterrows(), predictions): data.append([passenger_id, prediction]) series = pd.DataFrame(data, columns=["PassengerID", "Survived"]).set_index("PassengerID") series.to_csv("predictions.csv")<define_variables>
merged_teams = pd.concat( [ctcsr.loc[[(season, wteam),(season, lteam)], :] for season, wteam, lteam, in result.index]) team1 = merged_teams.iloc[::2, :].reset_index('TeamID') team2 = merged_teams.iloc[1::2, :].reset_index('TeamID') merged_teams = pd.concat([ pd.concat([team1.add_suffix('1'), team2.add_suffix('2')], axis=1 ).assign(Res=1), pd.concat([team2.add_suffix('1'), team1.add_suffix('2')], axis=1 ).assign(Res=0), ] ).reset_index().set_index(['Season', 'TeamID1', 'TeamID2'] ).sort_index()
inputFolderPath = '/kaggle/input/data-series-summarization-project-v3/' outputFolderPath = '/kaggle/working/' filename = 'synthetic_size50k_len256_znorm.bin' inputFilePath = inputFolderPath + filename<categorify>
x_columns = merged_teams.columns[merged_teams.columns != 'Res'] X = merged_teams[x_columns] for column in X.select_dtypes(include='number'): X[column] = MinMaxScaler().fit_transform(X[column].to_numpy().reshape(-1,1)) X = pd.get_dummies(X, columns=x_columns[X.dtypes == 'object'] )
def sum32(inputFilePath): summary_filepath = outputFolderPath + filename + '_sum32' time_series50k = np.fromfile(inputFilePath, dtype=np.float32 ).reshape(-1, 256) summary50k = [] for time_series in time_series50k: summary50k.append(time_series[0]) summary50knp = np.array(summary50k,dtype=np.float32) summary50knp.tofile(summary_filepath) return summary_filepath def rec32(summary_filepath): reconstructed_filepath = summary_filepath + '_rec32' summary50k = np.fromfile(summary_filepath, dtype=np.float32) reconstructed50k = [] for summary in summary50k: reconstructed50k.append([summary]*256) reconstructed50knp = np.array(reconstructed50k,dtype=np.float32) reconstructed50knp.tofile(reconstructed_filepath) return reconstructed_filepath def sum64(inputFilePath): summary_filepath = outputFolderPath + filename + '_sum64' time_series50k = np.fromfile(inputFilePath, dtype=np.float32 ).reshape(-1, 256) summary50k = [] for time_series in time_series50k: summary50k.append(time_series[0]) summary50k.append(time_series[0]) summary50knp = np.array(summary50k,dtype=np.float32) summary50knp.tofile(summary_filepath) return summary_filepath def rec64(summary_filepath): reconstructed_filepath = summary_filepath + '_rec64' summary50k = np.fromfile(summary_filepath, dtype=np.float32 ).reshape(-1, 2) reconstructed50k = [] for summary in summary50k: reconstructed50k.append([summary[0]]*256) reconstructed50knp = np.array(reconstructed50k,dtype=np.float32) reconstructed50knp.tofile(reconstructed_filepath) return reconstructed_filepath def sum128(inputFilePath): summary_filepath = outputFolderPath + filename + '_sum128' time_series50k = np.fromfile(inputFilePath, dtype=np.float32 ).reshape(-1, 256) summary50k = [] for time_series in time_series50k: summary50k.append(time_series[0]) summary50k.append(time_series[0]) summary50k.append(time_series[0]) summary50k.append(time_series[0]) summary50knp = np.array(summary50k,dtype=np.float32) summary50knp.tofile(summary_filepath) return summary_filepath def rec128(summary_filepath): reconstructed_filepath = summary_filepath + '_rec128' summary50k = np.fromfile(summary_filepath, dtype=np.float32 ).reshape(-1, 4) reconstructed50k = [] for summary in summary50k: reconstructed50k.append([summary[0]]*256) reconstructed50knp = np.array(reconstructed50k,dtype=np.float32) reconstructed50knp.tofile(reconstructed_filepath) return reconstructed_filepath <save_to_csv>
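Each of these summaries keeps only the first value of a 256-point series and tiles it back out, i.e. a constant reconstruction; a hedged sketch of measuring how lossy that is, assuming the same file layout (flat float32, 256 points per series):
import numpy as np

def reconstruction_rmse(original_path, reconstructed_path):
    # Both files store float32 series of length 256 back to back.
    orig = np.fromfile(original_path, dtype=np.float32).reshape(-1, 256)
    rec = np.fromfile(reconstructed_path, dtype=np.float32).reshape(-1, 256)
    return float(np.sqrt(np.mean((orig - rec) ** 2)))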
y = merged_teams['Res']
import csv s32 = sum32(inputFilePath) r32 = rec32(s32) pred32 = np.fromfile(r32, dtype=np.float32) s64 = sum64(inputFilePath) r64 = rec64(s64) pred64 = np.fromfile(r64, dtype=np.float32) s128 = sum128(inputFilePath) r128 = rec128(s128) pred128 = np.fromfile(r128, dtype=np.float32) output = [] globalCsvIndex = 0 for i in range(len(pred32)): output.append([globalCsvIndex, pred32[i]]) globalCsvIndex = globalCsvIndex + 1 for i in range(len(pred64)): output.append([globalCsvIndex, pred64[i]]) globalCsvIndex = globalCsvIndex + 1 for i in range(len(pred128)): output.append([globalCsvIndex, pred128[i]]) globalCsvIndex = globalCsvIndex + 1 with open('submission.csv', 'w', newline='') as file: writer = csv.writer(file) writer.writerow(['id','expected']) writer.writerows(output)<set_options>
clfs = {} clfs['SVC'] = { 'instance': SVC(probability=True), 'params': [ {'kernel': ['linear'], 'C': [0.01, 0.05, 0.1, 0.5, 1]}, {'kernel': ['rbf'], 'C': [1, 10, 50, 100, 250], 'gamma': [0.1, 0.2, 0.3]} ] } clfs['RandomForestClassifier'] = { 'instance': RandomForestClassifier(n_jobs=-1), 'params': { 'n_estimators': [25, 50, 100], 'criterion': ['gini', 'entropy'], 'max_depth': [10, 25, 50, None] } } clfs['LogisticRegression'] = { 'instance': LogisticRegression(max_iter=500, n_jobs=-1), 'params': [ {'penalty': ['l2'], 'C': [0.1, 0.5, 1, 5, 10]}, {'penalty': ['l1'], 'solver': ['liblinear', 'saga'], 'C': [0.1, 0.5, 1, 5, 10]}, {'penalty': ['elasticnet'], 'C': [0.1, 0.5, 1, 5, 10], 'l1_ratio': [0.1, 0.3, 0.5, 0.7, 0.9]} ] }
%matplotlib inline sns.set_style('darkgrid') <load_from_csv>
for clf_name, clf in clfs.items(): print('<{}>'.format(clf_name)) print(' training...'.format(clf_name)) gs = GridSearchCV(clf['instance'], param_grid=clf['params'], cv=5, n_jobs=-1) gs.fit(X, y) clfs[clf_name]['best_estimator'] = gs.best_estimator_ print(' best_score: {:.3f}'.format(gs.best_score_)) print(' best_params: {}'.format(gs.best_params_))
train=pd.read_csv('../input/titanic/train.csv') test=pd.read_csv('../input/titanic/test.csv')<concatenate>
vote = VotingClassifier(estimators=[(clf_name, clf['best_estimator']) for clf_name, clf in clfs.items()], voting='soft', n_jobs=-1) vote.fit(X, y) vote.estimators_
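With voting='soft', the ensemble averages the members' predicted probabilities and takes the most probable class; a minimal sketch with hypothetical member outputs:
import numpy as np

p_svc = np.array([0.9, 0.4, 0.2])   # hypothetical class-1 probabilities, member 1
p_rf = np.array([0.7, 0.7, 0.1])    # hypothetical class-1 probabilities, member 2
p_vote = (p_svc + p_rf) / 2         # unweighted soft vote
print((p_vote > 0.5).astype(int))   # [1 1 0] -> the ensemble's predicted labels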
all_data = pd.concat([train.drop(['Survived'],axis=1),test]) combine_data = [train,test]<sort_values>
for clf_name, clf in clfs.items(): score = accuracy_score(y, clf['best_estimator'].predict(X)) print(clf_name, score) print('Vote', accuracy_score(y, vote.predict(X)))
vacant_data = all_data.isnull().sum().sort_values(ascending=False) total_data = all_data.isnull().count().sort_values(ascending=False) vacant_percent =(vacant_data/total_data) missing_df = pd.concat([vacant_data,vacant_percent],axis=1,keys=['Total','Percent']) missing_df.head()<feature_engineering>
predict_proba = pd.DataFrame( {clf_name: clf['best_estimator'].predict_proba(X)[:, 1] for clf_name, clf in clfs.items() }, index=X.index) predict_proba['Vote'] = vote.predict_proba(X)[:, 1] _ = predict_proba.plot(kind='hist', bins=50, grid=True, alpha=0.5, figsize=(16,8))
for dataset in combine_data: dataset['family_members']= dataset['SibSp']+dataset['Parch']+1 train.head()<feature_engineering>
columns = predict_proba.columns for column in columns: sub[column] = 0.5 mask = [idx for idx in sub.index if idx in X.index] sub.loc[mask, columns] = predict_proba.loc[mask, columns] for column in columns: sub[['ID', column]].rename(columns={column: 'pred'} ).to_csv('predict_proba-{}.csv'.format(column), index=False )
for dataset in combine_data: dataset['Title']=dataset['Name'].str.extract('([A-Za-z]+)\.') common_title = [] for title in train['Title'].unique() : if train[train['Title'] == title]['Title'].count() > 20: common_title.append(title) print(common_title) def id_rare(cols): if cols in common_title: return cols else: return 'Rare' for dataset in combine_data: dataset['Title']=dataset['Title'].apply(id_rare) title_mapping = {'Mrs':1,'Miss':2,'Master':3,'Mr':4,'Rare':5} for dataset in combine_data: dataset['Title']=dataset['Title'].map(title_mapping) train.head()<categorify>
predict = pd.DataFrame({clf_name: clf['best_estimator'].predict(X) for clf_name, clf in clfs.items()}, index=X.index) predict['Vote'] = vote.predict(X) columns = predict.columns for column in columns: sub[column] = 0.5 mask = [idx for idx in sub.index if idx in X.index] sub.loc[mask, columns] = predict.loc[mask, columns] for column in columns: sub[['ID', column]].rename(columns={column: 'pred'}).to_csv('predict-{}.csv'.format(column), index=False)
map_sex = {'male':0,'female':1} for dataset in combine_data: dataset['Sex']=dataset['Sex'].map(map_sex )<feature_engineering>
import shutil target_name = 'predict_proba-RandomForestClassifier.csv' new_name = 'final-submission.csv' shutil.copy(target_name, new_name)
kernel_id: 8,165,598
for dataset in combine_data: dataset['Age']=dataset.groupby(['Sex','Pclass'])['Age'].apply(lambda x : x.fillna(x.median()))<feature_engineering>
import pandas as pd import numpy as np from sklearn.linear_model import LogisticRegression import matplotlib.pyplot as plt from sklearn.utils import shuffle from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold import lightgbm as lgb import xgboost as xgb from xgboost import XGBClassifier import gc from sklearn import preprocessing from sklearn.model_selection import RandomizedSearchCV from sklearn.ensemble import RandomForestRegressor
train['Embarked']=train["Embarked"].fillna('C' )<feature_engineering>
Tourney_Compact_Results = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyCompactResults.csv') Tourney_Seeds = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv')
fill_fare = test[(test['Pclass']==3)&(test['Sex']==0)]['Fare'].median() test['Fare']=test['Fare'].fillna(fill_fare )<feature_engineering>
RegularSeason_Compact_Results = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonCompactResults.csv') MSeasons = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MSeasons.csv') MTeams=pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MTeams.csv')
for dataset in combine_data: dataset['Age']=dataset['Age'].astype(int) for dataset in combine_data: dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0 dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1 dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2 dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3 dataset.loc[ dataset['Age'] > 64, 'Age'] = 4 <feature_engineering>
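The chained .loc assignments implement fixed-width age bins; the same binning can be written with pd.cut, a sketch assuming the same edges:
import pandas as pd

ages = pd.Series([5, 22, 40, 60, 70])
# Right-closed bins (16, 32], (32, 48], ... matching the comparisons above.
binned = pd.cut(ages, bins=[-1, 16, 32, 48, 64, 200], labels=[0, 1, 2, 3, 4]).astype(int)
print(binned.tolist())  # [0, 1, 2, 3, 4]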
Tourney_Results_Compact=pd.merge(Tourney_Compact_Results, Tourney_Seeds, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Seed':'WinningSeed'},inplace=True) Tourney_Results_Compact=Tourney_Results_Compact.drop(['TeamID'],axis=1) Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, Tourney_Seeds, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Seed':'LoosingSeed'}, inplace=True) Tourney_Results_Compact=Tourney_Results_Compact.drop(['TeamID','NumOT','WLoc'],axis=1) Tourney_Results_Compact
for dataset in combine_data: dataset['is_alone']=0 dataset.loc[dataset['family_members']==1,'is_alone'] = 1<categorify>
Tourney_Results_Compact=Tourney_Results_Compact.drop(['WScore','LScore'],axis=1) Tourney_Results_Compact.head()
map_embarked = {'S':0,'C':1,'Q':2} for dataset in combine_data: dataset['Embarked']=dataset['Embarked'].map(map_embarked) train.head(2 )<drop_column>
Tourney_Results_Compact['WinningSeed'] = Tourney_Results_Compact['WinningSeed'].str.extract('(\d+)', expand=True) Tourney_Results_Compact['LoosingSeed'] = Tourney_Results_Compact['LoosingSeed'].str.extract('(\d+)', expand=True) Tourney_Results_Compact.WinningSeed = pd.to_numeric(Tourney_Results_Compact.WinningSeed, errors='coerce') Tourney_Results_Compact.LoosingSeed = pd.to_numeric(Tourney_Results_Compact.LoosingSeed, errors='coerce' )
train = train.drop(['PassengerId','Name','Ticket','Fare','Cabin','SibSp','Parch'],axis=1) test = test.drop(['PassengerId','Name','Ticket','Fare','Cabin','SibSp','Parch'],axis=1 )<split>
season_winning_team = RegularSeason_Compact_Results[['Season', 'WTeamID', 'WScore']] season_losing_team = RegularSeason_Compact_Results[['Season', 'LTeamID', 'LScore']] season_winning_team.rename(columns={'WTeamID':'TeamID','WScore':'Score'}, inplace=True) season_losing_team.rename(columns={'LTeamID':'TeamID','LScore':'Score'}, inplace=True) RegularSeason_Compact_Results = pd.concat(( season_winning_team, season_losing_team)).reset_index(drop=True) RegularSeason_Compact_Results
X=train.drop('Survived',axis=1) y=train['Survived'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=100 )<compute_train_metric>
RegularSeason_Compact_Results_Final = RegularSeason_Compact_Results.groupby(['Season', 'TeamID'])['Score'].sum().reset_index() RegularSeason_Compact_Results_Final
log_model=LogisticRegression() log_model.fit(X_train,y_train) log_model_pred = log_model.predict(X_test) acc_log_model = round(log_model.score(X_train,y_train)*100,2) print('Accuracy of Logistic Model:',acc_log_model) print('--*'*10) print(classification_report(y_test,log_model_pred)) print('--*'*10) print(confusion_matrix(y_test,log_model_pred))<choose_model_class>
Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, RegularSeason_Compact_Results_Final, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Score':'WScoreTotal'}, inplace=True) Tourney_Results_Compact
tree_model=DecisionTreeClassifier() tree_model.fit(X_train,y_train) tree_model_pred = tree_model.predict(X_test) acc_tree_model = round(tree_model.score(X_train,y_train)*100,2) print('Accuracy of tree Model:',acc_tree_model) print('--*'*10) print(classification_report(y_test,tree_model_pred)) print('--*'*10) print(confusion_matrix(y_test,tree_model_pred))<compute_train_metric>
Tourney_Results_Compact = Tourney_Results_Compact.drop('TeamID', axis=1) Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, RegularSeason_Compact_Results_Final, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Score':'LScoreTotal'}, inplace=True) Tourney_Results_Compact = Tourney_Results_Compact.drop('TeamID', axis=1) Tourney_Results_Compact.to_csv('Tourney_Win_Results_Train.csv', index=False) Tourney_Results_Compact=Tourney_Results_Compact[Tourney_Results_Compact['Season'] < 2015] Tourney_Results_Compact
rf_model=RandomForestClassifier() rf_model.fit(X_train,y_train) rf_model_pred = rf_model.predict(X_test) acc_rf_model = round(rf_model.score(X_train,y_train)*100,2) print('Accuracy of Random Forest Model:',acc_rf_model) print('--*'*10) print(classification_report(y_test,rf_model_pred)) print('--*'*10) print(confusion_matrix(y_test,rf_model_pred))<choose_model_class>
Tourney_Win_Results=Tourney_Results_Compact.drop(['Season','WTeamID','LTeamID','DayNum'],axis=1) Tourney_Win_Results
final_log = LogisticRegression() final_log.fit(X,y) final_log_pred = final_log.predict(test) final_tree_model=DecisionTreeClassifier() final_tree_model.fit(X,y) final_tree_model_pred = final_tree_model.predict(test) final_rf=RandomForestClassifier() final_rf.fit(X,y) final_rf_pred = final_rf.predict(test) <load_from_csv>
Tourney_Win_Results.rename(columns={'WinningSeed':'Seed1', 'LoosingSeed':'Seed2', 'WScoreTotal':'ScoreT1', 'LScoreTotal':'ScoreT2'}, inplace=True )
result= pd.read_csv('../input/titanic/gender_submission.csv') result=result.set_index('PassengerId') result.head(3)<save_to_csv>
tourney_lose_result = Tourney_Win_Results.copy() tourney_lose_result['Seed1'] = Tourney_Win_Results['Seed2'] tourney_lose_result['Seed2'] = Tourney_Win_Results['Seed1'] tourney_lose_result['ScoreT1'] = Tourney_Win_Results['ScoreT2'] tourney_lose_result['ScoreT2'] = Tourney_Win_Results['ScoreT1'] tourney_lose_result
result['Survived']= final_log_pred result.to_csv('output_log.csv') result['Survived']= final_tree_model_pred result.to_csv('output_tree.csv') result['Survived']= final_rf_pred result.to_csv('output_rf.csv') <load_from_csv>
Tourney_Win_Results['Seed_diff'] = Tourney_Win_Results['Seed1'] - Tourney_Win_Results['Seed2'] Tourney_Win_Results['ScoreT_diff'] = Tourney_Win_Results['ScoreT1'] - Tourney_Win_Results['ScoreT2'] tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2'] tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']
train_data = pd.read_csv("/kaggle/input/titanic/train.csv", index_col='PassengerId') train_data.head()<load_from_csv>
Tourney_Win_Results['result'] = 1 tourney_lose_result['result'] = 0 tourney_result_Final = pd.concat(( Tourney_Win_Results, tourney_lose_result)).reset_index(drop=True) tourney_result_Final.to_csv('Tourneyvalidate.csv', index=False )
test_data = pd.read_csv("/kaggle/input/titanic/test.csv", index_col='PassengerId') test_data.head()<count_missing_values>
tourney_result_Final1 = tourney_result_Final[[ 'Seed1', 'Seed2', 'ScoreT1', 'ScoreT2', 'Seed_diff', 'ScoreT_diff', 'result']]
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any() ] cols_with_missing<define_variables>
tourney_result_Final1.loc[lambda x:(x['Seed1'].isin([14,15,16])) &(x['Seed2'].isin([1,2,3])) ,'result' ] = 0
cat =(train_data.dtypes == 'object') object_cols = list(cat[cat].index) object_cols<count_missing_values>
test_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv')
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any() ] test_cols_with_missing<filter>
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4])) test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9])) test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14])) test_df
test_cat =(test_data.dtypes == 'object') test_object_cols = list(test_cat[test_cat].index) test_object_cols<count_unique_values>
test_df = pd.merge(test_df, Tourney_Seeds, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, Tourney_Seeds, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1 )
object_nunique = list(map(lambda col: train_data[col].nunique() , object_cols)) d = dict(zip(object_cols, object_nunique)) object_nunique_test = list(map(lambda col: test_data[col].nunique() , test_object_cols)) d_test = dict(zip(test_object_cols, object_nunique_test)) sorted(d.items() , key=lambda x: x[1] )<sort_values>
test_df = pd.merge(test_df, RegularSeason_Compact_Results_Final, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, RegularSeason_Compact_Results_Final, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df test_df.to_csv('test_df_Test.csv', index=False )
sorted(d.items() , key=lambda x: x[1] )<categorify>
test_df['Seed1'] = test_df['Seed1'].str.extract('(\d+)', expand=True) test_df['Seed2'] = test_df['Seed2'].str.extract('(\d+)', expand=True) test_df.Seed1 = pd.to_numeric(test_df.Seed1, errors='coerce') test_df.Seed2 = pd.to_numeric(test_df.Seed2, errors='coerce' )
low_cardinality_cols = [col for col in object_cols if train_data[col].nunique() < 10] high_cardinality_cols = list(set(object_cols)-set(low_cardinality_cols)) print('Categorical columns that will be one-hot encoded:', low_cardinality_cols) print(' Categorical columns that will be dropped from the dataset:', high_cardinality_cols )<define_variables>
test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2'] test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2'] test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1) test_df
object_cols = [col for col in train_data.columns if train_data[col].dtype == "object"] good_label_cols = [col for col in object_cols if set(train_data[col])== set(test_data[col])] bad_label_cols = list(set(object_cols)-set(good_label_cols)) print('Categorical columns that will be label encoded:', good_label_cols) print(' Categorical columns that will be dropped from the dataset:', bad_label_cols )<split>
X = tourney_result_Final1.drop('result', axis=1) y = tourney_result_Final1.result
X = train_data X_test = test_data X.dropna(axis=0, subset=['Survived'], inplace=True) y = X.Survived X.drop(['Survived'], axis=1, inplace=True) X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0) cols_with_missing = [col for col in X.columns if X[col].isnull().any() ] categorical_cols = [cname for cname in X_train.columns if X_train[cname].nunique() < 10 and X_train[cname].dtype == "object"] numerical_cols = [cname for cname in X_train.columns if X_train[cname].dtype in ['int64', 'float64']] <categorify>
df = pd.concat([X, test_df], axis=0, sort=False ).reset_index(drop=True) df_log = pd.DataFrame( preprocessing.MinMaxScaler().fit_transform(df), columns=df.columns, index=df.index ) train_log, test_log = df_log.iloc[:len(X),:], df_log.iloc[len(X):,:].reset_index(drop=True )
numerical_transformer = SimpleImputer(strategy='mean') categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='most_frequent')) , ('onehot', OneHotEncoder(handle_unknown='ignore')) ]) preprocessor = ColumnTransformer( transformers=[ ('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols) ] )<choose_model_class>
logreg = LogisticRegression() logreg.fit(train_log, y) coeff_logreg = pd.DataFrame(train_log.columns.delete(0)) coeff_logreg.columns = ['feature'] coeff_logreg["score_logreg"] = pd.Series(logreg.coef_[0]) coeff_logreg.sort_values(by='score_logreg', ascending=False )
model = RandomForestClassifier(n_estimators=100, criterion='gini', max_leaf_nodes=100, random_state=0)<compute_train_metric>
y_logreg_train = logreg.predict(train_log) y_logreg_pred = logreg.predict_proba(test_log )
def get_score(n_estimators, max_leaf_nodes): my_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', RandomForestClassifier(n_estimators=n_estimators,max_leaf_nodes=max_leaf_nodes, random_state=0)) ]) scores = -1 * cross_val_score(my_pipeline, X, y, cv=3, scoring='neg_mean_absolute_error') return scores.mean()<define_variables>
clf = RandomForestClassifier(n_estimators=200,max_depth=90,min_samples_leaf=300,min_samples_split=200,max_features=5) clf.fit(train_log, y) clf_probs = clf.predict_proba(test_log )
k=100 results = {} for i in range(1,9): for k in range(1,8): results[50*i, 50*k] = get_score(50*i,50*k) results<set_options>
y_pred_df_random = pd.DataFrame(clf_probs) y_pred_1 = y_pred_df_random.iloc[:,[1]] y_pred_df_random
%matplotlib inline plt.plot(list(results.keys()),list(results.values())) plt.show()<find_best_model_class>
from sklearn.model_selection import KFold from sklearn.model_selection import GridSearchCV
my_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', RandomForestClassifier(n_estimators=400,max_leaf_nodes=50,random_state=0)) ]) scores = -1 * cross_val_score(my_pipeline, X, y, cv=3, scoring='neg_mean_absolute_error') my_pipeline.fit(X, y) preds = my_pipeline.predict(X_test) <save_to_csv>
submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') submission_df['Pred'] = y_pred_1 submission_df
output = pd.DataFrame({'PassengerId': X_test.index, 'Survived': preds}) output.to_csv('submission_final_5.csv', index=False )<train_model>
submission_df.to_csv('submission_New6.csv', index=False )
kernel_id: 8,067,852
my_model_2 = XGBRegressor(n_estimators=300000, learning_rate=1.2) my_model_2.fit(X, y, early_stopping_rounds = 5, eval_set = [(X_valid, y_valid)], verbose=False) predictions_2 = my_model_2.predict(X_test) <save_to_csv>
tourney_result = pd.read_csv('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyDetailedResults.csv') tourney_seed = pd.read_csv('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv') season_result = pd.read_csv('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonDetailedResults.csv') test_df = pd.read_csv('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv' )
output = pd.DataFrame({'PassengerId': X_test.index, 'Survived': predictions_2}) output.to_csv('submission_final_XGBoost.csv', index=False )<set_options>
season_win_result = season_result[['Season', 'WTeamID', 'WScore', 'WFGM', 'WFGA', 'WFGM3', 'WFGA3', 'WFTM', 'WFTA', 'WOR', 'WDR', 'WAst', 'WTO', 'WStl', 'WBlk', 'WPF']] season_lose_result = season_result[['Season', 'LTeamID', 'LScore', 'LFGM', 'LFGA', 'LFGM3', 'LFGA3', 'LFTM', 'LFTA', 'LOR', 'LDR', 'LAst', 'LTO', 'LStl', 'LBlk', 'LPF']] season_win_result.rename(columns={'WTeamID':'TeamID', 'WScore':'Score', 'WFGM':'FGM', 'WFGA':'FGA', 'WFGM3':'FGM3', 'WFGA3':'FGA3', 'WFTM':'FTM', 'WFTA':'FTA', 'WOR':'OR', 'WDR':'DR', 'WAst':'Ast', 'WTO':'TO', 'WStl':'Stl', 'WBlk':'Blk', 'WPF':'PF'}, inplace=True) season_lose_result.rename(columns={'LTeamID':'TeamID', 'LScore':'Score', 'LFGM':'FGM', 'LFGA':'FGA', 'LFGM3':'FGM3', 'LFGA3':'FGA3', 'LFTM':'FTM', 'LFTA':'FTA', 'LOR':'OR', 'LDR':'DR', 'LAst':'Ast', 'LTO':'TO', 'LStl':'Stl', 'LBlk':'Blk', 'LPF':'PF'}, inplace=True) season_result = pd.concat(( season_win_result, season_lose_result)).reset_index(drop=True )
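The long rename dictionaries above only strip the leading 'W' or 'L'; a sketch generating them programmatically instead of typing every pair:
w_cols = ['WTeamID', 'WScore', 'WFGM', 'WFGA', 'WFGM3', 'WFGA3', 'WFTM', 'WFTA',
          'WOR', 'WDR', 'WAst', 'WTO', 'WStl', 'WBlk', 'WPF']
rename_w = {c: c[1:] for c in w_cols}              # {'WTeamID': 'TeamID', ...}
rename_l = {'L' + c[1:]: c[1:] for c in w_cols}    # {'LTeamID': 'TeamID', ...}
print(rename_w['WTeamID'], rename_l['LScore'])     # TeamID Score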
%matplotlib inline<load_from_csv>
tourney_result['Score_difference'] = tourney_result['WScore'] - tourney_result['LScore'] tourney_result = tourney_result[['Season', 'WTeamID', 'LTeamID', 'Score_difference']] tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Seed':'WSeed'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Seed':'LSeed'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result['WSeed'] = tourney_result['WSeed'].apply(lambda x: int(x[1:3])) tourney_result['LSeed'] = tourney_result['LSeed'].apply(lambda x: int(x[1:3])) print(tourney_result.info(null_counts=True))
train_data = pd.read_csv('../input/titanic/train.csv') train_data.head(10)<load_from_csv>
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4])) test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9])) test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14])) test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1 )
test_data = pd.read_csv('../input/titanic/test.csv') test_data.head(10)<count_missing_values>
for col in season_result.columns[2:]: season_result_map_mean = season_result.groupby(['Season', 'TeamID'])[col].mean().reset_index() tourney_result = pd.merge(tourney_result, season_result_map_mean, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={f'{col}':f'W{col}MeanT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, season_result_map_mean, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={f'{col}':f'L{col}MeanT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_result_map_mean, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={f'{col}':f'W{col}MeanT'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_result_map_mean, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={f'{col}':f'L{col}MeanT'}, inplace=True) test_df = test_df.drop('TeamID', axis=1 )
train_data.isnull().sum()<count_missing_values>
tourney_win_result = tourney_result.drop(['WTeamID', 'LTeamID'], axis=1) for col in tourney_win_result.columns[2:]: if col[0] == 'W': tourney_win_result.rename(columns={f'{col}':f'{col[1:]+"1"}'}, inplace=True) elif col[0] == 'L': tourney_win_result.rename(columns={f'{col}':f'{col[1:]+"2"}'}, inplace=True) tourney_lose_result = tourney_win_result.copy() for col in tourney_lose_result.columns: if col[-1] == '1': col2 = col[:-1] + '2' tourney_lose_result[col] = tourney_win_result[col2] tourney_lose_result[col2] = tourney_win_result[col] tourney_lose_result.columns
test_data.isnull().sum()<feature_engineering>
tourney_win_result['Seed_diff'] = tourney_win_result['Seed1'] - tourney_win_result['Seed2'] tourney_win_result['ScoreMeanT_diff'] = tourney_win_result['ScoreMeanT1'] - tourney_win_result['ScoreMeanT2'] tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2'] tourney_lose_result['ScoreMeanT_diff'] = tourney_lose_result['ScoreMeanT1'] - tourney_lose_result['ScoreMeanT2'] tourney_lose_result['Score_difference'] = -tourney_lose_result['Score_difference'] tourney_win_result['result'] = 1 tourney_lose_result['result'] = 0 tourney_result = pd.concat(( tourney_win_result, tourney_lose_result)).reset_index(drop=True )
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean()) test_data['Age'] = test_data['Age'].fillna(test_data['Age'].mean() )<count_missing_values>
for col in test_df.columns[2:]: if col[0] == 'W': test_df.rename(columns={f'{col}':f'{col[1:]+"1"}'}, inplace=True) elif col[0] == 'L': test_df.rename(columns={f'{col}':f'{col[1:]+"2"}'}, inplace=True) test_df['Seed1'] = test_df['Seed1'].apply(lambda x: int(x[1:3])) test_df['Seed2'] = test_df['Seed2'].apply(lambda x: int(x[1:3])) test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2'] test_df['ScoreMeanT_diff'] = test_df['ScoreMeanT1'] - test_df['ScoreMeanT2'] test_df = test_df.drop(['ID', 'Pred', 'Season'], axis=1 )
train_data.isnull().sum()<count_missing_values>
features = [x for x in tourney_result.columns if x not in ['result', 'Score_difference', 'Season']] params = {'num_leaves': 400, 'min_child_weight': 0.034, 'feature_fraction': 0.379, 'bagging_fraction': 0.418, 'min_data_in_leaf': 106, 'max_depth': -1, 'learning_rate': 0.0068, "boosting_type": "gbdt", "bagging_seed": 11, 'reg_alpha': 0.3899, 'reg_lambda': 0.648, 'random_state': 47, } step_size = 20 steps = 250 boosting_rounds = [step_size*(x+1)for x in range(steps)] def run_boost_round_test(boosting_rounds, step_size): training_scores, oof_scores, holdback_scores = [], [], [] model = NCAA_model(params, tourney_result, test_df, use_holdback=[2019], regression=False, verbose=False) print(f'Training for {step_size*steps} rounds.') for rounds in range(step_size,boosting_rounds+1,step_size): print(f'{"*"*50}') print(f'Rounds: {rounds}') if model.use_holdback: tr_score, oof_score, hb_score = model.train(features, n_splits=10, n_boost_round=step_size, early_stopping_rounds=None) else: tr_score, oof_score = model.train(features, n_splits=10, n_boost_round=step_size, early_stopping_rounds=None) clips, clip_s = model.fit_clipper(verbose=True) spline, spline_s = model.fit_spline_model(verbose=True) training_scores.append([tr_score, model.postprocess_preds(clips, use_data = 'train'), model.postprocess_preds(spline, use_data = 'train', method='spline')]) oof_scores.append([oof_score, model.postprocess_preds(clips, use_data = 'oof'), model.postprocess_preds(spline, use_data = 'oof', method='spline')]) holdback_scores.append([hb_score, model.postprocess_preds(clips, use_data = 'hb'), model.postprocess_preds(spline, use_data = 'hb', method='spline')]) return training_scores, oof_scores, holdback_scores, model, clips, spline training_scores, oof_scores, holdback_scores, model, clips, spline = run_boost_round_test(boosting_rounds[-1], step_size )
Google Cloud & NCAA® ML Competition 2020-NCAAM
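# NCAA_model is a helper class not shown in this excerpt. The same
# grow-and-score loop can be written with plain LightGBM via init_model,
# which continues training an existing booster (dtrain, X_valid and y_valid
# are assumed to exist; this is a sketch, not the kernel's implementation).
import lightgbm as lgb
from sklearn.metrics import log_loss

booster = None
for _ in range(steps):
    booster = lgb.train(params, dtrain, num_boost_round=step_size,
                        init_model=booster, keep_training_booster=True)
    print(log_loss(y_valid, booster.predict(X_valid)))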
8,067,852
<feature_engineering><EOS>
y_preds = model.postprocess_preds(spline, method='spline') submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') submission_df['Pred'] = y_preds submission_df.to_csv('submission.csv', index=False) submission_df.describe()
Google Cloud & NCAA® ML Competition 2020-NCAAM
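# The kernel post-processes predictions with a fitted clipper/spline; the
# simplest variant (a sketch with hypothetical bounds) is symmetric clipping,
# which caps the log-loss penalty of confidently wrong predictions:
import numpy as np

def clip_preds(p, lo=0.025, hi=0.975):
    return np.clip(p, lo, hi)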
7,993,081
<SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-mens-tournament<count_missing_values>
pd.set_option('max_columns', None) plt.style.use('fivethirtyeight') %matplotlib inline py.init_notebook_mode(connected=True) warnings.filterwarnings('ignore') print("Libraries imported!" )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
train_data.isnull().sum()<data_type_conversions>
class BaseModel(object): def __init__(self, train_df, test_df, target, features, categoricals=[], n_splits=3, cv_method="KFold", group=None, task="regression", parameter_tuning=False, scaler=None, verbose=True): self.train_df = train_df self.test_df = test_df self.target = target self.features = features self.n_splits = n_splits self.categoricals = categoricals self.cv_method = cv_method self.group = group self.task = task self.parameter_tuning = parameter_tuning self.scaler = scaler self.cv = self.get_cv() self.verbose = verbose self.params = self.get_params() self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit() def train_model(self, train_set, val_set): raise NotImplementedError def get_params(self): raise NotImplementedError def convert_dataset(self, x_train, y_train, x_val, y_val): raise NotImplementedError def convert_x(self, x): return x def calc_metric(self, y_true, y_pred): if self.task == "classification": return log_loss(y_true, y_pred) elif self.task == "regression": return np.sqrt(mean_squared_error(y_true, y_pred)) def get_cv(self): if self.cv_method == "KFold": cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df) elif self.cv_method == "StratifiedKFold": cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target]) elif self.cv_method == "TimeSeriesSplit": cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits) return cv.split(self.train_df) elif self.cv_method == "GroupKFold": cv = GroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) elif self.cv_method == "StratifiedGroupKFold": cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) def fit(self): oof_pred = np.zeros(( self.train_df.shape[0],)) y_vals = np.zeros(( self.train_df.shape[0],)) y_pred = np.zeros(( self.test_df.shape[0],)) if self.group is not None: if self.group in self.features: self.features.remove(self.group) if self.group in self.categoricals: self.categoricals.remove(self.group) fi = np.zeros(( self.n_splits, len(self.features))) if self.scaler is not None: numerical_features = [f for f in self.features if f not in self.categoricals] self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median()) self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median()) self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0]) self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0]) if self.scaler == "MinMax": scaler = MinMaxScaler() elif self.scaler == "Standard": scaler = StandardScaler() df = pd.concat([self.train_df[numerical_features], self.test_df[numerical_features]], ignore_index=True) scaler.fit(df[numerical_features]) x_test = self.test_df.copy() x_test[numerical_features] = scaler.transform(x_test[numerical_features]) x_test = [np.absolute(x_test[i])for i in self.categoricals] + [x_test[numerical_features]] else: x_test = self.test_df[self.features] for fold,(train_idx, val_idx)in enumerate(self.cv): x_train, x_val = self.train_df.loc[train_idx, self.features], self.train_df.loc[val_idx, self.features] y_train, y_val = 
self.train_df.loc[train_idx, self.target], self.train_df.loc[val_idx, self.target] if self.scaler is not None: x_train[numerical_features] = scaler.transform(x_train[numerical_features]) x_val[numerical_features] = scaler.transform(x_val[numerical_features]) x_train = [np.absolute(x_train[i])for i in self.categoricals] + [x_train[numerical_features]] x_val = [np.absolute(x_val[i])for i in self.categoricals] + [x_val[numerical_features]] train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val) model, importance = self.train_model(train_set, val_set) fi[fold, :] = importance conv_x_val = self.convert_x(x_val) y_vals[val_idx] = y_val oof_pred[val_idx] = model.predict(conv_x_val ).reshape(oof_pred[val_idx].shape) x_test = self.convert_x(x_test) y_pred += model.predict(x_test ).reshape(y_pred.shape)/ self.n_splits print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_val, oof_pred[val_idx]))) fi_df = pd.DataFrame() for n in np.arange(self.n_splits): tmp = pd.DataFrame() tmp["features"] = self.features tmp["importance"] = fi[n, :] tmp["fold"] = n fi_df = pd.concat([fi_df, tmp], ignore_index=True) gfi = fi_df[["features", "importance"]].groupby(["features"] ).mean().reset_index() fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=('', '_mean')) loss_score = self.calc_metric(self.train_df[self.target], oof_pred) if self.verbose: print('Our oof loss score is: ', loss_score) return y_pred, loss_score, model, oof_pred, y_vals, fi_df def plot_feature_importance(self, rank_range=[1, 50]): fig, ax = plt.subplots(1, 1, figsize=(10, 20)) sorted_df = self.fi_df.sort_values(by = "importance_mean", ascending=False ).reset_index().iloc[self.n_splits *(rank_range[0]-1): self.n_splits * rank_range[1]] sns.barplot(data=sorted_df, x ="importance", y ="features", orient='h') ax.set_xlabel("feature importance") ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) return sorted_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
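# A standalone sketch of the splitter selection in BaseModel.get_cv().
# Note that sklearn's GroupKFold accepts only n_splits (no shuffle or
# random_state, unlike the call inside the class above); the stratified
# variants do accept both.
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold

def make_cv(method, n_splits, df, target=None, group=None):
    if method == "KFold":
        return KFold(n_splits=n_splits, shuffle=True, random_state=42).split(df)
    if method == "StratifiedKFold":
        return StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42).split(df, df[target])
    if method == "GroupKFold":
        return GroupKFold(n_splits=n_splits).split(df, groups=df[group])
    raise ValueError(f"unknown cv_method: {method}")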
7,993,081
train_data['Cabin'] = train_data['Cabin'].fillna("Missing") test_data['Cabin'] = test_data['Cabin'].fillna("Missing" )<count_missing_values>
class LgbModel(BaseModel): def train_model(self, train_set, val_set): verbosity = 100 if self.verbose else 0 model = lgb.train(self.params, train_set, num_boost_round = 5000, valid_sets=[train_set, val_set], verbose_eval=verbosity) fi = model.feature_importance(importance_type="gain") return model, fi def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals) val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals) return train_set, val_set def get_params(self): params = { 'num_leaves': 127, 'min_data_in_leaf': 50, 'max_depth': -1, 'learning_rate': 0.005, "boosting_type": "gbdt", "bagging_seed": 11, "verbosity": -1, 'random_state': 42, } if self.task == "regression": params["objective"] = "regression" params["metric"] = "rmse" elif self.task == "classification": params["objective"] = "binary" params["metric"] = "binary_logloss" if self.parameter_tuning == True: def objective(trial): train_x, test_x, train_y, test_y = train_test_split(self.train_df[self.features], self.train_df[self.target], test_size=0.3, random_state=42) dtrain = lgb.Dataset(train_x, train_y, categorical_feature=self.categoricals) dtest = lgb.Dataset(test_x, test_y, categorical_feature=self.categoricals) hyperparams = {'num_leaves': trial.suggest_int('num_leaves', 24, 1024), 'boosting_type': 'gbdt', 'objective': params["objective"], 'metric': params["metric"], 'max_depth': trial.suggest_int('max_depth', 4, 16), 'min_child_weight': trial.suggest_int('min_child_weight', 1, 20), 'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 100), 'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0), 'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0), 'early_stopping_rounds': 100 } model = lgb.train(hyperparams, dtrain, valid_sets=dtest, verbose_eval=500) pred = model.predict(test_x) if self.task == "classification": return log_loss(test_y, pred) elif self.task == "regression": return np.sqrt(mean_squared_error(test_y, pred)) study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=50) print('Number of finished trials: {}'.format(len(study.trials))) print('Best trial:') trial = study.best_trial print(' Value: {}'.format(trial.value)) print(' Params: ') for key, value in trial.params.items() : print(' {}: {}'.format(key, value)) params = trial.params params["learning_rate"] = 0.001 plot_optimization_history(study) return params
Google Cloud & NCAA® ML Competition 2020-NCAAM
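# Hedged usage sketch for the wrapper above (the argument values are
# assumptions, not the kernel's exact call; training runs inside __init__):
# lgb_model = LgbModel(train, test, target='result', features=features,
#                      categoricals=[], n_splits=10,
#                      cv_method='StratifiedKFold', task='classification')
# test_preds, oof_loss = lgb_model.y_pred, lgb_model.score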
7,993,081
train_data.isnull().sum()<count_missing_values>
class CatbModel(BaseModel): def train_model(self, train_set, val_set): verbosity = 100 if self.verbose else 0 if self.task == "regression": model = CatBoostRegressor(**self.params) elif self.task == "classification": model = CatBoostClassifier(**self.params) model.fit(train_set['X'], train_set['y'], eval_set=(val_set['X'], val_set['y']), verbose=verbosity, cat_features=self.categoricals) return model, model.get_feature_importance() def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = {'X': x_train, 'y': y_train} val_set = {'X': x_val, 'y': y_val} return train_set, val_set def get_params(self): params = { 'task_type': "CPU", 'learning_rate': 0.01, 'iterations': 1000, 'random_seed': 42, 'use_best_model': True } if self.task == "regression": params["loss_function"] = "RMSE" elif self.task == "classification": params["loss_function"] = "Logloss" return params
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
test_data.isnull().sum()<feature_engineering>
class Mish(Layer): def __init__(self, **kwargs): super(Mish, self ).__init__(**kwargs) def build(self, input_shape): super(Mish, self ).build(input_shape) def call(self, x): return x * K.tanh(K.softplus(x)) def compute_output_shape(self, input_shape): return input_shape class LayerNormalization(keras.layers.Layer): def __init__(self, center=True, scale=True, epsilon=None, gamma_initializer='ones', beta_initializer='zeros', gamma_regularizer=None, beta_regularizer=None, gamma_constraint=None, beta_constraint=None, **kwargs): super(LayerNormalization, self ).__init__(**kwargs) self.supports_masking = True self.center = center self.scale = scale if epsilon is None: epsilon = K.epsilon() * K.epsilon() self.epsilon = epsilon self.gamma_initializer = keras.initializers.get(gamma_initializer) self.beta_initializer = keras.initializers.get(beta_initializer) self.gamma_regularizer = keras.regularizers.get(gamma_regularizer) self.beta_regularizer = keras.regularizers.get(beta_regularizer) self.gamma_constraint = keras.constraints.get(gamma_constraint) self.beta_constraint = keras.constraints.get(beta_constraint) self.gamma, self.beta = None, None def get_config(self): config = { 'center': self.center, 'scale': self.scale, 'epsilon': self.epsilon, 'gamma_initializer': keras.initializers.serialize(self.gamma_initializer), 'beta_initializer': keras.initializers.serialize(self.beta_initializer), 'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer), 'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer), 'gamma_constraint': keras.constraints.serialize(self.gamma_constraint), 'beta_constraint': keras.constraints.serialize(self.beta_constraint), } base_config = super(LayerNormalization, self ).get_config() return dict(list(base_config.items())+ list(config.items())) def compute_output_shape(self, input_shape): return input_shape def compute_mask(self, inputs, input_mask=None): return input_mask def build(self, input_shape): shape = input_shape[-1:] if self.scale: self.gamma = self.add_weight( shape=shape, initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, name='gamma', ) if self.center: self.beta = self.add_weight( shape=shape, initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, name='beta', ) super(LayerNormalization, self ).build(input_shape) def call(self, inputs, training=None): mean = K.mean(inputs, axis=-1, keepdims=True) variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True) std = K.sqrt(variance + self.epsilon) outputs =(inputs - mean)/ std if self.scale: outputs *= self.gamma if self.center: outputs += self.beta return outputs
Google Cloud & NCAA® ML Competition 2020-NCAAM
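# Numeric spot-check of the Mish activation defined above,
# mish(x) = x * tanh(softplus(x)), in plain NumPy:
import numpy as np

def mish_np(x):
    return x * np.tanh(np.log1p(np.exp(x)))   # softplus(x) = log(1 + e^x)

print(mish_np(np.array([-2.0, 0.0, 2.0])))    # approx [-0.2525, 0.0, 1.944]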
7,993,081
test_data['Fare'] = test_data['Fare'].fillna(test_data['Fare'].median() )<count_missing_values>
class NeuralNetworkModel(BaseModel): def train_model(self, train_set, val_set): inputs = [] embeddings = [] embedding_out_dim = self.params['embedding_out_dim'] n_neuron = self.params['hidden_units'] for i in self.categoricals: input_ = Input(shape=(1,)) embedding = Embedding(int(np.absolute(self.train_df[i] ).max() + 1), embedding_out_dim, input_length=1 )(input_) embedding = Reshape(target_shape=(embedding_out_dim,))(embedding) inputs.append(input_) embeddings.append(embedding) input_numeric = Input(shape=(len(self.features)- len(self.categoricals),)) embedding_numeric = Dense(n_neuron )(input_numeric) embedding_numeric = Mish()(embedding_numeric) inputs.append(input_numeric) embeddings.append(embedding_numeric) x = Concatenate()(embeddings) for i in np.arange(self.params['hidden_layers'] - 1): x = Dense(n_neuron //(2 *(i+1)) )(x) x = Mish()(x) x = Dropout(self.params['hidden_dropout'] )(x) x = LayerNormalization()(x) if self.task == "regression": out = Dense(1, activation="linear", name = "out" )(x) loss = "mse" elif self.task == "classification": out = Dense(1, activation='sigmoid', name = 'out' )(x) loss = "binary_crossentropy" model = Model(inputs=inputs, outputs=out) model.compile(loss=loss, optimizer=Adam(lr=1e-04, beta_1=0.9, beta_2=0.999, decay=1e-04)) er = EarlyStopping(patience=10, min_delta=1e-4, restore_best_weights=True, monitor='val_loss') ReduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min') model.fit(train_set['X'], train_set['y'], callbacks=[er, ReduceLR], epochs=self.params['epochs'], batch_size=self.params['batch_size'], validation_data=[val_set['X'], val_set['y']]) fi = np.zeros(len(self.features)) return model, fi def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = {'X': x_train, 'y': y_train} val_set = {'X': x_val, 'y': y_val} return train_set, val_set def get_params(self): params = { 'input_dropout': 0.0, 'hidden_layers': 2, 'hidden_units': 64, 'embedding_out_dim': 4, 'hidden_activation': 'relu', 'hidden_dropout': 0.02, 'batch_norm': 'before_act', 'optimizer': {'type': 'adam', 'lr': 0.001}, 'batch_size': 256, 'epochs': 80 } return params
Google Cloud & NCAA® ML Competition 2020-NCAAM
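# The entity-embedding network above expects one 1-D array per categorical
# input plus a single numeric matrix. A toy sketch of that input layout
# (hypothetical data):
import numpy as np
cat_arrays = [np.array([0, 1, 2]), np.array([2, 0, 1])]   # one array per Embedding input
num_matrix = np.random.rand(3, 4)                          # dense numeric features
model_inputs = cat_arrays + [num_matrix]                   # the list model.fit() receives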
7,993,081
train_data.isnull().sum()<count_missing_values>
data_dict = {} for i in glob.glob('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/*'): name = i.split('/')[-1].split('.')[0] if name != 'MTeamSpellings': data_dict[name] = pd.read_csv(i) else: data_dict[name] = pd.read_csv(i, encoding='cp1252' )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
test_data.isnull().sum()<categorify>
data_dict['MNCAATourneySeeds']['Seed'] = data_dict['MNCAATourneySeeds']['Seed'].apply(lambda x: int(x[1:3])) data_dict['MNCAATourneySeeds'].head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
train_data = pd.get_dummies(train_data, columns=["Sex"], drop_first=True) train_data = pd.get_dummies(train_data, columns=["Embarked"],drop_first=True) train_data['Fare'] = train_data['Fare'].astype(int) train_data.loc[train_data.Fare<=7.91,'Fare']=0 train_data.loc[(train_data.Fare>7.91)&(train_data.Fare<=14.454),'Fare']=1 train_data.loc[(train_data.Fare>14.454)&(train_data.Fare<=31),'Fare']=2 train_data.loc[(train_data.Fare>31),'Fare']=3 train_data['Age']=train_data['Age'].astype(int) train_data.loc[ train_data['Age'] <= 16, 'Age']= 0 train_data.loc[(train_data['Age'] > 16)&(train_data['Age'] <= 32), 'Age'] = 1 train_data.loc[(train_data['Age'] > 32)&(train_data['Age'] <= 48), 'Age'] = 2 train_data.loc[(train_data['Age'] > 48)&(train_data['Age'] <= 64), 'Age'] = 3 train_data.loc[train_data['Age'] > 64, 'Age'] = 4<categorify>
test = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') print(test.shape) test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
test_data = pd.get_dummies(test_data, columns=["Sex"], drop_first=True) test_data = pd.get_dummies(test_data, columns=["Embarked"],drop_first=True) test_data['Fare'] = test_data['Fare'].astype(int) test_data.loc[test_data.Fare<=7.91,'Fare']=0 test_data.loc[(test_data.Fare>7.91)&(test_data.Fare<=14.454),'Fare']=1 test_data.loc[(test_data.Fare>14.454)&(test_data.Fare<=31),'Fare']=2 test_data.loc[(test_data.Fare>31),'Fare']=3 test_data['Age']=test_data['Age'].astype(int) test_data.loc[ test_data['Age'] <= 16, 'Age']= 0 test_data.loc[(test_data['Age'] > 16)&(test_data['Age'] <= 32), 'Age'] = 1 test_data.loc[(test_data['Age'] > 32)&(test_data['Age'] <= 48), 'Age'] = 2 test_data.loc[(test_data['Age'] > 48)&(test_data['Age'] <= 64), 'Age'] = 3 test_data.loc[test_data['Age'] > 64, 'Age'] = 4<drop_column>
test = test.drop(['Pred'], axis=1) test['Season'] = test['ID'].apply(lambda x: int(x.split('_')[0])) test['WTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[1])) test['LTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[2])) test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
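# The sample-submission IDs are 'Season_TeamID1_TeamID2' strings; a quick
# sketch of the parsing done above:
season, t1, t2 = (int(p) for p in '2015_1112_1436'.split('_'))
print(season, t1, t2)   # -> 2015 1112 1436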
7,993,081
train_data.drop(['Ticket','Cabin','Name'],axis=1,inplace=True) test_data.drop(['Ticket','Cabin','Name'],axis=1,inplace=True )<count_values>
gameCities = pd.merge(data_dict['MGameCities'], data_dict['Cities'], how='left', on=['CityID']) cols_to_use = gameCities.columns.difference(train.columns ).tolist() + ["Season", "WTeamID", "LTeamID"] train = train.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]), how="left", on=["Season", "WTeamID", "LTeamID"]) train.head() cols_to_use = data_dict["MSeasons"].columns.difference(train.columns ).tolist() + ["Season"] train = train.merge(data_dict["MSeasons"][cols_to_use].drop_duplicates(subset=["Season"]), how="left", on=["Season"]) train.head() cols_to_use = data_dict["MTeams"].columns.difference(train.columns ).tolist() train = train.merge(data_dict["MTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["WTeamID"], right_on=["TeamID"]) train.drop(['TeamID'], axis=1, inplace=True) train = train.merge(data_dict["MTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L')) train.drop(['TeamID'], axis=1, inplace=True) print(train.shape) train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
train_data.Survived.value_counts() /len(train_data)*100 <groupby>
cols_to_use = data_dict["MTeamCoaches"].columns.difference(train.columns ).tolist() + ["Season"] train = train.merge(data_dict["MTeamCoaches"][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how="left", left_on=["Season","WTeamID"], right_on=["Season","TeamID"]) train.drop(['TeamID'], axis=1, inplace=True) train = train.merge(data_dict["MTeamCoaches"][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how="left", left_on=["Season","LTeamID"], right_on=["Season","TeamID"], suffixes=('_W', '_L')) train.drop(['TeamID'], axis=1, inplace=True) print(train.shape) train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
train_data.groupby("Survived" ).mean()<groupby>
cols_to_use = data_dict['MNCAATourneySeeds'].columns.difference(train.columns ).tolist() + ['Season'] train = train.merge(data_dict['MNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID']) train.drop(['TeamID'], axis=1, inplace=True) train = train.merge(data_dict['MNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L')) train.drop(['TeamID'], axis=1, inplace=True) print(train.shape) train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
train_data.groupby("Sex_male" ).mean()<import_modules>
cols_to_use = gameCities.columns.difference(test.columns ).tolist() + ["Season", "WTeamID", "LTeamID"] test = test.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]), how="left", on=["Season", "WTeamID", "LTeamID"]) del gameCities gc.collect() test.head() cols_to_use = data_dict["MSeasons"].columns.difference(test.columns ).tolist() + ["Season"] test = test.merge(data_dict["MSeasons"][cols_to_use].drop_duplicates(subset=["Season"]), how="left", on=["Season"]) test.head() cols_to_use = data_dict["MTeams"].columns.difference(test.columns ).tolist() test = test.merge(data_dict["MTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["WTeamID"], right_on=["TeamID"]) test.drop(['TeamID'], axis=1, inplace=True) test = test.merge(data_dict["MTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L')) test.drop(['TeamID'], axis=1, inplace=True) test.head() cols_to_use = data_dict["MTeamCoaches"].columns.difference(test.columns ).tolist() + ["Season"] test = test.merge(data_dict["MTeamCoaches"][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how="left", left_on=["Season","WTeamID"], right_on=["Season","TeamID"]) test.drop(['TeamID'], axis=1, inplace=True) test = test.merge(data_dict["MTeamCoaches"][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how="left", left_on=["Season","LTeamID"], right_on=["Season","TeamID"], suffixes=('_W', '_L')) test.drop(['TeamID'], axis=1, inplace=True) cols_to_use = data_dict['MNCAATourneySeeds'].columns.difference(test.columns ).tolist() + ['Season'] test = test.merge(data_dict['MNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID']) test.drop(['TeamID'], axis=1, inplace=True) test = test.merge(data_dict['MNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L')) test.drop(['TeamID'], axis=1, inplace=True) print(test.shape) test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
from sklearn.metrics import accuracy_score, log_loss from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split from sklearn import metrics<split>
not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist() ] print(not_exist_in_test) train = train.drop(not_exist_in_test, axis=1) train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
X = train_data.drop(['Survived'], axis=1) y = train_data["Survived"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.22, random_state = 5 )<train_model>
team_win_score = regularSeason.groupby(['Season', 'WTeamID'] ).agg({'WScore':['sum', 'count', 'var']} ).reset_index() team_win_score.columns = [' '.join(col ).strip() for col in team_win_score.columns.values] team_loss_score = regularSeason.groupby(['Season', 'LTeamID'] ).agg({'LScore':['sum', 'count', 'var']} ).reset_index() team_loss_score.columns = [' '.join(col ).strip() for col in team_loss_score.columns.values] del regularSeason gc.collect()
Google Cloud & NCAA® ML Competition 2020-NCAAM
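# Toy illustration (a sketch) of the MultiIndex flattening used above:
import pandas as pd
toy = pd.DataFrame({'Season': [2015, 2015], 'WTeamID': [1, 1], 'WScore': [70, 80]})
agg = toy.groupby(['Season', 'WTeamID']).agg({'WScore': ['sum', 'count', 'var']}).reset_index()
agg.columns = [' '.join(col).strip() for col in agg.columns.values]
print(agg.columns.tolist())   # ['Season', 'WTeamID', 'WScore sum', 'WScore count', 'WScore var']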
7,993,081
logReg = LogisticRegression() logReg.fit(X_train,y_train )<predict_on_test>
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID']) train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID']) train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID']) train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID']) train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True) train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
logReg_predict = logReg.predict(X_test) logReg_score = logReg.score(X_test,y_test) print("Logistic Regression Score :",logReg_score )<compute_test_metric>
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID']) test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID']) test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID']) test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID']) test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True) test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
print("Accuracy Score of Logistic Regression Model:") print(metrics.accuracy_score(y_test,logReg_predict)) print(" ","Classification Report:") print(metrics.classification_report(y_test,logReg_predict),' ' )<train_model>
def preprocess(df):
    # Suffix map from the merges above: for the W team its wins are the *_x
    # WScore columns and its losses the *_y LScore columns; for the L team
    # the reverse. The original cell mixed up three of the six pairings.
    df['x_score'] = df['WScore sum_x'] + df['LScore sum_y']
    df['y_score'] = df['WScore sum_y'] + df['LScore sum_x']
    df['x_count'] = df['WScore count_x'] + df['LScore count_y']
    df['y_count'] = df['WScore count_y'] + df['LScore count_x']   # was WScore count_x
    df['x_var'] = df['WScore var_x'] + df['LScore var_y']         # was LScore var_x
    df['y_var'] = df['WScore var_y'] + df['LScore var_x']         # was LScore var_y
    return df
train = preprocess(train)
test = preprocess(test)
Google Cloud & NCAA® ML Competition 2020-NCAAM