kernel_id: int64 (values 24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
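The rows below repeat in a fixed four-field cycle: kernel_id, prompt (a notebook cell ending in a next-cell-type tag such as <merge>), completion (the next cell's code), and comp_name. A minimal sketch of how such a dump could be grouped back into a table, assuming the four-line cycle holds throughout (the file name dump.txt and the helper parse_dump are hypothetical):

import pandas as pd

def parse_dump(lines):
    # Assumption: after the schema header, the dump repeats
    # kernel_id, prompt, completion, comp_name, one field per line.
    rows = []
    for i in range(0, len(lines) - 3, 4):
        kernel_id, prompt, completion, comp_name = lines[i:i + 4]
        rows.append({'kernel_id': int(kernel_id.replace(',', '')),
                     'prompt': prompt,
                     'completion': completion,
                     'comp_name': comp_name})
    return pd.DataFrame(rows)

df = parse_dump(open('dump.txt').read().splitlines())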
8,769,803
test = test.drop(['Pred'], axis=1)
test['Season'] = test['ID'].apply(lambda x: int(x.split('_')[0]))
test['WTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[1]))
test['LTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[2]))
test.head()<merge>
reg_case = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=2200)
params = [{'alpha': [10**-4, 10**-3, 10**-2, 10**-1, 10**0, 10**1, 10**2, 10**3, 10**4]}]
clf = RandomizedSearchCV(reg_case, params, cv=4, scoring='neg_root_mean_squared_error', return_train_score=True)
search = clf.fit(X_train_cases, y_train_cases)
results = pd.DataFrame.from_dict(clf.cv_results_)
print("The best parameter is:", search.best_params_)
COVID19 Global Forecasting (Week 3)
8,769,803
gameCities = pd.merge(data_dict['WGameCities'], data_dict['Cities'], how='left', on=['CityID'])
cols_to_use = gameCities.columns.difference(train.columns).tolist() + ["Season", "WTeamID", "LTeamID"]
train = train.merge(gameCities[cols_to_use], how="left", on=["Season", "WTeamID", "LTeamID"])
train.head()
cols_to_use = data_dict["WSeasons"].columns.difference(train.columns).tolist() + ["Season"]
train = train.merge(data_dict["WSeasons"][cols_to_use], how="left", on=["Season"])
train.head()
cols_to_use = data_dict["WTeams"].columns.difference(train.columns).tolist()
train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["WTeamID"], right_on=["TeamID"])
train.drop(['TeamID'], axis=1, inplace=True)
train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
train.drop(['TeamID'], axis=1, inplace=True)
print(train.shape)
train.head()<merge>
best_alpha = 10000
best_itr = 2400
final_reg_case = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr)
final_reg_case.fit(X_train_cases, y_train_cases)
COVID19 Global Forecasting (Week 3)
8,769,803
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(train.columns).tolist() + ['Season']
train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
train.drop(['TeamID'], axis=1, inplace=True)
train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
train.drop(['TeamID'], axis=1, inplace=True)
print(train.shape)
train.head()<merge>
pred = final_reg_case.predict(X_test_cases)
print("The RMSE value", (mean_squared_error(y_test_cases, pred)) ** 0.5)
COVID19 Global Forecasting (Week 3)
8,769,803
cols_to_use = gameCities.columns.difference(test.columns).tolist() + ["Season", "WTeamID", "LTeamID"]
test = test.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]), how="left", on=["Season", "WTeamID", "LTeamID"])
del gameCities
gc.collect()
test.head()
cols_to_use = data_dict["WSeasons"].columns.difference(test.columns).tolist() + ["Season"]
test = test.merge(data_dict["WSeasons"][cols_to_use].drop_duplicates(subset=["Season"]), how="left", on=["Season"])
test.head()
cols_to_use = data_dict["WTeams"].columns.difference(test.columns).tolist()
test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["WTeamID"], right_on=["TeamID"])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
test.head()
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(test.columns).tolist() + ['Season']
test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
print(test.shape)
test.head()<drop_column>
reg_fatal = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=3500)
params = [{'alpha': [10**-4, 10**-3, 10**-2, 10**-1, 10**0, 10**1, 10**2, 10**3, 10**4]}]
clf = RandomizedSearchCV(reg_fatal, params, cv=4, scoring='neg_root_mean_squared_error', return_train_score=True)
search = clf.fit(X_train_fatal, y_train_fatal)
results = pd.DataFrame.from_dict(clf.cv_results_)
print("The best parameter is:", search.best_params_)
COVID19 Global Forecasting (Week 3)
8,769,803
not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist()]
print(not_exist_in_test)
train = train.drop(not_exist_in_test, axis=1)
train.head()<groupby>
best_alpha = 100
best_iter = 3500
final_reg_fatal = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_iter)
final_reg_fatal.fit(X_train_fatal, y_train_fatal)
COVID19 Global Forecasting (Week 3)
8,769,803
team_win_score = regularSeason.groupby(['Season', 'WTeamID']).agg({'WScore': ['sum', 'count', 'var']}).reset_index()
team_win_score.columns = [' '.join(col).strip() for col in team_win_score.columns.values]
team_loss_score = regularSeason.groupby(['Season', 'LTeamID']).agg({'LScore': ['sum', 'count', 'var']}).reset_index()
team_loss_score.columns = [' '.join(col).strip() for col in team_loss_score.columns.values]
del regularSeason
gc.collect()<merge>
pred = final_reg_fatal.predict(X_test_fatal)
print("The RMSE value", (mean_squared_error(y_test_fatal, pred)) ** 0.5)
COVID19 Global Forecasting (Week 3)
8,769,803
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
train.head()<merge>
featured = pd.DataFrame(data)
X_y_f = shuffle(featured)
y_cases_f = X_y_f['target_infection']
y_fatal_f = X_y_f['target_fatal']
X_f = X_y_f.drop(['target_infection', 'target_fatal'], axis=1)
X_train_cases_f, X_test_cases_f, y_train_cases_f, y_test_cases_f = train_test_split(X_f, y_cases_f, test_size=0.33)
X_train_fatal_f, X_test_fatal_f, y_train_fatal_f, y_test_fatal_f = train_test_split(X_f, y_fatal_f, test_size=0.33)
print("Shape of featurized infection train dataset:", (X_train_cases_f.shape, y_train_cases_f.shape))
print("Shape of featurized infection test dataset:", (X_test_cases_f.shape, y_test_cases_f.shape))
print("Shape of featurized fatal train dataset:", (X_train_fatal_f.shape, y_train_fatal_f.shape))
print("Shape of featurized fatal test dataset:", (X_test_fatal_f.shape, y_test_fatal_f.shape))
COVID19 Global Forecasting (Week 3)
8,769,803
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
test.head()<feature_engineering>
reg_case_f = ElasticNet(random_state=42, l1_ratio=0.1, max_iter=2200)
params = [{'alpha': [10**-4, 10**-3, 10**-2, 10**-1, 10**0, 10**1, 10**2, 10**3, 10**4]}]
clf_f = RandomizedSearchCV(reg_case_f, params, cv=4, scoring='neg_root_mean_squared_error', return_train_score=True)
search_f = clf_f.fit(X_train_cases_f, y_train_cases_f)
results_f = pd.DataFrame.from_dict(clf_f.cv_results_)
print("The best parameter is:", search_f.best_params_)
COVID19 Global Forecasting (Week 3)
8,769,803
def preprocess(df):
    # x_* aggregates team 1's points (WScore from its wins plus LScore from
    # its losses); y_* mirrors this for team 2.
    df['x_score'] = df['WScore sum_x'] + df['LScore sum_y']
    df['y_score'] = df['WScore sum_y'] + df['LScore sum_x']
    df['x_count'] = df['WScore count_x'] + df['LScore count_y']
    df['y_count'] = df['WScore count_y'] + df['LScore count_x']
    df['x_var'] = df['WScore var_x'] + df['LScore var_y']
    df['y_var'] = df['WScore var_y'] + df['LScore var_x']
    return df

train = preprocess(train)
test = preprocess(test)<drop_column>
best_alpha = 100
best_itr = 4200
final_reg_case_f = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr)
final_reg_case_f.fit(X_train_cases_f, y_train_cases_f)
COVID19 Global Forecasting (Week 3)
8,769,803
train_win = train.copy()
train_los = train.copy()
train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
# Losing-perspective rows swap every team-1/team-2 column pair.
train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W', 'y_score', 'x_score', 'y_count', 'x_count', 'y_var', 'x_var']]
train_win.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
train_los.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
test.columns = ['ID', 'Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']<feature_engineering>
pred_f = final_reg_case_f.predict(X_test_cases_f)
print("RMSE is:", (mean_squared_error(y_test_cases_f, pred_f)) ** 0.5)
COVID19 Global Forecasting (Week 3)
8,769,803
def feature_engineering(df):
    df['Seed_diff'] = df['Seed_1'] - df['Seed_2']
    df['Score_diff'] = df['Score_1'] - df['Score_2']
    df['Count_diff'] = df['Count_1'] - df['Count_2']
    df['Var_diff'] = df['Var_1'] - df['Var_2']
    df['Mean_score1'] = df['Score_1'] / df['Count_1']
    df['Mean_score2'] = df['Score_2'] / df['Count_2']
    df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2']
    df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1']
    df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2']
    return df

train_win = feature_engineering(train_win)
train_los = feature_engineering(train_los)
test = feature_engineering(test)<concatenate>
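For reference, the Fano factor computed in the cell above is the variance-to-mean ratio of a team's regular-season score, $F_i = \mathrm{Var}_i / \overline{\mathrm{score}}_i$; values above 1 indicate scoring more dispersed than a Poisson process with the same mean.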
reg_fatal_f = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=2200)
params = [{'alpha': [10**-4, 10**-3, 10**-2, 10**-1, 10**0, 10**1, 10**2, 10**3, 10**4]}]
clf_f = RandomizedSearchCV(reg_fatal_f, params, cv=4, scoring='neg_root_mean_squared_error', return_train_score=True)
search_f = clf_f.fit(X_train_fatal_f, y_train_fatal_f)
results_f = pd.DataFrame.from_dict(clf_f.cv_results_)
print("The best parameter is:", search_f.best_params_)
COVID19 Global Forecasting (Week 3)
8,769,803
data = pd.concat((train_win, train_los)).reset_index(drop=True)
print(data.shape)
data.head()<categorify>
best_alpha = 100
best_itr = 2400
final_reg_fatal_f = ElasticNet(random_state=42, alpha=best_alpha, l1_ratio=0.1, max_iter=best_itr)
final_reg_fatal_f.fit(X_train_fatal_f, y_train_fatal_f)
COVID19 Global Forecasting (Week 3)
8,769,803
categoricals = ["TeamName_1", "TeamName_2"]
for c in categoricals:
    le = LabelEncoder()
    data[c] = data[c].fillna("NaN")
    data[c] = le.fit_transform(data[c])
    test[c] = le.transform(test[c])
data.head()<drop_column>
pred_f = final_reg_fatal_f.predict(X_test_fatal_f)
print("RMSE is:", (mean_squared_error(y_test_fatal_f, pred_f)) ** 0.5)
COVID19 Global Forecasting (Week 3)
8,769,803
target = 'result'
features = data.columns.values.tolist()
features.remove(target)<train_model>
test["Province_State"].fillna("state", inplace=True)
test["Country_Region"] = [country_name.replace("'", "") for country_name in test["Country_Region"]]
COVID19 Global Forecasting (Week 3)
8,769,803
lgbm = LgbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True)<load_from_csv>
test['ConfirmedCases'] = list(map(int, predicted_case))
test['Fatalities'] = list(map(int, predicted_fatal))
COVID19 Global Forecasting (Week 3)
8,769,803
submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
submission_df['Pred'] = lgbm.y_pred
submission_df<save_to_csv>
submission = test[['ForecastId', 'ConfirmedCases', 'Fatalities']]
submission = shuffle(submission)
submission.to_csv("submission.csv", index=False)
COVID19 Global Forecasting (Week 3)
8,755,132
submission_df.to_csv('submission.csv', index=False)<set_options>
from pandas_profiling import ProfileReport
COVID19 Global Forecasting (Week 3)
8,755,132
pd.set_option('max_columns', None)
plt.style.use('fivethirtyeight')
%matplotlib inline
py.init_notebook_mode(connected=True)
warnings.filterwarnings('ignore')
print("Libraries imported!")<train_model>
xtrain = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv')
xtest = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv')
xsubmission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/submission.csv')
xtrain.rename(columns={'Country_Region': 'Country'}, inplace=True)
xtest.rename(columns={'Country_Region': 'Country'}, inplace=True)
xtrain.rename(columns={'Province_State': 'State'}, inplace=True)
xtest.rename(columns={'Province_State': 'State'}, inplace=True)
xtrain.State = xtrain.State.fillna('NA')
xtest.State = xtest.State.fillna('NA')
xtrain['Date'] = pd.to_datetime(xtrain['Date'], infer_datetime_format=True)
xtest['Date'] = pd.to_datetime(xtest['Date'], infer_datetime_format=True)
for j in range(14):
    train_lag = xtrain.groupby(['Country', 'State']).shift(periods=j + 1)
    xtrain['lag_' + str(j + 1) + '_ConfirmedCases'] = train_lag['ConfirmedCases'].fillna(0)
    xtrain['lag_' + str(j + 1) + '_Fatalities'] = train_lag['Fatalities'].fillna(0)
COVID19 Global Forecasting (Week 3)
8,755,132
class BaseModel(object): def __init__(self, train_df, test_df, target, features, categoricals=[], n_splits=3, cv_method="KFold", group=None, task="regression", parameter_tuning=False, scaler=None, verbose=True): self.train_df = train_df self.test_df = test_df self.target = target self.features = features self.n_splits = n_splits self.categoricals = categoricals self.cv_method = cv_method self.group = group self.task = task self.parameter_tuning = parameter_tuning self.scaler = scaler self.cv = self.get_cv() self.verbose = verbose self.params = self.get_params() self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit() def train_model(self, train_set, val_set): raise NotImplementedError def get_params(self): raise NotImplementedError def convert_dataset(self, x_train, y_train, x_val, y_val): raise NotImplementedError def convert_x(self, x): return x def calc_metric(self, y_true, y_pred): if self.task == "classification": return log_loss(y_true, y_pred) elif self.task == "regression": return np.sqrt(mean_squared_error(y_true, y_pred)) def get_cv(self): if self.cv_method == "KFold": cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df) elif self.cv_method == "StratifiedKFold": cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target]) elif self.cv_method == "TimeSeriesSplit": cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits) return cv.split(self.train_df) elif self.cv_method == "GroupKFold": cv = GroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) elif self.cv_method == "StratifiedGroupKFold": cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) def fit(self): oof_pred = np.zeros(( self.train_df.shape[0],)) y_vals = np.zeros(( self.train_df.shape[0],)) y_pred = np.zeros(( self.test_df.shape[0],)) if self.group is not None: if self.group in self.features: self.features.remove(self.group) if self.group in self.categoricals: self.categoricals.remove(self.group) fi = np.zeros(( self.n_splits, len(self.features))) if self.scaler is not None: numerical_features = [f for f in self.features if f not in self.categoricals] self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median()) self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median()) self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0]) self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0]) if self.scaler == "MinMax": scaler = MinMaxScaler() elif self.scaler == "Standard": scaler = StandardScaler() df = pd.concat([self.train_df[numerical_features], self.test_df[numerical_features]], ignore_index=True) scaler.fit(df[numerical_features]) x_test = self.test_df.copy() x_test[numerical_features] = scaler.transform(x_test[numerical_features]) x_test = [np.absolute(x_test[i])for i in self.categoricals] + [x_test[numerical_features]] else: x_test = self.test_df[self.features] for fold,(train_idx, val_idx)in enumerate(self.cv): x_train, x_val = self.train_df.loc[train_idx, self.features], self.train_df.loc[val_idx, self.features] y_train, y_val = 
self.train_df.loc[train_idx, self.target], self.train_df.loc[val_idx, self.target] if self.scaler is not None: x_train[numerical_features] = scaler.transform(x_train[numerical_features]) x_val[numerical_features] = scaler.transform(x_val[numerical_features]) x_train = [np.absolute(x_train[i])for i in self.categoricals] + [x_train[numerical_features]] x_val = [np.absolute(x_val[i])for i in self.categoricals] + [x_val[numerical_features]] train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val) model, importance = self.train_model(train_set, val_set) fi[fold, :] = importance conv_x_val = self.convert_x(x_val) y_vals[val_idx] = y_val oof_pred[val_idx] = model.predict(conv_x_val ).reshape(oof_pred[val_idx].shape) x_test = self.convert_x(x_test) y_pred += model.predict(x_test ).reshape(y_pred.shape)/ self.n_splits print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_val, oof_pred[val_idx]))) fi_df = pd.DataFrame() for n in np.arange(self.n_splits): tmp = pd.DataFrame() tmp["features"] = self.features tmp["importance"] = fi[n, :] tmp["fold"] = n fi_df = pd.concat([fi_df, tmp], ignore_index=True) gfi = fi_df[["features", "importance"]].groupby(["features"] ).mean().reset_index() fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=('', '_mean')) loss_score = self.calc_metric(self.train_df[self.target], oof_pred) if self.verbose: print('Our oof loss score is: ', loss_score) return y_pred, loss_score, model, oof_pred, y_vals, fi_df def plot_feature_importance(self, rank_range=[1, 50]): fig, ax = plt.subplots(1, 1, figsize=(10, 20)) sorted_df = self.fi_df.sort_values(by = "importance_mean", ascending=False ).reset_index().iloc[self.n_splits *(rank_range[0]-1): self.n_splits * rank_range[1]] sns.barplot(data=sorted_df, x ="importance", y ="features", orient='h') ax.set_xlabel("feature importance") ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) return sorted_df<train_model>
xtrain.rename(columns={'Country_Region': 'Country'}, inplace=True)
xtest.rename(columns={'Country_Region': 'Country'}, inplace=True)
xtrain.rename(columns={'Province_State': 'State'}, inplace=True)
xtest.rename(columns={'Province_State': 'State'}, inplace=True)
xtrain['Date'] = pd.to_datetime(xtrain['Date'], infer_datetime_format=True)
xtest['Date'] = pd.to_datetime(xtest['Date'], infer_datetime_format=True)
xtrain.info()
xtest.info()
for_y = xtest.merge(xtrain, on=['State', 'Country', 'Date'], how='inner')
y1_xTrain = xtrain.ConfirmedCases
y1_xTrain.head()
y1_xTest = for_y.ConfirmedCases
y1_xTest.head()
y2_xTrain = xtrain.Fatalities
y2_xTrain.head()
y2_xTest = for_y.Fatalities
y2_xTest.head()
EMPTY_VAL = "NA"

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state
COVID19 Global Forecasting (Week 3)
8,755,132
class LgbModel(BaseModel): def train_model(self, train_set, val_set): verbosity = 100 if self.verbose else 0 model = lgb.train(self.params, train_set, num_boost_round = 5000, valid_sets=[train_set, val_set], verbose_eval=verbosity) fi = model.feature_importance(importance_type="gain") return model, fi def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals) val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals) return train_set, val_set def get_params(self): params = { 'num_leaves': 127, 'min_data_in_leaf': 50, 'max_depth': -1, 'learning_rate': 0.005, "boosting_type": "gbdt", "bagging_seed": 11, "verbosity": -1, 'random_state': 42, } if self.task == "regression": params["objective"] = "regression" params["metric"] = "rmse" elif self.task == "classification": params["objective"] = "binary" params["metric"] = "binary_logloss" if self.parameter_tuning == True: def objective(trial): train_x, test_x, train_y, test_y = train_test_split(self.train_df[self.features], self.train_df[self.target], test_size=0.3, random_state=42) dtrain = lgb.Dataset(train_x, train_y, categorical_feature=self.categoricals) dtest = lgb.Dataset(test_x, test_y, categorical_feature=self.categoricals) hyperparams = {'num_leaves': trial.suggest_int('num_leaves', 24, 1024), 'boosting_type': 'gbdt', 'objective': params["objective"], 'metric': params["metric"], 'max_depth': trial.suggest_int('max_depth', 4, 16), 'min_child_weight': trial.suggest_int('min_child_weight', 1, 20), 'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 100), 'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0), 'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0), 'early_stopping_rounds': 100 } model = lgb.train(hyperparams, dtrain, valid_sets=dtest, verbose_eval=500) pred = model.predict(test_x) if self.task == "classification": return log_loss(test_y, pred) elif self.task == "regression": return np.sqrt(mean_squared_error(test_y, pred)) study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=50) print('Number of finished trials: {}'.format(len(study.trials))) print('Best trial:') trial = study.best_trial print(' Value: {}'.format(trial.value)) print(' Params: ') for key, value in trial.params.items() : print(' {}: {}'.format(key, value)) params = trial.params params["learning_rate"] = 0.001 plot_optimization_history(study) return params<train_model>
X_xTrain = xtrain.copy()
X_xTrain['State'].fillna(EMPTY_VAL, inplace=True)
X_xTrain['State'] = X_xTrain.loc[:, ['State', 'Country']].apply(lambda x: fillState(x['State'], x['Country']), axis=1)
X_xTrain.loc[:, 'Date_ACT'] = X_xTrain.Date
X_xTrain.loc[:, 'Date'] = X_xTrain.Date.dt.strftime("%m%d")
X_xTrain["Date"] = X_xTrain["Date"].astype(int)
print(X_xTrain.head())
X_xTest = for_y.copy()
X_xTest['State'].fillna(EMPTY_VAL, inplace=True)
X_xTest['State'] = X_xTest.loc[:, ['State', 'Country']].apply(lambda x: fillState(x['State'], x['Country']), axis=1)
X_xTest.loc[:, 'Date'] = X_xTest.Date.dt.strftime("%m%d")
X_xTest["Date"] = X_xTest["Date"].astype(int)
print(X_xTest.head())
COVID19 Global Forecasting (Week 3)
8,755,132
class CatbModel(BaseModel):
    def train_model(self, train_set, val_set):
        verbosity = 100 if self.verbose else 0
        if self.task == "regression":
            model = CatBoostRegressor(**self.params)
        elif self.task == "classification":
            model = CatBoostClassifier(**self.params)
        model.fit(train_set['X'], train_set['y'], eval_set=(val_set['X'], val_set['y']), verbose=verbosity, cat_features=self.categoricals)
        return model, model.get_feature_importance()

    def convert_dataset(self, x_train, y_train, x_val, y_val):
        train_set = {'X': x_train, 'y': y_train}
        val_set = {'X': x_val, 'y': y_val}
        return train_set, val_set

    def get_params(self):
        params = {'task_type': "CPU", 'learning_rate': 0.01, 'iterations': 1000, 'random_seed': 42, 'use_best_model': True}
        if self.task == "regression":
            params["loss_function"] = "RMSE"
        elif self.task == "classification":
            params["loss_function"] = "Logloss"
        return params<normalization>
lec = preprocessing.LabelEncoder()
les = preprocessing.LabelEncoder()
X_xTrain.Country = lec.fit_transform(X_xTrain.Country)
X_xTrain['State'] = les.fit_transform(X_xTrain['State'])
print(X_xTrain.head())
X_xTest.Country = lec.transform(X_xTest.Country)
X_xTest['State'] = les.transform(X_xTest['State'])
print(X_xTest.head())
xtrain.loc[xtrain.Country == 'Afghanistan', :]
print(xtest.tail())
COVID19 Global Forecasting (Week 3)
8,755,132
class Mish(Layer): def __init__(self, **kwargs): super(Mish, self ).__init__(**kwargs) def build(self, input_shape): super(Mish, self ).build(input_shape) def call(self, x): return x * K.tanh(K.softplus(x)) def compute_output_shape(self, input_shape): return input_shape class LayerNormalization(keras.layers.Layer): def __init__(self, center=True, scale=True, epsilon=None, gamma_initializer='ones', beta_initializer='zeros', gamma_regularizer=None, beta_regularizer=None, gamma_constraint=None, beta_constraint=None, **kwargs): super(LayerNormalization, self ).__init__(**kwargs) self.supports_masking = True self.center = center self.scale = scale if epsilon is None: epsilon = K.epsilon() * K.epsilon() self.epsilon = epsilon self.gamma_initializer = keras.initializers.get(gamma_initializer) self.beta_initializer = keras.initializers.get(beta_initializer) self.gamma_regularizer = keras.regularizers.get(gamma_regularizer) self.beta_regularizer = keras.regularizers.get(beta_regularizer) self.gamma_constraint = keras.constraints.get(gamma_constraint) self.beta_constraint = keras.constraints.get(beta_constraint) self.gamma, self.beta = None, None def get_config(self): config = { 'center': self.center, 'scale': self.scale, 'epsilon': self.epsilon, 'gamma_initializer': keras.initializers.serialize(self.gamma_initializer), 'beta_initializer': keras.initializers.serialize(self.beta_initializer), 'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer), 'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer), 'gamma_constraint': keras.constraints.serialize(self.gamma_constraint), 'beta_constraint': keras.constraints.serialize(self.beta_constraint), } base_config = super(LayerNormalization, self ).get_config() return dict(list(base_config.items())+ list(config.items())) def compute_output_shape(self, input_shape): return input_shape def compute_mask(self, inputs, input_mask=None): return input_mask def build(self, input_shape): shape = input_shape[-1:] if self.scale: self.gamma = self.add_weight( shape=shape, initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, name='gamma', ) if self.center: self.beta = self.add_weight( shape=shape, initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, name='beta', ) super(LayerNormalization, self ).build(input_shape) def call(self, inputs, training=None): mean = K.mean(inputs, axis=-1, keepdims=True) variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True) std = K.sqrt(variance + self.epsilon) outputs =(inputs - mean)/ std if self.scale: outputs *= self.gamma if self.center: outputs += self.beta return outputs<train_model>
filterwarnings('ignore')
le = preprocessing.LabelEncoder()
countries = X_xTrain.Country.unique()
COVID19 Global Forecasting (Week 3)
8,755,132
class NeuralNetworkModel(BaseModel): def train_model(self, train_set, val_set): inputs = [] embeddings = [] embedding_out_dim = self.params['embedding_out_dim'] n_neuron = self.params['hidden_units'] for i in self.categoricals: input_ = Input(shape=(1,)) embedding = Embedding(int(np.absolute(self.train_df[i] ).max() + 1), embedding_out_dim, input_length=1 )(input_) embedding = Reshape(target_shape=(embedding_out_dim,))(embedding) inputs.append(input_) embeddings.append(embedding) input_numeric = Input(shape=(len(self.features)- len(self.categoricals),)) embedding_numeric = Dense(n_neuron )(input_numeric) embedding_numeric = Mish()(embedding_numeric) inputs.append(input_numeric) embeddings.append(embedding_numeric) x = Concatenate()(embeddings) for i in np.arange(self.params['hidden_layers'] - 1): x = Dense(n_neuron //(2 *(i+1)) )(x) x = Mish()(x) x = Dropout(self.params['hidden_dropout'] )(x) x = LayerNormalization()(x) if self.task == "regression": out = Dense(1, activation="linear", name = "out" )(x) loss = "mse" elif self.task == "classification": out = Dense(1, activation='sigmoid', name = 'out' )(x) loss = "binary_crossentropy" model = Model(inputs=inputs, outputs=out) model.compile(loss=loss, optimizer=Adam(lr=1e-04, beta_1=0.9, beta_2=0.999, decay=1e-04)) er = EarlyStopping(patience=10, min_delta=1e-4, restore_best_weights=True, monitor='val_loss') ReduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min') model.fit(train_set['X'], train_set['y'], callbacks=[er, ReduceLR], epochs=self.params['epochs'], batch_size=self.params['batch_size'], validation_data=[val_set['X'], val_set['y']]) fi = np.zeros(len(self.features)) return model, fi def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = {'X': x_train, 'y': y_train} val_set = {'X': x_val, 'y': y_val} return train_set, val_set def get_params(self): params = { 'input_dropout': 0.0, 'hidden_layers': 2, 'hidden_units': 128, 'embedding_out_dim': 4, 'hidden_activation': 'relu', 'hidden_dropout': 0.05, 'batch_norm': 'before_act', 'optimizer': {'type': 'adam', 'lr': 0.001}, 'batch_size': 128, 'epochs': 80 } return params<load_from_csv>
COVID19 Global Forecasting (Week 3)
8,755,132
data_dict = {}
for i in glob.glob('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/*'):
    name = i.split('/')[-1].split('.')[0]
    if name != 'WTeamSpellings':
        data_dict[name] = pd.read_csv(i)
    else:
        data_dict[name] = pd.read_csv(i, encoding='cp1252')<feature_engineering>
xout = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []}) k_log_col=['lag_'+str(j+1)+'_ConfirmedCases' for j in range(14)]+['lag_'+str(j+1)+'_Fatalities' for j in range(14)] params = {'min_child_weight':[4,5], 'gamma':[i/10.0 for i in range(3,6)], 'subsample':[i/10.0 for i in range(6,8)], 'colsample_bytree':[i/10.0 for i in range(6,8)], 'max_depth': [3,4,5],'n_estimators':[500,1000],'learning_rate': [.03, 0.05,.07], 'objective':['reg:squaredlogerror']} params = {'min_child_weight':[4,5], 'max_depth': [6],'n_estimators':[1000]} def RMSLE(pred,actual): return np.sqrt(np.mean(np.power(( np.log(pred+1)-np.log(actual+1)) ,2))) X_xTrain_CS = X_xTrain.loc[:, ['State', 'Country', 'Date','Date_ACT', 'ConfirmedCases', 'Fatalities']+k_log_col] y1_xTrain_CS = X_xTrain_CS[X_xTrain_CS['ConfirmedCases'] - X_xTrain_CS['lag_1_ConfirmedCases'] >0]['ConfirmedCases']-X_xTrain_CS[X_xTrain_CS['ConfirmedCases'] - X_xTrain_CS['lag_1_ConfirmedCases'] >0]['lag_1_ConfirmedCases'] y2_xTrain_CS = X_xTrain_CS[X_xTrain_CS['ConfirmedCases'] - X_xTrain_CS['lag_1_ConfirmedCases'] >0]['Fatalities']-X_xTrain_CS[X_xTrain_CS['ConfirmedCases'] - X_xTrain_CS['lag_1_ConfirmedCases'] >0]['lag_1_Fatalities'] X_xTrain_CS1=X_xTrain_CS[(X_xTrain_CS['ConfirmedCases'] - X_xTrain_CS['lag_1_ConfirmedCases'])>0].loc[:, ['State', 'Country']+k_log_col].reset_index(drop=True) oh1=preprocessing.OneHotEncoder(sparse=False ).fit(X_xTrain_CS.Country.values.reshape(X_xTrain_CS.Country.shape[0],1)) oh2=preprocessing.OneHotEncoder(sparse=False ).fit(X_xTrain_CS.State.values.reshape(X_xTrain_CS.Country.shape[0],1)) all_val=oh1.transform(X_xTrain_CS1.Country.values.reshape(X_xTrain_CS1.Country.shape[0],1)) col=['cnty_'+str(k)for k in range(all_val.shape[1])] X_xTrain_CS1=pd.concat([X_xTrain_CS1,pd.DataFrame(all_val,columns=col)],axis=1) all_val_State=oh2.transform(X_xTrain_CS1.State.values.reshape(X_xTrain_CS1.State.shape[0],1)) col_State=['state_'+str(k)for k in range(all_val_State.shape[1])] X_xTrain_CS1=pd.concat([X_xTrain_CS1,pd.DataFrame(all_val_State,columns=col_State)],axis=1) X_xTest_CS = X_xTest.loc[:, ['State', 'Country', 'Date', 'ForecastId']+k_log_col] y1_xTest_CS = for_y.loc[:, 'ConfirmedCases']-for_y.loc[:, 'lag_1_ConfirmedCases'] y2_xTest_CS = for_y.loc[:, 'Fatalities']-for_y.loc[:, 'lag_1_Fatalities'] X_xTest_CS_Id = X_xTest_CS.loc[:, 'ForecastId'] X_xTest_CS1 = X_xTest_CS.loc[:, ['State', 'Country']+k_log_col] all_val=oh1.transform(X_xTest_CS1.Country.values.reshape(X_xTest_CS1.Country.shape[0],1)) col=['cnty_'+str(k)for k in range(all_val.shape[1])] X_xTest_CS1=pd.concat([X_xTest_CS1,pd.DataFrame(all_val,columns=col)],axis=1) all_val_State=oh2.transform(X_xTest_CS1.State.values.reshape(X_xTest_CS1.State.shape[0],1)) col_State=['state_'+str(k)for k in range(all_val_State.shape[1])] X_xTest_CS1=pd.concat([X_xTest_CS1,pd.DataFrame(all_val_State,columns=col_State)],axis=1)
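The RMSLE helper defined in the cell above implements $\mathrm{RMSLE} = \sqrt{\tfrac{1}{n}\sum_{i=1}^{n}\left(\log(p_i + 1) - \log(a_i + 1)\right)^2}$ for predictions $p$ and actuals $a$, which matches the competition's evaluation metric.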
COVID19 Global Forecasting (Week 3)
8,755,132
data_dict['WNCAATourneySeeds']['Seed'] = data_dict['WNCAATourneySeeds']['Seed'].apply(lambda x: int(x[1:3]))
data_dict['WNCAATourneySeeds'].head()<load_from_csv>
xgb1 = XGBRegressor(nthread=-1, n_jobs=-1)
grid = GridSearchCV(xgb1, params)
grid.fit(X_xTrain_CS1, y1_xTrain_CS)
COVID19 Global Forecasting (Week 3)
8,755,132
test = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
print(test.shape)
test.head()<feature_engineering>
print(r2_score(y1_xTest_CS, np.where(grid.best_estimator_.predict(X_xTest_CS1) < 0, 0, grid.best_estimator_.predict(X_xTest_CS1))))
print(RMSLE(np.where(y1_xTest_CS <= 0, 0.001, y1_xTest_CS), np.where(grid.best_estimator_.predict(X_xTest_CS1) <= 0, 0.001, grid.best_estimator_.predict(X_xTest_CS1))))
xgb2 = XGBRegressor(nthread=-1, n_jobs=-1)
grid1 = GridSearchCV(xgb2, params)
grid1.fit(X_xTrain_CS1, y2_xTrain_CS)
print(r2_score(y2_xTest_CS, np.where(grid1.best_estimator_.predict(X_xTest_CS1) < 0, 0, grid1.best_estimator_.predict(X_xTest_CS1))))
print(RMSLE(np.where(y2_xTest_CS <= 0, 0.001, y2_xTest_CS), np.where(grid1.best_estimator_.predict(X_xTest_CS1) <= 0, 0.001, grid1.best_estimator_.predict(X_xTest_CS1))))
print(r2_score(y2_xTest_CS, grid1.best_estimator_.predict(X_xTest_CS1)))
print(RMSLE(y2_xTest_CS, grid1.best_estimator_.predict(X_xTest_CS1)))
y2_xpred = grid1.best_estimator_.predict(X_xTest_CS1)
COVID19 Global Forecasting (Week 3)
8,755,132
test = test.drop(['Pred'], axis=1)
test['Season'] = test['ID'].apply(lambda x: int(x.split('_')[0]))
test['WTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[1]))
test['LTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[2]))
test.head()<merge>
X_xTrain_CS1_bk = X_xTrain_CS1.copy()
X_xTrain_CS12 = X_xTrain_CS.copy()
all_val = oh1.transform(X_xTrain_CS12.Country.values.reshape(X_xTrain_CS12.Country.shape[0], 1))
col = ['cnty_' + str(k) for k in range(all_val.shape[1])]
X_xTrain_CS12 = pd.concat([X_xTrain_CS12, pd.DataFrame(all_val, columns=col)], axis=1)
all_val_State = oh2.transform(X_xTrain_CS12.State.values.reshape(X_xTrain_CS12.State.shape[0], 1))
col_State = ['state_' + str(k) for k in range(all_val_State.shape[1])]
X_xTrain_CS12 = pd.concat([X_xTrain_CS12, pd.DataFrame(all_val_State, columns=col_State)], axis=1)
X_xTrain_CS12 = X_xTrain_CS12[X_xTrain_CS1.columns].copy()
X_xTrain_CS12['Date_ACT'] = X_xTrain_CS['Date_ACT']
X_xTrain_CS12['Fatalities'] = X_xTrain_CS['Fatalities']
X_xTrain_CS12['ConfirmedCases'] = X_xTrain_CS['ConfirmedCases']
COVID19 Global Forecasting (Week 3)
8,755,132
gameCities = pd.merge(data_dict['WGameCities'], data_dict['Cities'], how='left', on=['CityID'])
cols_to_use = gameCities.columns.difference(train.columns).tolist() + ["Season", "WTeamID", "LTeamID"]
train = train.merge(gameCities[cols_to_use], how="left", on=["Season", "WTeamID", "LTeamID"])
train.head()
cols_to_use = data_dict["WSeasons"].columns.difference(train.columns).tolist() + ["Season"]
train = train.merge(data_dict["WSeasons"][cols_to_use], how="left", on=["Season"])
train.head()
cols_to_use = data_dict["WTeams"].columns.difference(train.columns).tolist()
train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["WTeamID"], right_on=["TeamID"])
train.drop(['TeamID'], axis=1, inplace=True)
train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
train.drop(['TeamID'], axis=1, inplace=True)
print(train.shape)
train.head()<merge>
for k in range(60):
    prev = X_xTrain_CS12[X_xTrain_CS12['Date_ACT'] == X_xTrain_CS12.Date_ACT.max()]
    # Shift the lag columns by one day: lag_n takes the latest row's lag_(n-1),
    # with lag_1 taking the latest actual value.
    lt = ['lag_%d_ConfirmedCases' % n for n in range(1, 15)] + ['lag_%d_Fatalities' % n for n in range(1, 15)]
    lt1 = ['ConfirmedCases'] + ['lag_%d_ConfirmedCases' % n for n in range(1, 14)] + ['Fatalities'] + ['lag_%d_Fatalities' % n for n in range(1, 14)]
    prev[lt] = X_xTrain_CS12[X_xTrain_CS12['Date_ACT'] == X_xTrain_CS12.Date_ACT.max()][lt1]
    cc = grid.best_estimator_.predict(X_xTrain_CS12[X_xTrain_CS12['Date_ACT'] == X_xTrain_CS12.Date_ACT.max()][['State', 'Country'] + k_log_col + col + col_State])
    prev['ConfirmedCases'] = prev['lag_1_ConfirmedCases'] + np.where(cc <= 0, 0.001, cc)
    Fl = grid1.best_estimator_.predict(X_xTrain_CS12[X_xTrain_CS12['Date_ACT'] == X_xTrain_CS12.Date_ACT.max()][['State', 'Country'] + k_log_col + col + col_State])
    prev['Fatalities'] = prev['lag_1_Fatalities'] + np.where(Fl <= 0, 0.001, Fl)
    prev['Date_ACT'] = X_xTrain_CS12.Date_ACT.max() + datetime.timedelta(days=1)
    prev.index = prev.index + 1
    X_xTrain_CS12 = X_xTrain_CS12.append(prev)
COVID19 Global Forecasting (Week 3)
8,755,132
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(train.columns).tolist() + ['Season']
train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
train.drop(['TeamID'], axis=1, inplace=True)
train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
train.drop(['TeamID'], axis=1, inplace=True)
print(train.shape)
train.head()<merge>
X_xTrain_CS2=X_xTrain_CS12.copy()
COVID19 Global Forecasting (Week 3)
8,755,132
cols_to_use = gameCities.columns.difference(test.columns).tolist() + ["Season", "WTeamID", "LTeamID"]
test = test.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]), how="left", on=["Season", "WTeamID", "LTeamID"])
del gameCities
gc.collect()
test.head()
cols_to_use = data_dict["WSeasons"].columns.difference(test.columns).tolist() + ["Season"]
test = test.merge(data_dict["WSeasons"][cols_to_use].drop_duplicates(subset=["Season"]), how="left", on=["Season"])
test.head()
cols_to_use = data_dict["WTeams"].columns.difference(test.columns).tolist()
test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["WTeamID"], right_on=["TeamID"])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
test.head()
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(test.columns).tolist() + ['Season']
test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
print(test.shape)
test.head()<drop_column>
X_xTrain_CS2['Country_1'] = lec.inverse_transform(X_xTrain_CS2['Country'])
X_xTrain_CS2['State_1'] = les.inverse_transform(X_xTrain_CS2['State'])
COVID19 Global Forecasting (Week 3)
8,755,132
not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist()]
print(not_exist_in_test)
train = train.drop(not_exist_in_test, axis=1)
train.head()<groupby>
xtest = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv')
xsubmission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/submission.csv')
xtest.rename(columns={'Country_Region': 'Country'}, inplace=True)
xtest.rename(columns={'Province_State': 'State'}, inplace=True)
EMPTY_VAL = "NA"

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state

xtest['Date'] = pd.to_datetime(xtest['Date'], infer_datetime_format=True)
xtest['State'].fillna(EMPTY_VAL, inplace=True)
xtest['State'] = xtest.loc[:, ['State', 'Country']].apply(lambda x: fillState(x['State'], x['Country']), axis=1)
xtest
COVID19 Global Forecasting (Week 3)
8,755,132
team_win_score = regularSeason.groupby(['Season', 'WTeamID']).agg({'WScore': ['sum', 'count', 'var']}).reset_index()
team_win_score.columns = [' '.join(col).strip() for col in team_win_score.columns.values]
team_loss_score = regularSeason.groupby(['Season', 'LTeamID']).agg({'LScore': ['sum', 'count', 'var']}).reset_index()
team_loss_score.columns = [' '.join(col).strip() for col in team_loss_score.columns.values]
del regularSeason
gc.collect()<merge>
xtest1 = xtest.merge(X_xTrain_CS2, left_on=['State', 'Country', 'Date'], right_on=['State_1', 'Country_1', 'Date_ACT'], how='inner')
xtest1
COVID19 Global Forecasting (Week 3)
8,755,132
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
train.head()<merge>
xtest1[(xtest1['Country_x']=='US')&(xtest1['State_x']=='New York')][['Date','ConfirmedCases','Fatalities']]
COVID19 Global Forecasting (Week 3)
8,755,132
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
test.head()<feature_engineering>
xtest1[(xtest1['Country_x']=='India')&(xtest1['State_x']=='India')][['Date','ConfirmedCases','Fatalities']]
COVID19 Global Forecasting (Week 3)
8,755,132
def preprocess(df):
    # x_* aggregates team 1's points (WScore from its wins plus LScore from
    # its losses); y_* mirrors this for team 2.
    df['x_score'] = df['WScore sum_x'] + df['LScore sum_y']
    df['y_score'] = df['WScore sum_y'] + df['LScore sum_x']
    df['x_count'] = df['WScore count_x'] + df['LScore count_y']
    df['y_count'] = df['WScore count_y'] + df['LScore count_x']
    df['x_var'] = df['WScore var_x'] + df['LScore var_y']
    df['y_var'] = df['WScore var_y'] + df['LScore var_x']
    return df

train = preprocess(train)
test = preprocess(test)<drop_column>
xtest1[['Fatalities', 'ConfirmedCases', 'ForecastId']].to_csv('submission.csv', index=False)
print("Submission file Created.....")
COVID19 Global Forecasting (Week 3)
8,779,099
train_win = train.copy()
train_los = train.copy()
train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
# Losing-perspective rows swap every team-1/team-2 column pair.
train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W', 'y_score', 'x_score', 'y_count', 'x_count', 'y_var', 'x_var']]
train_win.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
train_los.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
test.columns = ['ID', 'Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']<feature_engineering>
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from datetime import datetime
from pathlib import Path
from sklearn import preprocessing
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, LSTM, RNN, Dropout
from keras.callbacks import EarlyStopping
from keras import optimizers
from sklearn.preprocessing import StandardScaler, MinMaxScaler
COVID19 Global Forecasting (Week 3)
8,779,099
def feature_engineering(df):
    df['Seed_diff'] = df['Seed_1'] - df['Seed_2']
    df['Score_diff'] = df['Score_1'] - df['Score_2']
    df['Count_diff'] = df['Count_1'] - df['Count_2']
    df['Var_diff'] = df['Var_1'] - df['Var_2']
    df['Mean_score1'] = df['Score_1'] / df['Count_1']
    df['Mean_score2'] = df['Score_2'] / df['Count_2']
    df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2']
    df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1']
    df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2']
    return df

train_win = feature_engineering(train_win)
train_los = feature_engineering(train_los)
test = feature_engineering(test)<concatenate>
train = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/train.csv")
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/test.csv")
train.tail()
COVID19 Global Forecasting (Week 3)
8,779,099
data = pd.concat((train_win, train_los)).reset_index(drop=True)
print(data.shape)
data.head()<categorify>
mask = train['Date'].max()
world_cum_confirmed = sum(train[train['Date'] == mask].ConfirmedCases)
world_cum_fatal = sum(train[train['Date'] == mask].Fatalities)
COVID19 Global Forecasting (Week 3)
8,779,099
categoricals = ["TeamName_1", "TeamName_2"]
for c in categoricals:
    le = LabelEncoder()
    data[c] = data[c].fillna("NaN")
    data[c] = le.fit_transform(data[c])
    test[c] = le.transform(test[c])
data.head()<drop_column>
print('Number of Countries: ', len(train['Country_Region'].unique()))
print('Training dataset ends at: ', mask)
print('Number of cumulative confirmed cases worldwide: ', world_cum_confirmed)
print('Number of cumulative fatal cases worldwide: ', world_cum_fatal)
COVID19 Global Forecasting (Week 3)
8,779,099
target = 'result'
features = data.columns.values.tolist()
features.remove(target)<train_on_grid>
cum_per_country = train[train['Date'] == mask].groupby(['Date', 'Country_Region']).sum().sort_values(['ConfirmedCases'], ascending=False)
cum_per_country[:10]
COVID19 Global Forecasting (Week 3)
8,779,099
nn = NeuralNetworkModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler="MinMax", verbose=True)<train_model>
date = train['Date'].unique()
cc_us = train[train['Country_Region'] == 'US'].groupby(['Date']).sum().ConfirmedCases
ft_us = train[train['Country_Region'] == 'US'].groupby(['Date']).sum().Fatalities
cc_ity = train[train['Country_Region'] == 'Italy'].groupby(['Date']).sum().ConfirmedCases
ft_ity = train[train['Country_Region'] == 'Italy'].groupby(['Date']).sum().Fatalities
cc_spn = train[train['Country_Region'] == 'Spain'].groupby(['Date']).sum().ConfirmedCases
ft_spn = train[train['Country_Region'] == 'Spain'].groupby(['Date']).sum().Fatalities
cc_gmn = train[train['Country_Region'] == 'Germany'].groupby(['Date']).sum().ConfirmedCases
ft_gmn = train[train['Country_Region'] == 'Germany'].groupby(['Date']).sum().Fatalities
cc_frc = train[train['Country_Region'] == 'France'].groupby(['Date']).sum().ConfirmedCases
ft_frc = train[train['Country_Region'] == 'France'].groupby(['Date']).sum().Fatalities
fig = go.Figure()
fig.add_trace(go.Scatter(x=date, y=cc_us, name='US'))
fig.add_trace(go.Scatter(x=date, y=cc_ity, name='Italy'))
fig.add_trace(go.Scatter(x=date, y=cc_spn, name='Spain'))
fig.add_trace(go.Scatter(x=date, y=cc_gmn, name='Germany'))
fig.add_trace(go.Scatter(x=date, y=cc_frc, name='France'))
fig.update_layout(title="Plot of Cumulative Cases for Top 5 countries (except China)", xaxis_title="Date", yaxis_title="Cases")
fig.update_xaxes(nticks=30)
fig.show()
COVID19 Global Forecasting (Week 3)
8,779,099
lgbm = LgbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True)<create_dataframe>
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train['Country_Region'] = train['Country_Region'].astype(str)
test['Country_Region'] = test['Country_Region'].astype(str)
COVID19 Global Forecasting (Week 3)
8,779,099
catb = CatbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True)<load_from_csv>
EMPTY_VAL = "EMPTY_VAL"

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state

train['Province_State'].fillna(EMPTY_VAL, inplace=True)
train['Province_State'] = train.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'].fillna(EMPTY_VAL, inplace=True)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
COVID19 Global Forecasting (Week 3)
8,779,099
submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
submission_df['Pred'] = 0.7 * lgbm.y_pred + 0.2 * catb.y_pred + 0.1 * nn.y_pred
submission_df<save_to_csv>
le = preprocessing.LabelEncoder()
train['country_encoder'] = le.fit_transform(train['Country_Region'])
train['date_int'] = train['Date'].apply(lambda x: datetime.strftime(x, '%m%d')).astype(int)
test['country_encoder'] = le.transform(test['Country_Region'])
test['date_int'] = test['Date'].apply(lambda x: datetime.strftime(x, '%m%d')).astype(int)
COVID19 Global Forecasting (Week 3)
8,779,099
submission_df.to_csv('submission.csv', index=False)<set_options>
le = preprocessing.LabelEncoder()
train['province_encoder'] = le.fit_transform(train['Province_State'])
test['province_encoder'] = le.transform(test['Province_State'])
COVID19 Global Forecasting (Week 3)
8,779,099
pd.set_option('max_columns', None)
plt.style.use('fivethirtyeight')
%matplotlib inline
py.init_notebook_mode(connected=True)
warnings.filterwarnings('ignore')
print("Libraries imported!")<train_model>
start_time = time.time()
country = train['Country_Region'].drop_duplicates()
train_df = train.copy()
train_df.rename(columns={'Date': 'date', 'ConfirmedCases': 'cc_cases', 'Fatalities': 'ft_cases', 'Country_Region': 'country', 'Province_State': 'province'}, inplace=True)
lags = np.arange(1, 8, 1)
with tqdm(total=len(list(train_df['date'].unique()))) as pbar:
    for d in train_df['date'].drop_duplicates():
        for i in country:
            province = train_df[train_df['country'] == i]['province'].drop_duplicates()
            for j in province:
                mask = (train_df['date'] == d) & (train_df['country'] == i) & (train_df['province'] == j)
                for lag in lags:
                    mask_org = (train_df['date'] == (d - pd.Timedelta(days=lag))) & (train_df['country'] == i) & (train_df['province'] == j)
                    try:
                        train_df.loc[mask, 'cc_cases_' + str(lag)] = train_df.loc[mask_org, 'cc_cases'].values
                    except:
                        train_df.loc[mask, 'cc_cases_' + str(lag)] = 0
                    try:
                        train_df.loc[mask, 'ft_cases_' + str(lag)] = train_df.loc[mask_org, 'ft_cases'].values
                    except:
                        train_df.loc[mask, 'ft_cases_' + str(lag)] = 0
        pbar.update(1)
print('Time spent for building features is {} minutes'.format(round((time.time() - start_time) / 60, 1)))
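The triple loop above rebuilds the lag features one (date, country, province) cell at a time, which is slow; the earlier cell for kernel 8,755,132 builds the same kind of lags with groupby().shift(). A minimal vectorized sketch in that style, assuming train_df is sorted by date within each (country, province) group:

# Vectorized alternative (sketch): same cc_cases_n / ft_cases_n columns.
for lag in range(1, 8):
    shifted = train_df.groupby(['country', 'province'])[['cc_cases', 'ft_cases']].shift(lag)
    train_df['cc_cases_' + str(lag)] = shifted['cc_cases'].fillna(0)
    train_df['ft_cases_' + str(lag)] = shifted['ft_cases'].fillna(0)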
COVID19 Global Forecasting (Week 3)
8,779,099
train_df = train_df[train_df['date_int'] >= 301]
train_df['weekday'] = train_df['date'].dt.weekday
train_df[train_df['country'] == 'Italy'].tail(10)
COVID19 Global Forecasting (Week 3)
8,779,099
def split_train_val(df, val_ratio):
    val_len = int(len(df) * val_ratio)
    train_set = df[:-val_len]
    val_set = df[-val_len:]
    return train_set, val_set
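Because the frames are sorted chronologically before this step, slicing the last val_ratio of rows off keeps the validation window strictly in the future. A quick sketch of the behaviour on a toy frame (illustrative only, not competition data):

import pandas as pd

toy = pd.DataFrame({'date_int': range(100)})  # stand-in for the date-sorted training frame
tr, va = split_train_val(toy, val_ratio=0.05)
print(len(tr), len(va))      # 95 5 -> the final 5% of rows become validation
print(va['date_int'].min())  # 95 -> validation rows are the most recent dates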
COVID19 Global Forecasting (Week 3)
8,779,099
test_fixed_cols = ['ForecastId', 'Province_State', 'Country_Region', 'Date']
fixed_cols = ['Id', 'province', 'country', 'date']
output_cols = ['cc_cases', 'ft_cases']
input_cols = list(set(train_df.columns.to_list()) - set(fixed_cols) - set(output_cols))
print('output columns are ', output_cols)
print('input columns are ', input_cols)
X = train_df[input_cols]
y = train_df[output_cols]
COVID19 Global Forecasting (Week 3)
8,779,099
cc_input = ['country_encoder', 'province_encoder', 'weekday', 'date_int', 'cc_cases_1', 'cc_cases_2', 'cc_cases_3', 'cc_cases_4', 'cc_cases_5', 'cc_cases_6', 'cc_cases_7']
ft_input = ['country_encoder', 'province_encoder', 'weekday', 'date_int', 'ft_cases_1', 'ft_cases_2', 'ft_cases_3', 'ft_cases_4', 'ft_cases_5', 'ft_cases_6', 'ft_cases_7']
cc_output = ['cc_cases']
ft_output = ['ft_cases']
val_ratio = 0.05
X_cc = X[cc_input]
X_ft = X[ft_input]
y_cc = y[cc_output]
y_ft = y[ft_output]
train_X_cc, val_X_cc = split_train_val(df=X_cc, val_ratio=val_ratio)
train_y_cc, val_y_cc = split_train_val(df=y_cc, val_ratio=val_ratio)
train_X_ft, val_X_ft = split_train_val(df=X_ft, val_ratio=val_ratio)
train_y_ft, val_y_ft = split_train_val(df=y_ft, val_ratio=val_ratio)
COVID19 Global Forecasting (Week 3)
8,779,099
COVID19 Global Forecasting (Week 3)
8,779,099
COVID19 Global Forecasting (Week 3)
8,779,099
X_train_cc = train_X_cc.to_numpy()
X_val_cc = val_X_cc.to_numpy()
X_train_ft = train_X_ft.to_numpy()
X_val_ft = val_X_ft.to_numpy()
y_train_cc = train_y_cc.to_numpy()
y_val_cc = val_y_cc.to_numpy()
y_train_ft = train_y_ft.to_numpy()
y_val_ft = val_y_ft.to_numpy()
COVID19 Global Forecasting (Week 3)
8,779,099
def root_mean_squared_log_error(y_true, y_pred):
    return K.sqrt(K.mean(K.square(K.log(y_pred + 1) - K.log(y_true + 1))))
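The same metric can be sanity-checked outside the Keras graph; this NumPy twin (an offline helper, not part of the original notebook) should agree with the loss above:

import numpy as np

def rmsle_np(y_true, y_pred):
    # NumPy equivalent of the Keras RMSLE loss defined above
    return np.sqrt(np.mean(np.square(np.log1p(y_pred) - np.log1p(y_true))))

print(rmsle_np(np.array([10.0, 100.0]), np.array([12.0, 90.0])))  # small error on the log scale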
COVID19 Global Forecasting (Week 3)
8,779,099
def LSTM_model(n_1, input_dim, output_dim):
    model = Sequential()
    # expects input shaped (samples, 1, input_dim): one timestep of input_dim features
    model.add(LSTM(n_1, input_shape=(1, input_dim), activation='relu'))
    model.add(Dense(output_dim, activation='relu'))
    model.compile(loss=root_mean_squared_log_error, optimizer='adam')
    return model
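Since the LSTM layer is declared with input_shape=(1, input_dim), the 2-D matrices produced by to_numpy() above need a singleton timestep axis before fit/predict. The notebook's own reshape cell is not shown here, so the step below is an assumed sketch of what must happen somewhere in between:

# hypothetical reshape to (samples, timesteps=1, features); the original notebook
# must do something equivalent before calling model.fit / model.predict
X_train_cc = X_train_cc.reshape((X_train_cc.shape[0], 1, X_train_cc.shape[1]))
X_val_cc = X_val_cc.reshape((X_val_cc.shape[0], 1, X_val_cc.shape[1]))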
COVID19 Global Forecasting (Week 3)
8,779,099
K.clear_session()
model_cc = LSTM_model(4, X_train_cc.shape[-1], y_train_cc.shape[-1])
model_ft = LSTM_model(4, X_train_ft.shape[-1], y_train_ft.shape[-1])
early_stop_cc = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='min')
early_stop_ft = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='min')
COVID19 Global Forecasting (Week 3)
8,779,099
print('Start model training')
start_time = time.time()
history_cc = model_cc.fit(X_train_cc, y_train_cc, batch_size=16, epochs=100, validation_data=(X_val_cc, y_val_cc), verbose=2, callbacks=[early_stop_cc])
model_cc.save("model_cc.h5")
print('Time spent for model training is {} minutes'.format(round((time.time() - start_time) / 60, 1)))
COVID19 Global Forecasting (Week 3)
8,779,099
print('Start model training')
start_time = time.time()
history_ft = model_ft.fit(X_train_ft, y_train_ft, batch_size=16, epochs=8, validation_data=(X_val_ft, y_val_ft), verbose=2, callbacks=[early_stop_ft])
model_ft.save("model_ft.h5")
print('Time spent for model training is {} minutes'.format(round((time.time() - start_time) / 60, 1)))
COVID19 Global Forecasting (Week 3)
8,779,099
not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist()]
print(not_exist_in_test)
train = train.drop(not_exist_in_test, axis=1)
train.head()<groupby>
yhat_val_cc = model_cc.predict(X_val_cc)
print(yhat_val_cc[50:70])
COVID19 Global Forecasting (Week 3)
8,779,099
team_win_score = regularSeason.groupby(['Season', 'WTeamID']).agg({'WScore': ['sum', 'count', 'var']}).reset_index()
team_win_score.columns = [' '.join(col).strip() for col in team_win_score.columns.values]
team_loss_score = regularSeason.groupby(['Season', 'LTeamID']).agg({'LScore': ['sum', 'count', 'var']}).reset_index()
team_loss_score.columns = [' '.join(col).strip() for col in team_loss_score.columns.values]
del regularSeason
gc.collect()<merge>
yhat_val_ft = model_ft.predict(X_val_ft)
print(yhat_val_ft[60:70])
COVID19 Global Forecasting (Week 3)
8,779,099
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
train.head()<merge>
submission = pd.DataFrame()
submission['ForecastId'] = test_df['ForecastId']
submission['ConfirmedCases'] = test_df['cc_cases']
submission['Fatalities'] = test_df['ft_cases']
COVID19 Global Forecasting (Week 3)
8,779,099
<feature_engineering><EOS>
submission.to_csv("submission.csv",index=False )
COVID19 Global Forecasting (Week 3)
8,810,090
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<drop_column>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.arima_model import ARIMA
from random import random
COVID19 Global Forecasting (Week 3)
8,810,090
train_win = train.copy()
train_los = train.copy()
train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W', 'y_score', 'x_score', 'x_count', 'y_count', 'x_var', 'y_var']]
train_win.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
train_los.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
test.columns = ['ID', 'Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']<feature_engineering>
PATH_WEEK2 = '/kaggle/input/covid19-global-forecasting-week-3'
df_train = pd.read_csv(f'{PATH_WEEK2}/train.csv')
df_test = pd.read_csv(f'{PATH_WEEK2}/test.csv')
df_train.head()
df_test.head()
df_train.rename(columns={'Country_Region': 'Country'}, inplace=True)
df_test.rename(columns={'Country_Region': 'Country'}, inplace=True)
df_train.rename(columns={'Province_State': 'State'}, inplace=True)
df_test.rename(columns={'Province_State': 'State'}, inplace=True)
df_train['Date'] = pd.to_datetime(df_train['Date'], infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(df_test['Date'], infer_datetime_format=True)
df_train.info()
df_test.info()
y1_Train = df_train.iloc[:, -2]
y1_Train.head()
y2_Train = df_train.iloc[:, -1]
y2_Train.head()
EMPTY_VAL = "EMPTY_VAL"

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state
COVID19 Global Forecasting (Week 3)
8,810,090
def feature_engineering(df):
    df['Seed_diff'] = df['Seed_1'] - df['Seed_2']
    df['Score_diff'] = df['Score_1'] - df['Score_2']
    df['Count_diff'] = df['Count_1'] - df['Count_2']
    df['Var_diff'] = df['Var_1'] - df['Var_2']
    df['Mean_score1'] = df['Score_1'] / df['Count_1']
    df['Mean_score2'] = df['Score_2'] / df['Count_2']
    df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2']
    df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1']
    df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2']
    return df

train_win = feature_engineering(train_win)
train_los = feature_engineering(train_los)
test = feature_engineering(test)<concatenate>
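The two Fano-factor features are variance-to-mean ratios of a team's scoring, a dispersion measure (about 1 for Poisson-like scoring, larger for streakier teams). A tiny illustration with made-up numbers:

import pandas as pd

toy = pd.DataFrame({'Seed_1': [3], 'Seed_2': [7],
                    'Score_1': [900.0], 'Count_1': [30.0], 'Var_1': [120.0],
                    'Score_2': [870.0], 'Count_2': [30.0], 'Var_2': [60.0]})
toy = feature_engineering(toy)
print(toy[['Mean_score1', 'FanoFactor_1', 'FanoFactor_2']])
# mean 30 points/game; Fano 120/30 = 4.0 vs 60/29 ~ 2.07 -> team 1 scores more erratically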
X_Train = df_train.copy()
X_Train['State'].fillna(EMPTY_VAL, inplace=True)
X_Train['State'] = X_Train.loc[:, ['State', 'Country']].apply(lambda x: fillState(x['State'], x['Country']), axis=1)
X_Train.loc[:, 'Date'] = X_Train.Date.dt.strftime("%m%d")
X_Train["Date"] = X_Train["Date"].astype(int)
X_Train.head()
X_Test = df_test.copy()
X_Test['State'].fillna(EMPTY_VAL, inplace=True)
X_Test['State'] = X_Test.loc[:, ['State', 'Country']].apply(lambda x: fillState(x['State'], x['Country']), axis=1)
X_Test.loc[:, 'Date'] = X_Test.Date.dt.strftime("%m%d")
X_Test["Date"] = X_Test["Date"].astype(int)
X_Test.head()
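Encoding Date as int(strftime('%m%d')) turns each calendar day into the integer MMDD, which stays monotonically increasing within the competition window; a quick illustration:

import pandas as pd

d = pd.Series(pd.to_datetime(['2020-03-09', '2020-04-01']))
print(d.dt.strftime('%m%d').astype(int).tolist())  # [309, 401]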
COVID19 Global Forecasting (Week 3)
8,810,090
data = pd.concat((train_win, train_los)).reset_index(drop=True)
print(data.shape)
data.head()<categorify>
le = preprocessing.LabelEncoder()
X_Train.Country = le.fit_transform(X_Train.Country)
X_Train['State'] = le.fit_transform(X_Train['State'])
X_Train.head()
# note: fit_transform is called again on the test frame, so the codes only line up
# because train and test share the same sorted label vocabulary in this competition
X_Test.Country = le.fit_transform(X_Test.Country)
X_Test['State'] = le.fit_transform(X_Test['State'])
X_Test.head()
df_train.head()
df_train.loc[df_train.Country == 'Afghanistan', :]
df_test.tail()
COVID19 Global Forecasting (Week 3)
8,810,090
categoricals = ["TeamName_1", "TeamName_2"] for c in categoricals: le = LabelEncoder() data[c] = data[c].fillna("NaN") data[c] = le.fit_transform(data[c]) test[c] = le.transform(test[c]) data.head()<drop_column>
filterwarnings('ignore')
le = preprocessing.LabelEncoder()
countries = X_Train.Country.unique()
df_out = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
for country in countries:
    states = X_Train.loc[X_Train.Country == country, :].State.unique()
    for state in states:
        X_Train_CS = X_Train.loc[(X_Train.Country == country) & (X_Train.State == state), ['State', 'Country', 'Date', 'ConfirmedCases', 'Fatalities']]
        y1_Train_CS = X_Train_CS.loc[:, 'ConfirmedCases']
        y2_Train_CS = X_Train_CS.loc[:, 'Fatalities']
        X_Train_CS = X_Train_CS.loc[:, ['State', 'Country', 'Date']]
        X_Train_CS.Country = le.fit_transform(X_Train_CS.Country)
        X_Train_CS['State'] = le.fit_transform(X_Train_CS['State'])
        X_Test_CS = X_Test.loc[(X_Test.Country == country) & (X_Test.State == state), ['State', 'Country', 'Date', 'ForecastId']]
        X_Test_CS_Id = X_Test_CS.loc[:, 'ForecastId']
        X_Test_CS = X_Test_CS.loc[:, ['State', 'Country', 'Date']]
        X_Test_CS.Country = le.fit_transform(X_Test_CS.Country)
        X_Test_CS['State'] = le.fit_transform(X_Test_CS['State'])
        model1 = XGBRegressor(n_estimators=1000)
        model1.fit(X_Train_CS, y1_Train_CS)
        y1_pred = model1.predict(X_Test_CS)
        model2 = XGBRegressor(n_estimators=1000)
        model2.fit(X_Train_CS, y2_Train_CS)
        y2_pred = model2.predict(X_Test_CS)
        df = pd.DataFrame({'ForecastId': X_Test_CS_Id, 'ConfirmedCases': y1_pred, 'Fatalities': y2_pred})
        df_out = pd.concat([df_out, df], axis=0)
COVID19 Global Forecasting (Week 3)
8,810,090
<train_on_grid><EOS>
df_out.ForecastId = df_out.ForecastId.astype('int')
df_out.tail()
df_out.to_csv('submission.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,794,000
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<train_model>
folder = '/kaggle/input/covid19-global-forecasting-week-3/'
submission = pd.read_csv(folder + 'submission.csv', index_col=0)
test = pd.read_csv(folder + 'test.csv', index_col=0)
test['Date'] = pd.to_datetime(test['Date'], format='%Y-%m-%d')
test['Province_State'] = test['Province_State'].fillna(value='NaN')
data_cases = format_kaggle(folder, 'ConfirmedCases')
data_deaths = format_kaggle(folder, 'Fatalities')
COVID19 Global Forecasting (Week 3)
8,794,000
lgbm = LgbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True)<create_dataframe>
params_cases = fit_all(data_cases, plot=True, ylabel='Confirmed Cases', p0=1e2, prior=(8, 500))
params_cases.to_csv('params_cases.csv')
COVID19 Global Forecasting (Week 3)
8,794,000
catb = CatbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True)<load_from_csv>
params_deaths = fit_all(data_deaths, plot=True, ylabel='Fatalities', p0=10, prior=(8, 200))
params_deaths.to_csv('params_deaths.csv')
COVID19 Global Forecasting (Week 3)
8,794,000
<save_to_csv><EOS>
for item in submission.index:
    country = test.loc[item, 'Country_Region']
    province = test.loc[item, 'Province_State']
    t_abs = test.loc[item, 'Date']
    t = (t_abs - tref) / pd.to_timedelta(1, unit='days')
    th, logK, sigma = params_cases[['th', 'logK', 'sigma']].loc[country, province]
    if not np.isnan(th):
        tau = (t - th) / (np.sqrt(2) * sigma)
        submission.loc[item, 'ConfirmedCases'] = np.exp(logK) * (1 + erf(tau)) / 2
    else:
        if t_abs in data_cases.index:
            submission.loc[item, 'ConfirmedCases'] = data_cases[country, province].loc[t_abs]
        else:
            submission.loc[item, 'ConfirmedCases'] = data_cases[country, province].max()
    th, logK, sigma = params_deaths[['th', 'logK', 'sigma']].loc[country, province]
    if not np.isnan(th):
        tau = (t - th) / (np.sqrt(2) * sigma)
        submission.loc[item, 'Fatalities'] = np.exp(logK) * (1 + erf(tau)) / 2
    else:
        if t_abs in data_deaths.index:
            submission.loc[item, 'Fatalities'] = data_deaths[country, province].loc[t_abs]
        else:
            submission.loc[item, 'Fatalities'] = data_deaths[country, province].max()
submission.to_csv('submission.csv')
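The prediction formula is the CDF of a Gaussian epidemic pulse, exp(logK)/2 * (1 + erf((t - th) / (sqrt(2) * sigma))): it climbs from 0 to the final size K with its inflection at the peak day th. A standalone numerical check (illustrative parameter values):

import numpy as np
from scipy.special import erf

def cumulative_curve(t, th, logK, sigma):
    # cumulative count under a Gaussian daily-cases curve
    tau = (t - th) / (np.sqrt(2) * sigma)
    return np.exp(logK) * (1 + erf(tau)) / 2

print(cumulative_curve(np.array([0.0, 50.0, 100.0]), th=50.0, logK=np.log(1000.0), sigma=10.0))
# -> roughly [0, 500, 1000]: half of K is reached exactly at the peak day th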
COVID19 Global Forecasting (Week 3)
8,825,784
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<load_from_csv>
warnings.filterwarnings('ignore')
daybasecount = 4
baseday = 98 - float(daybasecount - 1) / 2.
exponent = 1. / float(daybasecount)
fatalityBaseDayShift = 10
maxincrease = 140
maxDeadPrDay = 1500
COVID19 Global Forecasting (Week 3)
8,825,784
path = '../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/'
tourney_result = pd.read_csv(path + 'WDataFiles_Stage1/WNCAATourneyCompactResults.csv')
tourney_seed = pd.read_csv(path + 'WDataFiles_Stage1/WNCAATourneySeeds.csv')
season_result = pd.read_csv(path + 'WDataFiles_Stage1/WRegularSeasonCompactResults.csv')<load_from_csv>
dftrain = pd.read_csv('../input/covid19-global-forecasting-week-3/train.csv', parse_dates=['Date']).sort_values(by=['Country_Region', 'Date'])
dftest = pd.read_csv('../input/covid19-global-forecasting-week-3/test.csv', parse_dates=['Date']).sort_values(by=['Country_Region', 'Date'])
ppp_tabel = pd.read_csv('../input/country-ppp/Country_PPP.csv', sep='\s+')
ppp_tabel.drop('Id', 1, inplace=True)
ppp_tabel = ppp_tabel.append({'Country': 'Burma', 'ppp': 8000}, ignore_index=True)
ppp_tabel = ppp_tabel.append({'Country': 'MS_Zaandam', 'ppp': 40000}, ignore_index=True)
ppp_tabel = ppp_tabel.append({'Country': 'West_Bank_and_Gaza', 'ppp': 20000}, ignore_index=True)
ppp_tabel["Country"].replace('_', ' ', regex=True, inplace=True)
ppp_tabel["Country"].replace('United States', 'US', regex=True, inplace=True)
ppp_tabel.rename(columns={'Country': 'Country_Region'}, inplace=True)
ppp_tabel.sort_values('Country_Region', inplace=True)
COVID19 Global Forecasting (Week 3)
8,825,784
test_df = pd.read_csv(path + 'WSampleSubmissionStage1_2020.csv')<drop_column>
dftrain['Dayofyear'] = dftrain['Date'].dt.dayofyear
dftest['Dayofyear'] = dftest['Date'].dt.dayofyear
dftest['Expo'] = dftest['Dayofyear'] - baseday
print(dftrain.tail(5))
dftest = dftest.merge(dftrain[['Country_Region', 'Province_State', 'Date', 'ConfirmedCases', 'Fatalities']], on=['Country_Region', 'Province_State', 'Date'], how='left', indicator=True)
COVID19 Global Forecasting (Week 3)
8,825,784
tourney_result = tourney_result.drop(['DayNum', 'WScore', 'LScore', 'WLoc', 'NumOT'], axis=1)
tourney_result<merge>
grouped=dftrain.groupby(['Country_Region','Province_State'] ).tail(daybasecount*2) grouped=grouped.groupby(['Country_Region','Province_State'] ).head(daybasecount) grouped.drop(['FatalityBasis'],axis=1,inplace=True) to_sum = ['NewCases','NewFatalities'] grouped1 = grouped.groupby(['Country_Region'])[to_sum].sum() grouped1.rename(columns={'NewCases':'NewCases1','NewFatalities':'NewFatalities1'}, inplace=True) grouped = pd.merge(grouped1, grouped2, on=['Country_Region']) grouped['CasesIncreasePct'] = 100*(grouped['NewCases2']/grouped['NewCases1']-1) mask = grouped['CasesIncreasePct'] > maxincrease grouped.loc[mask,'CasesIncreasePct'] = maxincrease mask = grouped['CasesIncreasePct'] < 0 grouped.loc[mask,'CasesIncreasePct'] = 0 mask = grouped['CasesIncreasePct'].isnull() grouped.loc[mask,'CasesIncreasePct'] = 0 grouped['Factor'] =(grouped['CasesIncreasePct']/100+1)**exponent grouped = pd.merge(grouped, ppp_tabel, on=['Country_Region']) grouped['ppp'] = grouped['ppp']/10000. if False: mask =(grouped['FatalityPct2'] > 9)&(grouped['ppp'] <= 1) grouped.loc[mask,'FatalityPct2'] = 5 mask =(grouped['FatalityPct2'] < 5)&(grouped['ppp'] <= 1) grouped.loc[mask,'FatalityPct2'] = 5 mask =(grouped['FatalityPct2'] > 6)&(grouped['ppp'] >= 7) grouped.loc[mask,'FatalityPct2'] = 6 mask =(grouped['FatalityPct2'] < 1.5)&(grouped['ppp'] >= 7) grouped.loc[mask,'FatalityPct2'] = 1.5 mask =(grouped['FatalityPct2'] >(9.5 - 0.43*grouped['ppp'])) &(grouped['ppp'] > 1)&(grouped['ppp'] < 7) grouped.loc[mask,'FatalityPct2'] =(9.5 - 0.43*grouped['ppp']) mask =(grouped['FatalityPct2'] <(5.6 - 0.5*grouped['ppp'])) &(grouped['ppp'] > 1)&(grouped['ppp'] < 7) grouped.loc[mask,'FatalityPct2'] =(5.6 - 0.5*grouped['ppp']) mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] <= 1) grouped.loc[mask,'FatalityPct2'] = 7 mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] >= 7) grouped.loc[mask,'FatalityPct2'] = 4 mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] > 1)&(grouped['ppp'] < 7) grouped.loc[mask,'FatalityPct2'] =(7.5 - 0.5*grouped['ppp']) else: mask =(grouped['FatalityPct2'] > 4)&(grouped['ppp'] <= 1) grouped.loc[mask,'FatalityPct2'] = 3 mask =(grouped['FatalityPct2'] < 1)&(grouped['ppp'] <= 1) grouped.loc[mask,'FatalityPct2'] = 2 mask =(grouped['FatalityPct2'] > 1.5)&(grouped['ppp'] >= 7) grouped.loc[mask,'FatalityPct2'] = 1.5 mask =(grouped['FatalityPct2'] < 0.5)&(grouped['ppp'] >= 7) grouped.loc[mask,'FatalityPct2'] = 0.5 mask =(grouped['FatalityPct2'] >(4.5 - 0.43*grouped['ppp'])) &(grouped['ppp'] > 1)&(grouped['ppp'] < 7) grouped.loc[mask,'FatalityPct2'] =(4.5 - 0.43*grouped['ppp']) mask =(grouped['FatalityPct2'] <(1.1 - 0.1*grouped['ppp'])) &(grouped['ppp'] > 1)&(grouped['ppp'] < 7) grouped.loc[mask,'FatalityPct2'] =(1.1 - 0.1*grouped['ppp']) mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] <= 1) grouped.loc[mask,'FatalityPct2'] = 3 mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] >= 7) grouped.loc[mask,'FatalityPct2'] = 1 mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] > 1)&(grouped['ppp'] < 7) grouped.loc[mask,'FatalityPct2'] =(2.6 - 0.23*grouped['ppp'] )
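The growth factor above is the daybasecount-th root of the window-over-window case increase, Factor = (1 + CasesIncreasePct/100) ** (1/daybasecount), i.e. a per-day multiplier that compounds back to the observed multi-day growth. A quick numeric check (illustrative values):

daybasecount = 4
exponent = 1. / daybasecount
pct = 40.0                       # cases grew 40% between two 4-day windows
factor = (1 + pct / 100) ** exponent
print(factor)                    # ~1.088 -> about 8.8% growth per day
print(factor ** daybasecount)    # 1.4 -> compounds back to the 40% window increase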
COVID19 Global Forecasting (Week 3)
8,825,784
tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Seed': 'WSeed'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1)
tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Seed': 'LSeed'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1)
tourney_result<categorify>
dftest.drop('_merge', axis=1, inplace=True)
dftest = dftest.merge(grouped[['Country_Region', 'FatalityPct2', 'Factor']], on=['Country_Region'], how='left')
dftest = dftest.merge(grouped_gem[['Province_State', 'Country_Region', 'ConfirmedCases_base', 'ConfirmedCases_init', 'NewCases_base', 'Fatalities_init', 'FatalityBasis']], on=['Province_State', 'Country_Region'], how='left')
COVID19 Global Forecasting (Week 3)
8,825,784
def get_seed(x):
    return int(x[1:3])

tourney_result['WSeed'] = tourney_result['WSeed'].map(lambda x: get_seed(x))
tourney_result['LSeed'] = tourney_result['LSeed'].map(lambda x: get_seed(x))
tourney_result<concatenate>
dftest['ConfirmedCases_shift'] = dftest.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases']].transform(lambda x: x.shift(1))
mask = dftest['ConfirmedCases'].isnull()
dftest.loc[mask, 'NewCases'] = dftest.loc[mask, 'NewCases_base'] * (dftest.loc[mask, 'Factor'] ** dftest.loc[mask, 'Expo'])
dftest['NewCases_cum'] = dftest.groupby(['Country_Region', 'Province_State'])[['NewCases']].cumsum()
dftest.loc[mask, 'ConfirmedCases'] = dftest.loc[mask, 'ConfirmedCases_init'] + dftest.loc[mask, 'NewCases_cum']
mask3 = dftest['ConfirmedCases'] > 400000
dftest.loc[mask3, 'FatalityPct2'] = dftest.loc[mask3, 'FatalityPct2'] * 0.7
mask4 = dftest['ConfirmedCases'] > 800000
dftest.loc[mask4, 'FatalityPct2'] = dftest.loc[mask4, 'FatalityPct2'] * 0.7
dftest['FatalityBasis'] = dftest.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases']].transform(lambda x: x.shift(10))
dftest.loc[mask, 'NewFatalities'] = dftest.loc[mask, 'FatalityBasis'] * dftest.loc[mask, 'FatalityPct2'] / 100
mask2 = dftest['NewFatalities'] > maxDeadPrDay
dftest.loc[mask2, 'NewFatalities'] = maxDeadPrDay
dftest['NewFatalities_cum'] = dftest.groupby(['Country_Region', 'Province_State'])[['NewFatalities']].cumsum()
dftest.loc[mask, 'Fatalities'] = dftest.loc[mask, 'Fatalities_init'] + dftest.loc[mask, 'NewFatalities_cum']
COVID19 Global Forecasting (Week 3)
8,825,784
season_win_result = season_result[['Season', 'WTeamID', 'WScore']]
season_lose_result = season_result[['Season', 'LTeamID', 'LScore']]
season_win_result.rename(columns={'WTeamID': 'TeamID', 'WScore': 'Score'}, inplace=True)
season_lose_result.rename(columns={'LTeamID': 'TeamID', 'LScore': 'Score'}, inplace=True)
season_result = pd.concat((season_win_result, season_lose_result)).reset_index(drop=True)
season_result<groupby>
dftest.drop(['Dayofyear', 'Expo', 'FatalityPct2', 'Factor', 'ConfirmedCases_base', 'ConfirmedCases_init', 'NewCases_base', 'Fatalities_init', 'FatalityBasis', 'ConfirmedCases_shift', 'NewCases', 'NewCases_cum', 'NewFatalities', 'NewFatalities_cum'], axis=1, inplace=True)
final = dftest.groupby(['Country_Region', 'Province_State']).tail(1)
dftest.drop(['Province_State'], axis=1, inplace=True)
dftest.rename(columns={'Province_State_orig': 'Province_State'}, inplace=True)
COVID19 Global Forecasting (Week 3)
8,825,784
season_score = season_result.groupby(['Season', 'TeamID'])['Score'].sum().reset_index()
season_score<merge>
mask = dftest["ConfirmedCases"].isnull() print(mask.sum()) errors = dftest.loc[mask] print(errors) mask = dftest["Fatalities"].isnull() print(mask.sum()) errors = dftest.loc[mask] print(errors) dftest.drop(['Province_State','Country_Region','Date'],axis=1,inplace=True) print("dftest columns =",dftest.columns)
COVID19 Global Forecasting (Week 3)
8,825,784
<drop_column><EOS>
dftest.ForecastId = dftest.ForecastId.astype('int')
dftest['ConfirmedCases'] = dftest['ConfirmedCases'].round().astype(int)
dftest['Fatalities'] = dftest['Fatalities'].round().astype(int)
dftest.to_csv('submission.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,824,227
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<prepare_output>
%matplotlib inline
InteractiveShell.ast_node_interactivity = "all"
pd.set_option('display.max_columns', 99)
pd.set_option('display.max_rows', 99)
COVID19 Global Forecasting (Week 3)
8,824,227
tourney_lose_result = tourney_win_result.copy()
tourney_lose_result['Seed1'] = tourney_win_result['Seed2']
tourney_lose_result['Seed2'] = tourney_win_result['Seed1']
tourney_lose_result['ScoreT1'] = tourney_win_result['ScoreT2']
tourney_lose_result['ScoreT2'] = tourney_win_result['ScoreT1']
tourney_lose_result<feature_engineering>
plt.rcParams['figure.figsize'] = [16, 10]
plt.rcParams['font.size'] = 14
sns.set_palette(sns.color_palette('tab20', 20))
COVID19 Global Forecasting (Week 3)
8,824,227
tourney_win_result['Seed_diff'] = tourney_win_result['Seed1'] - tourney_win_result['Seed2']
tourney_win_result['ScoreT_diff'] = tourney_win_result['ScoreT1'] - tourney_win_result['ScoreT2']
tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2']
tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']<feature_engineering>
COMP = '../input/covid19-global-forecasting-week-3'
DATEFORMAT = '%Y-%m-%d'

def get_comp_data(COMP):
    train = pd.read_csv(f'{COMP}/train.csv')
    test = pd.read_csv(f'{COMP}/test.csv')
    submission = pd.read_csv(f'{COMP}/submission.csv')
    print(train.shape, test.shape, submission.shape)
    train['Country_Region'] = train['Country_Region'].str.replace(',', '')
    test['Country_Region'] = test['Country_Region'].str.replace(',', '')
    train['Location'] = train['Country_Region'] + '-' + train['Province_State'].fillna('')
    test['Location'] = test['Country_Region'] + '-' + test['Province_State'].fillna('')
    train['LogConfirmed'] = to_log(train.ConfirmedCases)
    train['LogFatalities'] = to_log(train.Fatalities)
    train = train.drop(columns=['Province_State'])
    test = test.drop(columns=['Province_State'])
    country_codes = pd.read_csv('../input/covid19-metadata/country_codes.csv', keep_default_na=False)
    train = train.merge(country_codes, on='Country_Region', how='left')
    test = test.merge(country_codes, on='Country_Region', how='left')
    train['DateTime'] = pd.to_datetime(train['Date'])
    test['DateTime'] = pd.to_datetime(test['Date'])
    return train, test, submission

def process_each_location(df):
    dfs = []
    for loc, df in tqdm(df.groupby('Location')):
        df = df.sort_values(by='Date')
        df['Fatalities'] = df['Fatalities'].cummax()
        df['ConfirmedCases'] = df['ConfirmedCases'].cummax()
        df['LogFatalities'] = df['LogFatalities'].cummax()
        df['LogConfirmed'] = df['LogConfirmed'].cummax()
        df['LogConfirmedNextDay'] = df['LogConfirmed'].shift(-1)
        df['ConfirmedNextDay'] = df['ConfirmedCases'].shift(-1)
        df['DateNextDay'] = df['Date'].shift(-1)
        df['LogFatalitiesNextDay'] = df['LogFatalities'].shift(-1)
        df['FatalitiesNextDay'] = df['Fatalities'].shift(-1)
        df['LogConfirmedDelta'] = df['LogConfirmedNextDay'] - df['LogConfirmed']
        df['ConfirmedDelta'] = df['ConfirmedNextDay'] - df['ConfirmedCases']
        df['LogFatalitiesDelta'] = df['LogFatalitiesNextDay'] - df['LogFatalities']
        df['FatalitiesDelta'] = df['FatalitiesNextDay'] - df['Fatalities']
        dfs.append(df)
    return pd.concat(dfs)

def add_days(d, k):
    return dt.datetime.strptime(d, DATEFORMAT) + dt.timedelta(days=k)

def to_log(x):
    return np.log(x + 1)

def to_exp(x):
    return np.exp(x) - 1
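to_log and to_exp form an exact inverse pair (log1p/expm1 style), so counts survive the round trip through log space; assuming the helpers above, a quick check:

import numpy as np

x = np.array([0.0, 9.0, 99.0])
print(to_exp(to_log(x)))  # [ 0.  9. 99.] -- zero cases map to zero, not -inf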
COVID19 Global Forecasting (Week 3)
8,824,227
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4]))
test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9]))
test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14]))
test_df<merge>
train[train.geo_region.isna() ].Country_Region.unique() train = train.fillna(' test = test.fillna(' train[train.duplicated(['Date', 'Location'])] train.count()
COVID19 Global Forecasting (Week 3)
8,824,227
test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Seed': 'Seed1'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Seed': 'Seed2'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Score': 'ScoreT1'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Score': 'ScoreT2'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df<feature_engineering>
train = train.sort_values(by='Date')
countries_latest_state = train[train['Date'] == TRAIN_END].groupby(['Country_Region', 'continent', 'geo_region', 'country_iso_code_3']).sum()[['ConfirmedCases', 'Fatalities']].reset_index()
countries_latest_state['Log10Confirmed'] = np.log10(countries_latest_state.ConfirmedCases + 1)
countries_latest_state['Log10Fatalities'] = np.log10(countries_latest_state.Fatalities + 1)
countries_latest_state = countries_latest_state.sort_values(by='Fatalities', ascending=False)
countries_latest_state.to_csv('countries_latest_state.csv', index=False)
countries_latest_state.shape
countries_latest_state.head()
COVID19 Global Forecasting (Week 3)
8,824,227
test_df['Seed1'] = test_df['Seed1'].map(lambda x: get_seed(x))
test_df['Seed2'] = test_df['Seed2'].map(lambda x: get_seed(x))
test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2']
test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2']
test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1)
test_df<concatenate>
latest_loc = train[train['Date'] == TRAIN_END][['Location', 'ConfirmedCases', 'Fatalities']]
max_loc = train.groupby(['Location'])[['ConfirmedCases', 'Fatalities']].max().reset_index()
check = pd.merge(latest_loc, max_loc, on='Location')
np.mean(check.ConfirmedCases_x == check.ConfirmedCases_y)
np.mean(check.Fatalities_x == check.Fatalities_y)
check[check.Fatalities_x != check.Fatalities_y]
check[check.ConfirmedCases_x != check.ConfirmedCases_y]
COVID19 Global Forecasting (Week 3)
8,824,227
tourney_win_result['result'] = 1
tourney_lose_result['result'] = 0
tourney_result = pd.concat((tourney_win_result, tourney_lose_result)).reset_index(drop=True)
tourney_result<prepare_x_and_y>
regional_progress = train_clean.groupby(['DateTime', 'continent'] ).sum() [['ConfirmedCases', 'Fatalities']].reset_index() regional_progress['Log10Confirmed'] = np.log10(regional_progress.ConfirmedCases + 1) regional_progress['Log10Fatalities'] = np.log10(regional_progress.Fatalities + 1) regional_progress = regional_progress[regional_progress.continent != '
COVID19 Global Forecasting (Week 3)
8,824,227
X_train = tourney_result.drop('result', axis=1)
y_train = tourney_result.result<import_modules>
countries_0301 = country_progress[country_progress.Date == '2020-03-01'][['Country_Region', 'ConfirmedCases', 'Fatalities']]
countries_0331 = country_progress[country_progress.Date == '2020-03-31'][['Country_Region', 'ConfirmedCases', 'Fatalities']]
countries_in_march = pd.merge(countries_0301, countries_0331, on='Country_Region', suffixes=['_0301', '_0331'])
countries_in_march['IncreaseInMarch'] = countries_in_march.ConfirmedCases_0331 / (countries_in_march.ConfirmedCases_0301 + 1)
countries_in_march = countries_in_march[countries_in_march.ConfirmedCases_0331 > 200].sort_values(by='IncreaseInMarch', ascending=False)
countries_in_march.tail(15)
COVID19 Global Forecasting (Week 3)
8,824,227
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
import lightgbm as lgb
import xgboost as xgb
from xgboost import XGBClassifier
import gc<init_hyperparams>
train_clean['Geo latest = train_clean[train_clean.Date == TRAIN_END][[ 'Geo daily_confirmed_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot( 'Geo daily_confirmed_deltas = latest.merge(daily_confirmed_deltas, on='Geo daily_confirmed_deltas.shape daily_confirmed_deltas.head() daily_confirmed_deltas.to_csv('daily_confirmed_deltas.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,824,227
params_lgb = {'num_leaves': 400, 'min_child_weight': 0.034, 'feature_fraction': 0.379,
              'bagging_fraction': 0.418, 'min_data_in_leaf': 106, 'objective': 'binary',
              'max_depth': 50, 'learning_rate': 0.0068, 'boosting_type': 'gbdt',
              'bagging_seed': 11, 'metric': 'logloss', 'verbosity': -1,
              'reg_alpha': 0.3899, 'reg_lambda': 0.648, 'random_state': 47}

params_xgb = {'colsample_bytree': 0.8, 'learning_rate': 0.0004, 'max_depth': 31,
              'subsample': 1, 'objective': 'binary:logistic', 'eval_metric': 'logloss',
              'min_child_weight': 3, 'gamma': 0.25, 'n_estimators': 5000}<split>
deltas = train_clean[np.logical_and(
    train_clean.LogConfirmed > 2,
    ~train_clean.Location.str.startswith('China')
)].dropna().sort_values(by='LogConfirmedDelta', ascending=False)
deltas['start'] = deltas['LogConfirmed'].round(0)
confirmed_deltas = pd.concat([
    deltas.groupby('start')[['LogConfirmedDelta']].mean(),
    deltas.groupby('start')[['LogConfirmedDelta']].std(),
    deltas.groupby('start')[['LogConfirmedDelta']].count()
], axis=1)
deltas.mean()
confirmed_deltas.columns = ['avg', 'std', 'cnt']
confirmed_deltas
confirmed_deltas.to_csv('confirmed_deltas.csv')
COVID19 Global Forecasting (Week 3)
8,824,227
NFOLDS = 200
folds = KFold(n_splits=NFOLDS)
columns = X_train.columns
splits = folds.split(X_train, y_train)<define_variables>
DECAY = 0.95 DECAY ** 7, DECAY ** 14, DECAY ** 21, DECAY ** 28 confirmed_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[[ 'Id']].count().reset_index() GLOBAL_DELTA = 0.07 confirmed_deltas['DELTA'] = GLOBAL_DELTA confirmed_deltas.loc[confirmed_deltas.continent=='Africa', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.continent=='Oceania', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.continent==' confirmed_deltas.loc[confirmed_deltas.Country_Region=='Korea South', 'DELTA'] = 0.01 confirmed_deltas.loc[confirmed_deltas.Country_Region=='US', 'DELTA'] = 0.09 confirmed_deltas.loc[confirmed_deltas.Country_Region=='China', 'DELTA'] = 0.008 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Japan', 'DELTA'] = 0.07 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Singapore', 'DELTA'] = 0.07 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Taiwan*', 'DELTA'] = 0.02 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Switzerland', 'DELTA'] = 0.03 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Czechia', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Serbia', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Romania', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Hungary', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Poland', 'DELTA'] = 0.09 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Norway', 'DELTA'] = 0.03 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Iceland', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Luxembourg', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Austria', 'DELTA'] = 0.03 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Italy', 'DELTA'] = 0.03 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Spain', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Portugal', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Israel', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Iran', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Germany', 'DELTA'] = 0.045 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Malaysia', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Thailand', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Russia', 'DELTA'] = 0.15 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Belarus', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='South Africa', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Egypt', 'DELTA'] = 0.1 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Ukraine', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Moldova', 'DELTA'] = 0.11 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Brazil', 'DELTA'] = 0.11 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Mexico', 'DELTA'] = 0.11 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Peru', 'DELTA'] = 0.11 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Turkey', 'DELTA'] = 0.13 confirmed_deltas.loc[confirmed_deltas.Country_Region=='India', 'DELTA'] = 0.15 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Pakistan', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Philippines', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='United Arab Emirates', 'DELTA'] = 0.13 
confirmed_deltas.loc[confirmed_deltas.Location=='France-', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Location=='United Kingdom-', 'DELTA'] = 0.09 confirmed_deltas.loc[confirmed_deltas.Location=='Diamond Princess-', 'DELTA'] = 0.00 confirmed_deltas.loc[confirmed_deltas.Location=='China-Hong Kong', 'DELTA'] = 0.03 confirmed_deltas.loc[confirmed_deltas.Location=='San Marino-', 'DELTA'] = 0.03 confirmed_deltas.loc[confirmed_deltas.Location=='US-New York', 'DELTA'] = 0.07 confirmed_deltas.loc[confirmed_deltas.Location=='US-Washington', 'DELTA'] = 0.07 confirmed_deltas.loc[confirmed_deltas.Location=='Netherlands-', 'DELTA'] = 0.06 confirmed_deltas.shape, confirmed_deltas.DELTA.mean() confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].shape, confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].DELTA.mean() confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA] confirmed_deltas.describe()
COVID19 Global Forecasting (Week 3)
8,824,227
y_preds_lgb = np.zeros(test_df.shape[0])
y_oof_lgb = np.zeros(X_train.shape[0])<train_model>
daily_log_confirmed = train_clean.pivot('Location', 'Date', 'LogConfirmed').reset_index()
daily_log_confirmed = daily_log_confirmed.sort_values(TRAIN_END, ascending=False)
daily_log_confirmed.to_csv('daily_log_confirmed.csv', index=False)
for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))):
    new_day = str(d).split(' ')[0]
    last_day = dt.datetime.strptime(new_day, DATEFORMAT) - dt.timedelta(days=1)
    last_day = last_day.strftime(DATEFORMAT)
    for loc in confirmed_deltas.Location.values:
        confirmed_delta = confirmed_deltas.loc[confirmed_deltas.Location == loc, 'DELTA'].values[0]
        daily_log_confirmed.loc[daily_log_confirmed.Location == loc, new_day] = \
            daily_log_confirmed.loc[daily_log_confirmed.Location == loc, last_day] + \
            confirmed_delta * DECAY ** i
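Each projected day i adds DELTA * DECAY ** i to the log-count, so over n days the total log-increase is the geometric sum DELTA * (1 - DECAY**n) / (1 - DECAY). A small check with this notebook's defaults (values illustrative of a DELTA = 0.07 location):

import numpy as np

DECAY, delta, n = 0.95, 0.07, 28
per_day = [delta * DECAY ** i for i in range(n)]
print(sum(per_day))                            # ~1.07 added in log space over 4 weeks
print(delta * (1 - DECAY ** n) / (1 - DECAY))  # same value via the closed form
print(np.exp(sum(per_day)))                    # -> cases multiply by roughly 2.9x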
COVID19 Global Forecasting (Week 3)
8,824,227
for fold_n, (train_index, valid_index) in enumerate(splits):
    print('Fold:', fold_n + 1)
    X_train1, X_valid1 = X_train[columns].iloc[train_index], X_train[columns].iloc[valid_index]
    y_train1, y_valid1 = y_train.iloc[train_index], y_train.iloc[valid_index]
    dtrain = lgb.Dataset(X_train1, label=y_train1)
    dvalid = lgb.Dataset(X_valid1, label=y_valid1)
    clf = lgb.train(params_lgb, dtrain, 10000, valid_sets=[dtrain, dvalid], verbose_eval=200)
    y_pred_valid = clf.predict(X_valid1)
    y_oof_lgb[valid_index] = y_pred_valid
    y_preds_lgb += clf.predict(test_df) / NFOLDS
    del X_train1, X_valid1, y_train1, y_valid1
    gc.collect()<prepare_x_and_y>
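With KFold every training row lands in exactly one validation fold, so y_oof_lgb collects one out-of-fold prediction per row while the test prediction accumulates the mean over the NFOLDS fold models. A compact check of that bookkeeping on toy data:

import numpy as np
from sklearn.model_selection import KFold

X = np.arange(10)
oof_hits = np.zeros(10, dtype=int)
for tr_idx, va_idx in KFold(n_splits=5).split(X):
    oof_hits[va_idx] += 1
print(oof_hits)  # all ones: each row is scored exactly once out-of-fold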
train_clean['Geo latest = train_clean[train_clean.Date == TRAIN_END][[ 'Geo daily_death_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot( 'Geo daily_death_deltas = latest.merge(daily_death_deltas, on='Geo daily_death_deltas.shape daily_death_deltas.head() daily_death_deltas.to_csv('daily_death_deltas.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,824,227
del X_train2, X_valid2, y_train2, y_valid2
gc.collect()<load_from_csv>
death_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[[ 'Id']].count().reset_index() GLOBAL_DELTA = 0.07 death_deltas['DELTA'] = GLOBAL_DELTA death_deltas.loc[death_deltas.continent=='Oceania', 'DELTA'] = 0.06 death_deltas.loc[death_deltas.continent=='Africa', 'DELTA'] = 0.06 death_deltas.loc[death_deltas.Country_Region=='China', 'DELTA'] = 0.005 death_deltas.loc[death_deltas.Country_Region=='Korea South', 'DELTA'] = 0.025 death_deltas.loc[death_deltas.Country_Region=='Japan', 'DELTA'] = 0.05 death_deltas.loc[death_deltas.Country_Region=='Singapore', 'DELTA'] = 0.05 death_deltas.loc[death_deltas.Country_Region=='Taiwan*', 'DELTA'] = 0.03 death_deltas.loc[death_deltas.Country_Region=='US', 'DELTA'] = 0.12 death_deltas.loc[death_deltas.Country_Region=='Switzerland', 'DELTA'] = 0.07 death_deltas.loc[death_deltas.Country_Region=='Norway', 'DELTA'] = 0.10 death_deltas.loc[death_deltas.Country_Region=='Sweden', 'DELTA'] = 0.15 death_deltas.loc[death_deltas.Country_Region=='Belgium', 'DELTA'] = 0.12 death_deltas.loc[death_deltas.Country_Region=='Netherlands', 'DELTA'] = 0.10 death_deltas.loc[death_deltas.Country_Region=='Poland', 'DELTA'] = 0.13 death_deltas.loc[death_deltas.Country_Region=='Hungary', 'DELTA'] = 0.13 death_deltas.loc[death_deltas.Country_Region=='Romania', 'DELTA'] = 0.11 death_deltas.loc[death_deltas.Country_Region=='Iceland', 'DELTA'] = 0.02 death_deltas.loc[death_deltas.Country_Region=='Austria', 'DELTA'] = 0.08 death_deltas.loc[death_deltas.Country_Region=='Italy', 'DELTA'] = 0.045 death_deltas.loc[death_deltas.Country_Region=='Spain', 'DELTA'] = 0.045 death_deltas.loc[death_deltas.Country_Region=='Portugal', 'DELTA'] = 0.09 death_deltas.loc[death_deltas.Country_Region=='Ireland', 'DELTA'] = 0.1 death_deltas.loc[death_deltas.Country_Region=='Israel', 'DELTA'] = 0.1 death_deltas.loc[death_deltas.Country_Region=='Iran', 'DELTA'] = 0.05 death_deltas.loc[death_deltas.Country_Region=='Germany', 'DELTA'] = 0.10 death_deltas.loc[death_deltas.Country_Region=='Malaysia', 'DELTA'] = 0.05 death_deltas.loc[death_deltas.Country_Region=='Russia', 'DELTA'] = 0.13 death_deltas.loc[death_deltas.Country_Region=='Ukraine', 'DELTA'] = 0.13 death_deltas.loc[death_deltas.Country_Region=='Brazil', 'DELTA'] = 0.15 death_deltas.loc[death_deltas.Country_Region=='Turkey', 'DELTA'] = 0.16 death_deltas.loc[death_deltas.Country_Region=='Philippines', 'DELTA'] = 0.08 death_deltas.loc[death_deltas.Country_Region=='Saudi Arabia', 'DELTA'] = 0.1 death_deltas.loc[death_deltas.Location=='France-', 'DELTA'] = 0.08 death_deltas.loc[death_deltas.Location=='United Kingdom-', 'DELTA'] = 0.12 death_deltas.loc[death_deltas.Location=='Diamond Princess-', 'DELTA'] = 0.00 death_deltas.loc[death_deltas.Location=='China-Hong Kong', 'DELTA'] = 0.01 death_deltas.loc[death_deltas.Location=='San Marino-', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Location=='Netherlands-', 'DELTA'] = 0.09 death_deltas.loc[death_deltas.Country_Region=='Mexico', 'DELTA'] = 0.18 death_deltas.loc[death_deltas.Country_Region=='Peru', 'DELTA'] = 0.18 death_deltas.loc[death_deltas.Country_Region=='India', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Pakistan', 'DELTA'] = 0.16 death_deltas.shape death_deltas.DELTA.mean() death_deltas[death_deltas.DELTA != GLOBAL_DELTA].shape death_deltas[death_deltas.DELTA != GLOBAL_DELTA].DELTA.mean() death_deltas[death_deltas.DELTA != GLOBAL_DELTA] death_deltas.describe()
COVID19 Global Forecasting (Week 3)
8,824,227
submission_df = pd.read_csv(path + 'WSampleSubmissionStage1_2020.csv')
submission_df['Pred'] = y_preds_lgb
submission_df<save_to_csv>
daily_log_deaths = train_clean.pivot('Location', 'Date', 'LogFatalities').reset_index()
daily_log_deaths = daily_log_deaths.sort_values(TRAIN_END, ascending=False)
daily_log_deaths.to_csv('daily_log_deaths.csv', index=False)
for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))):
    new_day = str(d).split(' ')[0]
    last_day = dt.datetime.strptime(new_day, DATEFORMAT) - dt.timedelta(days=1)
    last_day = last_day.strftime(DATEFORMAT)
    for loc in death_deltas.Location:
        death_delta = death_deltas.loc[death_deltas.Location == loc, 'DELTA'].values[0]
        daily_log_deaths.loc[daily_log_deaths.Location == loc, new_day] = \
            daily_log_deaths.loc[daily_log_deaths.Location == loc, last_day] + \
            death_delta * DECAY ** i
COVID19 Global Forecasting (Week 3)
8,824,227
submission_df.to_csv('submission.csv', index=False)<set_options>
confirmed = []
fatalities = []
for id, d, loc in tqdm(test[['ForecastId', 'Date', 'Location']].values):
    c = to_exp(daily_log_confirmed.loc[daily_log_confirmed.Location == loc, d].values[0])
    f = to_exp(daily_log_deaths.loc[daily_log_deaths.Location == loc, d].values[0])
    confirmed.append(c)
    fatalities.append(f)
COVID19 Global Forecasting (Week 3)
8,824,227
warnings.filterwarnings('ignore')<load_from_csv>
my_submission = test.copy()
my_submission['ConfirmedCases'] = confirmed
my_submission['Fatalities'] = fatalities
my_submission.shape
my_submission.head()
COVID19 Global Forecasting (Week 3)
8,824,227
df_features = pd.read_csv('../input/walmart-recruiting-store-sales-forecasting/features.csv.zip', sep=',')
df_stores = pd.read_csv('../input/walmart-recruiting-store-sales-forecasting/stores.csv', sep=',')
df_features_stores = df_features.merge(df_stores, how='inner', on='Store')
df_features_stores.head()<load_from_csv>
my_submission[['ForecastId', 'ConfirmedCases', 'Fatalities']].to_csv('submission.csv', index=False)
print(DECAY)
my_submission.head()
my_submission.tail()
my_submission.shape
COVID19 Global Forecasting (Week 3)
8,824,227
df_train = pd.read_csv('../input/walmart-recruiting-store-sales-forecasting/train.csv.zip', sep=',')
train = df_train.merge(df_features_stores, how='inner', on=['Store', 'Date', 'IsHoliday'])
train.head()<load_from_csv>
end = dt.datetime.now()
print('Finished', end, (end - start).seconds, 's')
COVID19 Global Forecasting (Week 3)