kernel_id: int64 (values 24.2k–23.3M)
prompt: string (lengths 8–1.85M)
completetion: string (lengths 1–182k)
comp_name: string (lengths 5–57)
8,791,119
y_logreg_train = logreg.predict(train_log) y_logreg_pred = logreg.predict_proba(test_log)<train_model>
xout = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []}) for country in countries: states = X_xTrain.loc[X_xTrain.Country == country, :].State.unique() for state in states: X_xTrain_CS = X_xTrain.loc[(X_xTrain.Country == country)&(X_xTrain.State == state), ['State', 'Country', 'Date', 'ConfirmedCases', 'Fatalities']] y1_xTrain_CS = X_xTrain_CS.loc[:, 'ConfirmedCases'] y2_xTrain_CS = X_xTrain_CS.loc[:, 'Fatalities'] X_xTrain_CS = X_xTrain_CS.loc[:, ['State', 'Country', 'Date']] X_xTrain_CS.Country = le.fit_transform(X_xTrain_CS.Country) X_xTrain_CS['State'] = le.fit_transform(X_xTrain_CS['State']) X_xTest_CS = X_xTest.loc[(X_xTest.Country == country)&(X_xTest.State == state), ['State', 'Country', 'Date', 'ForecastId']] X_xTest_CS_Id = X_xTest_CS.loc[:, 'ForecastId'] X_xTest_CS = X_xTest_CS.loc[:, ['State', 'Country', 'Date']] X_xTest_CS.Country = le.fit_transform(X_xTest_CS.Country) X_xTest_CS['State'] = le.fit_transform(X_xTest_CS['State']) xmodel1 = XGBRegressor(n_estimators=1000) xmodel1.fit(X_xTrain_CS, y1_xTrain_CS) y1_xpred = xmodel1.predict(X_xTest_CS) xmodel2 = XGBRegressor(n_estimators=1000) xmodel2.fit(X_xTrain_CS, y2_xTrain_CS) y2_xpred = xmodel2.predict(X_xTest_CS) xdata = pd.DataFrame({'ForecastId': X_xTest_CS_Id, 'ConfirmedCases': y1_xpred, 'Fatalities': y2_xpred}) xout = pd.concat([xout, xdata], axis=0)
COVID19 Global Forecasting (Week 3)
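A note on the completion above: `le.fit_transform` is called separately on the train and test slices of each location, so the integer codes only line up when both slices contain the same category set. A safer pattern fits one encoder on the union of values and reuses it; a minimal sketch with toy data (frame names are illustrative, not from the kernel):

import pandas as pd
from sklearn.preprocessing import LabelEncoder

train_part = pd.DataFrame({'Country': ['Albania', 'Albania']})
test_part = pd.DataFrame({'Country': ['Albania']})

le = LabelEncoder()
# Fit once on the union of values so train and test share one code mapping.
le.fit(pd.concat([train_part['Country'], test_part['Country']]))
train_part['Country'] = le.transform(train_part['Country'])
test_part['Country'] = le.transform(test_part['Country'])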
8,791,119
clf = RandomForestClassifier(n_estimators=200, max_depth=90, min_samples_leaf=300, min_samples_split=200, max_features=5) clf.fit(train_log, y) clf_probs = clf.predict_proba(test_log)<prepare_output>
xout.ForecastId = xout.ForecastId.astype('int') xout.tail() xout.to_csv('submission.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,827,384
y_pred_df_random = pd.DataFrame(clf_probs) y_pred_1 = y_pred_df_random.iloc[:,[1]] y_pred_df_random<import_modules>
sub = pd.read_csv("../input/subsub/submission1.csv") sub.to_csv("submission.csv", index=False)
COVID19 Global Forecasting (Week 3)
8,823,854
from sklearn.model_selection import KFold, GridSearchCV<load_from_csv>
input1 = pd.read_csv('/kaggle/input/final1/submit.csv')
COVID19 Global Forecasting (Week 3)
8,823,854
<save_to_csv><EOS>
input1.to_csv('submission.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,820,034
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<load_from_csv>
InteractiveShell.ast_node_interactivity = "all" pd.set_option('display.max_columns', 99) pd.set_option('display.max_rows', 99) sns.set_palette(sns.color_palette('tab20', 20))
COVID19 Global Forecasting (Week 3)
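The `<SOS>` line above names the competition metric, MCRMSLE: the column-wise mean of the root mean squared logarithmic error over the two targets. A minimal sketch of that definition (an illustration, not the official scorer):

import numpy as np

def rmsle(pred, actual):
    # Root mean squared logarithmic error for one column.
    return np.sqrt(np.mean((np.log1p(pred) - np.log1p(actual)) ** 2))

def mcrmsle(pred_df, actual_df, cols=('ConfirmedCases', 'Fatalities')):
    # Column-wise mean of RMSLE over the two targets.
    return np.mean([rmsle(pred_df[c].values, actual_df[c].values) for c in cols])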
8,820,034
tourney_result = pd.read_csv('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyDetailedResults.csv') tourney_seed = pd.read_csv('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv') season_result = pd.read_csv('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonDetailedResults.csv') test_df = pd.read_csv('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv')<drop_column>
COMP = '../input/covid19-global-forecasting-week-3' DATEFORMAT = '%Y-%m-%d' def get_comp_data(COMP): train = pd.read_csv(f'{COMP}/train.csv') test = pd.read_csv(f'{COMP}/test.csv') submission = pd.read_csv(f'{COMP}/submission.csv') print(train.shape, test.shape, submission.shape) train['Country_Region'] = train['Country_Region'].str.replace(',', '') test['Country_Region'] = test['Country_Region'].str.replace(',', '') train['Location'] = train['Country_Region'] + '-' + train['Province_State'].fillna('') test['Location'] = test['Country_Region'] + '-' + test['Province_State'].fillna('') train['LogConfirmed'] = to_log(train.ConfirmedCases) train['LogFatalities'] = to_log(train.Fatalities) train = train.drop(columns=['Province_State']) test = test.drop(columns=['Province_State']) train['DateTime'] = pd.to_datetime(train['Date']) test['DateTime'] = pd.to_datetime(test['Date']) return train, test, submission def process_each_location(df): dfs = [] for loc, df in tqdm(df.groupby('Location')): df = df.sort_values(by='Date') df['Fatalities'] = df['Fatalities'].cummax() df['ConfirmedCases'] = df['ConfirmedCases'].cummax() df['LogFatalities'] = df['LogFatalities'].cummax() df['LogConfirmed'] = df['LogConfirmed'].cummax() df['LogConfirmedNextDay'] = df['LogConfirmed'].shift(-1) df['ConfirmedNextDay'] = df['ConfirmedCases'].shift(-1) df['DateNextDay'] = df['Date'].shift(-1) df['LogFatalitiesNextDay'] = df['LogFatalities'].shift(-1) df['FatalitiesNextDay'] = df['Fatalities'].shift(-1) df['LogConfirmedDelta'] = df['LogConfirmedNextDay'] - df['LogConfirmed'] df['ConfirmedDelta'] = df['ConfirmedNextDay'] - df['ConfirmedCases'] df['LogFatalitiesDelta'] = df['LogFatalitiesNextDay'] - df['LogFatalities'] df['FatalitiesDelta'] = df['FatalitiesNextDay'] - df['Fatalities'] dfs.append(df) return pd.concat(dfs) def add_days(d, k): return dt.datetime.strptime(d, DATEFORMAT) + dt.timedelta(days=k) def to_log(x): return np.log(x + 1) def to_exp(x): return np.exp(x) - 1
COVID19 Global Forecasting (Week 3)
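`to_log` and `to_exp` in the completion above are mutual inverses (log1p/expm1 in disguise), which is what lets the kernel model additive deltas in log space and map predictions back to counts; a quick round-trip check:

import numpy as np

def to_log(x):
    return np.log(x + 1)   # same as np.log1p(x)

def to_exp(x):
    return np.exp(x) - 1   # same as np.expm1(x)

cases = np.array([0.0, 10.0, 1000.0])
assert np.allclose(to_exp(to_log(cases)), cases)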
8,820,034
season_win_result = season_result[['Season', 'WTeamID', 'WScore', 'WFGM', 'WFGA', 'WFGM3', 'WFGA3', 'WFTM', 'WFTA', 'WOR', 'WDR', 'WAst', 'WTO', 'WStl', 'WBlk', 'WPF']] season_lose_result = season_result[['Season', 'LTeamID', 'LScore', 'LFGM', 'LFGA', 'LFGM3', 'LFGA3', 'LFTM', 'LFTA', 'LOR', 'LDR', 'LAst', 'LTO', 'LStl', 'LBlk', 'LPF']] season_win_result.rename(columns={'WTeamID':'TeamID', 'WScore':'Score', 'WFGM':'FGM', 'WFGA':'FGA', 'WFGM3':'FGM3', 'WFGA3':'FGA3', 'WFTM':'FTM', 'WFTA':'FTA', 'WOR':'OR', 'WDR':'DR', 'WAst':'Ast', 'WTO':'TO', 'WStl':'Stl', 'WBlk':'Blk', 'WPF':'PF'}, inplace=True) season_lose_result.rename(columns={'LTeamID':'TeamID', 'LScore':'Score', 'LFGM':'FGM', 'LFGA':'FGA', 'LFGM3':'FGM3', 'LFGA3':'FGA3', 'LFTM':'FTM', 'LFTA':'FTA', 'LOR':'OR', 'LDR':'DR', 'LAst':'Ast', 'LTO':'TO', 'LStl':'Stl', 'LBlk':'Blk', 'LPF':'PF'}, inplace=True) season_result = pd.concat((season_win_result, season_lose_result)).reset_index(drop=True)<merge>
train = train.sort_values(by='Date') countries_latest_state = train[train['Date'] == TRAIN_END].groupby(['Country_Region']).sum()[['ConfirmedCases', 'Fatalities']].reset_index() countries_latest_state['Log10Confirmed'] = np.log10(countries_latest_state.ConfirmedCases + 1) countries_latest_state['Log10Fatalities'] = np.log10(countries_latest_state.Fatalities + 1) countries_latest_state = countries_latest_state.sort_values(by='Fatalities', ascending=False) countries_latest_state['DeathConfirmedRatio'] = (countries_latest_state.Fatalities + 1) / (countries_latest_state.ConfirmedCases + 1) countries_latest_state['DeathConfirmedRatio'] = countries_latest_state['DeathConfirmedRatio'].clip(0, 0.1) countries_latest_state.shape countries_latest_state.head()
COVID19 Global Forecasting (Week 3)
8,820,034
tourney_result['Score_difference'] = tourney_result['WScore'] - tourney_result['LScore'] tourney_result = tourney_result[['Season', 'WTeamID', 'LTeamID', 'Score_difference']] tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Seed':'WSeed'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Seed':'LSeed'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result['WSeed'] = tourney_result['WSeed'].apply(lambda x: int(x[1:3])) tourney_result['LSeed'] = tourney_result['LSeed'].apply(lambda x: int(x[1:3])) print(tourney_result.info(null_counts=True))<merge>
latest_loc = train[train['Date'] == TRAIN_END][['Location', 'ConfirmedCases', 'Fatalities']] max_loc = train.groupby(['Location'])[['ConfirmedCases', 'Fatalities']].max().reset_index() check = pd.merge(latest_loc, max_loc, on='Location') np.mean(check.ConfirmedCases_x == check.ConfirmedCases_y) np.mean(check.Fatalities_x == check.Fatalities_y) check[check.Fatalities_x != check.Fatalities_y] check[check.ConfirmedCases_x != check.ConfirmedCases_y]
COVID19 Global Forecasting (Week 3)
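The `x[1:3]` slice in the prompt above works because tournament seeds are strings like 'W01' or 'X16a' (region letter, two-digit seed, optional play-in suffix), so characters 1..2 are always the numeric seed:

for seed in ['W01', 'Y09', 'X16a']:
    print(seed, '->', int(seed[1:3]))   # 1, 9, 16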
8,820,034
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4])) test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9])) test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14])) test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1)<merge>
train_clean['Location'].nunique()
COVID19 Global Forecasting (Week 3)
8,820,034
for col in season_result.columns[2:]: season_result_map_mean = season_result.groupby(['Season', 'TeamID'])[col].mean().reset_index() tourney_result = pd.merge(tourney_result, season_result_map_mean, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={f'{col}':f'W{col}MeanT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, season_result_map_mean, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={f'{col}':f'L{col}MeanT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_result_map_mean, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={f'{col}':f'W{col}MeanT'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_result_map_mean, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={f'{col}':f'L{col}MeanT'}, inplace=True) test_df = test_df.drop('TeamID', axis=1)<drop_column>
country_progress = train_clean.groupby(['Date', 'DateTime', 'Country_Region']).sum()[['ConfirmedCases', 'Fatalities', 'ConfirmedDelta', 'FatalitiesDelta']].reset_index()
COVID19 Global Forecasting (Week 3)
8,820,034
tourney_win_result = tourney_result.drop(['WTeamID', 'LTeamID'], axis=1) for col in tourney_win_result.columns[2:]: if col[0] == 'W': tourney_win_result.rename(columns={f'{col}':f'{col[1:]+"1"}'}, inplace=True) elif col[0] == 'L': tourney_win_result.rename(columns={f'{col}':f'{col[1:]+"2"}'}, inplace=True) tourney_lose_result = tourney_win_result.copy() for col in tourney_lose_result.columns: if col[-1] == '1': col2 = col[:-1] + '2' tourney_lose_result[col] = tourney_win_result[col2] tourney_lose_result[col2] = tourney_win_result[col] tourney_lose_result.columns<feature_engineering>
countries_0301 = country_progress[country_progress.Date == '2020-03-01'][['Country_Region', 'ConfirmedCases', 'Fatalities']] countries_0331 = country_progress[country_progress.Date == '2020-03-31'][['Country_Region', 'ConfirmedCases', 'Fatalities']] countries_in_march = pd.merge(countries_0301, countries_0331, on='Country_Region', suffixes=['_0301', '_0331']) countries_in_march['IncreaseInMarch'] = countries_in_march.ConfirmedCases_0331 / (countries_in_march.ConfirmedCases_0301 + 1) countries_in_march = countries_in_march[countries_in_march.ConfirmedCases_0331 > 200].sort_values(by='IncreaseInMarch', ascending=False) countries_in_march.tail(15)
COVID19 Global Forecasting (Week 3)
8,820,034
tourney_win_result['Seed_diff'] = tourney_win_result['Seed1'] - tourney_win_result['Seed2'] tourney_win_result['ScoreMeanT_diff'] = tourney_win_result['ScoreMeanT1'] - tourney_win_result['ScoreMeanT2'] tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2'] tourney_lose_result['ScoreMeanT_diff'] = tourney_lose_result['ScoreMeanT1'] - tourney_lose_result['ScoreMeanT2'] tourney_lose_result['Score_difference'] = -tourney_lose_result['Score_difference'] tourney_win_result['result'] = 1 tourney_lose_result['result'] = 0 tourney_result = pd.concat((tourney_win_result, tourney_lose_result)).reset_index(drop=True)<feature_engineering>
train_clean['Geo latest = train_clean[train_clean.Date == TRAIN_END][[ 'Geo daily_confirmed_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot( 'Geo daily_confirmed_deltas = latest.merge(daily_confirmed_deltas, on='Geo daily_confirmed_deltas.shape daily_confirmed_deltas.head()
COVID19 Global Forecasting (Week 3)
8,820,034
for col in test_df.columns[2:]: if col[0] == 'W': test_df.rename(columns={f'{col}':f'{col[1:]+"1"}'}, inplace=True) elif col[0] == 'L': test_df.rename(columns={f'{col}':f'{col[1:]+"2"}'}, inplace=True) test_df['Seed1'] = test_df['Seed1'].apply(lambda x: int(x[1:3])) test_df['Seed2'] = test_df['Seed2'].apply(lambda x: int(x[1:3])) test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2'] test_df['ScoreMeanT_diff'] = test_df['ScoreMeanT1'] - test_df['ScoreMeanT2'] test_df = test_df.drop(['ID', 'Pred', 'Season'], axis=1)<init_hyperparams>
deltas = train_clean[np.logical_and(train_clean.LogConfirmed > 2, ~train_clean.Location.str.startswith('China'))].dropna().sort_values(by='LogConfirmedDelta', ascending=False) deltas['start'] = deltas['LogConfirmed'].round(0) confirmed_deltas = pd.concat([deltas.groupby('start')[['LogConfirmedDelta']].mean(), deltas.groupby('start')[['LogConfirmedDelta']].std(), deltas.groupby('start')[['LogConfirmedDelta']].count()], axis=1) deltas.mean() confirmed_deltas.columns = ['avg', 'std', 'cnt'] confirmed_deltas confirmed_deltas.to_csv('confirmed_deltas.csv')
COVID19 Global Forecasting (Week 3)
8,820,034
features = [x for x in tourney_result.columns if x not in ['result', 'Score_difference', 'Season']] params = {'num_leaves': 400, 'min_child_weight': 0.034, 'feature_fraction': 0.379, 'bagging_fraction': 0.418, 'min_data_in_leaf': 106, 'max_depth': -1, 'learning_rate': 0.0068, "boosting_type": "gbdt", "bagging_seed": 11, 'reg_alpha': 0.3899, 'reg_lambda': 0.648, 'random_state': 47, } step_size = 20 steps = 250 boosting_rounds = [step_size*(x+1)for x in range(steps)] def run_boost_round_test(boosting_rounds, step_size): training_scores, oof_scores, holdback_scores = [], [], [] model = NCAA_model(params, tourney_result, test_df, use_holdback=[2019], regression=False, verbose=False) print(f'Training for {step_size*steps} rounds.') for rounds in range(step_size,boosting_rounds+1,step_size): print(f'{"*"*50}') print(f'Rounds: {rounds}') if model.use_holdback: tr_score, oof_score, hb_score = model.train(features, n_splits=10, n_boost_round=step_size, early_stopping_rounds=None) else: tr_score, oof_score = model.train(features, n_splits=10, n_boost_round=step_size, early_stopping_rounds=None) clips, clip_s = model.fit_clipper(verbose=True) spline, spline_s = model.fit_spline_model(verbose=True) training_scores.append([tr_score, model.postprocess_preds(clips, use_data = 'train'), model.postprocess_preds(spline, use_data = 'train', method='spline')]) oof_scores.append([oof_score, model.postprocess_preds(clips, use_data = 'oof'), model.postprocess_preds(spline, use_data = 'oof', method='spline')]) holdback_scores.append([hb_score, model.postprocess_preds(clips, use_data = 'hb'), model.postprocess_preds(spline, use_data = 'hb', method='spline')]) return training_scores, oof_scores, holdback_scores, model, clips, spline training_scores, oof_scores, holdback_scores, model, clips, spline = run_boost_round_test(boosting_rounds[-1], step_size )<save_to_csv>
DECAY = 0.93 DECAY ** 7, DECAY ** 14, DECAY ** 21, DECAY ** 28 confirmed_deltas = train.groupby(['Location', 'Country_Region'])[[ 'Id']].count().reset_index() GLOBAL_DELTA = 0.11 confirmed_deltas['DELTA'] = GLOBAL_DELTA confirmed_deltas.loc[confirmed_deltas.Country_Region=='Korea South', 'DELTA'] = 0.011 confirmed_deltas.loc[confirmed_deltas.Country_Region=='US', 'DELTA'] = 0.15 confirmed_deltas.loc[confirmed_deltas.Country_Region=='China', 'DELTA'] = 0.00 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Japan', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Singapore', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Taiwan*', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Switzerland', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Norway', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Iceland', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Austria', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Italy', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Spain', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Portugal', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Israel', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Iran', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Germany', 'DELTA'] = 0.07 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Malaysia', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Russia', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Ukraine', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Brazil', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Turkey', 'DELTA'] = 0.16 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Philippines', 'DELTA'] = 0.16 confirmed_deltas.loc[confirmed_deltas.Location=='France-', 'DELTA'] = 0.1 confirmed_deltas.loc[confirmed_deltas.Location=='United Kingdom-', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Location=='Diamond Princess-', 'DELTA'] = 0.00 confirmed_deltas.loc[confirmed_deltas.Location=='China-Hong Kong', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Location=='San Marino-', 'DELTA'] = 0.03 confirmed_deltas.shape, confirmed_deltas.DELTA.mean() confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].shape, confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].DELTA.mean() confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA] confirmed_deltas.describe()
COVID19 Global Forecasting (Week 3)
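The `DECAY ** 7, ... ** 28` line in the completion above previews how the hand-tuned per-location DELTA growth rates shrink over the forecast horizon. In this family of kernels the forward step typically compounds the log-space delta with the decay factor; a hedged sketch of that recurrence (function name and the exact update are assumptions, not shown in the cell):

DECAY = 0.93

def project_log_confirmed(last_log_value, delta, horizon_days):
    # Each day adds the location's log-space delta, shrunk by DECAY per day out.
    values = [last_log_value]
    for day in range(1, horizon_days + 1):
        values.append(values[-1] + delta * DECAY ** day)
    return values

# e.g. a location at log(1+cases)=8.0 with DELTA=0.11, projected 7 days ahead
print(project_log_confirmed(8.0, 0.11, 7))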
8,820,034
y_preds = model.postprocess_preds(spline, method='spline') submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') submission_df['Pred'] = y_preds submission_df.to_csv('submission.csv', index=False) submission_df.describe()<set_options>
confirmed_prediction = pd.melt(daily_log_confirmed[:25], id_vars='Location') confirmed_prediction['ConfirmedCases'] = to_exp(confirmed_prediction['value'])
COVID19 Global Forecasting (Week 3)
8,820,034
pd.set_option('display.max_columns', None) plt.style.use('fivethirtyeight') %matplotlib inline py.init_notebook_mode(connected=True) warnings.filterwarnings('ignore') print("Libraries imported!")<train_model>
train_clean['Geo latest = train_clean[train_clean.Date == TRAIN_END][[ 'Geo daily_death_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot( 'Geo daily_death_deltas = latest.merge(daily_death_deltas, on='Geo daily_death_deltas.shape daily_death_deltas.head()
COVID19 Global Forecasting (Week 3)
8,820,034
class BaseModel(object): def __init__(self, train_df, test_df, target, features, categoricals=[], n_splits=3, cv_method="KFold", group=None, task="regression", parameter_tuning=False, scaler=None, verbose=True): self.train_df = train_df self.test_df = test_df self.target = target self.features = features self.n_splits = n_splits self.categoricals = categoricals self.cv_method = cv_method self.group = group self.task = task self.parameter_tuning = parameter_tuning self.scaler = scaler self.cv = self.get_cv() self.verbose = verbose self.params = self.get_params() self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit() def train_model(self, train_set, val_set): raise NotImplementedError def get_params(self): raise NotImplementedError def convert_dataset(self, x_train, y_train, x_val, y_val): raise NotImplementedError def convert_x(self, x): return x def calc_metric(self, y_true, y_pred): if self.task == "classification": return log_loss(y_true, y_pred) elif self.task == "regression": return np.sqrt(mean_squared_error(y_true, y_pred)) def get_cv(self): if self.cv_method == "KFold": cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df) elif self.cv_method == "StratifiedKFold": cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target]) elif self.cv_method == "TimeSeriesSplit": cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits) return cv.split(self.train_df) elif self.cv_method == "GroupKFold": cv = GroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) elif self.cv_method == "StratifiedGroupKFold": cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) def fit(self): oof_pred = np.zeros(( self.train_df.shape[0],)) y_vals = np.zeros(( self.train_df.shape[0],)) y_pred = np.zeros(( self.test_df.shape[0],)) if self.group is not None: if self.group in self.features: self.features.remove(self.group) if self.group in self.categoricals: self.categoricals.remove(self.group) fi = np.zeros(( self.n_splits, len(self.features))) if self.scaler is not None: numerical_features = [f for f in self.features if f not in self.categoricals] self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median()) self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median()) self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0]) self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0]) if self.scaler == "MinMax": scaler = MinMaxScaler() elif self.scaler == "Standard": scaler = StandardScaler() df = pd.concat([self.train_df[numerical_features], self.test_df[numerical_features]], ignore_index=True) scaler.fit(df[numerical_features]) x_test = self.test_df.copy() x_test[numerical_features] = scaler.transform(x_test[numerical_features]) x_test = [np.absolute(x_test[i])for i in self.categoricals] + [x_test[numerical_features]] else: x_test = self.test_df[self.features] for fold,(train_idx, val_idx)in enumerate(self.cv): x_train, x_val = self.train_df.loc[train_idx, self.features], self.train_df.loc[val_idx, self.features] y_train, y_val = 
self.train_df.loc[train_idx, self.target], self.train_df.loc[val_idx, self.target] if self.scaler is not None: x_train[numerical_features] = scaler.transform(x_train[numerical_features]) x_val[numerical_features] = scaler.transform(x_val[numerical_features]) x_train = [np.absolute(x_train[i])for i in self.categoricals] + [x_train[numerical_features]] x_val = [np.absolute(x_val[i])for i in self.categoricals] + [x_val[numerical_features]] train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val) model, importance = self.train_model(train_set, val_set) fi[fold, :] = importance conv_x_val = self.convert_x(x_val) y_vals[val_idx] = y_val oof_pred[val_idx] = model.predict(conv_x_val ).reshape(oof_pred[val_idx].shape) x_test = self.convert_x(x_test) y_pred += model.predict(x_test ).reshape(y_pred.shape)/ self.n_splits print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_val, oof_pred[val_idx]))) fi_df = pd.DataFrame() for n in np.arange(self.n_splits): tmp = pd.DataFrame() tmp["features"] = self.features tmp["importance"] = fi[n, :] tmp["fold"] = n fi_df = pd.concat([fi_df, tmp], ignore_index=True) gfi = fi_df[["features", "importance"]].groupby(["features"] ).mean().reset_index() fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=('', '_mean')) loss_score = self.calc_metric(self.train_df[self.target], oof_pred) if self.verbose: print('Our oof loss score is: ', loss_score) return y_pred, loss_score, model, oof_pred, y_vals, fi_df def plot_feature_importance(self, rank_range=[1, 50]): fig, ax = plt.subplots(1, 1, figsize=(10, 20)) sorted_df = self.fi_df.sort_values(by = "importance_mean", ascending=False ).reset_index().iloc[self.n_splits *(rank_range[0]-1): self.n_splits * rank_range[1]] sns.barplot(data=sorted_df, x ="importance", y ="features", orient='h') ax.set_xlabel("feature importance") ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) return sorted_df<train_model>
death_deltas = train.groupby(['Location', 'Country_Region'])[[ 'Id']].count().reset_index() GLOBAL_DELTA = 0.11 death_deltas['DELTA'] = GLOBAL_DELTA death_deltas.loc[death_deltas.Country_Region=='China', 'DELTA'] = 0.000 death_deltas.loc[death_deltas.Country_Region=='Korea South', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Country_Region=='Japan', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Country_Region=='Singapore', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Country_Region=='Taiwan*', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Country_Region=='US', 'DELTA'] = 0.17 death_deltas.loc[death_deltas.Country_Region=='Switzerland', 'DELTA'] = 0.15 death_deltas.loc[death_deltas.Country_Region=='Norway', 'DELTA'] = 0.15 death_deltas.loc[death_deltas.Country_Region=='Iceland', 'DELTA'] = 0.01 death_deltas.loc[death_deltas.Country_Region=='Austria', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Italy', 'DELTA'] = 0.07 death_deltas.loc[death_deltas.Country_Region=='Spain', 'DELTA'] = 0.1 death_deltas.loc[death_deltas.Country_Region=='Portugal', 'DELTA'] = 0.13 death_deltas.loc[death_deltas.Country_Region=='Israel', 'DELTA'] = 0.16 death_deltas.loc[death_deltas.Country_Region=='Iran', 'DELTA'] = 0.06 death_deltas.loc[death_deltas.Country_Region=='Germany', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Malaysia', 'DELTA'] = 0.10 death_deltas.loc[death_deltas.Country_Region=='Russia', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Ukraine', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Brazil', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Turkey', 'DELTA'] = 0.22 death_deltas.loc[death_deltas.Country_Region=='Philippines', 'DELTA'] = 0.12 death_deltas.loc[death_deltas.Location=='France-', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Location=='United Kingdom-', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Location=='Diamond Princess-', 'DELTA'] = 0.00 death_deltas.loc[death_deltas.Location=='China-Hong Kong', 'DELTA'] = 0.01 death_deltas.loc[death_deltas.Location=='San Marino-', 'DELTA'] = 0.05 death_deltas.shape death_deltas.DELTA.mean() death_deltas[death_deltas.DELTA != GLOBAL_DELTA].shape death_deltas[death_deltas.DELTA != GLOBAL_DELTA].DELTA.mean() death_deltas[death_deltas.DELTA != GLOBAL_DELTA] death_deltas.describe()
COVID19 Global Forecasting (Week 3)
8,820,034
class LgbModel(BaseModel): def train_model(self, train_set, val_set): verbosity = 100 if self.verbose else 0 model = lgb.train(self.params, train_set, num_boost_round = 5000, valid_sets=[train_set, val_set], verbose_eval=verbosity) fi = model.feature_importance(importance_type="gain") return model, fi def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals) val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals) return train_set, val_set def get_params(self): params = { 'num_leaves': 127, 'min_data_in_leaf': 50, 'max_depth': -1, 'learning_rate': 0.005, "boosting_type": "gbdt", "bagging_seed": 11, "verbosity": -1, 'random_state': 42, } if self.task == "regression": params["objective"] = "regression" params["metric"] = "rmse" elif self.task == "classification": params["objective"] = "binary" params["metric"] = "binary_logloss" if self.parameter_tuning == True: def objective(trial): train_x, test_x, train_y, test_y = train_test_split(self.train_df[self.features], self.train_df[self.target], test_size=0.3, random_state=42) dtrain = lgb.Dataset(train_x, train_y, categorical_feature=self.categoricals) dtest = lgb.Dataset(test_x, test_y, categorical_feature=self.categoricals) hyperparams = {'num_leaves': trial.suggest_int('num_leaves', 24, 1024), 'boosting_type': 'gbdt', 'objective': params["objective"], 'metric': params["metric"], 'max_depth': trial.suggest_int('max_depth', 4, 16), 'min_child_weight': trial.suggest_int('min_child_weight', 1, 20), 'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 100), 'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0), 'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0), 'early_stopping_rounds': 100 } model = lgb.train(hyperparams, dtrain, valid_sets=dtest, verbose_eval=500) pred = model.predict(test_x) if self.task == "classification": return log_loss(test_y, pred) elif self.task == "regression": return np.sqrt(mean_squared_error(test_y, pred)) study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=50) print('Number of finished trials: {}'.format(len(study.trials))) print('Best trial:') trial = study.best_trial print(' Value: {}'.format(trial.value)) print(' Params: ') for key, value in trial.params.items() : print(' {}: {}'.format(key, value)) params = trial.params params["learning_rate"] = 0.001 plot_optimization_history(study) return params<train_model>
confirmed_prediction = pd.melt(daily_log_deaths[:25], id_vars='Location') confirmed_prediction['Fatalities'] = to_exp(confirmed_prediction['value']) confirmed_prediction.shape
COVID19 Global Forecasting (Week 3)
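`pd.melt` in the completion above unpivots the wide per-date matrix (one column per date) back to long form with one row per location and date; a toy example of the same call:

import pandas as pd

wide = pd.DataFrame({'Location': ['A', 'B'],
                     '2020-04-01': [1.0, 2.0],
                     '2020-04-02': [1.5, 2.5]})
long = pd.melt(wide, id_vars='Location')  # columns: Location, variable, value
print(long)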
8,820,034
class CatbModel(BaseModel): def train_model(self, train_set, val_set): verbosity = 100 if self.verbose else 0 if self.task == "regression": model = CatBoostRegressor(**self.params) elif self.task == "classification": model = CatBoostClassifier(**self.params) model.fit(train_set['X'], train_set['y'], eval_set=(val_set['X'], val_set['y']), verbose=verbosity, cat_features=self.categoricals) return model, model.get_feature_importance() def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = {'X': x_train, 'y': y_train} val_set = {'X': x_val, 'y': y_val} return train_set, val_set def get_params(self): params = { 'task_type': "CPU", 'learning_rate': 0.01, 'iterations': 1000, 'random_seed': 42, 'use_best_model': True } if self.task == "regression": params["loss_function"] = "RMSE" elif self.task == "classification": params["loss_function"] = "Logloss" return params<normalization>
confirmed = [] fatalities = [] for _, d, loc in tqdm(test[['ForecastId', 'Date', 'Location']].values): c = to_exp(daily_log_confirmed.loc[daily_log_confirmed.Location == loc, d].values[0]) f = to_exp(daily_log_deaths.loc[daily_log_deaths.Location == loc, d].values[0]) confirmed.append(c) fatalities.append(f)
COVID19 Global Forecasting (Week 3)
8,820,034
class Mish(Layer): def __init__(self, **kwargs): super(Mish, self ).__init__(**kwargs) def build(self, input_shape): super(Mish, self ).build(input_shape) def call(self, x): return x * K.tanh(K.softplus(x)) def compute_output_shape(self, input_shape): return input_shape class LayerNormalization(keras.layers.Layer): def __init__(self, center=True, scale=True, epsilon=None, gamma_initializer='ones', beta_initializer='zeros', gamma_regularizer=None, beta_regularizer=None, gamma_constraint=None, beta_constraint=None, **kwargs): super(LayerNormalization, self ).__init__(**kwargs) self.supports_masking = True self.center = center self.scale = scale if epsilon is None: epsilon = K.epsilon() * K.epsilon() self.epsilon = epsilon self.gamma_initializer = keras.initializers.get(gamma_initializer) self.beta_initializer = keras.initializers.get(beta_initializer) self.gamma_regularizer = keras.regularizers.get(gamma_regularizer) self.beta_regularizer = keras.regularizers.get(beta_regularizer) self.gamma_constraint = keras.constraints.get(gamma_constraint) self.beta_constraint = keras.constraints.get(beta_constraint) self.gamma, self.beta = None, None def get_config(self): config = { 'center': self.center, 'scale': self.scale, 'epsilon': self.epsilon, 'gamma_initializer': keras.initializers.serialize(self.gamma_initializer), 'beta_initializer': keras.initializers.serialize(self.beta_initializer), 'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer), 'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer), 'gamma_constraint': keras.constraints.serialize(self.gamma_constraint), 'beta_constraint': keras.constraints.serialize(self.beta_constraint), } base_config = super(LayerNormalization, self ).get_config() return dict(list(base_config.items())+ list(config.items())) def compute_output_shape(self, input_shape): return input_shape def compute_mask(self, inputs, input_mask=None): return input_mask def build(self, input_shape): shape = input_shape[-1:] if self.scale: self.gamma = self.add_weight( shape=shape, initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, name='gamma', ) if self.center: self.beta = self.add_weight( shape=shape, initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, name='beta', ) super(LayerNormalization, self ).build(input_shape) def call(self, inputs, training=None): mean = K.mean(inputs, axis=-1, keepdims=True) variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True) std = K.sqrt(variance + self.epsilon) outputs =(inputs - mean)/ std if self.scale: outputs *= self.gamma if self.center: outputs += self.beta return outputs<train_model>
my_submission = test.copy() my_submission['ConfirmedCases'] = confirmed my_submission['Fatalities'] = fatalities my_submission.shape my_submission.head()
COVID19 Global Forecasting (Week 3)
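`Mish.call` in the prompt above implements mish(x) = x · tanh(softplus(x)); a quick NumPy check of the same formula, independent of the Keras layer:

import numpy as np

def mish(x):
    # softplus(x) = log(1 + exp(x)); mish(x) = x * tanh(softplus(x))
    return x * np.tanh(np.log1p(np.exp(x)))

print(mish(np.array([-2.0, 0.0, 2.0])))  # ~[-0.2525, 0.0, 1.9440]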
8,820,034
class NeuralNetworkModel(BaseModel): def train_model(self, train_set, val_set): inputs = [] embeddings = [] embedding_out_dim = self.params['embedding_out_dim'] n_neuron = self.params['hidden_units'] for i in self.categoricals: input_ = Input(shape=(1,)) embedding = Embedding(int(np.absolute(self.train_df[i] ).max() + 1), embedding_out_dim, input_length=1 )(input_) embedding = Reshape(target_shape=(embedding_out_dim,))(embedding) inputs.append(input_) embeddings.append(embedding) input_numeric = Input(shape=(len(self.features)- len(self.categoricals),)) embedding_numeric = Dense(n_neuron )(input_numeric) embedding_numeric = Mish()(embedding_numeric) inputs.append(input_numeric) embeddings.append(embedding_numeric) x = Concatenate()(embeddings) for i in np.arange(self.params['hidden_layers'] - 1): x = Dense(n_neuron //(2 *(i+1)) )(x) x = Mish()(x) x = Dropout(self.params['hidden_dropout'] )(x) x = LayerNormalization()(x) if self.task == "regression": out = Dense(1, activation="linear", name = "out" )(x) loss = "mse" elif self.task == "classification": out = Dense(1, activation='sigmoid', name = 'out' )(x) loss = "binary_crossentropy" model = Model(inputs=inputs, outputs=out) model.compile(loss=loss, optimizer=Adam(lr=1e-04, beta_1=0.9, beta_2=0.999, decay=1e-04)) er = EarlyStopping(patience=10, min_delta=1e-4, restore_best_weights=True, monitor='val_loss') ReduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min') model.fit(train_set['X'], train_set['y'], callbacks=[er, ReduceLR], epochs=self.params['epochs'], batch_size=self.params['batch_size'], validation_data=[val_set['X'], val_set['y']]) fi = np.zeros(len(self.features)) return model, fi def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = {'X': x_train, 'y': y_train} val_set = {'X': x_val, 'y': y_val} return train_set, val_set def get_params(self): params = { 'input_dropout': 0.0, 'hidden_layers': 2, 'hidden_units': 64, 'embedding_out_dim': 4, 'hidden_activation': 'relu', 'hidden_dropout': 0.02, 'batch_norm': 'before_act', 'optimizer': {'type': 'adam', 'lr': 0.001}, 'batch_size': 256, 'epochs': 80 } return params<load_from_csv>
my_submission.Location.nunique()
COVID19 Global Forecasting (Week 3)
8,820,034
data_dict = {} for i in glob.glob('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/*'): name = i.split('/')[-1].split('.')[0] if name != 'MTeamSpellings': data_dict[name] = pd.read_csv(i) else: data_dict[name] = pd.read_csv(i, encoding='cp1252')<feature_engineering>
my_submission[['ConfirmedCases', 'Fatalities']] = my_submission[['ConfirmedCases', 'Fatalities']].round(1)
COVID19 Global Forecasting (Week 3)
8,820,034
<load_from_csv><EOS>
my_submission[[ 'ForecastId', 'ConfirmedCases', 'Fatalities' ]].to_csv('submission.csv', index=False) print(DECAY) my_submission.head() my_submission.tail() my_submission.shape
COVID19 Global Forecasting (Week 3)
8,816,812
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<feature_engineering>
for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) print('--------------------------------------') print('Train data looks like...') trainData = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv") print(trainData.head(5)) print('Test data looks like...') testData = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv") print(testData.head(5))
COVID19 Global Forecasting (Week 3)
8,816,812
test = test.drop(['Pred'], axis=1) test['Season'] = test['ID'].apply(lambda x: int(x.split('_')[0])) test['WTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[1])) test['LTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[2])) test.head()<merge>
def kaggle(dfTrain, dfTest): pd.set_option('display.max_columns', None) dfTest['DateNew'] = pd.to_datetime(dfTest['Date']) dfTest = dfTest.drop(['Date'], axis=1) dfTest = dfTest.rename(columns={"DateNew": "Date"}) dfTest['Year'] = dfTest['Date'].dt.year dfTest['Month'] = dfTest['Date'].dt.month dfTest['Day'] = dfTest['Date'].dt.day dfTest = dfTest.drop(['Date'], axis=1) dfTest = dfTest.fillna('DummyProvince') dfTrain['DateNew'] = pd.to_datetime(dfTrain['Date']) dfTrain = dfTrain.drop(['Date'], axis=1) dfTrain = dfTrain.rename(columns={"DateNew": "Date"}) dfTrain['Year'] = dfTrain['Date'].dt.year dfTrain['Month'] = dfTrain['Date'].dt.month dfTrain['Day'] = dfTrain['Date'].dt.day dfTrain = dfTrain.drop(['Date'], axis=1) dfTrain = dfTrain.fillna('DummyProvince') result = pd.merge(dfTest, dfTrain, how='left', on=['Country_Region', 'Province_State', 'Year', 'Month', 'Day']) result = result.fillna(-1) print('Are the Kaggle submission files same? ', np.shape(dfTrain), np.shape(dfTest), np.shape(result)) return result def ErrorCalc(mdl, ref, tag): relError = np.abs(mdl - ref)/ np.abs(ref+1) MeanErrorV = np.mean(relError) print(tag + ': Mean Rel Error in %: ', MeanErrorV * 100) return MeanErrorV def AdjustingErrorsOutliers(tempPred, tempPrev): tempPred = np.round(tempPred) for i in range(len(tempPred)) : if tempPred[i] < tempPrev[i] : tempPred[i] = tempPrev[i] return tempPred def TrainMdl(trainIpData, trainOpData): testSize = 0.1 print('Training starts...') randomState = None X_train, X_test, y_train, y_test = train_test_split(trainIpData, trainOpData, test_size=testSize, random_state=randomState) TrainIP1 = X_train[['day1_I', 'day2_I', 'day3_I', 'day4_I', 'day5_I', 'diff1_I', 'diff2_I', 'diff3_I', 'diff4_I']] TrainOP1 = X_train['gammaFunI'] TestIP1 = X_test[['day1_I', 'day2_I', 'day3_I', 'day4_I', 'day5_I', 'diff1_I', 'diff2_I', 'diff3_I', 'diff4_I']] treeDepth = 10 mdl1 = DecisionTreeRegressor(max_depth=treeDepth) param_grid = { 'n_estimators': [100, 250, 500], 'learning_rate': [0.1, 0.01, 0.001] } regrMdl1 = AdaBoostRegressor(base_estimator=mdl1) clf1 = RandomizedSearchCV(estimator = regrMdl1, param_distributions = param_grid, n_iter = 100, cv = 3, verbose=0, random_state=42, n_jobs = -1) clf1.fit(TrainIP1, TrainOP1) y_predictedTrain = clf1.predict(TrainIP1) y_predictedTrain = AdjustingErrorsOutliers(y_predictedTrain * TrainIP1['day5_I'].to_numpy() , TrainIP1['day5_I'].to_numpy()) ErrorCalc(y_predictedTrain, X_train['dayPredictInf'].to_numpy() , 'Train Data-set model-1(infection rate)') y_predictedTest = clf1.predict(TestIP1) y_predictedTest = AdjustingErrorsOutliers(y_predictedTest * TestIP1['day5_I'].to_numpy() , TestIP1['day5_I'].to_numpy()) ErrorCalc(y_predictedTest, X_test['dayPredictInf'].to_numpy() , 'Test Data-set model-1(infection rate)') TrainIP2 = X_train[['day1_I', 'day2_I', 'day3_I', 'day4_I', 'day5_I', 'diff1_I', 'diff2_I', 'diff3_I', 'diff4_I', 'day1_F', 'day2_F', 'day3_F', 'day4_F', 'day5_F', 'diff1_F', 'diff2_F', 'diff3_F', 'diff4_F']] TrainOP2 = X_train['gammaFunF'] TestIP2 = X_test[['day1_I', 'day2_I', 'day3_I', 'day4_I', 'day5_I', 'diff1_I', 'diff2_I', 'diff3_I', 'diff4_I', 'day1_F', 'day2_F', 'day3_F', 'day4_F', 'day5_F', 'diff1_F', 'diff2_F', 'diff3_F', 'diff4_F']] treeDepth = 10 mdl2 = DecisionTreeRegressor(max_depth=treeDepth) param_grid = { 'n_estimators': [100, 250, 500], 'learning_rate': [0.1, 0.01, 0.001] } regrMdl2 = AdaBoostRegressor(base_estimator=mdl2) clf2 = RandomizedSearchCV(estimator=regrMdl2, param_distributions=param_grid, n_iter=100, cv=3, verbose=0, 
random_state=42, n_jobs=-1) clf2.fit(TrainIP2, TrainOP2) y_predictedTrain = clf2.predict(TrainIP2) * TrainIP2['day5_F'].to_numpy() y_predictedTrain = AdjustingErrorsOutliers(y_predictedTrain, TrainIP2['day5_F'].to_numpy()) ErrorCalc(y_predictedTrain, y_train.to_numpy(), 'Train Data-set model-2') y_predictedTest = clf2.predict(TestIP2) * TestIP2['day5_F'].to_numpy() y_predictedTest = AdjustingErrorsOutliers(y_predictedTest, TestIP2['day5_F'].to_numpy()) ErrorCalc(y_predictedTest, y_test.to_numpy(), 'Test Data-set model-2') dfValidation_Inf = pd.read_csv("../input/week3-covid19-traindata/Validation_Infected.csv").reset_index(drop=True) dfValidation_Fat = pd.read_csv("../input/week3-covid19-traindata/Validation_Fatality.csv").reset_index(drop=True) selRow = 0 startIdx = 12 [rVal, cVal] = np.shape(dfValidation_Inf) lengthZ = cVal - 1 lengthZ = 12 * lengthZ arrP = np.zeros((2 * lengthZ,)) arrA = np.zeros((2 * lengthZ,)) count = 0 error = 0 errorLen = 0 Threshold_I = 0 Threshold_F = 0 print('Validating...') while selRow < rVal: iDetect = startIdx iArray = 0 while iDetect < 17: if iDetect == startIdx: day5_I = dfValidation_Inf.iloc[selRow, iDetect] day4_I = dfValidation_Inf.iloc[selRow, iDetect - 1] day3_I = dfValidation_Inf.iloc[selRow, iDetect - 2] day2_I = dfValidation_Inf.iloc[selRow, iDetect - 3] day1_I = dfValidation_Inf.iloc[selRow, iDetect - 4] if day5_I < Threshold_I: day5_I = Threshold_I day5_F = dfValidation_Fat.iloc[selRow, iDetect] day4_F = dfValidation_Fat.iloc[selRow, iDetect - 1] day3_F = dfValidation_Fat.iloc[selRow, iDetect - 2] day2_F = dfValidation_Fat.iloc[selRow, iDetect - 3] day1_F = dfValidation_Fat.iloc[selRow, iDetect - 4] if day5_F < Threshold_F: day5_F = Threshold_F else: day1_I = day2_I day2_I = day3_I day3_I = day4_I day4_I = day5_I day5_I = predictedInfected day1_F = day2_F day2_F = day3_F day3_F = day4_F day4_F = day5_F day5_F = predictedFatality diff1_I = day5_I - day4_I diff2_I = day4_I - day3_I diff3_I = day3_I - day2_I diff4_I = day2_I - day1_I diff1_F = day5_F - day4_F diff2_F = day4_F - day3_F diff3_F = day3_F - day2_F diff4_F = day2_F - day1_F data1 = {'day1_I': [day1_I], 'day2_I': [day2_I], 'day3_I': [day3_I], 'day4_I': [day4_I], 'day5_I': [day5_I], 'diff1_I': [diff1_I], 'diff2_I': [diff2_I], 'diff3_I': [diff3_I], 'diff4_I': [diff4_I]} dfPredict1 = pd.DataFrame(data1) predictedInfected = clf1.predict(dfPredict1[['day1_I', 'day2_I', 'day3_I', 'day4_I', 'day5_I', 'diff1_I', 'diff2_I', 'diff3_I', 'diff4_I']]) * day5_I if predictedInfected < day5_I: predictedInfected = day5_I actVal = dfValidation_Inf.iloc[selRow, iDetect + 1] arrP[iArray] = predictedInfected arrA[iArray] = actVal iArray = iArray + 1 data2 = {'day1_I': [day1_I], 'day2_I': [day2_I], 'day3_I': [day3_I], 'day4_I': [day4_I], 'day5_I': [day5_I], 'diff1_I': [diff1_I], 'diff2_I': [diff2_I], 'diff3_I': [diff3_I], 'diff4_I': [diff4_I], 'day1_F': [day1_F], 'day2_F': [day2_F], 'day3_F': [day3_F], 'day4_F': [day4_F], 'day5_F': [day5_F], 'diff1_F': [diff1_F], 'diff2_F': [diff2_F], 'diff3_F': [diff3_F], 'diff4_F': [diff4_F]} dfPredict2 = pd.DataFrame(data2) predictedFatality = clf2.predict(dfPredict2[['day1_I', 'day2_I', 'day3_I', 'day4_I', 'day5_I', 'diff1_I', 'diff2_I', 'diff3_I', 'diff4_I', 'day1_F', 'day2_F', 'day3_F', 'day4_F', 'day5_F', 'diff1_F', 'diff2_F', 'diff3_F', 'diff4_F']]) * day5_F if predictedFatality < day5_F: predictedFatality = day5_F actVal = dfValidation_Fat.iloc[selRow, iDetect + 1] arrP[iArray] = predictedFatality arrA[iArray] = actVal iDetect = iDetect + 1 iArray = iArray + 1
error = error + sum(np.square(np.log(arrP[0:iArray-1] + 1) - np.log(arrA[0:iArray-1] + 1))) errorLen = errorLen + iArray selRow = selRow + 1 count = count + 1 error = float(error) / errorLen print('Validation error: ', error) print('Making Kaggle Submission file...') dfTrain = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv") dfTest = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv") df = kaggle(dfTrain, dfTest) print('Prediction starts for Kaggle submission...') [rr, cc] = np.shape(df) for iP in range(rr): if df.loc[iP, 'ConfirmedCases'] == -1: day5_I = df.loc[iP-1, 'ConfirmedCases'] day4_I = df.loc[iP-2, 'ConfirmedCases'] day3_I = df.loc[iP-3, 'ConfirmedCases'] day2_I = df.loc[iP-4, 'ConfirmedCases'] day1_I = df.loc[iP-5, 'ConfirmedCases'] day5_F = df.loc[iP - 1, 'Fatalities'] day4_F = df.loc[iP - 2, 'Fatalities'] day3_F = df.loc[iP - 3, 'Fatalities'] day2_F = df.loc[iP - 4, 'Fatalities'] day1_F = df.loc[iP - 5, 'Fatalities'] diff1_I = day5_I - day4_I diff2_I = day4_I - day3_I diff3_I = day3_I - day2_I diff4_I = day2_I - day1_I diff1_F = day5_F - day4_F diff2_F = day4_F - day3_F diff3_F = day3_F - day2_F diff4_F = day2_F - day1_F data1 = {'day1_I': [day1_I], 'day2_I': [day2_I], 'day3_I': [day3_I], 'day4_I': [day4_I], 'day5_I': [day5_I], 'diff1_I': [diff1_I], 'diff2_I': [diff2_I], 'diff3_I': [diff3_I], 'diff4_I': [diff4_I]} dfPredict1 = pd.DataFrame(data1) predictedInfected = clf1.predict(dfPredict1[['day1_I', 'day2_I', 'day3_I', 'day4_I', 'day5_I', 'diff1_I', 'diff2_I', 'diff3_I', 'diff4_I']]) * day5_I if predictedInfected < day5_I: predictedInfected = day5_I df.loc[iP, 'ConfirmedCases'] = np.round(predictedInfected) data2 = {'day1_I': [day1_I], 'day2_I': [day2_I], 'day3_I': [day3_I], 'day4_I': [day4_I], 'day5_I': [day5_I], 'diff1_I': [diff1_I], 'diff2_I': [diff2_I], 'diff3_I': [diff3_I], 'diff4_I': [diff4_I], 'day1_F': [day1_F], 'day2_F': [day2_F], 'day3_F': [day3_F], 'day4_F': [day4_F], 'day5_F': [day5_F], 'diff1_F': [diff1_F], 'diff2_F': [diff2_F], 'diff3_F': [diff3_F], 'diff4_F': [diff4_F]} dfPredict2 = pd.DataFrame(data2) predictedFatality = clf2.predict(dfPredict2[['day1_I', 'day2_I', 'day3_I', 'day4_I', 'day5_I', 'diff1_I', 'diff2_I', 'diff3_I', 'diff4_I', 'day1_F', 'day2_F', 'day3_F', 'day4_F', 'day5_F', 'diff1_F', 'diff2_F', 'diff3_F', 'diff4_F']]) * day5_F if predictedFatality < day5_F: predictedFatality = day5_F df.loc[iP, 'Fatalities'] = np.round(predictedFatality) return df df = pd.read_csv("../input/week3-covid19-traindata/TrainTest.csv") trainIpData = df[['day1_I', 'day2_I', 'day3_I', 'day4_I', 'day5_I', 'diff1_I', 'diff2_I', 'diff3_I', 'diff4_I', 'day1_F', 'day2_F', 'day3_F', 'day4_F', 'day5_F', 'diff1_F', 'diff2_F', 'diff3_F', 'diff4_F', 'gammaFunF', 'gammaFunI', 'dayPredictInf']] trainOpData = df['dayPredictFat'] predictions_dF = TrainMdl(trainIpData, trainOpData) predictions_dF[['ForecastId', 'ConfirmedCases', 'Fatalities']].to_csv('submission.csv', index=False) print(predictions_dF[['ForecastId', 'ConfirmedCases', 'Fatalities']].head(5)) print(predictions_dF[['ForecastId', 'ConfirmedCases', 'Fatalities']].tail(5)) print('Done!')
COVID19 Global Forecasting (Week 3)
8,815,496
gameCities = pd.merge(data_dict['MGameCities'], data_dict['Cities'], how='left', on=['CityID']) cols_to_use = gameCities.columns.difference(train.columns).tolist() + ["Season", "WTeamID", "LTeamID"] train = train.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]), how="left", on=["Season", "WTeamID", "LTeamID"]) train.head() cols_to_use = data_dict["MSeasons"].columns.difference(train.columns).tolist() + ["Season"] train = train.merge(data_dict["MSeasons"][cols_to_use].drop_duplicates(subset=["Season"]), how="left", on=["Season"]) train.head() cols_to_use = data_dict["MTeams"].columns.difference(train.columns).tolist() train = train.merge(data_dict["MTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["WTeamID"], right_on=["TeamID"]) train.drop(['TeamID'], axis=1, inplace=True) train = train.merge(data_dict["MTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L')) train.drop(['TeamID'], axis=1, inplace=True) print(train.shape) train.head()<merge>
pd.set_option('mode.chained_assignment', None) test = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv") train = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv") train['Province_State'].fillna('', inplace=True) test['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train = train.sort_values(['Country_Region', 'Province_State', 'Date']) test = test.sort_values(['Country_Region', 'Province_State', 'Date']) train[['ConfirmedCases', 'Fatalities']] = train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']].transform('cummax')
COVID19 Global Forecasting (Week 3)
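The `transform('cummax')` in the completion above enforces that cumulative counts never decrease within a location, repairing days where the raw data dips; a toy demonstration:

import pandas as pd

df = pd.DataFrame({'Country_Region': ['A'] * 4,
                   'Province_State': [''] * 4,
                   'ConfirmedCases': [5, 7, 6, 9]})  # day 3 dips below day 2
df['ConfirmedCases'] = df.groupby(['Country_Region', 'Province_State'])['ConfirmedCases'].transform('cummax')
print(df['ConfirmedCases'].tolist())  # [5, 7, 7, 9]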
8,815,496
cols_to_use = data_dict["MTeamCoaches"].columns.difference(train.columns ).tolist() + ["Season"] train = train.merge(data_dict["MTeamCoaches"][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how="left", left_on=["Season","WTeamID"], right_on=["Season","TeamID"]) train.drop(['TeamID'], axis=1, inplace=True) train = train.merge(data_dict["MTeamCoaches"][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how="left", left_on=["Season","LTeamID"], right_on=["Season","TeamID"], suffixes=('_W', '_L')) train.drop(['TeamID'], axis=1, inplace=True) print(train.shape) train.head() <merge>
def RMSLE(pred, actual): return np.sqrt(np.mean(np.power((np.log(pred + 1) - np.log(actual + 1)), 2))) feature_day = [1, 20, 50, 100, 200, 500, 1000, 5000, 10000, 15000, 20000, 50000, 100000, 200000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:, 'Number day from ' + str(day) + ' case'] = 0 if(train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)): if(data['Date'].iloc[i] > fromday): day_delta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days feature = feature + ['Number day from ' + str(day) + ' case'] return data[feature]
COVID19 Global Forecasting (Week 3)
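`CreateInput` above builds "days since the Nth case" features: for each threshold in `feature_day`, it finds the last date the location was still under that many confirmed cases and counts days elapsed since. A compact sketch of one such feature on toy data (names illustrative):

import pandas as pd

s = pd.DataFrame({'Date': pd.date_range('2020-03-01', periods=6),
                  'ConfirmedCases': [20, 60, 120, 300, 800, 1500]})
threshold = 100
fromday = s.loc[s['ConfirmedCases'] < threshold, 'Date'].max()
s['days_from_100_case'] = (s['Date'] - fromday).dt.days.clip(lower=0)
print(s['days_from_100_case'].tolist())  # [0, 0, 1, 2, 3, 4]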
8,815,496
cols_to_use = data_dict['MNCAATourneySeeds'].columns.difference(train.columns ).tolist() + ['Season'] train = train.merge(data_dict['MNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID']) train.drop(['TeamID'], axis=1, inplace=True) train = train.merge(data_dict['MNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L')) train.drop(['TeamID'], axis=1, inplace=True) print(train.shape) train.head()<merge>
!pip install pmdarima
COVID19 Global Forecasting (Week 3)
8,815,496
cols_to_use = gameCities.columns.difference(test.columns).tolist() + ["Season", "WTeamID", "LTeamID"] test = test.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]), how="left", on=["Season", "WTeamID", "LTeamID"]) del gameCities gc.collect() test.head() cols_to_use = data_dict["MSeasons"].columns.difference(test.columns).tolist() + ["Season"] test = test.merge(data_dict["MSeasons"][cols_to_use].drop_duplicates(subset=["Season"]), how="left", on=["Season"]) test.head() cols_to_use = data_dict["MTeams"].columns.difference(test.columns).tolist() test = test.merge(data_dict["MTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["WTeamID"], right_on=["TeamID"]) test.drop(['TeamID'], axis=1, inplace=True) test = test.merge(data_dict["MTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]), how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L')) test.drop(['TeamID'], axis=1, inplace=True) test.head() cols_to_use = data_dict["MTeamCoaches"].columns.difference(test.columns).tolist() + ["Season"] test = test.merge(data_dict["MTeamCoaches"][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how="left", left_on=["Season", "WTeamID"], right_on=["Season", "TeamID"]) test.drop(['TeamID'], axis=1, inplace=True) test = test.merge(data_dict["MTeamCoaches"][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how="left", left_on=["Season", "LTeamID"], right_on=["Season", "TeamID"], suffixes=('_W', '_L')) test.drop(['TeamID'], axis=1, inplace=True) cols_to_use = data_dict['MNCAATourneySeeds'].columns.difference(test.columns).tolist() + ['Season'] test = test.merge(data_dict['MNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID']) test.drop(['TeamID'], axis=1, inplace=True) test = test.merge(data_dict['MNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]), how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L')) test.drop(['TeamID'], axis=1, inplace=True) print(test.shape) test.head()<drop_column>
df_val = df_val_2 submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']] submission.columns = ['ForecastId','ConfirmedCases','Fatalities'] submission = submission.round({'ConfirmedCases': 0, 'Fatalities': 0}) submission.to_csv('submission.csv', index=False) submission
COVID19 Global Forecasting (Week 3)
8,879,365
not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist()] print(not_exist_in_test) train = train.drop(not_exist_in_test, axis=1) train.head()<groupby>
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score from sklearn.model_selection import train_test_split from xgboost import XGBRegressor from sklearn.preprocessing import LabelBinarizer,LabelEncoder,StandardScaler,MinMaxScaler
COVID19 Global Forecasting (Week 3)
8,879,365
team_win_score = regularSeason.groupby(['Season', 'WTeamID']).agg({'WScore':['sum', 'count', 'var']}).reset_index() team_win_score.columns = [' '.join(col).strip() for col in team_win_score.columns.values] team_loss_score = regularSeason.groupby(['Season', 'LTeamID']).agg({'LScore':['sum', 'count', 'var']}).reset_index() team_loss_score.columns = [' '.join(col).strip() for col in team_loss_score.columns.values] del regularSeason gc.collect()<merge>
train_df = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv") test_df = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv") submission_df = pd.read_csv("../input/covid19-global-forecasting-week-3/submission.csv")
COVID19 Global Forecasting (Week 3)
8,879,365
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID']) train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID']) train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID']) train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID']) train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True) train.head()<merge>
lb = LabelEncoder()
train_df['Country_Region'] = lb.fit_transform(train_df['Country_Region'])
test_df['Country_Region'] = lb.transform(test_df['Country_Region'])
# Province_State contains NaN, which LabelEncoder cannot sort; fill it first
# and use the second encoder for it
train_df['Province_State'] = train_df['Province_State'].fillna('None')
test_df['Province_State'] = test_df['Province_State'].fillna('None')
lb1 = LabelEncoder()
train_df['Province_State'] = lb1.fit_transform(train_df['Province_State'])
test_df['Province_State'] = lb1.transform(test_df['Province_State'])
COVID19 Global Forecasting (Week 3)
8,879,365
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID']) test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID']) test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID']) test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID']) test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True) test.head()<feature_engineering>
def split_date(date): date = date.split('-') date[0] = int(date[0]) if(date[1][0] == '0'): date[1] = int(date[1][1]) else: date[1] = int(date[1]) if(date[2][0] == '0'): date[2] = int(date[2][1]) else: date[2] = int(date[2]) return date train_df.Date = train_df.Date.apply(split_date) test_df.Date = test_df.Date.apply(split_date )
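# Illustrative aside (not part of the original kernel): pandas can do the same
# split without string surgery, assuming the 'YYYY-MM-DD' format used above.
_demo = pd.to_datetime(pd.Series(['2020-03-01', '2020-03-15']), format='%Y-%m-%d')
assert list(_demo.dt.month) == [3, 3] and list(_demo.dt.day) == [1, 15]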
COVID19 Global Forecasting (Week 3)
8,879,365
def preprocess(df):
    # '_x' columns pair the winner's win stats with its loss stats ('LScore ..._y'),
    # and '_y' columns do the same for the loser, so each sum/count/var below
    # combines one team's wins and losses
    df['x_score'] = df['WScore sum_x'] + df['LScore sum_y']
    df['y_score'] = df['WScore sum_y'] + df['LScore sum_x']
    df['x_count'] = df['WScore count_x'] + df['LScore count_y']
    df['y_count'] = df['WScore count_y'] + df['LScore count_x']
    df['x_var'] = df['WScore var_x'] + df['LScore var_y']
    df['y_var'] = df['WScore var_y'] + df['LScore var_x']
    return df
train = preprocess(train)
test = preprocess(test)
year = [] month = [] day = [] for i in train_df.Date: year.append(i[0]) month.append(i[1]) day.append(i[2]) train_df['Year'] = year train_df['Month'] = month train_df['Day'] = day del train_df['Date']
COVID19 Global Forecasting (Week 3)
8,879,365
train_win = train.copy()
train_los = train.copy()
train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'CoachName_W', 'CoachName_L',
                       'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
# the losing-team view swaps every winner/loser pair, including counts and variances
train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W', 'CoachName_L', 'CoachName_W',
                       'y_score', 'x_score', 'y_count', 'x_count', 'y_var', 'x_var']]
train_win.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'CoachName_1', 'CoachName_2',
                     'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
train_los.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'CoachName_1', 'CoachName_2',
                     'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'CoachName_W', 'CoachName_L',
             'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
test.columns = ['ID', 'Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'CoachName_1', 'CoachName_2',
                'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
year = [] month = [] day = [] for i in test_df.Date: year.append(i[0]) month.append(i[1]) day.append(i[2]) test_df['Year'] = year test_df['Month'] = month test_df['Day'] = day del test_df['Date'] del train_df['Id'] del test_df['ForecastId']
COVID19 Global Forecasting (Week 3)
8,879,365
def feature_engineering(df): df['Seed_diff'] = df['Seed_1'] - df['Seed_2'] df['Score_diff'] = df['Score_1'] - df['Score_2'] df['Count_diff'] = df['Count_1'] - df['Count_2'] df['Var_diff'] = df['Var_1'] - df['Var_2'] df['Mean_score1'] = df['Score_1'] / df['Count_1'] df['Mean_score2'] = df['Score_2'] / df['Count_2'] df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2'] df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1'] df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2'] return df train_win = feature_engineering(train_win) train_los = feature_engineering(train_los) test = feature_engineering(test )<concatenate>
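# Illustrative aside (not from the original kernel): the Fano factor computed
# above is the variance-to-mean ratio, a dispersion index -- about 1 for
# Poisson-like scoring, larger for streaky (over-dispersed) teams.
import numpy as np
_scores = np.array([60, 80, 70, 90, 50])
print(round(_scores.var(ddof=1) / _scores.mean(), 2))  # 3.57: far more variable than Poisson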
train_df['ConfirmedCases'] = train_df['ConfirmedCases'].apply(int) train_df['Fatalities'] = train_df['Fatalities'].apply(int) cases = train_df.ConfirmedCases fatalities = train_df.Fatalities del train_df['ConfirmedCases'] del train_df['Fatalities']
COVID19 Global Forecasting (Week 3)
8,879,365
train_win['result'] = 1  # rows written from the winner's perspective
train_los['result'] = 0  # the same games from the loser's perspective
data = pd.concat((train_win, train_los)).reset_index(drop=True)
print(data.shape)
data.head()
scaler = MinMaxScaler() x_train = scaler.fit_transform(train_df.values) x_test = scaler.transform(test_df.values )
COVID19 Global Forecasting (Week 3)
8,879,365
categoricals = ["CoachName_1", "CoachName_2", "TeamName_1", "TeamName_2"] for c in categoricals: le = LabelEncoder() data[c] = data[c].fillna("NaN") data[c] = le.fit_transform(data[c]) test[c] = le.transform(test[c]) data.head()<drop_column>
# Stage 1: fit the confirmed-cases model on the raw features
rf = XGBRegressor(n_estimators=1500, random_state=0, max_depth=15)
rf.fit(x_train, cases)
cases_pred = rf.predict(x_test)
cases_pred = np.around(cases_pred, decimals=0)
cases_pred
# Append the actual case counts as an extra feature for the fatalities model
x_train_cas = []
for i in range(len(x_train)):
    x = list(x_train[i])
    x.append(cases[i])
    x_train_cas.append(x)
x_train_cas = np.array(x_train_cas)
COVID19 Global Forecasting (Week 3)
8,879,365
target = 'result' features = data.columns.values.tolist() features.remove(target )<train_on_grid>
rf = XGBRegressor(n_estimators = 1500 , random_state = 0 , max_depth = 15) rf.fit(x_train_cas,fatalities) x_test_cas = [] for i in range(len(x_test)) : x = list(x_test[i]) x.append(cases_pred[i]) x_test_cas.append(x) x_test_cas = np.array(x_test_cas) fatalities_pred = rf.predict(x_test_cas) fatalities_pred = np.around(fatalities_pred,decimals = 0) fatalities_pred
COVID19 Global Forecasting (Week 3)
8,879,365
nn = NeuralNetworkModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler="MinMax", verbose=True )<train_model>
submission_df['ConfirmedCases'] = cases_pred submission_df['Fatalities'] = fatalities_pred
COVID19 Global Forecasting (Week 3)
8,879,365
lgbm = LgbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )<train_model>
submission_df.to_csv("submission.csv" , index = False )
COVID19 Global Forecasting (Week 3)
8,864,857
catb = CatbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )<load_from_csv>
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn import preprocessing
from xgboost import XGBRegressor
from sklearn.tree import DecisionTreeRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.ensemble import RandomForestRegressor
COVID19 Global Forecasting (Week 3)
8,864,857
submission_df = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') submission_df['Pred'] = test_preds submission_df.head()<save_to_csv>
path = '/kaggle/input/covid19-global-forecasting-week-3/' train = pd.read_csv(path+'train.csv') test = pd.read_csv(path+'test.csv' )
COVID19 Global Forecasting (Week 3)
8,864,857
submission_df.to_csv('submission.csv', index=False )<set_options>
train.isnull().sum()
COVID19 Global Forecasting (Week 3)
8,864,857
%reload_ext autoreload %autoreload 2 %matplotlib inline<import_modules>
test.isnull().sum()
COVID19 Global Forecasting (Week 3)
8,864,857
from fastai import * from fastai.vision import * import pandas as pd from fastai.utils.mem import *<define_variables>
%matplotlib inline
COVID19 Global Forecasting (Week 3)
8,864,857
path = Path('/kaggle/input/iwildcam-2019-fgvc6') debug =1 if debug: train_pct=0.04 else: train_pct=0.5<load_from_csv>
train["Date"] = train["Date"].apply(lambda x: x.replace("-","")) train["Date"] = train["Date"].astype(int) test["Date"] = test["Date"].apply(lambda x: x.replace("-","")) test["Date"] = test["Date"].astype(int )
COVID19 Global Forecasting (Week 3)
8,864,857
train_df = pd.read_csv(path/'train.csv') train_df = pd.concat([train_df['id'],train_df['category_id']],axis=1,keys=['id','category_id']) train_df.head()<load_from_csv>
EMPTY_VAL = "EMPTY_VAL" def fillState(state, country): if state == EMPTY_VAL: return country return state
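# Quick sanity check of the helper above (illustrative): missing states fall
# back to the country name, real states pass through unchanged.
assert fillState(EMPTY_VAL, 'France') == 'France'
assert fillState('Hubei', 'China') == 'Hubei'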
COVID19 Global Forecasting (Week 3)
8,864,857
test_df = pd.read_csv(path/'test.csv') test_df = pd.DataFrame(test_df['id']) test_df['predicted'] = 0 test_df.head() <init_hyperparams>
train['Province_State'].fillna(EMPTY_VAL, inplace=True) train['Province_State'] = train.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1) test['Province_State'].fillna(EMPTY_VAL, inplace=True) test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1 )
COVID19 Global Forecasting (Week 3)
8,864,857
free = gpu_mem_get_free_no_cache() if free > 8200: bs=64 else: bs=32 print(f"using bs={bs}, have {free}MB of GPU RAM free") tfms = get_transforms(max_rotate=20, max_zoom=1.3, max_lighting=0.4, max_warp=0.4, p_affine=1., p_lighting=1.) <create_dataframe>
le = LabelEncoder() train['Country_Region'] = le.fit_transform(train['Country_Region']) train['Province_State'] = le.fit_transform(train['Province_State']) test['Country_Region'] = le.fit_transform(test['Country_Region']) test['Province_State'] = le.fit_transform(test['Province_State'] )
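# Caveat (illustrative sketch): fitting separate LabelEncoders on train and
# test only yields matching integer codes when both splits contain exactly the
# same sorted value set. Fitting one encoder on the union is the safer pattern:
_tr = pd.Series(['Italy', 'Spain', 'US'])
_te = pd.Series(['Italy', 'US'])                           # 'Spain' absent from test
_shared = LabelEncoder().fit(pd.concat([_tr, _te]))
assert list(_shared.transform(_te)) == [0, 2]              # codes agree with train
assert list(LabelEncoder().fit_transform(_te)) == [0, 1]   # separate fit disagrees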
COVID19 Global Forecasting (Week 3)
8,864,857
data = get_data(224, bs, 'zeros' )<choose_model_class>
def build_model_1():
    model = RandomForestRegressor(n_estimators=100, random_state=0)
    return model
def build_model_2():
    model = XGBRegressor(n_estimators=1000)
    return model
def build_model_3():
    model = DecisionTreeRegressor(random_state=1)
    return model
def build_model_4():
    model = LogisticRegression()
    return model
def build_model_5():
    model = LinearRegression()
    return model
def build_model_6():
    model = LGBMRegressor(random_state=5)
    return model
def build_model_7():
    # 'iterations' is CatBoost's estimator-count parameter, so use the
    # imported CatBoostRegressor here (LGBMRegressor would ignore it)
    model = CatBoostRegressor(iterations=2)
    return model
COVID19 Global Forecasting (Week 3)
8,864,857
gc.collect() wd=1e-1 learn = cnn_learner(data, models.resnet34, metrics=error_rate, bn_final=True, wd=wd) learn.model_dir= '/kaggle/working/'<prepare_output>
out = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []} )
COVID19 Global Forecasting (Week 3)
8,864,857
data = get_data(352,bs) learn.data = data <train_model>
countries = train['Country_Region'].unique()  # assumed definition; the loop below indexes into it
for country in range(len(countries)):
    country_train = train.loc[train['Country_Region'] == countries[country]]
    country_test = test.loc[test['Country_Region'] == countries[country]]
    print("Current Country: ", countries[country])
    xtrain = country_train[['Country_Region', 'Province_State', 'Date']].to_numpy()
    y1train = country_train[['ConfirmedCases']].to_numpy()
    y2train = country_train[['Fatalities']].to_numpy()
    xtest = country_test[['Country_Region', 'Province_State', 'Date']].to_numpy()
    y1train = y1train.reshape(-1)
    y2train = y2train.reshape(-1)
    model1 = build_model_2()
    model1.fit(xtrain, y1train)
    res_cnf_cls = model1.predict(xtest)
    model2 = build_model_2()
    model2.fit(xtrain, y2train)
    res_fac = model2.predict(xtest)
    country_test_Id = country_test.loc[:, 'ForecastId']
    country_test_Id = country_test_Id.astype(int)
    ans = pd.DataFrame({'ForecastId': country_test_Id, 'ConfirmedCases': res_cnf_cls, 'Fatalities': res_fac})
    out = pd.concat([out, ans], axis=0)
COVID19 Global Forecasting (Week 3)
8,864,857
learn.fit_one_cycle(2, max_lr=slice(1e-6,1e-4)) learn.save('352' )<train_model>
out["ForecastId"] = out["ForecastId"].astype(int )
COVID19 Global Forecasting (Week 3)
8,864,857
<save_model><EOS>
out.to_csv('submission.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,826,297
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<find_best_params>
import numpy as np import pandas as pd import matplotlib.pyplot as plt from itertools import cycle, islice import seaborn as sb import matplotlib.dates as dates import datetime as dt from sklearn import preprocessing from xgboost import XGBRegressor from lightgbm import LGBMRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.preprocessing import StandardScaler import plotly.graph_objects as go import plotly_express as px from sklearn.preprocessing import OrdinalEncoder
COVID19 Global Forecasting (Week 3)
8,826,297
interp = ClassificationInterpretation.from_learner(learn) losses,idxs = interp.top_losses() len(data.valid_ds)==len(losses)==len(idxs )<predict_on_test>
train = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/train.csv") test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/test.csv") train.head()
COVID19 Global Forecasting (Week 3)
8,826,297
test_preds = learn.get_preds(DatasetType.Test) test_df['predicted'] = test_preds[0].argmax(dim=1 )<save_to_csv>
train['Date'] = pd.to_datetime(train['Date'], format = '%Y-%m-%d') test['Date'] = pd.to_datetime(test['Date'], format = '%Y-%m-%d' )
COVID19 Global Forecasting (Week 3)
8,826,297
csv_path ='/kaggle/working/submission.csv' test_df.to_csv(csv_path, index=False )<set_options>
def create_features(df): df['day'] = df['Date'].dt.day df['month'] = df['Date'].dt.month df['dayofweek'] = df['Date'].dt.dayofweek df['dayofyear'] = df['Date'].dt.dayofyear df['quarter'] = df['Date'].dt.quarter df['weekofyear'] = df['Date'].dt.weekofyear return df
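# Note (sketch, assuming pandas >= 1.1): Series.dt.weekofyear is deprecated;
# the supported replacement is the ISO calendar accessor.
_d = pd.to_datetime(pd.Series(['2020-03-01']))
assert int(_d.dt.isocalendar().week[0]) == 9  # same value .dt.weekofyear returned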
COVID19 Global Forecasting (Week 3)
8,826,297
%matplotlib inline <load_from_csv>
def categoricalToInteger(df): df.Province_State.fillna('NaN', inplace=True) oe = OrdinalEncoder() df[['Province_State','Country_Region']] = oe.fit_transform(df.loc[:,['Province_State','Country_Region']]) return df
COVID19 Global Forecasting (Week 3)
8,826,297
dr = pd.read_csv(".. /input/RegularSeasonDetailedResults.csv") <prepare_output>
df_train = categoricalToInteger(train)
df_train = create_features(df_train)
columns = ['day','month','dayofweek','dayofyear','quarter','weekofyear','Province_State',
           'Country_Region','ConfirmedCases','Fatalities']
df_train = df_train[columns]
COVID19 Global Forecasting (Week 3)
8,826,297
simple_df_1 = pd.DataFrame() simple_df_1[["team1", "team2"]] =dr[["Wteam", "Lteam"]].copy() simple_df_1["pred"] = 1 simple_df_2 = pd.DataFrame() simple_df_2[["team1", "team2"]] =dr[["Lteam", "Wteam"]] simple_df_2["pred"] = 0 simple_df = pd.concat(( simple_df_1, simple_df_2), axis=0) simple_df.head() <count_unique_values>
df_test = categoricalToInteger(test)
df_test = create_features(df_test)
columns = ['day','month','dayofweek','dayofyear','quarter','weekofyear','Province_State',
           'Country_Region']
COVID19 Global Forecasting (Week 3)
8,826,297
n = simple_df.team1.nunique() n <feature_engineering>
submission = [] for country in df_train.Country_Region.unique() : df_train1 = df_train[df_train["Country_Region"]==country] for state in df_train1.Province_State.unique() : df_train2 = df_train1[df_train1["Province_State"]==state] train = df_train2.values X_train, y_train = train[:,:-2], train[:,-2:] model1 = XGBRegressor(n_estimators=1100) model1.fit(X_train, y_train[:,0]) model2 = XGBRegressor(n_estimators=1100) model2.fit(X_train, y_train[:,1]) df_test1 = df_test[(df_test["Country_Region"]==country)&(df_test["Province_State"] == state)] ForecastId = df_test1.ForecastId.values df_test2 = df_test1[columns] y_pred1 = np.round(model1.predict(df_test2.values),5) y_pred2 = np.round(model2.predict(df_test2.values),5) for i in range(len(y_pred1)) : d = {'ForecastId':ForecastId[i], 'ConfirmedCases':y_pred1[i], 'Fatalities':y_pred2[i]} submission.append(d )
COVID19 Global Forecasting (Week 3)
8,826,297
<normalization><EOS>
df_submit = pd.DataFrame(submission) df_submit.to_csv(r'submission.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,798,029
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<categorify>
warnings.filterwarnings("ignore" )
COVID19 Global Forecasting (Week 3)
8,798,029
def embedding_input(name, n_in, n_out, reg): inp = Input(shape=(1,), dtype="int64", name=name) return inp, Embedding(n_in, n_out, input_length=1, W_regularizer=l2(reg))(inp) def create_bias(inp, n_in): x = Embedding(n_in, 1, input_length=1 )(inp) return Flatten()(x) <categorify>
train = pd.read_csv('.. /input/covid19-global-forecasting-week-3/train.csv') test = pd.read_csv('.. /input/covid19-global-forecasting-week-3/test.csv' )
COVID19 Global Forecasting (Week 3)
8,798,029
n_factors = 50 team1_in, t1 = embedding_input("team1_in", n, n_factors, 1e-4) team2_in, t2 = embedding_input("team2_in", n, n_factors, 1e-4) b1 = create_bias(team1_in, n) b2 = create_bias(team2_in, n) <merge>
curr_date = train['Date'].max() world_cum_confirmed = sum(train[train['Date'] == curr_date].ConfirmedCases) world_cum_fatal = sum(train[train['Date'] == curr_date].Fatalities) print('Number of Countries: ', len(train['Country_Region'].unique())) print('End date in train dset: ', curr_date) print('Number of confirmed cases: ', world_cum_confirmed) print('Number of fatal cases: ', world_cum_fatal)
COVID19 Global Forecasting (Week 3)
8,798,029
x = merge([t1, t2], mode="dot") x = Flatten()(x) x = merge([x, b1], mode="sum") x = merge([x, b2], mode="sum") x = Dense(1, activation="sigmoid" )(x) model = Model([team1_in, team2_in], x) model.compile(Adam(0.001), loss="binary_crossentropy") <train_model>
top_country_c = train[train['Date'] == curr_date].groupby(['Date','Country_Region'] ).sum().sort_values(['ConfirmedCases'], ascending=False) top_country_c.head(20 )
COVID19 Global Forecasting (Week 3)
8,798,029
history = model.fit([train[:, 0], train[:, 1]], train[:, 2], batch_size=64, nb_epoch=10, verbose=2) <feature_engineering>
top_country_f = train[train['Date'] == curr_date].groupby(['Date','Country_Region'] ).sum().sort_values(['Fatalities'], ascending=False) top_country_f.head(20 )
COVID19 Global Forecasting (Week 3)
8,798,029
sub = pd.read_csv(".. /input/SampleSubmission.csv") sub["team1"] = sub["Id"].apply(lambda x: trans_dict[int(x.split("_")[1])]) sub["team2"] = sub["Id"].apply(lambda x: trans_dict[int(x.split("_")[2])]) sub.head() <predict_on_test>
train['MortalityRate'] = train['Fatalities'] / train['ConfirmedCases'] train['MortalityRate'] = train['MortalityRate'].fillna(0.0 )
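# Why fillna(0) suffices here (illustrative): pandas yields NaN for 0/0 but
# inf for x/0 with x > 0; since Fatalities <= ConfirmedCases, a zero
# denominator implies a zero numerator, so only the NaN case can occur.
_demo = pd.Series([0.0, 2.0]) / pd.Series([0.0, 0.0])
print(_demo.tolist())  # [nan, inf] -- fillna(0) repairs the first kind only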
COVID19 Global Forecasting (Week 3)
8,798,029
sub["pred"] = model.predict([sub.team1, sub.team2]) sub = sub[["Id", "pred"]] sub.head() <save_to_csv>
top_country_m = train[train['Date'] == curr_date].groupby(['Country_Region'] ).sum().sort_values(['MortalityRate'], ascending=False) top_country_m.head(10 )
COVID19 Global Forecasting (Week 3)
8,798,029
sub.to_csv("CF.csv", index=False) <load_from_csv>
for df in [train, test]:
    df['Date'] = pd.to_datetime(df['Date'], infer_datetime_format=True)
COVID19 Global Forecasting (Week 3)
8,798,029
train_data = pd.read_csv('/kaggle/input/DontGetKicked/training.csv') train_data.head()<load_from_csv>
missed = "NA" def State(state, country): if state == missed: return country return state
COVID19 Global Forecasting (Week 3)
8,798,029
test_data = pd.read_csv('/kaggle/input/DontGetKicked/test.csv') test_data.head()<count_missing_values>
for df in [train, test]: df['Province_State'].fillna(missed, inplace=True) df['Province_State'] = df.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : State(x['Province_State'], x['Country_Region']), axis=1) df.loc[:, 'Date'] = df.Date.dt.strftime("%m%d") df["Date"] = df["Date"].astype(int )
COVID19 Global Forecasting (Week 3)
8,798,029
train_data.isnull().sum()<count_missing_values>
label_encoder = preprocessing.LabelEncoder() for df in [train, test]: df['Country_Region'] = label_encoder.fit_transform(df['Country_Region']) df['Province_State'] = label_encoder.fit_transform(df['Province_State'] )
COVID19 Global Forecasting (Week 3)
8,798,029
test_data.isnull().sum()<count_values>
def RF():
    model = RandomForestRegressor(n_estimators=100)
    return model
def XGB():
    model = XGBRegressor(n_estimators=1300)
    return model
def LGBM():
    # 'iterations' is a CatBoost name; LightGBM's equivalent is n_estimators
    model = LGBMRegressor(n_estimators=2)
    return model
COVID19 Global Forecasting (Week 3)
8,798,029
train_data['IsBadBuy'].value_counts()<count_values>
unique_countries = train['Country_Region'].unique()  # assumed definition; the loop indexes into it
sub = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
for country in range(len(unique_countries)):
    current_country_train = train.loc[train['Country_Region'] == unique_countries[country]]
    current_country_test = test.loc[test['Country_Region'] == unique_countries[country]]
    features = ['Country_Region', 'Province_State', 'Date']
    X_train = current_country_train[features].to_numpy()
    y1_train = current_country_train[['ConfirmedCases']].to_numpy()
    y2_train = current_country_train[['Fatalities']].to_numpy()
    X_test = current_country_test[features].to_numpy()
    y1_train = y1_train.reshape(-1)
    y2_train = y2_train.reshape(-1)
    model1 = XGB()
    model1.fit(X_train, y1_train)
    res_cnf_cls = np.round(model1.predict(X_test))
    model2 = XGB()
    model2.fit(X_train, y2_train)
    res_fac = np.round(model2.predict(X_test))
    current_country_test_Id = current_country_test.loc[:, 'ForecastId']
    pred = pd.DataFrame({'ForecastId': current_country_test_Id, 'ConfirmedCases': res_cnf_cls, 'Fatalities': res_fac})
    sub = pd.concat([sub, pred], axis=0)
COVID19 Global Forecasting (Week 3)
8,798,029
<drop_column><EOS>
sub.ForecastId = sub.ForecastId.astype('int') sub.to_csv('submission.csv', index=False) sub
COVID19 Global Forecasting (Week 3)
8,825,716
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<count_values>
# imports needed by the cells below
import datetime as dt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from IPython.core.interactiveshell import InteractiveShell

%matplotlib inline
InteractiveShell.ast_node_interactivity = "all"
pd.set_option('display.max_columns', 99)
pd.set_option('display.max_rows', 99)
COVID19 Global Forecasting (Week 3)
8,825,716
train_data['Trim'].value_counts()<drop_column>
plt.rcParams['figure.figsize'] = [16, 10] plt.rcParams['font.size'] = 14 sns.set_palette(sns.color_palette('tab20', 20))
COVID19 Global Forecasting (Week 3)
8,825,716
train_data.drop('Trim', axis = 1, inplace = True) test_data.drop('Trim', axis = 1, inplace = True )<count_values>
COMP = '.. /input/covid19-global-forecasting-week-3' DATEFORMAT = '%Y-%m-%d' def get_comp_data(COMP): train = pd.read_csv(f'{COMP}/train.csv') test = pd.read_csv(f'{COMP}/test.csv') submission = pd.read_csv(f'{COMP}/submission.csv') print(train.shape, test.shape, submission.shape) train['Country_Region'] = train['Country_Region'].str.replace(',', '') test['Country_Region'] = test['Country_Region'].str.replace(',', '') train['Location'] = train['Country_Region'] + '-' + train['Province_State'].fillna('') test['Location'] = test['Country_Region'] + '-' + test['Province_State'].fillna('') train['LogConfirmed'] = to_log(train.ConfirmedCases) train['LogFatalities'] = to_log(train.Fatalities) train = train.drop(columns=['Province_State']) test = test.drop(columns=['Province_State']) country_codes = pd.read_csv('.. /input/covid19-metadata/country_codes.csv', keep_default_na=False) train = train.merge(country_codes, on='Country_Region', how='left') test = test.merge(country_codes, on='Country_Region', how='left') train['DateTime'] = pd.to_datetime(train['Date']) test['DateTime'] = pd.to_datetime(test['Date']) return train, test, submission def process_each_location(df): dfs = [] for loc, df in tqdm(df.groupby('Location')) : df = df.sort_values(by='Date') df['Fatalities'] = df['Fatalities'].cummax() df['ConfirmedCases'] = df['ConfirmedCases'].cummax() df['LogFatalities'] = df['LogFatalities'].cummax() df['LogConfirmed'] = df['LogConfirmed'].cummax() df['LogConfirmedNextDay'] = df['LogConfirmed'].shift(-1) df['ConfirmedNextDay'] = df['ConfirmedCases'].shift(-1) df['DateNextDay'] = df['Date'].shift(-1) df['LogFatalitiesNextDay'] = df['LogFatalities'].shift(-1) df['FatalitiesNextDay'] = df['Fatalities'].shift(-1) df['LogConfirmedDelta'] = df['LogConfirmedNextDay'] - df['LogConfirmed'] df['ConfirmedDelta'] = df['ConfirmedNextDay'] - df['ConfirmedCases'] df['LogFatalitiesDelta'] = df['LogFatalitiesNextDay'] - df['LogFatalities'] df['FatalitiesDelta'] = df['FatalitiesNextDay'] - df['Fatalities'] dfs.append(df) return pd.concat(dfs) def add_days(d, k): return dt.datetime.strptime(d, DATEFORMAT)+ dt.timedelta(days=k) def to_log(x): return np.log(x + 1) def to_exp(x): return np.exp(x)- 1
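# Quick check of the log helpers above (illustrative): to_log is log1p, so
# to_exp inverts it exactly and zero counts stay at zero -- the reason for
# the +1 shift.
_x = np.array([0.0, 9.0, 99.0])
assert np.allclose(to_exp(to_log(_x)), _x)
assert to_log(0) == 0.0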
COVID19 Global Forecasting (Week 3)
8,825,716
train_data['SubModel'].value_counts()<drop_column>
train[train.geo_region.isna()].Country_Region.unique()
train = train.fillna('#')  # '#' assumed as the placeholder for missing geo metadata
test = test.fillna('#')
train[train.duplicated(['Date', 'Location'])]
train.count()
COVID19 Global Forecasting (Week 3)
8,825,716
train_data.drop('SubModel', axis = 1, inplace = True) test_data.drop('SubModel', axis = 1, inplace = True )<count_values>
train = train.sort_values(by='Date') countries_latest_state = train[train['Date'] == TRAIN_END].groupby([ 'Country_Region', 'continent', 'geo_region', 'country_iso_code_3'] ).sum() [[ 'ConfirmedCases', 'Fatalities']].reset_index() countries_latest_state['Log10Confirmed'] = np.log10(countries_latest_state.ConfirmedCases + 1) countries_latest_state['Log10Fatalities'] = np.log10(countries_latest_state.Fatalities + 1) countries_latest_state = countries_latest_state.sort_values(by='Fatalities', ascending=False) countries_latest_state.to_csv('countries_latest_state.csv', index=False) countries_latest_state.shape countries_latest_state.head()
COVID19 Global Forecasting (Week 3)
8,825,716
train_data['Color'].value_counts()<count_values>
latest_loc = train[train['Date'] == TRAIN_END][['Location', 'ConfirmedCases', 'Fatalities']] max_loc = train.groupby(['Location'])[['ConfirmedCases', 'Fatalities']].max().reset_index() check = pd.merge(latest_loc, max_loc, on='Location') np.mean(check.ConfirmedCases_x == check.ConfirmedCases_y) np.mean(check.Fatalities_x == check.Fatalities_y) check[check.Fatalities_x != check.Fatalities_y] check[check.ConfirmedCases_x != check.ConfirmedCases_y]
COVID19 Global Forecasting (Week 3)
8,825,716
test_data['Color'].value_counts()<data_type_conversions>
regional_progress = train_clean.groupby(['DateTime', 'continent']).sum()[['ConfirmedCases', 'Fatalities']].reset_index()
regional_progress['Log10Confirmed'] = np.log10(regional_progress.ConfirmedCases + 1)
regional_progress['Log10Fatalities'] = np.log10(regional_progress.Fatalities + 1)
regional_progress = regional_progress[regional_progress.continent != '#']  # drop the assumed '#' placeholder
COVID19 Global Forecasting (Week 3)
8,825,716
train_data['Color'].fillna(value = 'Color_Unknown', inplace = True) test_data['Color'].fillna(value = 'Color_Unknown', inplace = True )<count_missing_values>
countries_0301 = country_progress[country_progress.Date == '2020-03-01'][[ 'Country_Region', 'ConfirmedCases', 'Fatalities']] countries_0331 = country_progress[country_progress.Date == '2020-03-31'][[ 'Country_Region', 'ConfirmedCases', 'Fatalities']] countries_in_march = pd.merge(countries_0301, countries_0331, on='Country_Region', suffixes=['_0301', '_0331']) countries_in_march['IncreaseInMarch'] = countries_in_march.ConfirmedCases_0331 /(countries_in_march.ConfirmedCases_0301 + 1) countries_in_march = countries_in_march[countries_in_march.ConfirmedCases_0331 > 200].sort_values( by='IncreaseInMarch', ascending=False) countries_in_march.tail(15 )
COVID19 Global Forecasting (Week 3)
8,825,716
print('Number of null values in Color column of train data is ' + str(train_data['Color'].isnull().sum())) print('Number of null values in Color column of test data is ' + str(test_data['Color'].isnull().sum()))<count_values>
train_clean['Geo#'] = train_clean['continent'] + '#' + train_clean['Location']  # composite key; exact expression assumed
latest = train_clean[train_clean.Date == TRAIN_END][['Geo#', 'ConfirmedCases']]
daily_confirmed_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot(
    'Geo#', 'Date', 'LogConfirmedDelta').reset_index()
daily_confirmed_deltas = latest.merge(daily_confirmed_deltas, on='Geo#')
daily_confirmed_deltas.shape
daily_confirmed_deltas.head()
daily_confirmed_deltas.to_csv('daily_confirmed_deltas.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,825,716
train_data['Transmission'].value_counts()<count_values>
deltas = train_clean[np.logical_and( train_clean.LogConfirmed > 2, ~train_clean.Location.str.startswith('China') )].dropna().sort_values(by='LogConfirmedDelta', ascending=False) deltas['start'] = deltas['LogConfirmed'].round(0) confirmed_deltas = pd.concat([ deltas.groupby('start')[['LogConfirmedDelta']].mean() , deltas.groupby('start')[['LogConfirmedDelta']].std() , deltas.groupby('start')[['LogConfirmedDelta']].count() ], axis=1) deltas.mean() confirmed_deltas.columns = ['avg', 'std', 'cnt'] confirmed_deltas confirmed_deltas.to_csv('confirmed_deltas.csv' )
COVID19 Global Forecasting (Week 3)
8,825,716
test_data['Transmission'].value_counts()<filter>
DECAY = 0.93 DECAY ** 7, DECAY ** 14, DECAY ** 21, DECAY ** 28 confirmed_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[[ 'Id']].count().reset_index() GLOBAL_DELTA = 0.11 confirmed_deltas['DELTA'] = GLOBAL_DELTA confirmed_deltas.loc[confirmed_deltas.continent=='Africa', 'DELTA'] = 0.14 confirmed_deltas.loc[confirmed_deltas.continent=='Oceania', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Korea South', 'DELTA'] = 0.011 confirmed_deltas.loc[confirmed_deltas.Country_Region=='US', 'DELTA'] = 0.15 confirmed_deltas.loc[confirmed_deltas.Country_Region=='China', 'DELTA'] = 0.01 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Japan', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Singapore', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Taiwan*', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Switzerland', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Norway', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Iceland', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Austria', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Italy', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Spain', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Portugal', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Israel', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Iran', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Germany', 'DELTA'] = 0.07 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Malaysia', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Russia', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Ukraine', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Brazil', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Turkey', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Philippines', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Location=='France-', 'DELTA'] = 0.1 confirmed_deltas.loc[confirmed_deltas.Location=='United Kingdom-', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Location=='Diamond Princess-', 'DELTA'] = 0.00 confirmed_deltas.loc[confirmed_deltas.Location=='China-Hong Kong', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Location=='San Marino-', 'DELTA'] = 0.03 confirmed_deltas.shape, confirmed_deltas.DELTA.mean() confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].shape, confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].DELTA.mean() confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA] confirmed_deltas.describe()
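# What the decay implies for the projection loop below (sketch): each day i
# adds DELTA * DECAY**i to the log-count, so the total log-growth over n days
# is the geometric sum DELTA * (1 - DECAY**n) / (1 - DECAY).
_n = 30
print(round(GLOBAL_DELTA * (1 - DECAY ** _n) / (1 - DECAY), 2))  # ~1.39 log units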
COVID19 Global Forecasting (Week 3)
8,825,716
train_data[train_data['Transmission'] == 'Manual']<rename_columns>
daily_log_confirmed = train_clean.pivot('Location', 'Date', 'LogConfirmed' ).reset_index() daily_log_confirmed = daily_log_confirmed.sort_values(TRAIN_END, ascending=False) daily_log_confirmed.to_csv('daily_log_confirmed.csv', index=False) for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))) : new_day = str(d ).split(' ')[0] last_day = dt.datetime.strptime(new_day, DATEFORMAT)- dt.timedelta(days=1) last_day = last_day.strftime(DATEFORMAT) for loc in confirmed_deltas.Location.values: confirmed_delta = confirmed_deltas.loc[confirmed_deltas.Location == loc, 'DELTA'].values[0] daily_log_confirmed.loc[daily_log_confirmed.Location == loc, new_day] = daily_log_confirmed.loc[daily_log_confirmed.Location == loc, last_day] + \ confirmed_delta * DECAY ** i
COVID19 Global Forecasting (Week 3)
8,825,716
train_data['Transmission'].replace('Manual', 'MANUAL', inplace = True )<count_values>
train_clean['Geo#'] = train_clean['continent'] + '#' + train_clean['Location']  # composite key; exact expression assumed
latest = train_clean[train_clean.Date == TRAIN_END][['Geo#', 'Fatalities']]
daily_death_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot(
    'Geo#', 'Date', 'LogFatalitiesDelta').reset_index()
daily_death_deltas = latest.merge(daily_death_deltas, on='Geo#')
daily_death_deltas.shape
daily_death_deltas.head()
daily_death_deltas.to_csv('daily_death_deltas.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,825,716
train_data['Transmission'].value_counts()<data_type_conversions>
death_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[[ 'Id']].count().reset_index() GLOBAL_DELTA = 0.11 death_deltas['DELTA'] = GLOBAL_DELTA death_deltas.loc[death_deltas.Country_Region=='China', 'DELTA'] = 0.005 death_deltas.loc[death_deltas.continent=='Oceania', 'DELTA'] = 0.08 death_deltas.loc[death_deltas.Country_Region=='Korea South', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Country_Region=='Japan', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Country_Region=='Singapore', 'DELTA'] = 0.05 death_deltas.loc[death_deltas.Country_Region=='Taiwan*', 'DELTA'] = 0.06 death_deltas.loc[death_deltas.Country_Region=='US', 'DELTA'] = 0.17 death_deltas.loc[death_deltas.Country_Region=='Switzerland', 'DELTA'] = 0.15 death_deltas.loc[death_deltas.Country_Region=='Norway', 'DELTA'] = 0.15 death_deltas.loc[death_deltas.Country_Region=='Iceland', 'DELTA'] = 0.01 death_deltas.loc[death_deltas.Country_Region=='Austria', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Italy', 'DELTA'] = 0.07 death_deltas.loc[death_deltas.Country_Region=='Spain', 'DELTA'] = 0.1 death_deltas.loc[death_deltas.Country_Region=='Portugal', 'DELTA'] = 0.13 death_deltas.loc[death_deltas.Country_Region=='Israel', 'DELTA'] = 0.16 death_deltas.loc[death_deltas.Country_Region=='Iran', 'DELTA'] = 0.06 death_deltas.loc[death_deltas.Country_Region=='Germany', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Malaysia', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Russia', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Ukraine', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Brazil', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Turkey', 'DELTA'] = 0.22 death_deltas.loc[death_deltas.Country_Region=='Philippines', 'DELTA'] = 0.12 death_deltas.loc[death_deltas.Location=='France-', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Location=='United Kingdom-', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Location=='Diamond Princess-', 'DELTA'] = 0.00 death_deltas.loc[death_deltas.Location=='China-Hong Kong', 'DELTA'] = 0.01 death_deltas.loc[death_deltas.Location=='San Marino-', 'DELTA'] = 0.05 death_deltas.shape death_deltas.DELTA.mean() death_deltas[death_deltas.DELTA != GLOBAL_DELTA].shape death_deltas[death_deltas.DELTA != GLOBAL_DELTA].DELTA.mean() death_deltas[death_deltas.DELTA != GLOBAL_DELTA] death_deltas.describe()
COVID19 Global Forecasting (Week 3)
8,825,716
train_data['Transmission'].fillna(value = 'Transmission_Unknown', inplace = True) test_data['Transmission'].fillna(value = 'Transmission_Unknown', inplace = True )<count_values>
daily_log_deaths = train_clean.pivot('Location', 'Date', 'LogFatalities' ).reset_index() daily_log_deaths = daily_log_deaths.sort_values(TRAIN_END, ascending=False) daily_log_deaths.to_csv('daily_log_deaths.csv', index=False) for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))) : new_day = str(d ).split(' ')[0] last_day = dt.datetime.strptime(new_day, DATEFORMAT)- dt.timedelta(days=1) last_day = last_day.strftime(DATEFORMAT) for loc in death_deltas.Location: death_delta = death_deltas.loc[death_deltas.Location == loc, 'DELTA'].values[0] daily_log_deaths.loc[daily_log_deaths.Location == loc, new_day] = daily_log_deaths.loc[daily_log_deaths.Location == loc, last_day] + \ death_delta * DECAY ** i
COVID19 Global Forecasting (Week 3)
8,825,716
train_data['WheelTypeID'].value_counts()<drop_column>
confirmed = [] fatalities = [] for id, d, loc in tqdm(test[['ForecastId', 'Date', 'Location']].values): c = to_exp(daily_log_confirmed.loc[daily_log_confirmed.Location == loc, d].values[0]) f = to_exp(daily_log_deaths.loc[daily_log_deaths.Location == loc, d].values[0]) confirmed.append(c) fatalities.append(f )
COVID19 Global Forecasting (Week 3)
8,825,716
train_data.drop('WheelTypeID', axis = 1, inplace = True) test_data.drop('WheelTypeID', axis = 1, inplace = True )<count_values>
my_submission = test.copy() my_submission['ConfirmedCases'] = confirmed my_submission['Fatalities'] = fatalities my_submission.shape my_submission.head()
COVID19 Global Forecasting (Week 3)
8,825,716
train_data['WheelType'].value_counts()<count_values>
my_submission[[ 'ForecastId', 'ConfirmedCases', 'Fatalities' ]].to_csv('submission.csv', index=False) print(DECAY) my_submission.head() my_submission.tail() my_submission.shape
COVID19 Global Forecasting (Week 3)
8,825,716
<data_type_conversions><EOS>
end = dt.datetime.now() print('Finished', end,(end - start ).seconds, 's' )
COVID19 Global Forecasting (Week 3)
8,824,396
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<count_values>
path = '.. /input/covid19-global-forecasting-week-3/' train = pd.read_csv(path + 'train.csv') test = pd.read_csv(path + 'test.csv') sub = pd.read_csv(path + 'submission.csv') train['Date'] = train['Date'].apply(lambda x:(datetime.datetime.strptime(x, '%Y-%m-%d'))) test['Date'] = test['Date'].apply(lambda x:(datetime.datetime.strptime(x, '%Y-%m-%d'))) train['days'] =(train['Date'].dt.date - train['Date'].dt.date.min() ).dt.days test['days'] =(test['Date'].dt.date - train['Date'].dt.date.min() ).dt.days train.loc[train['Province_State'].isnull() , 'Province_State'] = 'N/A' test.loc[test['Province_State'].isnull() , 'Province_State'] = 'N/A' train['Area'] = train['Country_Region'] + '_' + train['Province_State'] test['Area'] = test['Country_Region'] + '_' + test['Province_State'] print('train Date max',train['Date'].max()) print('test Date min',test['Date'].min()) print('train days max', train['days'].max()) N_AREAS = train['Area'].nunique() AREAS = np.sort(train['Area'].unique()) START_PUBLIC = test['days'].min() print('public LB start day', START_PUBLIC) print(' ') TRAIN_N = 77 print(train[train['days'] < TRAIN_N]['Date'].max()) print(train[train['days'] >= TRAIN_N]['Date'].min()) print(train[train['days'] >= TRAIN_N]['Date'].max()) train.head() test_orig = test.copy()
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['WheelType'].value_counts()<count_values>
train_p_c_raw = train.pivot(index='Area', columns='days', values='ConfirmedCases' ).sort_index() train_p_f_raw = train.pivot(index='Area', columns='days', values='Fatalities' ).sort_index() train_p_c = np.maximum.accumulate(train_p_c_raw, axis=1) train_p_f = np.maximum.accumulate(train_p_f_raw, axis=1) f_rate =(train_p_f / train_p_c ).fillna(0) X_c = np.log(1+train_p_c.values)[:,:TRAIN_N] X_f = train_p_f.values[:,:TRAIN_N]
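# Why np.maximum.accumulate (illustrative): cumulative counts must be
# non-decreasing, so dips caused by reporting corrections are flattened to
# the running maximum.
_demo = np.array([0, 3, 2, 5, 4])
print(np.maximum.accumulate(_demo))  # [0 3 3 5 5]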
COVID19 Global Forecasting (Week 3)