kernel_id: int64 (24.2k – 23.3M)
prompt: string (lengths 8 – 1.85M)
completion: string (lengths 1 – 182k)
comp_name: string (lengths 5 – 57)
8,820,802
predict = KNN.predict(test_data.drop('RefId', axis=1))<prepare_output>
print( "Confirmed: " f'Logistic\t{RMSLE(train["ConfirmedCases"], train["yhat_logistic_ConfirmedCases"])} ' f'XGBoost\t{RMSLE(train["ConfirmedCases"], train["yhat_xgb_ConfirmedCases"])} ' f'Hybrid\t{RMSLE(train["ConfirmedCases"], train["yhat_hybrid_ConfirmedCases"])} ' f"Fatalities: " f'Logistic\t{RMSLE(train["Fatalities"], train["yhat_logistic_Fatalities"])} ' f'XGBoost\t{RMSLE(train["Fatalities"], train["yhat_xgb_Fatalities"])} ' f'Hybrid\t{RMSLE(train["Fatalities"], train["yhat_hybrid_Fatalities"])} ' )
COVID19 Global Forecasting (Week 3)
8,820,802
Submission = pd.DataFrame(data=predict, columns=['IsBadBuy']) Submission.head()<prepare_output>
test = pd.merge(test, train[["Country_Region"] + ['ConfirmedCases_p_0', 'ConfirmedCases_p_1', 'ConfirmedCases_p_2'] + ['Fatalities_p_0', 'Fatalities_p_1', 'Fatalities_p_2'] + ["Fatalities_alpha"] + ["ConfirmedCases_alpha"]].groupby(['Country_Region']).head(1), on="Country_Region", how="left")
COVID19 Global Forecasting (Week 3)
8,820,802
Submission['RefId'] = test_data['RefId'] Submission.set_index('RefId', inplace=True)<save_to_csv>
predict_logistic(test) test["yhat_xgb_ConfirmedCases"] = xgb_c_fit.predict(test[x_columns].to_numpy()) test["yhat_xgb_Fatalities"] = xgb_f_fit.predict(test[x_columns].to_numpy()) predict_hybrid(test)
COVID19 Global Forecasting (Week 3)
8,820,802
Submission.head() Submission.to_csv('Submission.csv')<compute_test_metric>
submission = test[["ForecastId", "yhat_hybrid_ConfirmedCases", "yhat_hybrid_Fatalities"]].round(2).rename(columns={"yhat_hybrid_ConfirmedCases": "ConfirmedCases", "yhat_hybrid_Fatalities": "Fatalities"}) submission["ConfirmedCases"] = np.maximum(0, submission["ConfirmedCases"]) submission["Fatalities"] = np.maximum(0, submission["Fatalities"])
COVID19 Global Forecasting (Week 3)
8,820,802
def RMSLE(pred, actual): return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))<load_from_csv>
submission.to_csv("submission.csv", index=False )
COVID19 Global Forecasting (Week 3)
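For intuition, a minimal check of the RMSLE helper defined in the cell above, on toy arrays (values invented for illustration):

import numpy as np

def RMSLE(pred, actual):
    # root mean squared logarithmic error, as defined above
    return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))

# predictions close to the truth in log space score low
pred = np.array([100.0, 200.0, 300.0])
actual = np.array([110.0, 190.0, 330.0])
print(RMSLE(pred, actual))  # ~0.08: the per-element log ratios are all small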
8,820,802
pd.set_option('mode.chained_assignment', None) test = pd.read_csv("../input/covid19-global-forecasting-week-2/test.csv") train = pd.read_csv("../input/covid19-global-forecasting-week-2/train.csv") train['Province_State'].fillna('', inplace=True) test['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train = train.sort_values(['Country_Region','Province_State','Date']) test = test.sort_values(['Country_Region','Province_State','Date'])<feature_engineering>
submission.to_csv("submission.csv", index=False )
COVID19 Global Forecasting (Week 3)
8,826,842
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) for day in sorted(feature_day,reverse = True): feature_use = 'Number day from ' + str(day)+ ' case' idx = X_train[X_train[feature_use] == 0].shape[0] if(X_train[X_train[feature_use] > 0].shape[0] >= 10): break adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) model = make_pipeline(PolynomialFeatures(2), BayesianRidge()) model.fit(adjusted_X_train,adjusted_y_train_confirmed) y_hat_confirmed = model.predict(adjusted_X_pred) model.fit(adjusted_X_train,adjusted_y_train_fatalities) y_hat_fatalities = model.predict(adjusted_X_pred) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] pred_data['ConfirmedCases_hat'] = np.concatenate(( np.repeat(0, len(pred_data)- len(y_hat_confirmed)) , y_hat_confirmed), axis = 0) pred_data['Fatalities_hat'] = np.concatenate(( np.repeat(float(0), len(pred_data)- len(y_hat_fatalities)) , y_hat_fatalities), axis = 0) pred_data_all = pred_data_all.append(pred_data) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_1 = df_val.copy()<compute_test_metric>
path = '/kaggle/input/covid19-global-forecasting-week-3' train = pd.read_csv(os.path.join(path, 'train.csv')) test = pd.read_csv(os.path.join(path, 'test.csv')) subm = pd.read_csv(os.path.join(path, 'submission.csv'))
COVID19 Global Forecasting (Week 3)
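The long cell above fits, per region, a degree-2 polynomial feature map into BayesianRidge on a "days since N cases" feature. A self-contained sketch of that model on synthetic day counts (data invented for illustration):

import numpy as np
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

# synthetic "days since first case" feature and a roughly quadratic case curve
X = np.arange(1, 31).reshape(-1, 1).astype(float)
y = 3.0 * X.ravel() ** 2 + np.random.RandomState(0).normal(0, 20, 30)

model = make_pipeline(PolynomialFeatures(2), BayesianRidge())
model.fit(X, y)
X_future = np.arange(31, 41).reshape(-1, 1).astype(float)
print(model.predict(X_future)[:3])  # extrapolated counts for the next days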
8,826,842
RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values, df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)<compute_test_metric>
valid_date = pd.to_datetime('2020-04-10')
COVID19 Global Forecasting (Week 3)
8,826,842
RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values, df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)<groupby>
train['area'] = train['Country_Region'].astype(str) + '_' + train['Province_State'].astype(str) test['area'] = test['Country_Region'].astype(str) + '_' + test['Province_State'].astype(str) train['Date'] = pd.to_datetime(train['Date'])
COVID19 Global Forecasting (Week 3)
8,826,842
df_val[df_val['Country_Region'] == country].groupby(['Date','Country_Region']).sum().reset_index()<feature_engineering>
path = '/kaggle/input/start-index-to-fit' with open(os.path.join(path, 'dict_bst_ind.pickle'), 'rb') as f: dict_bst_ind = pickle.load(f) with open(os.path.join(path, 'dict_bst_ind_Fat.pickle'), 'rb') as f: dict_bst_ind_fat = pickle.load(f) with open(os.path.join(path, 'pop_dict.pickle'), 'rb') as f: pop_dict = pickle.load(f)
COVID19 Global Forecasting (Week 3)
8,826,842
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) for day in sorted(feature_day,reverse = True): feature_use = 'Number day from ' + str(day)+ ' case' idx = X_train[X_train[feature_use] == 0].shape[0] if(X_train[X_train[feature_use] > 0].shape[0] >= 20): break adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = ExponentialSmoothing(adjusted_y_train_confirmed, trend = 'additive' ).fit() y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = ExponentialSmoothing(adjusted_y_train_fatalities, trend = 'additive' ).fit() y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_2 = df_val.copy()<feature_engineering>
def get_train_piece(area, valid_date): data = train[(train.area == area)&(train.Date < valid_date)].reset_index() data = data[data['ConfirmedCases'] > 0].reset_index(drop = True) return data
COVID19 Global Forecasting (Week 3)
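This variant of the cell replaces the polynomial regression with Holt's additive-trend exponential smoothing; a minimal sketch on a toy cumulative series (numbers assumed):

import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing

# toy cumulative case counts with a roughly linear recent trend
series = np.array([10, 15, 22, 30, 41, 55, 70, 88, 107, 128], dtype=float)

fit = ExponentialSmoothing(series, trend='additive').fit()
print(fit.forecast(5))  # next 5 days, extending the fitted additive trend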
8,826,842
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) for day in sorted(feature_day,reverse = True): feature_use = 'Number day from ' + str(day)+ ' case' idx = X_train[X_train[feature_use] == 0].shape[0] if(X_train[X_train[feature_use] > 0].shape[0] >= 20): break adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_3 = df_val.copy()<compute_test_metric>
pred_df = pd.DataFrame() for pred_area in tqdm(test.area.unique()): train_df = get_train_piece(pred_area, valid_date) len_train = train_df.shape[0] population = -1 if pred_area in pop_dict: population = pop_dict[pred_area] test_df = test[test.area == pred_area].reset_index(drop = True) len_test = test_df.shape[0] ans = pd.DataFrame() ans['ForecastId'] = test_df['ForecastId'].values if pred_area not in dict_bst_ind: ans['ConfirmedCases'] = fit_predict(train_df['ConfirmedCases'].values, len_test, 0, population, 'cases') else: ans['ConfirmedCases'] = fit_predict(train_df['ConfirmedCases'].values, len_test, dict_bst_ind[pred_area], population, 'cases') if pred_area not in dict_bst_ind_fat: ans['Fatalities'] = fit_predict(train_df['Fatalities'].values, len_test, 0, population, 'fat') else: ans['Fatalities'] = fit_predict(train_df['Fatalities'].values, len_test, dict_bst_ind_fat[pred_area], population, 'fat') pred_df = pd.concat([pred_df, ans], axis=0).reset_index(drop=True)
COVID19 Global Forecasting (Week 3)
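The third variant fits SARIMAX(1,1,0) with measurement error, i.e. an AR(1) model on the differenced series; a minimal sketch on toy data:

import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

# toy daily cumulative counts; order=(1,1,0) models the day-over-day changes as AR(1)
series = np.array([10, 15, 22, 30, 41, 55, 70, 88, 107, 128], dtype=float)

fit = SARIMAX(series, order=(1, 1, 0), measurement_error=True).fit(disp=False)
print(fit.forecast(5))  # next 5 days of the cumulative series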
8,826,842
<save_to_csv><EOS>
pred_df.to_csv('submission.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,825,650
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<filter>
%matplotlib inline InteractiveShell.ast_node_interactivity = "all" pd.set_option('display.max_columns', 99) pd.set_option('display.max_rows', 99)
COVID19 Global Forecasting (Week 3)
8,825,650
df_worldinfor[df_worldinfor['Country'] == 'Vietnam']<load_from_csv>
plt.rcParams['figure.figsize'] = [16, 10] plt.rcParams['font.size'] = 14 sns.set_palette(sns.color_palette('tab20', 20))
COVID19 Global Forecasting (Week 3)
8,825,650
submission = pd.read_csv('/kaggle/input/resultscov19week2/submission(2).csv') submission<save_to_csv>
COMP = '../input/covid19-global-forecasting-week-3' DATEFORMAT = '%Y-%m-%d' def get_comp_data(COMP): train = pd.read_csv(f'{COMP}/train.csv') test = pd.read_csv(f'{COMP}/test.csv') submission = pd.read_csv(f'{COMP}/submission.csv') print(train.shape, test.shape, submission.shape) train['Country_Region'] = train['Country_Region'].str.replace(',', '') test['Country_Region'] = test['Country_Region'].str.replace(',', '') train['Location'] = train['Country_Region'] + '-' + train['Province_State'].fillna('') test['Location'] = test['Country_Region'] + '-' + test['Province_State'].fillna('') train['LogConfirmed'] = to_log(train.ConfirmedCases) train['LogFatalities'] = to_log(train.Fatalities) train = train.drop(columns=['Province_State']) test = test.drop(columns=['Province_State']) country_codes = pd.read_csv('../input/covid19-metadata/country_codes.csv', keep_default_na=False) train = train.merge(country_codes, on='Country_Region', how='left') test = test.merge(country_codes, on='Country_Region', how='left') train['DateTime'] = pd.to_datetime(train['Date']) test['DateTime'] = pd.to_datetime(test['Date']) return train, test, submission def process_each_location(df): dfs = [] for loc, df in tqdm(df.groupby('Location')): df = df.sort_values(by='Date') df['Fatalities'] = df['Fatalities'].cummax() df['ConfirmedCases'] = df['ConfirmedCases'].cummax() df['LogFatalities'] = df['LogFatalities'].cummax() df['LogConfirmed'] = df['LogConfirmed'].cummax() df['LogConfirmedNextDay'] = df['LogConfirmed'].shift(-1) df['ConfirmedNextDay'] = df['ConfirmedCases'].shift(-1) df['DateNextDay'] = df['Date'].shift(-1) df['LogFatalitiesNextDay'] = df['LogFatalities'].shift(-1) df['FatalitiesNextDay'] = df['Fatalities'].shift(-1) df['LogConfirmedDelta'] = df['LogConfirmedNextDay'] - df['LogConfirmed'] df['ConfirmedDelta'] = df['ConfirmedNextDay'] - df['ConfirmedCases'] df['LogFatalitiesDelta'] = df['LogFatalitiesNextDay'] - df['LogFatalities'] df['FatalitiesDelta'] = df['FatalitiesNextDay'] - df['Fatalities'] dfs.append(df) return pd.concat(dfs) def add_days(d, k): return dt.datetime.strptime(d, DATEFORMAT) + dt.timedelta(days=k) def to_log(x): return np.log(x + 1) def to_exp(x): return np.exp(x) - 1
COVID19 Global Forecasting (Week 3)
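The helpers above work in log1p space, and process_each_location derives next-day log deltas after a cummax repair; a compact sketch of both steps on a toy series:

import numpy as np
import pandas as pd

def to_log(x):
    return np.log(x + 1)

def to_exp(x):
    return np.exp(x) - 1

cases = pd.Series([0, 2, 2, 5, 9])      # toy cumulative cases for one location
log_c = to_log(cases.cummax())          # monotone repair, then log1p space
delta = log_c.shift(-1) - log_c         # LogConfirmedDelta: next-day growth in log space
print(delta.round(3).tolist())
print(to_exp(log_c).round(0).tolist())  # round trip recovers the counts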
8,825,650
submission.to_csv('submission.csv', index=False)<import_modules>
train[train.geo_region.isna()].Country_Region.unique() train = train.fillna('') test = test.fillna('') train[train.duplicated(['Date', 'Location'])] train.count()
COVID19 Global Forecasting (Week 3)
8,825,650
<load_from_csv>
train = train.sort_values(by='Date') countries_latest_state = train[train['Date'] == TRAIN_END].groupby(['Country_Region', 'continent', 'geo_region', 'country_iso_code_3']).sum()[['ConfirmedCases', 'Fatalities']].reset_index() countries_latest_state['Log10Confirmed'] = np.log10(countries_latest_state.ConfirmedCases + 1) countries_latest_state['Log10Fatalities'] = np.log10(countries_latest_state.Fatalities + 1) countries_latest_state = countries_latest_state.sort_values(by='Fatalities', ascending=False) countries_latest_state.to_csv('countries_latest_state.csv', index=False) countries_latest_state.shape countries_latest_state.head()
COVID19 Global Forecasting (Week 3)
8,825,650
<data_type_conversions>
latest_loc = train[train['Date'] == TRAIN_END][['Location', 'ConfirmedCases', 'Fatalities']] max_loc = train.groupby(['Location'])[['ConfirmedCases', 'Fatalities']].max().reset_index() check = pd.merge(latest_loc, max_loc, on='Location') np.mean(check.ConfirmedCases_x == check.ConfirmedCases_y) np.mean(check.Fatalities_x == check.Fatalities_y) check[check.Fatalities_x != check.Fatalities_y] check[check.ConfirmedCases_x != check.ConfirmedCases_y]
COVID19 Global Forecasting (Week 3)
8,825,650
<categorify>
regional_progress = train_clean.groupby(['DateTime', 'continent']).sum()[['ConfirmedCases', 'Fatalities']].reset_index() regional_progress['Log10Confirmed'] = np.log10(regional_progress.ConfirmedCases + 1) regional_progress['Log10Fatalities'] = np.log10(regional_progress.Fatalities + 1) regional_progress = regional_progress[regional_progress.continent != '']
COVID19 Global Forecasting (Week 3)
8,825,650
<categorify>
countries_0301 = country_progress[country_progress.Date == '2020-03-01'][['Country_Region', 'ConfirmedCases', 'Fatalities']] countries_0331 = country_progress[country_progress.Date == '2020-03-31'][['Country_Region', 'ConfirmedCases', 'Fatalities']] countries_in_march = pd.merge(countries_0301, countries_0331, on='Country_Region', suffixes=['_0301', '_0331']) countries_in_march['IncreaseInMarch'] = countries_in_march.ConfirmedCases_0331 / (countries_in_march.ConfirmedCases_0301 + 1) countries_in_march = countries_in_march[countries_in_march.ConfirmedCases_0331 > 200].sort_values(by='IncreaseInMarch', ascending=False) countries_in_march.tail(15)
COVID19 Global Forecasting (Week 3)
8,825,650
<create_dataframe>
train_clean['Geo latest = train_clean[train_clean.Date == TRAIN_END][[ 'Geo daily_confirmed_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot( 'Geo daily_confirmed_deltas = latest.merge(daily_confirmed_deltas, on='Geo daily_confirmed_deltas.shape daily_confirmed_deltas.head() daily_confirmed_deltas.to_csv('daily_confirmed_deltas.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,825,650
<feature_engineering>
deltas = train_clean[np.logical_and(train_clean.LogConfirmed > 2, ~train_clean.Location.str.startswith('China'))].dropna().sort_values(by='LogConfirmedDelta', ascending=False) deltas['start'] = deltas['LogConfirmed'].round(0) confirmed_deltas = pd.concat([deltas.groupby('start')[['LogConfirmedDelta']].mean(), deltas.groupby('start')[['LogConfirmedDelta']].std(), deltas.groupby('start')[['LogConfirmedDelta']].count()], axis=1) deltas.mean() confirmed_deltas.columns = ['avg', 'std', 'cnt'] confirmed_deltas confirmed_deltas.to_csv('confirmed_deltas.csv')
COVID19 Global Forecasting (Week 3)
8,825,650
<count_duplicates>
DECAY = 0.93 DECAY ** 7, DECAY ** 14, DECAY ** 21, DECAY ** 28 confirmed_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[[ 'Id']].count().reset_index() GLOBAL_DELTA = 0.11 confirmed_deltas['DELTA'] = GLOBAL_DELTA confirmed_deltas.loc[confirmed_deltas.continent=='Africa', 'DELTA'] = 0.14 confirmed_deltas.loc[confirmed_deltas.continent=='Oceania', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Korea South', 'DELTA'] = 0.011 confirmed_deltas.loc[confirmed_deltas.Country_Region=='US', 'DELTA'] = 0.15 confirmed_deltas.loc[confirmed_deltas.Country_Region=='China', 'DELTA'] = 0.01 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Japan', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Singapore', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Taiwan*', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Switzerland', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Norway', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Iceland', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Austria', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Italy', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Spain', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Portugal', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Israel', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Iran', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Germany', 'DELTA'] = 0.07 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Malaysia', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Russia', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Ukraine', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Brazil', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Turkey', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Philippines', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Location=='France-', 'DELTA'] = 0.1 confirmed_deltas.loc[confirmed_deltas.Location=='United Kingdom-', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Location=='Diamond Princess-', 'DELTA'] = 0.00 confirmed_deltas.loc[confirmed_deltas.Location=='China-Hong Kong', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Location=='San Marino-', 'DELTA'] = 0.03 confirmed_deltas.shape, confirmed_deltas.DELTA.mean() confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].shape, confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].DELTA.mean() confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA] confirmed_deltas.describe()
COVID19 Global Forecasting (Week 3)
8,825,650
<compute_test_metric>
daily_log_confirmed = train_clean.pivot('Location', 'Date', 'LogConfirmed').reset_index() daily_log_confirmed = daily_log_confirmed.sort_values(TRAIN_END, ascending=False) daily_log_confirmed.to_csv('daily_log_confirmed.csv', index=False) for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))): new_day = str(d).split(' ')[0] last_day = dt.datetime.strptime(new_day, DATEFORMAT) - dt.timedelta(days=1) last_day = last_day.strftime(DATEFORMAT) for loc in confirmed_deltas.Location.values: confirmed_delta = confirmed_deltas.loc[confirmed_deltas.Location == loc, 'DELTA'].values[0] daily_log_confirmed.loc[daily_log_confirmed.Location == loc, new_day] = daily_log_confirmed.loc[daily_log_confirmed.Location == loc, last_day] + confirmed_delta * DECAY ** i
COVID19 Global Forecasting (Week 3)
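The projection loop above grows each location's log1p count by a hand-tuned DELTA that decays geometrically with the forecast horizon; the update rule in isolation (DELTA and the starting count are assumed values):

import numpy as np

DECAY = 0.93
delta = 0.11              # assumed daily increment in log1p space
log_c = np.log(1000 + 1)  # assumed last observed confirmed count, in log1p space

proj = []
for i in range(14):               # project two weeks ahead
    log_c += delta * DECAY ** i   # daily growth shrinks geometrically
    proj.append(np.exp(log_c) - 1)
print(np.round(proj[:5]))  # flattening growth curve back in count space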
8,825,650
<train_model>
train_clean['Geo latest = train_clean[train_clean.Date == TRAIN_END][[ 'Geo daily_death_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot( 'Geo daily_death_deltas = latest.merge(daily_death_deltas, on='Geo daily_death_deltas.shape daily_death_deltas.head() daily_death_deltas.to_csv('daily_death_deltas.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,825,650
<train_model>
death_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[[ 'Id']].count().reset_index() GLOBAL_DELTA = 0.11 death_deltas['DELTA'] = GLOBAL_DELTA death_deltas.loc[death_deltas.Country_Region=='China', 'DELTA'] = 0.005 death_deltas.loc[death_deltas.continent=='Oceania', 'DELTA'] = 0.08 death_deltas.loc[death_deltas.Country_Region=='Korea South', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Country_Region=='Japan', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Country_Region=='Singapore', 'DELTA'] = 0.05 death_deltas.loc[death_deltas.Country_Region=='Taiwan*', 'DELTA'] = 0.06 death_deltas.loc[death_deltas.Country_Region=='US', 'DELTA'] = 0.17 death_deltas.loc[death_deltas.Country_Region=='Switzerland', 'DELTA'] = 0.15 death_deltas.loc[death_deltas.Country_Region=='Norway', 'DELTA'] = 0.15 death_deltas.loc[death_deltas.Country_Region=='Iceland', 'DELTA'] = 0.01 death_deltas.loc[death_deltas.Country_Region=='Austria', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Italy', 'DELTA'] = 0.07 death_deltas.loc[death_deltas.Country_Region=='Spain', 'DELTA'] = 0.1 death_deltas.loc[death_deltas.Country_Region=='Portugal', 'DELTA'] = 0.13 death_deltas.loc[death_deltas.Country_Region=='Israel', 'DELTA'] = 0.16 death_deltas.loc[death_deltas.Country_Region=='Iran', 'DELTA'] = 0.06 death_deltas.loc[death_deltas.Country_Region=='Germany', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Malaysia', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Russia', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Ukraine', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Brazil', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Turkey', 'DELTA'] = 0.22 death_deltas.loc[death_deltas.Country_Region=='Philippines', 'DELTA'] = 0.12 death_deltas.loc[death_deltas.Location=='France-', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Location=='United Kingdom-', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Location=='Diamond Princess-', 'DELTA'] = 0.00 death_deltas.loc[death_deltas.Location=='China-Hong Kong', 'DELTA'] = 0.01 death_deltas.loc[death_deltas.Location=='San Marino-', 'DELTA'] = 0.05 death_deltas.shape death_deltas.DELTA.mean() death_deltas[death_deltas.DELTA != GLOBAL_DELTA].shape death_deltas[death_deltas.DELTA != GLOBAL_DELTA].DELTA.mean() death_deltas[death_deltas.DELTA != GLOBAL_DELTA] death_deltas.describe()
COVID19 Global Forecasting (Week 3)
8,825,650
<define_variables>
daily_log_deaths = train_clean.pivot('Location', 'Date', 'LogFatalities').reset_index() daily_log_deaths = daily_log_deaths.sort_values(TRAIN_END, ascending=False) daily_log_deaths.to_csv('daily_log_deaths.csv', index=False) for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))): new_day = str(d).split(' ')[0] last_day = dt.datetime.strptime(new_day, DATEFORMAT) - dt.timedelta(days=1) last_day = last_day.strftime(DATEFORMAT) for loc in death_deltas.Location: death_delta = death_deltas.loc[death_deltas.Location == loc, 'DELTA'].values[0] daily_log_deaths.loc[daily_log_deaths.Location == loc, new_day] = daily_log_deaths.loc[daily_log_deaths.Location == loc, last_day] + death_delta * DECAY ** i
COVID19 Global Forecasting (Week 3)
8,825,650
<compute_test_metric>
confirmed = [] fatalities = [] for id, d, loc in tqdm(test[['ForecastId', 'Date', 'Location']].values): c = to_exp(daily_log_confirmed.loc[daily_log_confirmed.Location == loc, d].values[0]) f = to_exp(daily_log_deaths.loc[daily_log_deaths.Location == loc, d].values[0]) confirmed.append(c) fatalities.append(f)
COVID19 Global Forecasting (Week 3)
8,825,650
def RMSLE(pred, actual): return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))<load_from_csv>
my_submission = test.copy() my_submission['ConfirmedCases'] = confirmed my_submission['Fatalities'] = fatalities my_submission.shape my_submission.head()
COVID19 Global Forecasting (Week 3)
8,825,650
pd.set_option('mode.chained_assignment', None) test = pd.read_csv("../input/covid19-global-forecasting-week-2/test.csv") train = pd.read_csv("../input/covid19-global-forecasting-week-2/train.csv") train['Province_State'].fillna('', inplace=True) test['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train = train.sort_values(['Country_Region','Province_State','Date']) test = test.sort_values(['Country_Region','Province_State','Date'])<feature_engineering>
my_submission[[ 'ForecastId', 'ConfirmedCases', 'Fatalities' ]].to_csv('submission.csv', index=False) print(DECAY) my_submission.head() my_submission.tail() my_submission.shape
COVID19 Global Forecasting (Week 3)
8,825,650
<compute_test_metric><EOS>
end = dt.datetime.now() print('Finished', end, (end - start).seconds, 's')
COVID19 Global Forecasting (Week 3)
8,824,671
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<compute_test_metric>
plotly.offline.init_notebook_mode() %matplotlib inline def RMSLE(pred, actual): return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))
COVID19 Global Forecasting (Week 3)
8,824,671
RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values, df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)<groupby>
warnings.filterwarnings("ignore") %matplotlib inline %config InlineBackend.figure_format = 'retina'
COVID19 Global Forecasting (Week 3)
8,824,671
df_val[df_val['Country_Region'] == country].groupby(['Date','Country_Region']).sum().reset_index()<feature_engineering>
train = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/train.csv") test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/test.csv") region_metadata = pd.read_csv("/kaggle/input/covid19-forecasting-metadata/region_metadata.csv") region_date_metadata = pd.read_csv("/kaggle/input/covid19-forecasting-metadata/region_date_metadata.csv" )
COVID19 Global Forecasting (Week 3)
8,824,671
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) for day in sorted(feature_day,reverse = True): feature_use = 'Number day from ' + str(day)+ ' case' idx = X_train[X_train[feature_use] == 0].shape[0] if(X_train[X_train[feature_use] > 0].shape[0] >= 20): break adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = ExponentialSmoothing(adjusted_y_train_confirmed, trend = 'additive' ).fit() y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = ExponentialSmoothing(adjusted_y_train_fatalities, trend = 'additive' ).fit() y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_2 = df_val.copy()<feature_engineering>
train = train.merge(test[["ForecastId", "Province_State", "Country_Region", "Date"]], on = ["Province_State", "Country_Region", "Date"], how = "left") display(train.head()) test = test[~test.Date.isin(train.Date.unique())] display(test.head()) df = pd.concat([train, test], sort = False) df.head()
COVID19 Global Forecasting (Week 3)
8,824,671
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) for day in sorted(feature_day,reverse = True): feature_use = 'Number day from ' + str(day)+ ' case' idx = X_train[X_train[feature_use] == 0].shape[0] if(X_train[X_train[feature_use] > 0].shape[0] >= 20): break adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_3 = df_val.copy()<compute_test_metric>
df["geo"] = df.Country_Region.astype(str)+ ": " + df.Province_State.astype(str) df.loc[df.Province_State.isna() , "geo"] = df[df.Province_State.isna() ].Country_Region df.ConfirmedCases = df.groupby("geo")["ConfirmedCases"].cummax() df.Fatalities = df.groupby("geo")["Fatalities"].cummax() df = df.merge(region_metadata, on = ["Country_Region", "Province_State"]) df = df.merge(region_date_metadata, on = ["Country_Region", "Province_State", "Date"], how = "left") df.continent = LabelEncoder().fit_transform(df.continent) df.Date = pd.to_datetime(df.Date, format = "%Y-%m-%d") df.sort_values(["geo", "Date"], inplace = True) df.head()
COVID19 Global Forecasting (Week 3)
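Reported cumulative series sometimes dip when corrections land; the groupby cummax in the cell above repairs that per geo. A minimal illustration on a toy frame:

import pandas as pd

df = pd.DataFrame({
    'geo': ['A', 'A', 'A', 'B', 'B'],
    'ConfirmedCases': [5, 4, 7, 1, 3],   # the dip 5 -> 4 is a reporting artifact
})
df['ConfirmedCases'] = df.groupby('geo')['ConfirmedCases'].cummax()
print(df.ConfirmedCases.tolist())  # [5, 5, 7, 1, 3]: monotone within each geo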
8,824,671
method_list = ['Poly Bayesian Ridge','Exponential Smoothing','SARIMA'] method_val = [df_val_1,df_val_2,df_val_3] for i in range(0,3): df_val = method_val[i] method_score = [method_list[i]] + [RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values, df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)] + [RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values, df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)] print(method_score)<save_to_csv>
DAYS_SINCE_CASES = [1, 10, 50, 100, 500, 1000, 5000, 10000] min_date_train = np.min(df[~df.Id.isna()].Date) max_date_train = np.max(df[~df.Id.isna()].Date) min_date_test = np.min(df[~df.ForecastId.isna()].Date) max_date_test = np.max(df[~df.ForecastId.isna()].Date) n_dates_test = len(df[~df.ForecastId.isna()].Date.unique()) print("Train date range:", str(min_date_train), " - ", str(max_date_train)) print("Test date range:", str(min_date_test), " - ", str(max_date_test)) for lag in range(1, 41): df[f"lag_{lag}_cc"] = df.groupby("geo")["ConfirmedCases"].shift(lag) df[f"lag_{lag}_ft"] = df.groupby("geo")["Fatalities"].shift(lag) df[f"lag_{lag}_rc"] = df.groupby("geo")["Recoveries"].shift(lag) for case in DAYS_SINCE_CASES: df = df.merge(df[df.ConfirmedCases >= case].groupby("geo")["Date"].min().reset_index().rename(columns = {"Date": f"case_{case}_date"}), on = "geo", how = "left")
COVID19 Global Forecasting (Week 3)
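The lag construction above relies on groupby-shift so lags never leak across regions; a compact sketch with shortened names (toy frame):

import pandas as pd

df = pd.DataFrame({
    'geo': ['A'] * 4 + ['B'] * 4,
    'cc':  [1, 2, 4, 8, 10, 20, 40, 80],   # toy confirmed counts per geo
})
for lag in range(1, 3):
    # shift within each geo: the first `lag` rows of each group become NaN
    df[f'lag_{lag}_cc'] = df.groupby('geo')['cc'].shift(lag)
print(df)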
8,824,671
df_val = df_val_3 submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']] submission.columns = ['ForecastId','ConfirmedCases','Fatalities'] submission.to_csv('submission.csv', index=False) submission<filter>
def prepare_features(df, gap): df["perc_1_ac"] =(df[f"lag_{gap}_cc"] - df[f"lag_{gap}_ft"] - df[f"lag_{gap}_rc"])/ df[f"lag_{gap}_cc"] df["perc_1_cc"] = df[f"lag_{gap}_cc"] / df.population df["diff_1_cc"] = df[f"lag_{gap}_cc"] - df[f"lag_{gap + 1}_cc"] df["diff_2_cc"] = df[f"lag_{gap + 1}_cc"] - df[f"lag_{gap + 2}_cc"] df["diff_3_cc"] = df[f"lag_{gap + 2}_cc"] - df[f"lag_{gap + 3}_cc"] df["diff_1_ft"] = df[f"lag_{gap}_ft"] - df[f"lag_{gap + 1}_ft"] df["diff_2_ft"] = df[f"lag_{gap + 1}_ft"] - df[f"lag_{gap + 2}_ft"] df["diff_3_ft"] = df[f"lag_{gap + 2}_ft"] - df[f"lag_{gap + 3}_ft"] df["diff_123_cc"] =(df[f"lag_{gap}_cc"] - df[f"lag_{gap + 3}_cc"])/ 3 df["diff_123_ft"] =(df[f"lag_{gap}_ft"] - df[f"lag_{gap + 3}_ft"])/ 3 df["diff_change_1_cc"] = df.diff_1_cc / df.diff_2_cc df["diff_change_2_cc"] = df.diff_2_cc / df.diff_3_cc df["diff_change_1_ft"] = df.diff_1_ft / df.diff_2_ft df["diff_change_2_ft"] = df.diff_2_ft / df.diff_3_ft df["diff_change_12_cc"] =(df.diff_change_1_cc + df.diff_change_2_cc)/ 2 df["diff_change_12_ft"] =(df.diff_change_1_ft + df.diff_change_2_ft)/ 2 df["change_1_cc"] = df[f"lag_{gap}_cc"] / df[f"lag_{gap + 1}_cc"] df["change_2_cc"] = df[f"lag_{gap + 1}_cc"] / df[f"lag_{gap + 2}_cc"] df["change_3_cc"] = df[f"lag_{gap + 2}_cc"] / df[f"lag_{gap + 3}_cc"] df["change_1_ft"] = df[f"lag_{gap}_ft"] / df[f"lag_{gap + 1}_ft"] df["change_2_ft"] = df[f"lag_{gap + 1}_ft"] / df[f"lag_{gap + 2}_ft"] df["change_3_ft"] = df[f"lag_{gap + 2}_ft"] / df[f"lag_{gap + 3}_ft"] df["change_123_cc"] = df[f"lag_{gap}_cc"] / df[f"lag_{gap + 3}_cc"] df["change_123_ft"] = df[f"lag_{gap}_ft"] / df[f"lag_{gap + 3}_ft"] for case in DAYS_SINCE_CASES: df[f"days_since_{case}_case"] =(df[f"case_{case}_date"] - df.Date ).astype("timedelta64[D]") df.loc[df[f"days_since_{case}_case"] < gap, f"days_since_{case}_case"] = np.nan df["country_flag"] = df.Province_State.isna().astype(int) df["density"] = df.population / df.area df["target_cc"] = np.log1p(df.ConfirmedCases)- np.log1p(df[f"lag_{gap}_cc"]) df["target_ft"] = np.log1p(df.Fatalities)- np.log1p(df[f"lag_{gap}_ft"]) features = [ f"lag_{gap}_cc", f"lag_{gap}_ft", f"lag_{gap}_rc", "perc_1_ac", "perc_1_cc", "diff_1_cc", "diff_2_cc", "diff_3_cc", "diff_1_ft", "diff_2_ft", "diff_3_ft", "diff_123_cc", "diff_123_ft", "diff_change_1_cc", "diff_change_2_cc", "diff_change_1_ft", "diff_change_2_ft", "diff_change_12_cc", "diff_change_12_ft", "change_1_cc", "change_2_cc", "change_3_cc", "change_1_ft", "change_2_ft", "change_3_ft", "change_123_cc", "change_123_ft", "days_since_1_case", "days_since_10_case", "days_since_50_case", "days_since_100_case", "days_since_500_case", "days_since_1000_case", "days_since_5000_case", "days_since_10000_case", "country_flag", "lat", "lon", "continent", "population", "area", "density", "target_cc", "target_ft" ] return df[features]
COVID19 Global Forecasting (Week 3)
8,824,671
df_worldinfor[df_worldinfor['Country'] == 'Vietnam']<compute_test_metric>
def build_predict_lgbm(df_train, df_test, gap): df_train.dropna(subset = ["target_cc", "target_ft", f"lag_{gap}_cc", f"lag_{gap}_ft"], inplace = True) target_cc = df_train.target_cc target_ft = df_train.target_ft test_lag_cc = df_test[f"lag_{gap}_cc"].values test_lag_ft = df_test[f"lag_{gap}_ft"].values df_train.drop(["target_cc", "target_ft"], axis = 1, inplace = True) df_test.drop(["target_cc", "target_ft"], axis = 1, inplace = True) categorical_features = ["continent"] dtrain_cc = lgb.Dataset(df_train, label = target_cc, categorical_feature = categorical_features) dtrain_ft = lgb.Dataset(df_train, label = target_ft, categorical_feature = categorical_features) model_cc = lgb.train(LGB_PARAMS, train_set = dtrain_cc, num_boost_round = 200) model_ft = lgb.train(LGB_PARAMS, train_set = dtrain_ft, num_boost_round = 200) y_pred_cc = np.expm1(model_cc.predict(df_test, num_iteration = 200) + np.log1p(test_lag_cc)) y_pred_ft = np.expm1(model_ft.predict(df_test, num_iteration = 200) + np.log1p(test_lag_ft)) return y_pred_cc, y_pred_ft, model_cc, model_ft
COVID19 Global Forecasting (Week 3)
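build_predict_lgbm regresses the log1p delta over the forecast gap, then adds the lagged log count back before expm1; a small self-contained sketch of that pattern with synthetic features (all names, shapes, and values invented):

import lightgbm as lgb
import numpy as np

rng = np.random.RandomState(23)
X = rng.rand(200, 5)                    # stand-in lag/ratio features
lag_cc = rng.randint(1, 1000, 200)      # lagged confirmed counts
# log-space growth target, as in the cell above: log1p(now) - log1p(lagged)
target = np.log1p(lag_cc * (1 + X[:, 0])) - np.log1p(lag_cc)

booster = lgb.train({'objective': 'regression', 'verbosity': -1},
                    lgb.Dataset(X, label=target), num_boost_round=50)
pred = np.expm1(booster.predict(X) + np.log1p(lag_cc))  # back to case counts
print(pred[:3])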
8,824,671
def RMSLE(pred, actual): return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))<load_from_csv>
def predict_mad(df_test, gap, val = False): df_test["avg_diff_cc"] = (df_test[f"lag_{gap}_cc"] - df_test[f"lag_{gap + 3}_cc"]) / 3 df_test["avg_diff_ft"] = (df_test[f"lag_{gap}_ft"] - df_test[f"lag_{gap + 3}_ft"]) / 3 if val: y_pred_cc = df_test[f"lag_{gap}_cc"] + gap * df_test.avg_diff_cc - (1 - MAD_FACTOR) * df_test.avg_diff_cc * np.sum([x for x in range(gap)]) / VAL_DAYS y_pred_ft = df_test[f"lag_{gap}_ft"] + gap * df_test.avg_diff_ft - (1 - MAD_FACTOR) * df_test.avg_diff_ft * np.sum([x for x in range(gap)]) / VAL_DAYS else: y_pred_cc = df_test[f"lag_{gap}_cc"] + gap * df_test.avg_diff_cc - (1 - MAD_FACTOR) * df_test.avg_diff_cc * np.sum([x for x in range(gap)]) / n_dates_test y_pred_ft = df_test[f"lag_{gap}_ft"] + gap * df_test.avg_diff_ft - (1 - MAD_FACTOR) * df_test.avg_diff_ft * np.sum([x for x in range(gap)]) / n_dates_test return y_pred_cc, y_pred_ft
COVID19 Global Forecasting (Week 3)
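predict_mad extrapolates the mean of the last three daily differences, damped by MAD_FACTOR over the horizon; the same arithmetic worked on assumed numbers:

MAD_FACTOR = 0.5
gap, n_dates_test = 5, 30
lag_cc, lag3_cc = 1000.0, 880.0          # counts gap and gap+3 days back

avg_diff = (lag_cc - lag3_cc) / 3        # recent average daily increase: 40.0
damping = (1 - MAD_FACTOR) * avg_diff * sum(range(gap)) / n_dates_test
y_pred = lag_cc + gap * avg_diff - damping
print(avg_diff, y_pred)                  # 40.0, ~1193.3 (linear trend, slightly damped)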
8,824,671
pd.set_option('mode.chained_assignment', None) test = pd.read_csv("../input/covid19-global-forecasting-week-2/test.csv") train = pd.read_csv("../input/covid19-global-forecasting-week-2/train.csv") train['Province_State'].fillna('', inplace=True) test['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train = train.sort_values(['Country_Region','Province_State','Date']) test = test.sort_values(['Country_Region','Province_State','Date'])<feature_engineering>
SEED = 23 LGB_PARAMS = {"objective": "regression", "num_leaves": 5, "learning_rate": 0.013, "bagging_fraction": 0.91, "feature_fraction": 0.81, "reg_alpha": 0.13, "reg_lambda": 0.13, "metric": "rmse", "seed": SEED } VAL_DAYS = 7 MAD_FACTOR = 0.5
COVID19 Global Forecasting (Week 3)
8,824,671
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) for day in sorted(feature_day,reverse = True): feature_use = 'Number day from ' + str(day)+ ' case' idx = X_train[X_train[feature_use] == 0].shape[0] if(X_train[X_train[feature_use] > 0].shape[0] >= 10): break adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) model = make_pipeline(PolynomialFeatures(2), BayesianRidge()) model.fit(adjusted_X_train,adjusted_y_train_confirmed) y_hat_confirmed = model.predict(adjusted_X_pred) model.fit(adjusted_X_train,adjusted_y_train_fatalities) y_hat_fatalities = model.predict(adjusted_X_pred) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] pred_data['ConfirmedCases_hat'] = np.concatenate(( np.repeat(0, len(pred_data)- len(y_hat_confirmed)) , y_hat_confirmed), axis = 0) pred_data['Fatalities_hat'] = np.concatenate(( np.repeat(float(0), len(pred_data)- len(y_hat_fatalities)) , y_hat_fatalities), axis = 0) pred_data_all = pred_data_all.append(pred_data) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left' )<feature_engineering>
df_train = df[~df.Id.isna()] df_test_full = df[~df.ForecastId.isna()]
COVID19 Global Forecasting (Week 3)
8,824,671
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0<compute_test_metric>
df_preds_val = [] df_preds_test = [] for date in df_test_full.Date.unique() : print("[INFO] Date:", date) if date in df_train.Date.values: df_pred_test = df_test_full.loc[df_test_full.Date == date, ["ForecastId", "ConfirmedCases", "Fatalities"]].rename(columns = {"ConfirmedCases": "ConfirmedCases_test", "Fatalities": "Fatalities_test"}) else: df_test = df_test_full[df_test_full.Date == date] gap =(pd.Timestamp(date)- max_date_train ).days if gap <= VAL_DAYS: val_date = max_date_train - pd.Timedelta(VAL_DAYS, "D")+ pd.Timedelta(gap, "D") df_build = df_train[df_train.Date < val_date] df_val = df_train[df_train.Date == val_date] X_build = prepare_features(df_build, gap) X_val = prepare_features(df_val, gap) y_val_cc_lgb, y_val_ft_lgb, _, _ = build_predict_lgbm(X_build, X_val, gap) y_val_cc_mad, y_val_ft_mad = predict_mad(df_val, gap, val = True) df_pred_val = pd.DataFrame({"Id": df_val.Id.values, "ConfirmedCases_val_lgb": y_val_cc_lgb, "Fatalities_val_lgb": y_val_ft_lgb, "ConfirmedCases_val_mad": y_val_cc_mad, "Fatalities_val_mad": y_val_ft_mad, }) df_preds_val.append(df_pred_val) X_train = prepare_features(df_train, gap) X_test = prepare_features(df_test, gap) y_test_cc_lgb, y_test_ft_lgb, model_cc, model_ft = build_predict_lgbm(X_train, X_test, gap) y_test_cc_mad, y_test_ft_mad = predict_mad(df_test, gap) if gap == 1: model_1_cc = model_cc model_1_ft = model_ft features_1 = X_train.columns.values elif gap == 14: model_14_cc = model_cc model_14_ft = model_ft features_14 = X_train.columns.values elif gap == 28: model_28_cc = model_cc model_28_ft = model_ft features_28 = X_train.columns.values df_pred_test = pd.DataFrame({"ForecastId": df_test.ForecastId.values, "ConfirmedCases_test_lgb": y_test_cc_lgb, "Fatalities_test_lgb": y_test_ft_lgb, "ConfirmedCases_test_mad": y_test_cc_mad, "Fatalities_test_mad": y_test_ft_mad, }) df_preds_test.append(df_pred_test )
COVID19 Global Forecasting (Week 3)
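The loop above trains one model per forecast gap, validating each gap at the matching offset inside the last VAL_DAYS of train; the date arithmetic in isolation (dates assumed):

import pandas as pd

max_date_train = pd.Timestamp('2020-04-07')  # assumed end of the training period
VAL_DAYS = 7
for gap in (1, 2, 3):
    # each gap validates gap days into the held-out tail of the train range
    val_date = max_date_train - pd.Timedelta(VAL_DAYS, 'D') + pd.Timedelta(gap, 'D')
    print(gap, val_date.date())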
8,824,671
RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values, df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)<compute_test_metric>
df = df.merge(pd.concat(df_preds_val, sort = False), on = "Id", how = "left") df = df.merge(pd.concat(df_preds_test, sort = False), on = "ForecastId", how = "left") rmsle_cc_lgb = np.sqrt(mean_squared_error(np.log1p(df[~df.ConfirmedCases_val_lgb.isna()].ConfirmedCases), np.log1p(df[~df.ConfirmedCases_val_lgb.isna()].ConfirmedCases_val_lgb))) rmsle_ft_lgb = np.sqrt(mean_squared_error(np.log1p(df[~df.Fatalities_val_lgb.isna()].Fatalities), np.log1p(df[~df.Fatalities_val_lgb.isna()].Fatalities_val_lgb))) rmsle_cc_mad = np.sqrt(mean_squared_error(np.log1p(df[~df.ConfirmedCases_val_mad.isna()].ConfirmedCases), np.log1p(df[~df.ConfirmedCases_val_mad.isna()].ConfirmedCases_val_mad))) rmsle_ft_mad = np.sqrt(mean_squared_error(np.log1p(df[~df.Fatalities_val_mad.isna()].Fatalities), np.log1p(df[~df.Fatalities_val_mad.isna()].Fatalities_val_mad))) print("LGB CC RMSLE Val of", VAL_DAYS, "days for CC:", round(rmsle_cc_lgb, 2)) print("LGB FT RMSLE Val of", VAL_DAYS, "days for FT:", round(rmsle_ft_lgb, 2)) print("LGB Overall RMSLE Val of", VAL_DAYS, "days:", round((rmsle_cc_lgb + rmsle_ft_lgb) / 2, 2)) print("MAD CC RMSLE Val of", VAL_DAYS, "days for CC:", round(rmsle_cc_mad, 2)) print("MAD FT RMSLE Val of", VAL_DAYS, "days for FT:", round(rmsle_ft_mad, 2)) print("MAD Overall RMSLE Val of", VAL_DAYS, "days:", round((rmsle_cc_mad + rmsle_ft_mad) / 2, 2))
COVID19 Global Forecasting (Week 3)
8,824,671
RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values )<save_to_csv>
test = df.loc[~df.ForecastId.isna(), ["ForecastId", "Country_Region", "Province_State", "Date", "ConfirmedCases_test", "ConfirmedCases_test_lgb", "ConfirmedCases_test_mad", "Fatalities_test", "Fatalities_test_lgb", "Fatalities_test_mad"]].reset_index() test["ConfirmedCases"] = 0.3 * test.ConfirmedCases_test_lgb + 0.7 * test.ConfirmedCases_test_mad test["Fatalities"] = 0.25 * test.Fatalities_test_lgb + 0.75 * test.Fatalities_test_mad test.loc[test.Country_Region.isin(["China", "US", "Diamond Princess"]), "ConfirmedCases"] = test[test.Country_Region.isin(["China", "US", "Diamond Princess"])].ConfirmedCases_test_mad.values test.loc[test.Country_Region.isin(["China", "US", "Diamond Princess"]), "Fatalities"] = test[test.Country_Region.isin(["China", "US", "Diamond Princess"])].Fatalities_test_mad.values test.loc[test.Date.isin(df_train.Date.values), "ConfirmedCases"] = test[test.Date.isin(df_train.Date.values)].ConfirmedCases_test.values test.loc[test.Date.isin(df_train.Date.values), "Fatalities"] = test[test.Date.isin(df_train.Date.values)].Fatalities_test.values sub0 = test[["ForecastId", "ConfirmedCases", "Fatalities"]] sub0.ForecastId = sub0.ForecastId.astype(int) sub0.head()
COVID19 Global Forecasting (Week 3)
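The final forecast in the cell above is a fixed-weight blend of the LightGBM and MAD predictions, with MAD-only overrides for a few regions; the blend step in isolation (toy values):

import numpy as np

cc_lgb = np.array([120.0, 300.0])        # toy LightGBM confirmed-case forecasts
cc_mad = np.array([100.0, 280.0])        # toy MAD trend forecasts
blend_cc = 0.3 * cc_lgb + 0.7 * cc_mad   # fatalities use 0.25/0.75 instead
print(blend_cc)                          # [106. 286.]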
8,824,671
submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']] submission.columns = ['ForecastId','ConfirmedCases','Fatalities'] submission.to_csv('submission.csv', index=False)<feature_engineering>
test = pd.read_csv(".. /input/covid19-global-forecasting-week-3/test.csv") train = pd.read_csv(".. /input/covid19-global-forecasting-week-3/train.csv") train['Province_State'].fillna('', inplace=True) test['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train = train.sort_values(['Country_Region','Province_State','Date']) test = test.sort_values(['Country_Region','Province_State','Date'] )
COVID19 Global Forecasting (Week 3)
8,824,671
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) for day in sorted(feature_day,reverse = True): feature_use = 'Number day from ' + str(day)+ ' case' idx = X_train[X_train[feature_use] == 0].shape[0] if(X_train[X_train[feature_use] > 0].shape[0] >= 20): break adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = ExponentialSmoothing(adjusted_y_train_confirmed, trend = 'additive' ).fit() y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = ExponentialSmoothing(adjusted_y_train_fatalities, trend = 'additive' ).fit() y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val <import_modules>
FirstDate = train.groupby('Country_Region').min()['Date'].unique()[0]
train['Last Confirm'] = train['ConfirmedCases'].shift(1)
# Repair non-monotone cumulative series: keep replacing any value that drops
# below its predecessor until no such rows remain
while train[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate)].shape[0] > 0:
    train['Last Confirm'] = train['ConfirmedCases'].shift(1)
    train['Last Fatalities'] = train['Fatalities'].shift(1)
    train.loc[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate), 'ConfirmedCases'] = train.loc[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate), 'Last Confirm']
    train.loc[(train['Last Fatalities'] > train['Fatalities']) & (train['Date'] > FirstDate), 'Fatalities'] = train.loc[(train['Last Fatalities'] > train['Fatalities']) & (train['Date'] > FirstDate), 'Last Fatalities']
train['Last Confirm'] = train['ConfirmedCases'].shift(1)
train['Last Fatalities'] = train['Fatalities'].shift(1)
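The repair loop above rescans the whole frame until it converges. A hypothetical one-pass alternative (not the kernel's own code) is a per-group running maximum, assuming rows are already sorted by Date within each area, as they are after the sort above:

# Minimal sketch: cummax within each (Country_Region, Province_State) group
# enforces non-decreasing cumulative counts in a single pass.
df = train.copy()
for col in ['ConfirmedCases', 'Fatalities']:
    df[col] = df.groupby(['Country_Region', 'Province_State'])[col].cummax()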
COVID19 Global Forecasting (Week 3)
8,824,671
def create_time_features(df): df['date'] = pd.to_datetime(df['Date'] ).values df['hour'] = df['date'].dt.hour df['dayofweek'] = df['date'].dt.dayofweek df['quarter'] = df['date'].dt.quarter df['month'] = df['date'].dt.month df['year'] = df['date'].dt.year df['dayofyear'] = df['date'].dt.dayofyear df['dayofmonth'] = df['date'].dt.day df['weekofyear'] = df['date'].dt.weekofyear return df def mean_absolute_percentage_error(y_true, y_pred): y_true, y_pred = np.array(y_true), np.array(y_pred) return np.mean(np.abs(( y_true - y_pred)/ y_true)) * 10 def wg_func(params): a = params[0] r = params[1] c = params[2] hcnb = params[3] incr = [cdata[i] if i == 0 else cdata[i] - cdata[i-1] for i,item in enumerate(cdata)] ptlt = [(1-(a/(a+item**c)) **r)*(1-hcnb)for item in ts] ptwt = [ptlt[i] if i == 0 else ptlt[i] - ptlt[i-1] for i,item in enumerate(ptlt)] return(np.sum([j * np.log(i)for i,j in zip(ptwt,incr)]) +(pop*(1-hcnb)- np.max(cdata)) * np.log(1-np.max(ptlt)) + np.log(hcnb)*pop*hcnb)*-1 def wg_func2(params): a = params[0] r = params[1] c = params[2] hcnb =.98 incr = [cdata[i] if i == 0 else cdata[i] - cdata[i-1] for i,item in enumerate(cdata)] ptlt = [(1-(a/(a+item**c)) **r)*(1-hcnb)for item in ts] ptwt = [ptlt[i] if i == 0 else ptlt[i] - ptlt[i-1] for i,item in enumerate(ptlt)] return(np.sum([j * np.log(i)for i,j in zip(ptwt,incr)]) +(pop*(1-hcnb)- np.max(cdata)) * np.log(1-np.max(ptlt)) + np.log(hcnb)*pop*hcnb)*-1 def constraint1(inputs): return inputs[0] cons =({'type': 'ineq', "fun": constraint1}) def calc_distance(lat1, lng1, lat2, lng2): R = 6373.0 lat1 = radians(lat1) lng1 = radians(lng1) lat2 = radians(lat2) lng2 = radians(lng2) dlon = lng2 - lng1 dlat = lat2 - lat1 a = sin(dlat / 2)**2 + cos(lat1)* cos(lat2)* sin(dlon / 2)**2 c = 2 * atan2(sqrt(a), sqrt(1 - a)) return R * c def fill_missing_coords(df): print(' Filling in missing lat,lng') df.loc[df.Country_Region=='Zimbabwe', 'Lat'] = 19.0154 df.loc[df.Country_Region=='Zimbabwe', 'Long']= 29.1549 df.loc[(df.Country_Region=='Angola')&(df.Province_State==''), 'Lat'] = -11.2027 df.loc[(df.Country_Region=='Angola')&(df.Province_State==''), 'Long']= 17.8739 df.loc[(df.Country_Region=='Bahamas')&(df.Province_State==''), 'Lat'] = 25.0343 df.loc[(df.Country_Region=='Bahamas')&(df.Province_State==''), 'Long']= -77.3963 df.loc[(df.Country_Region=='Belize')&(df.Province_State==''), 'Lat'] = 17.1899 df.loc[(df.Country_Region=='Belize')&(df.Province_State==''), 'Long']= -88.4976 df.loc[(df.Country_Region=='United Kingdom')&(df.Province_State==''), 'Lat'] = 55.3781 df.loc[(df.Country_Region=='United Kingdom')&(df.Province_State==''), 'Long']= -3.4360 df.loc[(df.Country_Region=='United Kingdom')&(df.Province_State=='Isle of Man'), 'Lat'] = 54.2361 df.loc[(df.Country_Region=='United Kingdom')&(df.Province_State=='Isle of Man'), 'Long']= -4.5481 df.loc[(df.Country_Region=='Cabo Verde')&(df.Province_State==''), 'Lat'] = 16.5388 df.loc[(df.Country_Region=='Cabo Verde')&(df.Province_State==''), 'Long']= -23.0418 df.loc[(df.Country_Region=='United Kingdom')&(df.Province_State=='Bermuda'), 'Lat'] = 32.3078 df.loc[(df.Country_Region=='United Kingdom')&(df.Province_State=='Bermuda'), 'Long']= -64.7505 df.loc[(df.Country_Region=='Chad')&(df.Province_State==''), 'Lat'] = 15.4542 df.loc[(df.Country_Region=='Chad')&(df.Province_State==''), 'Long']= 18.7322 df.loc[(df.Country_Region=='Uganda')&(df.Province_State==''), 'Lat'] = 1.3733 df.loc[(df.Country_Region=='Uganda')&(df.Province_State==''), 'Long']= 32.2903 
df.loc[(df.Country_Region=='Denmark')&(df.Province_State=='Greenland'), 'Lat'] = 71.7069 df.loc[(df.Country_Region=='Denmark')&(df.Province_State=='Greenland'), 'Long']= -42.6043 df.loc[(df.Country_Region=='Denmark')&(df.Province_State==''), 'Lat'] = 56.2639 df.loc[(df.Country_Region=='Denmark')&(df.Province_State==''), 'Long']= 9.5018 df.loc[(df.Country_Region=='Timor-Leste')&(df.Province_State==''), 'Lat'] = -8.8742 df.loc[(df.Country_Region=='Timor-Leste')&(df.Province_State==''), 'Long']= 125.7275 df.loc[(df.Country_Region=='Syria')&(df.Province_State==''), 'Lat'] = 34.8021 df.loc[(df.Country_Region=='Syria')&(df.Province_State==''), 'Long']= 38.9968 df.loc[(df.Country_Region=='Saint Kitts and Nevis')&(df.Province_State==''), 'Lat'] = 17.3578 df.loc[(df.Country_Region=='Saint Kitts and Nevis')&(df.Province_State==''), 'Long']= -62.7830 df.loc[(df.Country_Region=='Papua New Guinea')&(df.Province_State==''), 'Lat'] = -6.3150 df.loc[(df.Country_Region=='Papua New Guinea')&(df.Province_State==''), 'Long']= 143.9555 df.loc[(df.Country_Region=='Niger')&(df.Province_State==''), 'Lat'] = 17.6078 df.loc[(df.Country_Region=='Niger')&(df.Province_State==''), 'Long']= 8.0817 df.loc[(df.Country_Region=='El Salvador')&(df.Province_State==''), 'Lat'] = 13.7942 df.loc[(df.Country_Region=='El Salvador')&(df.Province_State==''), 'Long']= -88.8965 df.loc[(df.Country_Region=='Gambia')&(df.Province_State==''), 'Lat'] = 13.4432 df.loc[(df.Country_Region=='Gambia')&(df.Province_State==''), 'Long']= -15.3101 df.loc[(df.Country_Region=='Libya')&(df.Province_State==''), 'Lat'] = 26.3351 df.loc[(df.Country_Region=='Libya')&(df.Province_State==''), 'Long']= 17.2283 df.loc[(df.Country_Region=='Mali')&(df.Province_State==''), 'Lat'] = 17.5707 df.loc[(df.Country_Region=='Mali')&(df.Province_State==''), 'Long']= -3.9962 df.loc[(df.Country_Region=='Grenada')&(df.Province_State==''), 'Lat'] = 12.1165 df.loc[(df.Country_Region=='Grenada')&(df.Province_State==''), 'Long']= -61.6790 df.loc[(df.Country_Region=='Laos')&(df.Province_State==''), 'Lat'] = 19.8563 df.loc[(df.Country_Region=='Laos')&(df.Province_State==''), 'Long']= 102.4955 df.loc[(df.Country_Region=='Madagascar')&(df.Province_State==''), 'Lat'] = -18.7669 df.loc[(df.Country_Region=='Madagascar')&(df.Province_State==''), 'Long']= 46.8691 df.loc[(df.Country_Region=='Guinea-Bissau')&(df.Province_State==''), 'Lat'] = 11.8037 df.loc[(df.Country_Region=='Guinea-Bissau')&(df.Province_State==''), 'Long']= -15.1804 df.loc[(df.Country_Region=='Fiji')&(df.Province_State==''), 'Lat'] = -17.7134 df.loc[(df.Country_Region=='Fiji')&(df.Province_State==''), 'Long']= 178.0650 df.loc[(df.Country_Region=='Nicaragua')&(df.Province_State==''), 'Lat'] = 12.8654 df.loc[(df.Country_Region=='Nicaragua')&(df.Province_State==''), 'Long']= -85.2072 df.loc[(df.Country_Region=='Eritrea')&(df.Province_State==''), 'Lat'] = 15.1794 df.loc[(df.Country_Region=='Eritrea')&(df.Province_State==''), 'Long']= 39.7823 df.loc[(df.Country_Region=='Haiti')&(df.Province_State==''), 'Lat'] = 18.9712 df.loc[(df.Country_Region=='Haiti')&(df.Province_State==''), 'Long']= -72.2852 df.loc[(df.Country_Region=='Dominica')&(df.Province_State==''), 'Lat'] = 15.4150 df.loc[(df.Country_Region=='Dominica')&(df.Province_State==''), 'Long']= -61.3710 df.loc[(df.Country_Region=='Mozambique')&(df.Province_State==''), 'Lat'] = -18.6657 df.loc[(df.Country_Region=='Mozambique')&(df.Province_State==''), 'Long']= 35.5296 df.loc[(df.Country_Region=='Netherlands')&(df.Province_State==''), 'Lat'] = 52.1326 
df.loc[(df.Country_Region=='Netherlands')&(df.Province_State==''), 'Long']= 5.2913 df.loc[(df.Country_Region=='Netherlands')&(df.Province_State=='Sint Maarten'), 'Lat'] = 18.0425 df.loc[(df.Country_Region=='Netherlands')&(df.Province_State=='Sint Maarten'), 'Long']= -63.0548 df.loc[(df.Country_Region=='France')&(df.Province_State==''), 'Lat'] = 46.2276 df.loc[(df.Country_Region=='France')&(df.Province_State==''), 'Long']= 2.2137 df.loc[(df.Country_Region=='France')&(df.Province_State=='New Caledonia'), 'Lat'] = -20.9043 df.loc[(df.Country_Region=='France')&(df.Province_State=='New Caledonia'), 'Long']= 165.6180 df.loc[(df.Country_Region=='France')&(df.Province_State=='Martinique'), 'Lat'] = 14.6415 df.loc[(df.Country_Region=='France')&(df.Province_State=='Martinique'), 'Long']= -61.0242 print('done', datetime.now()) <load_from_csv>
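wg_func above fits a Weibull-gamma style cumulative curve by maximum likelihood: the fraction of the population confirmed by day t is (1 - (a/(a + t^c))^r)(1 - hcnb), scaled by population. A minimal sketch of evaluating that curve; the parameter values here are invented for illustration, not fitted:

import numpy as np

def wg_curve(t, a, r, c, hcnb, pop):
    # Cumulative cases implied by the fitted curve at day t
    return (1 - (a / (a + t**c))**r) * (1 - hcnb) * pop

days = np.arange(1, 61)
# Illustrative parameters only; real values come from scipy.optimize.minimize above
curve = wg_curve(days, a=1343.0, r=5.44, c=2.19, hcnb=0.98, pop=1e6)
print(curve[:5], curve[-1])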
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.arima_model import ARIMA
COVID19 Global Forecasting (Week 3)
8,824,671
PATH = '/kaggle/input/covid19-global-forecasting-week-2/' train = pd.read_csv(PATH + 'train.csv') test = pd.read_csv(PATH + 'test.csv') df_geo = pd.read_csv('.. /input/df-geo/df_geo2.csv') df_geo.Province_State.fillna('', inplace=True) dfp = pd.read_csv('/kaggle/input/population3/' + 'population.csv') dfp.columns = ['Province_State', 'Country_Region', 'pop'] print(train.shape, dfp.shape) print('Cleaning Data') train.loc[train['Province_State'].isnull() , 'Province_State'] = '' test.loc[test['Province_State'].isnull() , 'Province_State'] = '' dfp.loc[dfp['Province_State'].isnull() , 'Province_State'] = '' print('Joining Data') print(train.shape, test.shape) n = train.shape[0] train = pd.merge(train, dfp, on=['Country_Region','Province_State'], how='left') assert train.shape[0] == n n = test.shape[0] test = pd.merge(test, dfp, on=['Country_Region','Province_State'], how='left') assert test.shape[0] == n test.reset_index(drop=True, inplace=True) train.reset_index(drop=True, inplace=True) n = train.shape[0] train = pd.merge(train, df_geo, on=['Country_Region','Province_State'], how='left') assert train.shape[0] == n n = test.shape[0] test = pd.merge(test, df_geo, on=['Country_Region','Province_State'], how='left') assert test.shape[0] == n test.reset_index(drop=True, inplace=True) train.reset_index(drop=True, inplace=True) train.loc[train['pop'].isnull() ,'pop'] = 0 test.loc[test['pop'].isnull() ,'pop'] = 0 print(' Number of countries with missing Lat/Lng: ', train[train.Lat.isnull() ]['Country_Region'].value_counts().shape[0]) fill_missing_coords(train) fill_missing_coords(test) print(' Number of countries with missing Lat/Lng after fixing: ', train[train.Lat.isnull() ]['Country_Region'].value_counts().shape[0]) print(train.shape, test.shape) print(' Enriching Data') mo = train['Date'].apply(lambda x: x[5:7]) da = train['Date'].apply(lambda x: x[8:10]) train['day_from_jan_first'] =(da.apply(int) + 31*(mo=='02') + 60*(mo=='03') + 91*(mo=='04') ) mo = test['Date'].apply(lambda x: x[5:7]) da = test['Date'].apply(lambda x: x[8:10]) test['day_from_jan_first'] =(da.apply(int) + 31*(mo=='02') + 60*(mo=='03') + 91*(mo=='04') ) create_time_features(train) create_time_features(test) print('done', datetime.now() )<categorify>
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() with tqdm(total=len(train['Country_Region'].unique())) as pbar: for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : with warnings.catch_warnings() : warnings.filterwarnings("ignore") df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) feature_use = X_pred.columns[0] for i in range(X_pred.shape[1] - 1,0,-1): if(X_pred.iloc[0,i] > 0): feature_use = X_pred.columns[i] break idx = X_train[X_train[feature_use] == 0].shape[0] adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) pbar.update(1) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_3 = df_val.copy()
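Each area above gets its own SARIMAX(1,1,0) fit on the adjusted cumulative series, with forecasts appended after the last training date. A self-contained sketch of the same call on a toy series (the numbers are invented):

import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

y = np.array([1, 2, 4, 7, 12, 20, 33, 50, 74, 103], dtype=float)  # toy cumulative counts
model = SARIMAX(y, order=(1, 1, 0), measurement_error=True).fit(disp=False)
print(model.forecast(5))  # next 5 days of the cumulative series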
COVID19 Global Forecasting (Week 3)
8,824,671
country1 = 'Luxembourg' lat1 = df_geo[df_geo.Country_Region==country1]['Lat'].item() lng1 = df_geo[df_geo.Country_Region==country1]['Long'].item() country2 = 'Singapore' lat2 = df_geo[df_geo.Country_Region==country2]['Lat'].item() lng2 = df_geo[df_geo.Country_Region==country2]['Long'].item() print(' Distance between ' + country1, country2, calc_distance(lat1, lng1, lat2, lng2)) print('This should be 10,436km') print(' Label Encoding the geographic features...') df1 = train[['Country_Region','Province_State','Lat','Long']].copy() df2 = test[['Country_Region','Province_State','Lat','Long']].copy() geo = pd.concat([df1,df2], axis=0) geo = geo.groupby(['Country_Region','Province_State'])[['Lat','Long']].max().reset_index() le_country = LabelEncoder().fit(geo.Country_Region) le_state = LabelEncoder().fit(geo.Province_State) train['country'] = le_country.transform(train.Country_Region) train['state'] = le_state.transform(train.Province_State) test['country'] = le_country.transform(test.Country_Region) test['state'] = le_state.transform(test.Province_State) print('done', train.shape) train['wg'] = 0 train['wga'] = 0 train['wgr'] = 0 train['wgc'] = 0 train['wghcnb'] = 0 test['wg'] = 0 test['wga'] = 0 test['wgr'] = 0 test['wgc'] = 0 test['wghcnb'] = 0 test['SARIMAX'] = 0 test['ARIMA'] = 0 countries = ['Afghanistan'] for country in train.Country_Region.unique() : bool1 = train.Country_Region == country print(country) for state in train[bool1].Province_State.unique() : bool2 = bool1 &(train.Province_State == state)&(train.ConfirmedCases>0) pop = np.max(train[bool2]['pop']) data = train[ bool2 ].copy().reset_index() data['ts'] = data.index+1 dfj = data.iloc[0]['day_from_jan_first'] cdata = data['ConfirmedCases'].values ts = data['ts'].values boolt =(test.Country_Region==country)&(test.Province_State== state) datat = test[boolt].copy().reset_index() datat['ts'] = datat.day_from_jan_first - dfj datat.loc[datat.ts<=0,'ts'] = 1 x0 = [1343, 5.440110881178935, 2.188935325131958, 0.9897823619555628] sol = minimize(wg_func, x0, constraints = cons) a,r,c,hcnb = sol.x[0], sol.x[1], sol.x[2], sol.x[3] if np.isnan(sol.x[0]): train.loc[bool2, 'wg'] = np.NaN train.loc[bool2,'wga'] = np.NaN train.loc[bool2,'wgr'] = np.NaN train.loc[bool2,'wgc'] = np.NaN train.loc[bool2,'wghcnb'] = np.NaN test.loc[boolt, 'wg'] = np.NaN test.loc[boolt,'wga'] = np.NaN test.loc[boolt,'wgr'] = np.NaN test.loc[boolt,'wgc'] = np.NaN test.loc[boolt,'wghcnb'] = np.NaN else: datat['wg'] = datat.ts.apply(lambda x:(1-(a/(a+x**c)) **r)*pop*(1-hcnb)).values data['wg'] = data.ts.apply(lambda x:(1-(a/(a+x**c)) **r)*pop*(1-hcnb)).values train.loc[bool2, 'wg'] = data['wg'].values train.loc[bool2,'wga'] = a train.loc[bool2,'wgr'] = r train.loc[bool2,'wgc'] = c train.loc[bool2,'wghcnb'] = hcnb test.loc[boolt, 'wg'] = datat.wg.values test.loc[boolt,'wga'] = a test.loc[boolt,'wgr'] = r test.loc[boolt,'wgc'] = c test.loc[boolt,'wghcnb'] = hcnb incr = [cdata[i] if i == 0 else cdata[i] - cdata[i-1] for i,item in enumerate(cdata)] cc = np.max(data[data.day_from_jan_first <=np.min(datat.day_from_jan_first)]['ConfirmedCases']) try: model_arima = ARIMA(incr, order=(1,1,0)).fit() preds = [item if item >=0 else 0 for item in model_arima.predict(datat.ts[0].item() , datat.ts[-1:].item())] cum_sum = cc preds_cc = [] for item in preds: cum_sum = cum_sum + item preds_cc.append(cum_sum) test.loc[boolt, 'ARIMA'] = pd.Series(preds_cc ).values except: test.loc[boolt, 'ARIMA']= np.NaN try: model_SARIMAX = SARIMAX(incr, order=(1,1,0), 
seasonal_order=(1,1,0,12),enforce_stationarity=False ).fit() preds = [item if item >=0 else 0 for item in model_SARIMAX.predict(datat.ts[0].item() , datat.ts[-1:].item())] cum_sum = cc preds_cc = [] for item in preds: cum_sum = cum_sum + item preds_cc.append(cum_sum) test.loc[boolt, 'SARIMAX']= pd.Series(preds_cc ).values except: test.loc[boolt, 'SARIMAX']= np.NaN print('done', datetime.now() )<feature_engineering>
sub1 = df_val_3
submission = sub1[['ForecastId','ConfirmedCases_hat','Fatalities_hat']]
submission.columns = ['ForecastId','ConfirmedCases','Fatalities']
COVID19 Global Forecasting (Week 3)
8,824,671
# Clean the Weibull-growth predictions: clip negatives, NaNs, and infinities to 0
train.loc[train.wg < 0, 'wg'] = 0
test.loc[test.wg < 0, 'wg'] = 0
train.loc[train.wg.isnull(), 'wg'] = 0
test.loc[test.wg.isnull(), 'wg'] = 0
train.loc[np.isinf(train.wg), 'wg'] = 0
test.loc[np.isinf(test.wg), 'wg'] = 0
print('done', datetime.now())
TARGETS = ["ConfirmedCases", "Fatalities"] sub_df = sub0.copy() for t in TARGETS: sub_df[t] = np.expm1(np.log1p(submission[t].values)*0.4 + np.log1p(sub0[t].values)*0.6) sub_df.to_csv("submission.csv", index=False )
COVID19 Global Forecasting (Week 3)
8,824,671
<feature_engineering><EOS>
sub0.isna().sum()
COVID19 Global Forecasting (Week 3)
8,824,956
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<feature_engineering>
path = '../input/covid19-global-forecasting-week-3/'
train = pd.read_csv(path + 'train.csv')
test = pd.read_csv(path + 'test.csv')
sub = pd.read_csv(path + 'submission.csv')

train['Date'] = train['Date'].apply(lambda x: (datetime.datetime.strptime(x, '%Y-%m-%d')))
test['Date'] = test['Date'].apply(lambda x: (datetime.datetime.strptime(x, '%Y-%m-%d')))

train['days'] = (train['Date'].dt.date - train['Date'].dt.date.min()).dt.days
test['days'] = (test['Date'].dt.date - train['Date'].dt.date.min()).dt.days

train.loc[train['Province_State'].isnull(), 'Province_State'] = 'N/A'
test.loc[test['Province_State'].isnull(), 'Province_State'] = 'N/A'

train['Area'] = train['Country_Region'] + '_' + train['Province_State']
test['Area'] = test['Country_Region'] + '_' + test['Province_State']

print(train['Date'].max())
print(test['Date'].min())
print(train['days'].max())

N_AREAS = train['Area'].nunique()
AREAS = np.sort(train['Area'].unique())
TRAIN_N = 77

print(train[train['days'] < TRAIN_N]['Date'].max())
print(train[train['days'] >= TRAIN_N]['Date'].min())
print(train[train['days'] >= TRAIN_N]['Date'].max())

train.head()
test_orig = test.copy()
COVID19 Global Forecasting (Week 3)
8,824,956
train.loc[train.wg2 < 0, 'wg2'] = 0
test.loc[test.wg2 < 0, 'wg2'] = 0
train.loc[train.wg2.isnull(), 'wg2'] = 0
test.loc[test.wg2.isnull(), 'wg2'] = 0
train.loc[np.isinf(train.wg2), 'wg2'] = 0
test.loc[np.isinf(test.wg2), 'wg2'] = 0

train.loc[train.wg3 < 0, 'wg3'] = 0
test.loc[test.wg3 < 0, 'wg3'] = 0
train.loc[train.wg3.isnull(), 'wg3'] = 0
test.loc[test.wg3.isnull(), 'wg3'] = 0
train.loc[np.isinf(train.wg3), 'wg3'] = 0
test.loc[np.isinf(test.wg3), 'wg3'] = 0
print('done', datetime.now())
train_p_c_raw = train.pivot(index='Area', columns='days', values='ConfirmedCases').sort_index()
train_p_f_raw = train.pivot(index='Area', columns='days', values='Fatalities').sort_index()

train_p_c = np.maximum.accumulate(train_p_c_raw, axis=1)
train_p_f = np.maximum.accumulate(train_p_f_raw, axis=1)
f_rate = (train_p_f / train_p_c).fillna(0)

X_c = np.log(1 + train_p_c.values)[:, :TRAIN_N]
X_f = train_p_f.values[:, :TRAIN_N]
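np.maximum.accumulate above forces each area's cumulative series to be non-decreasing before the log transform, so reporting corrections that temporarily lower a count are clamped. A one-line illustration on a toy row:

import numpy as np

row = np.array([0, 3, 5, 4, 9, 8])    # toy series with two downward corrections
print(np.maximum.accumulate(row))     # [0 3 5 5 9 9]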
COVID19 Global Forecasting (Week 3)
8,824,956
print(mean_squared_log_error(train.ConfirmedCases, train.wg))
print(mean_squared_log_error(train.ConfirmedCases, train.wg2))
print(mean_squared_log_error(train.ConfirmedCases, train.wg3))
def eval1(y, p): val_len = y.shape[1] - TRAIN_N return np.sqrt(mean_squared_error(y[:, TRAIN_N:TRAIN_N+val_len].flatten() , p[:, TRAIN_N:TRAIN_N+val_len].flatten())) def run_c(params, X, test_size=50): gr_base = [] gr_base_factor = [] x_min = np.ma.MaskedArray(X, X<1) x_min = x_min.argmin(axis=1) for i in range(X.shape[0]): temp = X[i,:] threshold = np.log(1+params['min cases for growth rate']) num_days = params['last N days'] if(temp > threshold ).sum() > num_days: d = np.diff(temp[temp > threshold])[-num_days:] w = np.arange(len(d)) +1 w = w**5 w = w / np.sum(w) gr_base.append(np.clip(np.average(d, weights=w), 0, params['growth rate max'])) d2 = np.diff(d) w = np.arange(len(d2)) +1 w = w**10 w = w / np.sum(w) gr_base_factor.append(np.clip(np.average(d2, weights=w), -0.5, params["growth rate factor max"])) else: gr_base.append(params['growth rate default']) gr_base_factor.append(params['growth rate factor']) gr_base = np.array(gr_base) gr_base_factor = np.array(gr_base_factor) preds = X.copy() for i in range(test_size): delta = np.clip(preds[:, -1], np.log(2), None)+ gr_base *(1 + params['growth rate factor']*(1 + params['growth rate factor factor'])**(i)) **(np.log1p(i)) preds = np.hstack(( preds, delta.reshape(-1,1))) return preds params = { "min cases for growth rate": 0, "last N days": 15, "growth rate default": 0.10, "growth rate max": 0.2, "growth rate factor max": -0.1, "growth rate factor": -0.3, "growth rate factor factor": 0.01, } x = train_p_c preds_c = run_c(params, np.log(1+x.values)[:,:TRAIN_N])
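run_c above estimates a per-area growth rate as a heavily recency-weighted average of the last N daily log-increments (weights w = (1..N)^5, normalized), then decays that rate over the forecast horizon. A small sketch of just the weighting step, on invented increments:

import numpy as np

d = np.array([0.20, 0.18, 0.15, 0.12, 0.10])  # invented recent log-growth increments
w = (np.arange(len(d)) + 1) ** 5
w = w / w.sum()
gr = np.clip(np.average(d, weights=w), 0, 0.2)
print(w.round(3), gr)  # almost all weight falls on the latest days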
COVID19 Global Forecasting (Week 3)
8,824,956
for country in train.Country_Region.unique() : bool1 = train.Country_Region == country for state in train[bool1].Province_State.unique() : bool2 = bool1 &(train.Province_State == state) data = train[ bool2 ].copy().reset_index() wg1 = np.round(mean_squared_log_error(data.ConfirmedCases, data.wg),2) wg2 = np.round(mean_squared_log_error(data.ConfirmedCases, data.wg2),2) wg3 = np.round(mean_squared_log_error(data.ConfirmedCases, data.wg3),2) boolt =(test.Country_Region==country)&(test.Province_State== state) datat = test[boolt].copy().reset_index() if wg1<wg2 and wg1<wg3: data['best'] = data.wg datat['best'] = datat.wg elif wg2<wg1 and wg2 < wg3: data['best'] = data.wg2 datat['best'] = datat.wg2 else: data['best'] = data.wg3 datat['best'] = datat.wg3 train.loc[bool2, 'wg'] = data['best'].values test.loc[boolt, 'wg'] = datat['best'].values print('done', datetime.now() )<compute_test_metric>
# Hold small Chinese provinces (fewer than 31 cases at the end of training) flat
# over the forecast horizon, since their outbreaks had plateaued
for i in range(N_AREAS):
    if 'China' in AREAS[i] and preds_c[i, TRAIN_N-1] < np.log(31):
        preds_c[i, TRAIN_N:] = preds_c[i, TRAIN_N-1]
COVID19 Global Forecasting (Week 3)
8,824,956
print(mean_squared_log_error(train.ConfirmedCases, train.wg))
train.drop('wg2', inplace=True, axis=1)
train.drop('wg3', inplace=True, axis=1)
test.drop('wg2', inplace=True, axis=1)
test.drop('wg3', inplace=True, axis=1)
print('done', datetime.now())
def lin_w(sz): res = np.linspace(0, 1, sz+1, endpoint=False)[1:] return np.append(res, np.append([1], res[::-1])) def run_f(params, X_c, X_f, X_f_r, test_size=50): X_f_r = np.array(np.ma.mean(np.ma.masked_outside(X_f_r, 0.03, 0.5)[:,:], axis=1)) X_f_r = np.clip(X_f_r, params['fatality_rate_lower'], params['fatality_rate_upper']) X_c = np.clip(np.exp(X_c)-1, 0, None) preds = X_f.copy() train_size = X_f.shape[1] - 1 for i in range(test_size): t_lag = train_size+i-params['length'] t_wsize = 5 d = np.diff(X_c, axis=1)[:, t_lag-t_wsize:t_lag+1+t_wsize] delta = np.average(d, axis=1) delta = params['absolute growth'] + delta * X_f_r preds = np.hstack(( preds, preds[:, -1].reshape(-1,1)+ delta.reshape(-1,1))) return preds params = { "length": 7, "absolute growth": 0.02, "fatality_rate_lower": 0.02, "fatality_rate_upper": 0.3, } preds_f_1 = run_f(params, preds_c, X_f, f_rate.values[:,:TRAIN_N]) preds_f_1 = np.log(1+preds_f_1 )
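run_f above turns observed fatality/case ratios into a per-area rate by masking implausible values outside [0.03, 0.5] before averaging, then clipping to the configured bounds, and applies that rate to lagged case increments. A minimal sketch of the masking step with invented ratios:

import numpy as np

rates = np.array([0.00, 0.04, 0.08, 0.90, 0.05])   # invented daily fatality/case ratios
masked = np.ma.masked_outside(rates, 0.03, 0.5)    # drops 0.00 and 0.90
rate = np.clip(np.ma.mean(masked), 0.02, 0.3)
print(rate)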
COVID19 Global Forecasting (Week 3)
8,824,956
train_data = train.copy() train_df = train_data train_df['area'] = [str(i)+str(' - ')+str(j)for i,j in zip(train_data['Country_Region'], train_data['Province_State'])] train_df['Date'] = pd.to_datetime(train_df['Date']) full_data = train_df today = full_data['Date'].max() +timedelta(days=1) def get_country_data(train_df, area, metric): country_data = train_df[train_df['area']==area] country_data = country_data.drop(['Id','Province_State', 'Country_Region', 'Lat','Long'], axis=1) country_data = pd.pivot_table(country_data, values=['ConfirmedCases','Fatalities'], index=['Date'], aggfunc=np.sum) country_data = country_data[country_data[metric]!=0] return country_data area_info = pd.DataFrame(columns=['area', 'cases_start_date', 'deaths_start_date', 'init_ConfirmedCases', 'init_Fatalities']) for i in range(len(train_df['area'].unique())) : area = train_df['area'].unique() [i] area_cases_data = get_country_data(train_df, area, 'ConfirmedCases') area_deaths_data = get_country_data(train_df, area, 'Fatalities') cases_start_date = area_cases_data.index.min() deaths_start_date = area_deaths_data.index.min() if len(area_cases_data)> 0: confirmed_cases = max(area_cases_data['ConfirmedCases']) else: confirmed_cases = 0 if len(area_deaths_data)> 0: fatalities = max(area_deaths_data['Fatalities']) else: fatalities = 0 area_info.loc[i] = [area, cases_start_date, deaths_start_date, confirmed_cases, fatalities] area_info = area_info.fillna(pd.to_datetime(today)) area_info['init_cases_day_no'] = pd.to_datetime(today)-area_info['cases_start_date'] area_info['init_cases_day_no'] = area_info['init_cases_day_no'].dt.days.fillna(0 ).astype(int) area_info['init_deaths_day_no'] = pd.to_datetime(today)-area_info['deaths_start_date'] area_info['init_deaths_day_no'] = area_info['init_deaths_day_no'].dt.days.fillna(0 ).astype(int) area_info.head() def log_curve(x, k, x_0, ymax): return ymax /(1 + np.exp(-k*(x-x_0))) def log_fit(train_df, area, metric): area_data = get_country_data(train_df, area, metric) x_data = range(len(area_data.index)) y_data = area_data[metric] if len(y_data)< 5: estimated_k = -1 estimated_x_0 = -1 ymax = -1 elif max(y_data)== 0: estimated_k = -1 estimated_x_0 = -1 ymax = -1 else: try: popt, pcov = curve_fit(log_curve, x_data, y_data, bounds=([0,0,0],np.inf), p0=[0.3,100,10000], maxfev=1000000) estimated_k, estimated_x_0, ymax = popt except RuntimeError: print(area) print("Error - curve_fit failed") estimated_k = -1 estimated_x_0 = -1 ymax = -1 estimated_parameters = pd.DataFrame(np.array([[area, estimated_k, estimated_x_0, ymax]]), columns=['area', 'k', 'x_0', 'ymax']) return estimated_parameters def get_parameters(metric): parameters = pd.DataFrame(columns=['area', 'k', 'x_0', 'ymax'], dtype=np.float) for area in train_df['area'].unique() : estimated_parameters = log_fit(train_df, area, metric) parameters = parameters.append(estimated_parameters) parameters['k'] = pd.to_numeric(parameters['k'], downcast="float") parameters['x_0'] = pd.to_numeric(parameters['x_0'], downcast="float") parameters['ymax'] = pd.to_numeric(parameters['ymax'], downcast="float") parameters = parameters.replace({'k': {-1: parameters[parameters['ymax']>0].median() [0]}, 'x_0': {-1: parameters[parameters['ymax']>0].median() [1]}, 'ymax': {-1: parameters[parameters['ymax']>0].median() [2]}}) return parameters cases_parameters = get_parameters('ConfirmedCases') cases_parameters.head(20) deaths_parameters = get_parameters('Fatalities') deaths_parameters.head(20) fit_df = area_info.merge(cases_parameters, on='area', 
how='left') fit_df = fit_df.rename(columns={"k": "cases_k", "x_0": "cases_x_0", "ymax": "cases_ymax"}) fit_df = fit_df.merge(deaths_parameters, on='area', how='left') fit_df = fit_df.rename(columns={"k": "deaths_k", "x_0": "deaths_x_0", "ymax": "deaths_ymax"}) fit_df['init_ConfirmedCases_fit'] = log_curve(fit_df['init_cases_day_no'], fit_df['cases_k'], fit_df['cases_x_0'], fit_df['cases_ymax']) fit_df['init_Fatalities_fit'] = log_curve(fit_df['init_deaths_day_no'], fit_df['deaths_k'], fit_df['deaths_x_0'], fit_df['deaths_ymax']) fit_df['ConfirmedCases_error'] = fit_df['init_ConfirmedCases']-fit_df['init_ConfirmedCases_fit'] fit_df['Fatalities_error'] = fit_df['init_Fatalities']-fit_df['init_Fatalities_fit'] fit_df.head() test_data = test.copy() test_df = test_data test_df['area'] = [str(i)+str(' - ')+str(j)for i,j in zip(test_data['Country_Region'], test_data['Province_State'])] test_df = test_df.merge(fit_df, on='area', how='left') test_df['Date'] = pd.to_datetime(test_df['Date']) test_df['cases_start_date'] = pd.to_datetime(test_df['cases_start_date']) test_df['deaths_start_date'] = pd.to_datetime(test_df['deaths_start_date']) test_df['cases_day_no'] = test_df['Date']-test_df['cases_start_date'] test_df['cases_day_no'] = test_df['cases_day_no'].dt.days.fillna(0 ).astype(int) test_df['deaths_day_no'] = test_df['Date']-test_df['deaths_start_date'] test_df['deaths_day_no'] = test_df['deaths_day_no'].dt.days.fillna(0 ).astype(int) test_df['ConfirmedCases_fit'] = log_curve(test_df['cases_day_no'], test_df['cases_k'], test_df['cases_x_0'], test_df['cases_ymax']) test_df['Fatalities_fit'] = log_curve(test_df['deaths_day_no'], test_df['deaths_k'], test_df['deaths_x_0'], test_df['deaths_ymax']) test_df['ConfirmedCases_pred'] = round(test_df['ConfirmedCases_fit']+test_df['ConfirmedCases_error']) test_df['Fatalities_pred'] = round(test_df['Fatalities_fit']+test_df['Fatalities_error']) test_df.head() train_df = train.copy() train_df['area'] = [str(i)+str(' - ')+str(j)for i,j in zip(train_df['Country_Region'], train_df['Province_State'])] train_df = train_df.merge(fit_df, on='area', how='left') train_df['Date'] = pd.to_datetime(train_df['Date']) train_df['cases_start_date'] = pd.to_datetime(train_df['cases_start_date']) train_df['deaths_start_date'] = pd.to_datetime(train_df['deaths_start_date']) train_df['cases_day_no'] = train_df['Date']-train_df['cases_start_date'] train_df['cases_day_no'] = train_df['cases_day_no'].dt.days.fillna(0 ).astype(int) train_df['deaths_day_no'] = train_df['Date']-train_df['deaths_start_date'] train_df['deaths_day_no'] = train_df['deaths_day_no'].dt.days.fillna(0 ).astype(int) train_df['ConfirmedCases_fit'] = log_curve(train_df['cases_day_no'], train_df['cases_k'], train_df['cases_x_0'], train_df['cases_ymax']) train_df['Fatalities_fit'] = log_curve(train_df['deaths_day_no'], train_df['deaths_k'], train_df['deaths_x_0'], train_df['deaths_ymax']) train_df['ConfirmedCases_pred'] = round(train_df['ConfirmedCases_fit']+train_df['ConfirmedCases_error']) train_df['Fatalities_pred'] = round(train_df['Fatalities_fit']+train_df['Fatalities_error']) train_df.head() print('done', datetime.now() )<train_model>
class ZDatasetF(Dataset): def __init__(self, X_c, X_f=None, hist_len=10): self.X_c = X_c self.X_f = X_f self.hist_len = hist_len self.is_test = X_f is None def __len__(self): return self.X_c.shape[1] def __getitem__(self, idx): if self.is_test: return {'x_c':self.X_c[:, idx-self.hist_len:idx]} else: return {'x_c':self.X_c[:, idx-self.hist_len:idx], 'x_f':self.X_f[:, idx-1], 'y':np.log(1+self.X_f[:, idx])} class PrLayer2(nn.Module): def __init__(self, in_features1, in_features2): super(PrLayer2, self ).__init__() self.weight0 = Parameter(torch.Tensor(1, 1, in_features2)) self.weight1 = Parameter(torch.Tensor(1, in_features1, in_features2)) self.reset_parameters() def reset_parameters(self): init.kaiming_uniform_(self.weight0, a=math.sqrt(5)) init.kaiming_uniform_(self.weight1, a=math.sqrt(5)) def forward(self, input): return input * torch.sigmoid(self.weight0 + self.weight1) class ZModelF(nn.Module): def __init__(self, hist_len): super(ZModelF, self ).__init__() self.l_conv = PrLayer2(len(X_c),hist_len-1) def forward(self, x_c, x_f): x = x_c[:,:,1:] - x_c[:,:,:-1] res = torch.sum(self.l_conv(x), 2) return {'preds': torch.log(1 + x_f + res)} class DummySampler(torch.utils.data.sampler.Sampler): def __init__(self, idx): self.idx = idx def __iter__(self): return iter(self.idx) def __len__(self): return len(self.idx) def _smooth_l1_loss(target): t = torch.abs(target) t = torch.where(t < 1, 0.5 * t ** 2, t - 0.5) return torch.mean(t) n_epochs = 5000 lr = 0.18 bag_size = 4 device = 'cpu' hist_len = 14 loss_func = torch.nn.MSELoss() reg_loss_func = _smooth_l1_loss reg_factor = 0.035 train_dataset = ZDatasetF(np.exp(X_c)-1, X_f, hist_len=hist_len) test_dataset = ZDatasetF(np.exp(preds_c)-1, hist_len=hist_len) trn_idx = np.arange(hist_len+1, len(train_dataset)) train_sampler = torch.utils.data.sampler.SubsetRandomSampler(trn_idx) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=len(trn_idx), sampler=train_sampler, num_workers=0, pin_memory=True) test_idx = np.arange(TRAIN_N, len(test_dataset)) test_sampler = DummySampler(test_idx) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, sampler=test_sampler, num_workers=0, pin_memory=True) gradient_accumulation = 1 preds_f = 0 for m_i in range(bag_size): model_f = ZModelF(hist_len=hist_len ).to(device) optimizer_f = torch.optim.Adam(model_f.parameters() , lr=lr) model_f.train() start_time = time.time() for epoch in range(n_epochs): s = time.time() avg_train_loss = 0 optimizer_f.zero_grad() for idx, data in enumerate(train_loader): X1 = data['x_c'].to(device ).float() X2 = data['x_f'].to(device ).float() y = data['y'].to(device ).float() preds = model_f(X1, X2)['preds'].float() cond = X2 > np.log(10) preds = preds[cond] y = y[cond] loss = loss_func(preds, y) loss += reg_factor * reg_loss_func(model_f.l_conv.weight1) avg_train_loss += loss / len(train_loader) loss.backward() if(idx+1)% gradient_accumulation == 0 or idx == len(train_loader)- 1: optimizer_f.step() optimizer_f.zero_grad() if False: model_f.eval() preds_f_delta = train_p_f.values[:,:TRAIN_N] for idx, data in enumerate(test_loader): X1 = data['x_c'].to(device ).float() temp = model_f(X1, torch.Tensor(preds_f_delta[:,-1] ).unsqueeze(0)) ['preds'] temp = np.exp(temp.detach().cpu().numpy().reshape(-1,1)) - 1 preds_f_delta = np.hstack(( preds_f_delta, temp)) preds_f_delta = np.log(1 + preds_f_delta) val_len = train_p_c.values.shape[1] - TRAIN_N m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, TRAIN_N:TRAIN_N+val_len] ).flatten() , \ 
preds_f_delta[:, TRAIN_N:TRAIN_N+val_len].flatten())) print(f"{epoch:2} train_loss {avg_train_loss:<8.4f} val_loss {m2:8.5f} {time.time() -s:<2.2f}") model_f.train() model_f.eval() preds_f_delta = train_p_f.values[:,:TRAIN_N] for idx, data in enumerate(test_loader): X1 = data['x_c'].to(device ).float() temp = model_f(X1, torch.Tensor(preds_f_delta[:,-1] ).unsqueeze(0)) ['preds'] temp = np.exp(temp.detach().cpu().numpy().reshape(-1,1)) - 1 preds_f_delta = np.hstack(( preds_f_delta, temp)) preds_f += preds_f_delta / bag_size preds_f_2 = np.log(1 + preds_f) print("Done")
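PrLayer2 above learns, per area and per lag, a sigmoid gate in (0,1) that decides what fraction of each recent daily case increment converts into new fatalities; ZModelF sums the gated increments and adds them to the last fatality count in log1p space. A toy sketch of the gating idea (shapes and values invented, not the trained weights):

import torch

x = torch.tensor([[[10., 20., 30.]]])   # one area, three daily case increments
w0 = torch.zeros(1, 1, 3)               # shared per-lag bias (invented)
w1 = torch.full((1, 1, 3), -3.0)        # per-area weights (invented)
gated = x * torch.sigmoid(w0 + w1)      # ~4.7% of each increment converts
print(gated.sum(2))                     # expected new fatalities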
COVID19 Global Forecasting (Week 3)
8,824,956
train['y_hat_log'] = test_df.ConfirmedCases_fit
test['y_hat_log'] = test_df.ConfirmedCases_pred
# Blend the heuristic fatality forecast and the learned one with 1:2 weights
preds_f = np.average([preds_f_1, preds_f_2], axis=0, weights=[1, 2])
COVID19 Global Forecasting (Week 3)
8,824,956
dropcols = ['Date', 'date', 'ConfirmedCases', 'Id', 'ForecastId', 'Fatalities'] dropcols = dropcols + ['Country_Region','Province_State'] dropcols = dropcols + ['eg', 'egr', 'mae_eg', 'wga', 'wgr', 'wgc','wghcnb', 'mae_wg', 'SARIMAX', 'ARIMA', 'cc_es',] print(' Modeling...') features = [f for f in train.columns if f not in dropcols + ['shift4w', 'shift6w', 'dist' , 'dayofyear','year','quarter','hour','month','dayofmonth','dayofweek','weekofyear', 'Lat', 'Long']] print(features) X_train = train[features].copy() X_test = test[features].copy() X_train.reset_index(drop=True, inplace=True) X_test.reset_index(drop=True, inplace=True) y_train = train["Fatalities"] y_train_cc = train["ConfirmedCases"] print('done', datetime.now() )<choose_model_class>
if False: val_len = train_p_c.values.shape[1] - TRAIN_N for i in range(val_len): d = i + TRAIN_N m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c_raw.values[:, d]), preds_c[:, d])) m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, d]), preds_f[:, d])) print(f"{d}: {(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]") print() m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c_raw.values[:, TRAIN_N:TRAIN_N+val_len] ).flatten() , preds_c[:, TRAIN_N:TRAIN_N+val_len].flatten())) m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, TRAIN_N:TRAIN_N+val_len] ).flatten() , preds_f[:, TRAIN_N:TRAIN_N+val_len].flatten())) print(f"{(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]" )
COVID19 Global Forecasting (Week 3)
8,824,956
isTraining = False params_xgb = {} params_xgb['n_estimators'] = 1100 params_xgb['max_depth'] = 10 params_xgb['seed'] = 2020 params_xgb['colsample_bylevel'] = 1 params_xgb['colsample_bytree'] = 1 params_xgb['learning_rate'] = 0.3 params_xgb['reg_alpha'] = 0 params_xgb['reg_lambda'] = 1 params_xgb['subsample'] = 1 if isTraining: X_TRAIN = X_train[features].values kf = KFold(n_splits = 5, shuffle = True, random_state=2020) acc = [] for tr_idx, val_idx in kf.split(X_TRAIN, y_train_cc): X_tr, X_vl = X_TRAIN[tr_idx], X_TRAIN[val_idx, :] y_tr, y_vl = y_train_cc[tr_idx], y_train_cc[val_idx] print(X_tr.shape) model_xgb_cc = xgb.XGBRegressor(**params_xgb) model_xgb_cc.fit(X_tr, y_tr, verbose=True) y_hat = model_xgb_cc.predict(X_vl) print('xgb mae :', mean_absolute_error(y_vl, y_hat)) acc.append(mean_absolute_error(y_vl, y_hat)) print('done', np.mean(acc)) print('done', datetime.now() )<prepare_x_and_y>
EU_COUNTRIES = ['Austria', 'Italy', 'Belgium', 'Latvia', 'Bulgaria', 'Lithuania', 'Croatia', 'Luxembourg', 'Cyprus', 'Malta', 'Czechia', 'Netherlands', 'Denmark', 'Poland', 'Estonia', 'Portugal', 'Finland', 'Romania', 'France', 'Slovakia', 'Germany', 'Slovenia', 'Greece', 'Spain', 'Hungary', 'Sweden', 'Ireland'] EUROPE_OTHER = ['Albania', 'Andorra', 'Bosnia and Herzegovina', 'Liechtenstein', 'Monaco', 'Montenegro', 'North Macedonia', 'Norway', 'San Marino', 'Serbia', 'Switzerland', 'Turkey', 'United Kingdom'] AFRICA = ['Algeria', 'Burkina Faso', 'Cameroon', 'Congo(Kinshasa)', "Cote d'Ivoire", 'Egypt', 'Ghana', 'Kenya', 'Madagascar', 'Morocco', 'Nigeria', 'Rwanda', 'Senegal', 'South Africa', 'Togo', 'Tunisia', 'Uganda', 'Zambia'] NORTH_AMERICA = ['US', 'Canada', 'Mexico'] SOUTH_AMERICA = ['Argentina', 'Bolivia', 'Brazil', 'Chile', 'Colombia', 'Ecuador', 'Paraguay', 'Peru', 'Uruguay', 'Venezuela'] MIDDLE_EAST = ['Afghanistan', 'Bahrain', 'Iran', 'Iraq', 'Israel', 'Jordan', 'Kuwait', 'Lebanon', 'Oman', 'Qatar', 'Saudi Arabia', 'United Arab Emirates'] ASIA = ['Bangladesh', 'Brunei', 'Cambodia', 'India', 'Indonesia', 'Japan', 'Kazakhstan', 'Korea, South', 'Kyrgyzstan', 'Malaysia', 'Pakistan', 'Singapore', 'Sri Lanka', 'Taiwan*', 'Thailand', 'Uzbekistan', 'Vietnam']
COVID19 Global Forecasting (Week 3)
8,824,956
dropcols = ['Date', 'date', 'ConfirmedCases', 'Id', 'ForecastId', 'Fatalities'] dropcols = dropcols + ['Country_Region','Province_State'] dropcols = dropcols + ['eg', 'egr', 'mae_eg', 'wg', 'wga', 'wgr', 'wg_xgb', 'wgc','wghcnb', 'mae_wg', 'SARIMAX', 'ARIMA', 'cc_es',] print(' Modeling...') features = [f for f in train.columns if f not in dropcols + ['shift4w', 'shift6w', 'dist' , 'dayofyear','year','quarter','hour','month','dayofmonth','dayofweek','weekofyear']] print(features) X_train = train[features].copy() X_test = test[features].copy() X_train.reset_index(drop=True, inplace=True) X_test.reset_index(drop=True, inplace=True) X_train.head() print('done', datetime.now() )<init_hyperparams>
temp = pd.DataFrame(np.clip(np.exp(preds_c) - 1, 0, None))
temp['Area'] = AREAS
temp = temp.melt(id_vars='Area', var_name='days', value_name="ConfirmedCases")
test = test_orig.merge(temp, how='left', left_on=['Area', 'days'], right_on=['Area', 'days'])

temp = pd.DataFrame(np.clip(np.exp(preds_f) - 1, 0, None))
temp['Area'] = AREAS
temp = temp.melt(id_vars='Area', var_name='days', value_name="Fatalities")
test = test.merge(temp, how='left', left_on=['Area', 'days'], right_on=['Area', 'days'])

test.head()
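The wide-to-long step above uses DataFrame.melt so each (Area, days) prediction becomes one row that can be merged back onto the test frame. A tiny example with invented numbers:

import pandas as pd

wide = pd.DataFrame([[1.0, 2.0], [5.0, 8.0]])
wide['Area'] = ['A_X', 'B_Y']
long = wide.melt(id_vars='Area', var_name='days', value_name='ConfirmedCases')
print(long)  # one row per (Area, days) pair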
COVID19 Global Forecasting (Week 3)
8,824,956
params_xgb = {} params_xgb['n_estimators'] = 1100 params_xgb['max_depth'] = 10 params_xgb['seed'] = 2020 params_xgb['colsample_bylevel'] = 1 params_xgb['colsample_bytree'] = 1 params_xgb['learning_rate'] = 0.3 params_xgb['reg_alpha'] = 0 params_xgb['reg_lambda'] = 1 params_xgb['subsample'] = 1 isTraining = True X_train.reset_index(drop=True, inplace=True) if isTraining: print('Evaluating model...') booll = X_train.day_from_jan_first < 86 X_tr, X_vl = X_train[booll][features ], X_train[~booll][features] y_tr, y_vl = train[booll]['ConfirmedCases'], train[~booll]['ConfirmedCases'] model_xgb_cc = xgb.XGBRegressor(**params_xgb) model_xgb_cc.fit(X_tr, y_tr, verbose=True) y_hat = model_xgb_cc.predict(X_vl) y_hat[y_hat<0] = 0 print('xgb mae :', mean_absolute_error(y_vl, y_hat), mean_squared_log_error(y_vl, y_hat), X_tr.shape, X_vl.shape) print('done', datetime.now() )<train_model>
test.to_csv("submission.csv", index=False, columns=["ForecastId", "ConfirmedCases", "Fatalities"] )
COVID19 Global Forecasting (Week 3)
8,824,956
y_train = train["Fatalities"] y_train_cc = train["ConfirmedCases"] params_xgb = {} params_xgb['n_estimators'] = 1100 params_xgb['max_depth'] = 10 params_xgb['seed'] = 2020 params_xgb['colsample_bylevel'] = 1 params_xgb['colsample_bytree'] = 1 params_xgb['learning_rate'] = 0.3 params_xgb['reg_alpha'] = 0 params_xgb['reg_lambda'] = 1 params_xgb['subsample'] = 1 model_xgb_cc = xgb.XGBRegressor(**params_xgb ).fit(X_train[features], y_train_cc, verbose=True) y_hat_xgb_c = model_xgb_cc.predict(X_test[features]) print('done', datetime.now() )<train_model>
test.days.nunique()
COVID19 Global Forecasting (Week 3)
8,824,956
params_xgb = {}
params_xgb['n_estimators'] = 1100
params_xgb['max_depth'] = 10
params_xgb['seed'] = 2020
params_xgb['colsample_bylevel'] = 1
params_xgb['colsample_bytree'] = 1
params_xgb['learning_rate'] = 0.300000012
params_xgb['reg_alpha'] = 0
params_xgb['reg_lambda'] = 1
params_xgb['subsample'] = 1

model_xgb_f = xgb.XGBRegressor(**params_xgb).fit(X_train[features], y_train, verbose=True)
y_hat_xgb_f = model_xgb_f.predict(X_test[features])
print(np.mean(y_hat_xgb_f))
print('done', datetime.now())
for i, rec in test.groupby('Area').last().sort_values("ConfirmedCases", ascending=False).iterrows():
    print(f"{rec['ConfirmedCases']:10.1f} {rec['Fatalities']:10.1f} {rec['Country_Region']}, {rec['Province_State']}")
COVID19 Global Forecasting (Week 3)
8,825,111
test['y_hat_xgb_c'] = y_hat_xgb_c test['y_hat_xgb_f'] = y_hat_xgb_f test['y_hat_xgb_c2'] = y_hat_xgb_c2 print('Fixing Negative Predictions:' , np.sum(test.y_hat_xgb_c < 0) , np.sum(test.y_hat_xgb_c2 < 0) , np.sum(test.y_hat_xgb_f< 0) , np.sum(test.wg< 0) , np.sum(test.ARIMA< 0) , np.sum(test.SARIMAX< 0) , np.sum(test.y_hat_log< 0)) test.loc[test.y_hat_xgb_c < 0, 'y_hat_xgb_c'] = 0 test.loc[test.y_hat_xgb_c2 < 0, 'y_hat_xgb_c2'] = 0 test.loc[test.y_hat_xgb_f < 0, 'y_hat_xgb_f'] = 0 test.loc[test.wg < 0, 'wg'] = 0 test.loc[test.ARIMA < 0, 'ARIMA'] = 0 test.loc[test.SARIMAX < 0, 'SARIMAX'] = 0 test.loc[test.y_hat_log < 0, 'y_hat_log'] = 0 print('done', datetime.now() )<count_missing_values>
plotly.offline.init_notebook_mode()
%matplotlib inline

def RMSLE(pred, actual):
    return np.sqrt(np.mean(np.power((np.log(pred+1) - np.log(actual+1)), 2)))
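RMSLE above is the competition metric: root mean squared error of log(1+x), so it penalizes errors in ratio terms rather than absolute terms. A quick check on invented values, using the function just defined:

import numpy as np

pred   = np.array([10.0, 100.0, 1000.0])
actual = np.array([12.0,  90.0, 1500.0])
print(RMSLE(pred, actual))  # ~0.26; scale-free across small and large counts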
COVID19 Global Forecasting (Week 3)
8,825,111
print('Fixing Inf Predictions:' , np.sum(test.y_hat_xgb_c.isnull()) , np.sum(test.y_hat_xgb_c2.isnull()) , np.sum(test.y_hat_xgb_f.isnull()) , np.sum(test.wg.isnull()) , np.sum(test.ARIMA.isnull()) , np.sum(test.SARIMAX.isnull()) , np.sum(test.y_hat_log.isnull())) booll =(test['SARIMAX'].isnull()) test.loc[booll, 'SARIMAX'] = test[booll]['y_hat_log'] booll =(test['ARIMA'].isnull()) test.loc[booll, 'ARIMA'] = test[booll]['y_hat_log'] print('Fixing Inf Predictions:' , np.sum(test.y_hat_xgb_c.isnull()) , np.sum(test.y_hat_xgb_c2.isnull()) , np.sum(test.y_hat_xgb_f.isnull()) , np.sum(test.wg.isnull()) , np.sum(test.ARIMA.isnull()) , np.sum(test.SARIMAX.isnull()) , np.sum(test.y_hat_log.isnull())) print('done', datetime.now() )<filter>
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv")
train = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region','Province_State','Date'])
test = test.sort_values(['Country_Region','Province_State','Date'])
COVID19 Global Forecasting (Week 3)
8,825,111
print('Fixing Unrealistic Predictions:', np.sum(test['pop'] *(700/3100)< test.y_hat_xgb_c), np.sum(test['pop'] *(700/3100)< test.y_hat_xgb_c2), np.sum(test['pop'] *(700/3100)< test.y_hat_xgb_f), np.sum(test['pop'] *(700/3100)< test.wg), np.sum(test['pop'] *(700/3100)< test.ARIMA), np.sum(test['pop'] *(700/3100)< test.SARIMAX), np.sum(test['pop'] *(700/3100)< test.y_hat_log), ) booll =(test['pop'] *(700/3100)< test.y_hat_xgb_c) test.loc[booll, 'y_hat_xgb_c'] = test[booll]['pop'] *(700/3100) booll =(test['pop'] *(700/3100)< test.y_hat_xgb_c2) test.loc[booll, 'y_hat_xgb_c2'] = test[booll]['pop'] *(700/3100) booll =(test['pop'] *(700/3100)< test.y_hat_xgb_f) test.loc[booll, 'y_hat_xgb_f'] = test[booll]['pop'] *(700/3100) booll =(test['pop'] *(700/3100)< test.wg) test.loc[booll, 'wg'] = test[booll]['pop'] *(700/3100) booll =(test['pop'] *(700/3100)< test.ARIMA) test.loc[booll, 'ARIMA'] = test[booll]['pop'] *(700/3100) booll =(test['pop'] *(700/3100)< test.SARIMAX) test.loc[booll, 'SARIMAX'] = test[booll]['pop'] *(700/3100) booll =(test['pop'] *(700/3100)< test.y_hat_log) test.loc[booll, 'y_hat_log'] = test[booll]['pop'] *(700/3100) print('Fixed Unrealistic Predictions:', np.sum(test['pop'] *(700/3100)< test.y_hat_xgb_c), np.sum(test['pop'] *(700/3100)< test.y_hat_xgb_f), np.sum(test['pop'] *(700/3100)< test.wg), np.sum(test['pop'] *(700/3100)< test.ARIMA), np.sum(test['pop'] *(700/3100)< test.SARIMAX), np.sum(test['pop'] *(700/3100)< test.y_hat_log), ) print('done', datetime.now() )<feature_engineering>
FirstDate = train.groupby('Country_Region').min()['Date'].unique()[0]
train['Last Confirm'] = train['ConfirmedCases'].shift(1)
# Same monotonicity repair as in the earlier kernel: replace any cumulative
# value that drops below its predecessor until none remain
while train[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate)].shape[0] > 0:
    train['Last Confirm'] = train['ConfirmedCases'].shift(1)
    train['Last Fatalities'] = train['Fatalities'].shift(1)
    train.loc[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate), 'ConfirmedCases'] = train.loc[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate), 'Last Confirm']
    train.loc[(train['Last Fatalities'] > train['Fatalities']) & (train['Date'] > FirstDate), 'Fatalities'] = train.loc[(train['Last Fatalities'] > train['Fatalities']) & (train['Date'] > FirstDate), 'Last Fatalities']
train['Last Confirm'] = train['ConfirmedCases'].shift(1)
train['Last Fatalities'] = train['Fatalities'].shift(1)
COVID19 Global Forecasting (Week 3)
8,825,111
test['y_hat_ens'] = test.y_hat_xgb_c *.15 + test.wg *.05 + test.y_hat_log *.30 + test['SARIMAX'] *.03 + test.y_hat_xgb_c2 *.47 test[(test.Province_State=='')&(test.Country_Region=='France')][['Date','y_hat_xgb_c','y_hat_xgb_c2', 'wg','SARIMAX', 'SARIMAX2','y_hat_log', 'y_hat_ens']]<count_missing_values>
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() with tqdm(total=len(train['Country_Region'].unique())) as pbar: for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) feature_use = X_pred.columns[0] for i in range(X_pred.shape[1] - 1,0,-1): if(X_pred.iloc[0,i] > 0): feature_use = X_pred.columns[i] break idx = X_train[X_train[feature_use] == 0].shape[0] adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] adjusted_X_pred = X_pred[feature_use].values.reshape(-1, 1) model = make_pipeline(PolynomialFeatures(2), BayesianRidge()) model.fit(adjusted_X_train,adjusted_y_train_confirmed) y_hat_confirmed = model.predict(adjusted_X_pred) model.fit(adjusted_X_train,adjusted_y_train_fatalities) y_hat_fatalities = model.predict(adjusted_X_pred) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) pbar.update(1) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_1 = df_val.copy() RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values )
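The per-area model above is a degree-2 polynomial features step feeding a Bayesian ridge regression, fit on days elapsed since a case-count threshold. A self-contained sketch on a toy quadratic-growth series:

import numpy as np
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(1, 15).reshape(-1, 1)          # days since the threshold case count
y = 2.0 * X.ravel() ** 2 + 3.0               # toy quadratic-growth target
model = make_pipeline(PolynomialFeatures(2), BayesianRidge())
model.fit(X, y)
print(model.predict(np.array([[15], [16]])))  # close to 453 and 515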
COVID19 Global Forecasting (Week 3)
8,825,111
print('Empty Predictions?', np.sum(test.y_hat_ens.isnull())) <compute_train_metric>
RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,
      df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)
COVID19 Global Forecasting (Week 3)
8,825,111
narf = train[train.day_from_jan_first== 79].copy() narf = narf.merge(test[test.day_from_jan_first== 79], on=['Country_Region','Province_State','day_from_jan_first']) narf['err'] = np.abs(( narf.wg_y - narf.ConfirmedCases)/(1+narf.ConfirmedCases)) print(mean_squared_log_error(narf.ConfirmedCases,narf.y_hat_ens)) print('wg', mean_squared_log_error(narf.ConfirmedCases,narf.wg_y)) print('SARIMAX', mean_squared_log_error(narf.ConfirmedCases,narf.SARIMAX)) print('y_hat_log', mean_squared_log_error(narf.ConfirmedCases,narf.y_hat_log_y)) print('y_hat_xgb_c', mean_squared_log_error(narf.ConfirmedCases,narf.y_hat_xgb_c)) narf = narf[narf.err>100].copy() if narf.shape[0]> 0: print(mean_squared_log_error(narf.ConfirmedCases,narf.y_hat_ens)) else: print('no fixes for wg_y') for index, row in narf.iterrows() : country = row['Country_Region'] state = row['Province_State'] booll =(test.Country_Region==country)&(test.Province_State==state) test.loc[booll, 'wg'] =(test[booll].y_hat_log)*.3 +(test[booll].y_hat_xgb_c)*.7 print('done', datetime.now() )<feature_engineering>
val_score = []
for country in df_val['Country_Region'].unique():
    df_val_country = df_val[(df_val['Country_Region'] == country) & (df_val['Fatalities'].isnull() == False)]
    val_score.append([country,
                      RMSLE(df_val_country['ConfirmedCases'].values, df_val_country['ConfirmedCases_hat'].values),
                      RMSLE(df_val_country['Fatalities'].values, df_val_country['Fatalities_hat'].values)])
df_val_score = pd.DataFrame(val_score)
df_val_score.columns = ['Country','ConfirmedCases_Scored','Fatalities_Scored']
df_val_score.sort_values('ConfirmedCases_Scored', ascending=False)
COVID19 Global Forecasting (Week 3)
8,825,111
test['y_hat_ens'] = test.y_hat_xgb_c *.15 + test.wg *.05 + test.y_hat_log *.30 + test['SARIMAX'] *.03 + test.y_hat_xgb_c2 *.47 test[(test.Province_State=='')&(test.Country_Region=='France')][['Date','y_hat_xgb_c','y_hat_xgb_c2', 'wg', 'ARIMA','SARIMAX','y_hat_log', 'y_hat_ens']]<compute_train_metric>
country = "India" df_val = df_val_1 df_val[df_val['Country_Region'] == country].groupby(['Date','Country_Region'] ).sum().reset_index()
COVID19 Global Forecasting (Week 3)
8,825,111
narf = train[train.day_from_jan_first== 79].copy() narf = narf.merge(test[test.day_from_jan_first== 79], on=['Country_Region','Province_State','day_from_jan_first']) narf['err'] = np.abs(( narf.y_hat_log_y - narf.ConfirmedCases)/(1+narf.ConfirmedCases)) print(mean_squared_log_error(narf.ConfirmedCases,narf.y_hat_ens)) print('wg', mean_squared_log_error(narf.ConfirmedCases,narf.wg_y)) print('SARIMAX', mean_squared_log_error(narf.ConfirmedCases,narf.SARIMAX)) print('y_hat_log', mean_squared_log_error(narf.ConfirmedCases,narf.y_hat_log_y)) print('y_hat_xgb_c', mean_squared_log_error(narf.ConfirmedCases,narf.y_hat_xgb_c)) narf = narf[narf.err>10].copy() if narf.shape[0]> 0: print(mean_squared_log_error(narf.ConfirmedCases,narf.y_hat_ens)) else: print('no fixes for y_hat_log') for index, row in narf.iterrows() : country = row['Country_Region'] state = row['Province_State'] booll =(test.Country_Region==country)&(test.Province_State==state) print('done', datetime.now() )<filter>
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() with tqdm(total=len(train['Country_Region'].unique())) as pbar: for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : with warnings.catch_warnings() : warnings.filterwarnings("ignore") df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) feature_use = X_pred.columns[0] for i in range(X_pred.shape[1] - 1,0,-1): if(X_pred.iloc[0,i] > 0): feature_use = X_pred.columns[i] break idx = X_train[X_train[feature_use] == 0].shape[0] adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = ExponentialSmoothing(adjusted_y_train_confirmed, trend = 'additive' ).fit() y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = ExponentialSmoothing(adjusted_y_train_fatalities, trend = 'additive' ).fit() y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) pbar.update(1) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_2 = df_val.copy()
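Here each area's cumulative series is extrapolated with Holt's linear (additive-trend) exponential smoothing instead of the polynomial fit. A minimal sketch of the same call on a toy series:

import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing

y = np.array([5, 9, 14, 21, 30, 41, 55, 70], dtype=float)  # toy cumulative counts
model = ExponentialSmoothing(y, trend='additive').fit()
print(model.forecast(4))  # continues the recent additive trend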
COVID19 Global Forecasting (Week 3)
8,825,111
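For reference, a minimal, self-contained sketch of the per-region Holt call used in the Exponential Smoothing cell above (the series is made up; only the API pattern matters):

import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing

y = np.array([2, 3, 5, 9, 14, 22, 33, 48, 70, 95], dtype=float)  # toy cumulative counts
fit = ExponentialSmoothing(y, trend='additive').fit()  # Holt's linear trend
print(fit.forecast(5))  # next 5 steps along the fitted level + trend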
test[test.y_hat_ens == 1521225]
test[(test.Province_State == '') & (test.Country_Region == 'Turkey')][['Date', 'y_hat_xgb_c', 'wg', 'ARIMA', 'SARIMAX', 'y_hat_log', 'y_hat_ens']]<data_type_conversions>
feature_day = [1, 20, 50, 100, 200, 500, 1000]

def CreateInput(data):
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if data['Date'].iloc[i] > fromday:
                day_delta = data['Date'].iloc[i] - fromday  # renamed from the original's `day_denta`
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]

pred_data_all = pd.DataFrame()
with tqdm(total=len(train['Country_Region'].unique())) as pbar:
    for country in train['Country_Region'].unique():
        for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
                df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                X_train = CreateInput(df_train)
                y_train_confirmed = df_train['ConfirmedCases'].ravel()
                y_train_fatalities = df_train['Fatalities'].ravel()
                X_pred = CreateInput(df_test)
                feature_use = X_pred.columns[0]
                for i in range(X_pred.shape[1] - 1, 0, -1):
                    if X_pred.iloc[0, i] > 0:
                        feature_use = X_pred.columns[i]
                        break
                idx = X_train[X_train[feature_use] == 0].shape[0]
                adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
                adjusted_y_train_confirmed = y_train_confirmed[idx:]
                adjusted_y_train_fatalities = y_train_fatalities[idx:]
                idx = X_pred[X_pred[feature_use] == 0].shape[0]
                adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)  # unused below; kept from the polynomial variant
                pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                max_train_date = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].max()
                min_test_date = pred_data['Date'].min()
                model = SARIMAX(adjusted_y_train_confirmed, order=(1, 1, 0), measurement_error=True).fit(disp=False)
                y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_confirmed = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['ConfirmedCases'].values
                y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis=0)
                model = SARIMAX(adjusted_y_train_fatalities, order=(1, 1, 0), measurement_error=True).fit(disp=False)
                y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_fatalities = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['Fatalities'].values
                y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis=0)
                pred_data['ConfirmedCases_hat'] = y_hat_confirmed
                pred_data['Fatalities_hat'] = y_hat_fatalities
                pred_data_all = pred_data_all.append(pred_data)
        pbar.update(1)
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_3 = df_val.copy()
COVID19 Global Forecasting (Week 3)
8,825,111
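The SARIMAX variant above fits an AR(1) on first differences with an observation-noise term. A minimal sketch on toy data (short series will trigger convergence warnings, which the original cell suppresses):

import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

y = np.array([2, 3, 5, 9, 14, 22, 33, 48, 70, 95], dtype=float)  # toy cumulative counts
res = SARIMAX(y, order=(1, 1, 0), measurement_error=True).fit(disp=False)
print(res.forecast(5))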
test['y_hat_ens'] = test.y_hat_ens.astype(int)
print(np.max(test['y_hat_ens']))<save_to_csv>
method_list = ['Poly Bayesian Ridge','Exponential Smoothing','SARIMA'] method_val = [df_val_1,df_val_2,df_val_3] for i in range(0,3): df_val = method_val[i] method_score = [method_list[i]] + [RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)] + [RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)] print(method_score )
COVID19 Global Forecasting (Week 3)
8,825,111
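The RMSLE scorer behind the comparison above, as a worked one-liner (np.log1p(x) is just log(x + 1), so this matches the document's own definition; the numbers are toy values):

import numpy as np

def rmsle(pred, actual):
    return np.sqrt(np.mean((np.log1p(pred) - np.log1p(actual)) ** 2))

print(rmsle(np.array([10.0, 100.0]), np.array([12.0, 90.0])))  # ~0.14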
submissionOrig = pd.read_csv("../input/covid19-global-forecasting-week-2/submission.csv")
submissionOrig["ConfirmedCases"] = pd.Series(test.y_hat_ens)
submissionOrig["Fatalities"] = pd.Series(test.y_hat_xgb_f)
submissionOrig.to_csv('submission.csv', index=False)
submissionOrig.head(25)
print('done', datetime.now())<import_modules>
df_val = df_val_3 submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']] submission.columns = ['ForecastId','ConfirmedCases','Fatalities'] submission.to_csv('submission.csv', index=False) submission
COVID19 Global Forecasting (Week 3)
8,813,914
import pandas as pd<import_modules>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.tsa.arima_model as arima
from sklearn.linear_model import LinearRegression
from sklearn import linear_model
# Also needed by later cells of this kernel:
import math
import warnings
from pykalman import KalmanFilter
COVID19 Global Forecasting (Week 3)
8,813,914
import pandas as pd<load_from_csv>
warnings.filterwarnings('ignore') pd.set_option("display.max_rows", None, "display.max_columns", None )
COVID19 Global Forecasting (Week 3)
8,813,914
sub_expo = pd.read_csv('.. /input/submissions-covid19-public/submission_expo.csv') sub_expo = sub_expo.rename({'ConfirmedCases': 'ConfirmedCases_expo', 'Fatalities': 'Fatalities_expo'}, axis=1) sub_gam = pd.read_csv('.. /input/submissions-covid19-public/submission_gam.csv') sub_gam = sub_gam.rename({'ConfirmedCases': 'ConfirmedCases_gam', 'Fatalities': 'Fatalities_gam'}, axis=1) sub_power = pd.read_csv('.. /input/submissions-covid19-public/submission_power.csv') sub_power = sub_power.rename({'ConfirmedCases': 'ConfirmedCases_power', 'Fatalities': 'Fatalities_power'}, axis=1 )<merge>
training_data_original = pd.read_csv('/kaggle/input/inputs2/train.csv') training_data_original['Date'] = pd.to_datetime(training_data_original['Date']) training_data = training_data_original[~training_data_original['Country_Region'].isin(['Diamond Princess', 'MS Zaandam'])] countries = training_data['Country_Region'].unique() countries_main = ['China', 'US', 'Australia', 'Canada'] states = training_data['Province_State'].unique()
COVID19 Global Forecasting (Week 3)
8,813,914
sub = sub_expo.copy() sub = sub.merge(sub_gam, on='ForecastId', how='left') sub = sub.merge(sub_power, on='ForecastId', how='left') sub.head()<feature_engineering>
training_data[training_data['Country_Region'] == 'Belize']
COVID19 Global Forecasting (Week 3)
8,813,914
sub['ConfirmedCases'] = sub[[c for c in sub.columns if c.startswith('ConfirmedCases_')]].mean(axis=1) sub['Fatalities'] = sub[[c for c in sub.columns if c.startswith('Fatalities_')]].mean(axis=1) sub.head()<save_to_csv>
state_metadata = pd.read_excel('/kaggle/input/externaldata2/states.xlsx') country_metadata = pd.read_excel('/kaggle/input/externaldata2/countries.xlsx' )
COVID19 Global Forecasting (Week 3)
8,813,914
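A possible variant of the row-wise mean in the `sub` cell above: a weighted blend of the three component submissions. The weights here are illustrative assumptions, not tuned values:

w = {'expo': 0.2, 'gam': 0.4, 'power': 0.4}  # assumed weights
sub['ConfirmedCases'] = sum(w[k] * sub['ConfirmedCases_' + k] for k in w)
sub['Fatalities'] = sum(w[k] * sub['Fatalities_' + k] for k in w)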
sub[['ForecastId', 'ConfirmedCases', 'Fatalities']].to_csv('submission.csv', index=False )<import_modules>
def data_preparation(training_data, countries, states, country_metadata, state_metadata, n_days_case, n_days_fatal, min_num_cases = 2): training_data_trun = training_data[training_data['ConfirmedCases'] >= min_num_cases] conf_cases_dict = {} fatal_dict = {} for country in countries: if country not in countries_main: training_data_trun_loc = training_data_trun[(training_data_trun['Country_Region'] == country)&(pd.isnull(training_data_trun.Province_State)) ] training_data_trun_loc = training_data_trun_loc.groupby(['Date'] ).sum() training_data_trun_loc = training_data_trun_loc.sort_values(by = 'Date') if len(training_data_trun_loc['ConfirmedCases'].values)>= n_days_case: conf_cases_dict[country] = training_data_trun_loc['ConfirmedCases'].values[:n_days_case] / country_metadata[country_metadata['Countries'] == country]['Population'].values[0] if len(training_data_trun_loc['Fatalities'].values)>= n_days_fatal: fatal_dict[country] = training_data_trun_loc['Fatalities'].values[:n_days_fatal] for state in states: training_data_trun_loc = training_data_trun[training_data_trun['Province_State'] == state] training_data_trun_loc = training_data_trun_loc.groupby(['Date'] ).sum() training_data_trun_loc = training_data_trun_loc.sort_values(by = 'Date') if len(training_data_trun_loc['ConfirmedCases'].values)>= n_days_case: conf_cases_dict[state] = training_data_trun_loc['ConfirmedCases'].values[:n_days_case] / state_metadata[state_metadata['States'] == state]['Population'].values[0] if len(training_data_trun_loc['Fatalities'].values)>= n_days_fatal: fatal_dict[state] = training_data_trun_loc['Fatalities'].values[:n_days_fatal] return pd.DataFrame(conf_cases_dict), pd.DataFrame(fatal_dict )
COVID19 Global Forecasting (Week 3)
8,813,914
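What data_preparation does for a single location, reduced to a sketch (toy numbers): drop days below the case threshold, truncate to a fixed window, and scale by population so trajectories are comparable across locations.

import pandas as pd

s = pd.Series([0, 1, 3, 7, 15, 40, 90])       # toy cumulative cases
min_num_cases, n_days_case, population = 2, 4, 5_000_000
traj = s[s >= min_num_cases].to_numpy()[:n_days_case] / population
print(traj)  # per-capita trajectory over the first 4 qualifying days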
plotly.offline.init_notebook_mode()
%matplotlib inline

def RMSLE(pred, actual):
    return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))<load_from_csv>
def fts_training(input_df, rank = 3): matrix = input_df.values u, s, v = np.linalg.svd(matrix, full_matrices=False) scores = np.matmul(u[:, :rank], np.diag(s[:rank])) pcs = v[:rank, :] return scores, pcs
COVID19 Global Forecasting (Week 3)
8,813,914
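fts_training is a truncated SVD: the scores carry the temporal dynamics and the pcs carry the cross-location loadings, so scores @ pcs is the best rank-r approximation of the day-by-location matrix. A sketch on random stand-in data:

import numpy as np

matrix = np.random.default_rng(0).random((30, 8))   # days x locations stand-in
u, s, v = np.linalg.svd(matrix, full_matrices=False)
rank = 3
scores = np.matmul(u[:, :rank], np.diag(s[:rank]))  # temporal scores
pcs = v[:rank, :]                                   # location loadings
print(np.linalg.norm(matrix - scores @ pcs))        # rank-3 residual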
pd.set_option('mode.chained_assignment', None) test = pd.read_csv(".. /input/covid19-global-forecasting-week-2/test.csv") train = pd.read_csv(".. /input/covid19-global-forecasting-week-2/train.csv") train['Province_State'].fillna('', inplace=True) test['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train = train.sort_values(['Country_Region','Province_State','Date']) test = test.sort_values(['Country_Region','Province_State','Date'] )<categorify>
def forecast_trajectories(training_data, countries, states, country_metadata, state_metadata, loc = None, n_days_case = 30, n_days_fatal = 10, forecast_days = 10, min_num_cases = 2, model_type = 'ARIMA', components_modeling = True, rank = 3): kf = KalmanFilter(initial_state_mean = np.zeros(rank ).tolist() , n_dim_obs = rank) conf_cases_df, fatal_df = data_preparation(training_data, countries, states, country_metadata, state_metadata, n_days_case = n_days_case, n_days_fatal = n_days_case, min_num_cases = min_num_cases) pred_countries = conf_cases_df.columns.tolist() pred_countries_fatal = fatal_df.columns.tolist() conf_cases_exog_df, fatal_exog_df = data_preparation(training_data, countries, states, country_metadata, state_metadata, n_days_case = n_days_case + forecast_days, n_days_fatal = n_days_case + forecast_days, min_num_cases = min_num_cases) scores_exog, pcs_exog = fts_training(conf_cases_exog_df, rank = rank) if len(scores_exog)> 0: scores_exog = kf.em(scores_exog ).smooth(scores_exog)[0] scores, pcs = fts_training(conf_cases_df, rank = rank) forecasted_scores = [] idx = 0 for score in scores.T: if components_modeling: exog = scores_exog[:n_days_case, idx] if len(scores_exog)> 0 else None pred_exog = scores_exog[n_days_case:, idx] if len(scores_exog)> 0 else None y = score else: exog = scores_exog[:n_days_case, :] if len(scores_exog)> 0 else None pred_exog = scores_exog[n_days_case:, :] if len(scores_exog)> 0 else None y = conf_cases_df[loc].values try: model = arima.ARIMA(endog = y, exog = exog, order =(4, 1, 0)).fit( seasonal = False, trace = False, method = 'css', solver = 'bfgs', error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False) except: try: model = arima.ARIMA(endog = y, exog = exog, order =(3, 1, 0)).fit( seasonal = False, trace = False, method = 'css', solver = 'bfgs', error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False) except: try: model = arima.ARIMA(endog = y, exog = exog, order =(2, 1, 0)).fit( seasonal = False, trace = False, method = 'css', solver = 'bfgs', error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False) except: model = arima.ARIMA(endog = y, exog = exog, order =(1, 0, 0)).fit( seasonal = False, trace = False, method = 'css', solver = 'bfgs', error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False) if not components_modeling: pred_traj = model.forecast(steps = forecast_days, alpha = 0.001, exog = pred_exog)[0] break else: forecasted_scores.append(model.forecast(steps = forecast_days, alpha = 0.001, exog = pred_exog)[0].tolist()) idx = idx + 1 if components_modeling: pred_traj = np.matmul(np.array(forecasted_scores ).T, pcs) pred_traj_df = pd.DataFrame(pred_traj, columns = pred_countries) for loc in pred_countries: if loc in country_metadata['Countries'].values.tolist() : pred_traj_df[loc] = country_metadata[country_metadata['Countries'] == loc]['Population'].values[0] * pred_traj_df[loc] if loc in state_metadata['States'].values.tolist() : pred_traj_df[loc] = state_metadata[state_metadata['States'] == loc]['Population'].values[0] * pred_traj_df[loc] else: pred_traj_df = pd.DataFrame() if loc in countries: pred_traj_df[loc] = country_metadata[country_metadata['Countries'] == loc]['Population'].values[0] * pred_traj elif loc in states: pred_traj_df[loc] = state_metadata[state_metadata['States'] == loc]['Population'].values[0] * pred_traj fatal_scores_exog, fatal_pcs_exog = fts_training(fatal_exog_df, rank = rank) if len(fatal_pcs_exog)> 0: 
fatal_scores_exog = kf.em(fatal_scores_exog ).smooth(fatal_scores_exog)[0] fatal_scores, fatal_pcs = fts_training(fatal_df, rank = rank) forecasted_fatal_scores = [] idx = 0 for fatal_score in fatal_scores.T: if components_modeling: exog = fatal_scores_exog[:n_days_fatal, idx] if len(fatal_scores_exog)> 0 else None pred_exog = fatal_scores_exog[n_days_fatal:, idx] if len(fatal_scores_exog)> 0 else None y = fatal_score else: exog = fatal_scores_exog[:n_days_fatal, :] if len(fatal_scores_exog)> 0 else None pred_exog = fatal_scores_exog[n_days_fatal:, :] if len(fatal_scores_exog)> 0 else None y = fatal_df[loc].values try: model = arima.ARIMA(endog = y, exog = exog, order =(4, 1, 0)).fit( seasonal = False, trace = False, method = 'css', solver = 'bfgs', error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False) except: try: model = arima.ARIMA(endog = y, exog = exog, order =(3, 1, 0)).fit( seasonal = False, trace = False, method = 'css', solver = 'bfgs', error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False) except: try: model = arima.ARIMA(endog = y, exog = exog, order =(2, 1, 0)).fit( seasonal = False, trace = False, method = 'css', solver = 'bfgs', error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False) except: model = arima.ARIMA(endog = y, exog = exog, order =(1, 0, 0)).fit( seasonal = False, trace = False, method = 'css', solver = 'bfgs', error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False) if not components_modeling: fatal_pred_traj = model.forecast(steps = forecast_days, alpha = 0.001, exog = pred_exog)[0] break else: forecasted_fatal_scores.append(model.forecast(steps = forecast_days, alpha = 0.001, exog = pred_exog)[0].tolist()) idx = idx + 1 if components_modeling: fatal_pred_traj = np.matmul(np.array(forecasted_fatal_scores ).T, fatal_pcs) fatal_pred_traj_df = pd.DataFrame(fatal_pred_traj, columns = pred_countries_fatal) else: fatal_pred_traj_df = pd.DataFrame() if loc in countries: fatal_pred_traj_df[loc] = country_metadata[country_metadata['Countries'] == loc]['Population'].values[0] * fatal_pred_traj elif loc in states: fatal_pred_traj_df[loc] = state_metadata[state_metadata['States'] == loc]['Population'].values[0] * fatal_pred_traj return pred_traj_df, fatal_pred_traj_df
COVID19 Global Forecasting (Week 3)
8,813,914
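forecast_trajectories smooths the exogenous SVD scores with pykalman before handing them to ARIMA. The smoothing call, isolated (random stand-in data; em fits the state-space parameters, smooth returns the smoothed state means):

import numpy as np
from pykalman import KalmanFilter

rank = 3
scores = np.random.default_rng(1).standard_normal((25, rank))  # stand-in for SVD scores
kf = KalmanFilter(initial_state_mean=np.zeros(rank).tolist(), n_dim_obs=rank)
smoothed = kf.em(scores).smooth(scores)[0]
print(smoothed.shape)  # (25, 3)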
train[['ConfirmedCases', 'Fatalities']] = train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']].transform('cummax') <feature_engineering>
pred_traj_df, fatal_pred_traj_df = forecast_trajectories(training_data, countries, states, country_metadata, state_metadata, loc = 'New York', n_days_case = 29, n_days_fatal = 29, forecast_days = 30, rank = 3, min_num_cases = 25, components_modeling = True )
COVID19 Global Forecasting (Week 3)
8,813,914
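The cummax transform above enforces that each region's cumulative series never decreases (reporting corrections sometimes produce dips). A tiny demonstration:

import pandas as pd

df = pd.DataFrame({'Country_Region': ['A'] * 5,
                   'Province_State': [''] * 5,
                   'ConfirmedCases': [1, 4, 3, 3, 9]})
df['ConfirmedCases'] = df.groupby(['Country_Region', 'Province_State'])['ConfirmedCases'].transform('cummax')
print(df['ConfirmedCases'].tolist())  # [1, 4, 4, 4, 9]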
feature_day = [1, 20, 50, 100, 200, 500, 1000]

def CreateInput(data):
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if data['Date'].iloc[i] > fromday:
                day_delta = data['Date'].iloc[i] - fromday  # renamed from the original's `day_denta`
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]

pred_data_all = pd.DataFrame()
with tqdm(total=len(train['Country_Region'].unique())) as pbar:
    for country in train['Country_Region'].unique():
        for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
            df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
            df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
            X_train = CreateInput(df_train)
            y_train_confirmed = df_train['ConfirmedCases'].ravel()
            y_train_fatalities = df_train['Fatalities'].ravel()
            X_pred = CreateInput(df_test)
            feature_use = X_pred.columns[0]
            for i in range(X_pred.shape[1] - 1, 0, -1):
                if X_pred.iloc[0, i] > 0:
                    feature_use = X_pred.columns[i]
                    break
            idx = X_train[X_train[feature_use] == 0].shape[0]
            adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
            adjusted_y_train_confirmed = y_train_confirmed[idx:]
            adjusted_y_train_fatalities = y_train_fatalities[idx:]
            adjusted_X_pred = X_pred[feature_use].values.reshape(-1, 1)
            model = make_pipeline(PolynomialFeatures(2), BayesianRidge())
            model.fit(adjusted_X_train, adjusted_y_train_confirmed)
            y_hat_confirmed = model.predict(adjusted_X_pred)
            model.fit(adjusted_X_train, adjusted_y_train_fatalities)
            y_hat_fatalities = model.predict(adjusted_X_pred)
            pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
            pred_data['ConfirmedCases_hat'] = y_hat_confirmed
            pred_data['Fatalities_hat'] = y_hat_fatalities
            pred_data_all = pred_data_all.append(pred_data)
        pbar.update(1)
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_1 = df_val.copy()
def generate_prediction(test_data, training_data, countries_main, countries, states, min_num_cases): for country in countries: print(country) if country not in countries_main and country not in excl_list: test_loc_df = test_data[(test_data['Country_Region'] == country)&(pd.isnull(test_data.Province_State)) ].reset_index() train_loc_df = training_data[(training_data['Country_Region'] == country)&(pd.isnull(training_data.Province_State)) ].reset_index() test_start = test_loc_df['Date'][0] test_end = test_loc_df['Date'][len(test_loc_df)- 1] train_end = train_loc_df['Date'][len(train_loc_df)- 1] test_loc_df.loc[(( test_loc_df['Date'] >= test_start)) &(test_loc_df['Date'] <= train_end), 'ConfirmedCases'] = train_loc_df[(train_loc_df['Date'] >= test_start)&(train_loc_df['Date'] <= train_end)]['ConfirmedCases'].values test_loc_df.loc[(( test_loc_df['Date'] >= test_start)) &(test_loc_df['Date'] <= train_end), 'Fatalities'] = train_loc_df[(train_loc_df['Date'] >= test_start)&(train_loc_df['Date'] <= train_end)]['Fatalities'].values effective_df = train_loc_df[train_loc_df['ConfirmedCases'] >= min_num_cases] forecast_days = int(( test_end - train_end ).days) min_num_cases_temp = min_num_cases if len(effective_df)> 0: while min_num_cases_temp > 1: try: effective_train_start = train_loc_df[train_loc_df['ConfirmedCases'] >= min_num_cases_temp].reset_index() ['Date'][0] n_days_case = int(( train_end - effective_train_start ).days)+ 1 pred_df, fatal_pred_df = forecast_trajectories(training_data, countries, states, country_metadata, state_metadata, loc = country, n_days_case = n_days_case, n_days_fatal = n_days_case, forecast_days = forecast_days, min_num_cases = min_num_cases_temp, rank = 3) test_loc_df.loc[test_loc_df['Date'] > train_end, 'ConfirmedCases'] = np.maximum.accumulate(pred_df[country].values ).astype(int) test_data.loc[(test_data['Country_Region'] == country)&(pd.isnull(test_data.Province_State)) , 'ConfirmedCases'] = test_loc_df['ConfirmedCases'].values test_loc_df.loc[test_loc_df['Date'] > train_end, 'Fatalities'] = np.maximum.accumulate(fatal_pred_df[country].values ).astype(int) test_data.loc[(test_data['Country_Region'] == country)&(pd.isnull(test_data.Province_State)) , 'Fatalities'] = test_loc_df['Fatalities'].values break except: min_num_cases_temp = math.floor(min_num_cases_temp / 2) continue for state in states: print(state) if str(state)not in excl_list: test_loc_df = test_data[(test_data['Province_State'] == state)].reset_index() train_loc_df = training_data[(training_data['Province_State'] == state)].reset_index() test_start = test_loc_df['Date'][0] test_end = test_loc_df['Date'][len(test_loc_df)- 1] train_end = train_loc_df['Date'][len(train_loc_df)- 1] test_loc_df.loc[(( test_loc_df['Date'] >= test_start)) &(test_loc_df['Date'] <= train_end), 'ConfirmedCases'] = train_loc_df[(train_loc_df['Date'] >= test_start)&(train_loc_df['Date'] <= train_end)]['ConfirmedCases'].values test_loc_df.loc[(( test_loc_df['Date'] >= test_start)) &(test_loc_df['Date'] <= train_end), 'Fatalities'] = train_loc_df[(train_loc_df['Date'] >= test_start)&(train_loc_df['Date'] <= train_end)]['Fatalities'].values effective_df = train_loc_df[train_loc_df['ConfirmedCases'] >= min_num_cases] forecast_days = int(( test_end - train_end ).days) min_num_cases_temp = min_num_cases if len(effective_df)> 0: while min_num_cases_temp > 1: try: effective_train_start = train_loc_df[train_loc_df['ConfirmedCases'] >= min_num_cases_temp].reset_index() ['Date'][0] n_days_case = int(( train_end - effective_train_start 
).days)+ 1 pred_df, fatal_pred_df = forecast_trajectories(training_data, countries, states, country_metadata, state_metadata, loc = state, n_days_case = n_days_case, n_days_fatal = n_days_case, forecast_days = forecast_days, min_num_cases = min_num_cases_temp, rank = 3) test_loc_df.loc[test_loc_df['Date'] > train_end, 'ConfirmedCases'] = np.maximum.accumulate(pred_df[state].values ).astype(int) test_data.loc[test_data['Province_State'] == state, 'ConfirmedCases'] = test_loc_df['ConfirmedCases'].values test_loc_df.loc[test_loc_df['Date'] > train_end, 'Fatalities'] = np.maximum.accumulate(fatal_pred_df[state].values ).astype(int) test_data.loc[test_data['Province_State'] == state, 'Fatalities'] = test_loc_df['Fatalities'].values break except: min_num_cases_temp = math.floor(min_num_cases_temp / 2) continue return test_data
COVID19 Global Forecasting (Week 3)
8,813,914
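generate_prediction wraps every forecast in np.maximum.accumulate so the submitted cumulative counts are monotone even when the raw model output wobbles. In isolation (made-up forecast values):

import numpy as np

raw = np.array([120.0, 118.5, 131.0, 129.2, 140.7])  # toy raw forecast
print(np.maximum.accumulate(raw).astype(int))        # [120 120 131 131 140]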
RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values )<compute_test_metric>
test_data = pd.read_csv('/kaggle/input/inputs2/test.csv') test_data['Date'] = pd.to_datetime(test_data['Date'] )
COVID19 Global Forecasting (Week 3)
8,813,914
RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values )<groupby>
test_data['ConfirmedCases'] = None test_data['Fatalities'] = None
COVID19 Global Forecasting (Week 3)
8,813,914
country = "Vietnam" df_val = df_val_1 df_val[df_val['Country_Region'] == country].groupby(['Date','Country_Region'] ).sum().reset_index()<save_model>
excl_list = ['Belize', 'Botswana', 'Diamond Princess', 'MS Zaandam', 'Angola', 'Turks and Caicos Islands', 'Burma', 'Burundi', 'Chad', 'Eritrea', 'Grenada', 'Guinea-Bissau', 'Holy See', 'Kosovo', 'Laos', 'Libya', 'Mali', 'Mozambique', 'Saint Kitts and Nevis', 'Somalia', 'Syria', 'nan', 'Saint Barthelemy', 'Virgin Islands', 'Montserrat', 'Saint Vincent and the Grenadines', 'Sierra Leone', 'Northwest Territories', 'Yukon', 'Anguilla', 'British Virgin Islands', 'Papua New Guinea', 'Bhutan', 'Congo (Brazzaville)', 'Gabon', 'Guinea', 'Guyana', 'Haiti', 'Namibia', 'Saint Lucia', 'Seychelles', 'Curacao', 'Cayman Islands', 'Central African Republic', 'Liberia', 'Mauritania', 'Nepal', 'Nicaragua', 'Sudan']
COVID19 Global Forecasting (Week 3)
8,813,914
animator.save('confirm_animation.gif', writer='imagemagick', fps=2) display(Image(url='confirm_animation.gif'))<feature_engineering>
predictions = generate_prediction(test_data, training_data, countries_main, countries, states, min_num_cases = 25 )
COVID19 Global Forecasting (Week 3)
8,813,914
feature_day = [1, 20, 50, 100, 200, 500, 1000]

def CreateInput(data):
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if data['Date'].iloc[i] > fromday:
                day_delta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]

pred_data_all = pd.DataFrame()
with tqdm(total=len(train['Country_Region'].unique())) as pbar:
    for country in train['Country_Region'].unique():
        for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
                df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                X_train = CreateInput(df_train)
                y_train_confirmed = df_train['ConfirmedCases'].ravel()
                y_train_fatalities = df_train['Fatalities'].ravel()
                X_pred = CreateInput(df_test)
                feature_use = X_pred.columns[0]
                for i in range(X_pred.shape[1] - 1, 0, -1):
                    if X_pred.iloc[0, i] > 0:
                        feature_use = X_pred.columns[i]
                        break
                idx = X_train[X_train[feature_use] == 0].shape[0]
                adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
                adjusted_y_train_confirmed = y_train_confirmed[idx:]
                adjusted_y_train_fatalities = y_train_fatalities[idx:]
                pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                max_train_date = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].max()
                min_test_date = pred_data['Date'].min()
                model = ExponentialSmoothing(adjusted_y_train_confirmed, trend='additive').fit()
                y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_confirmed = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['ConfirmedCases'].values
                y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis=0)
                model = ExponentialSmoothing(adjusted_y_train_fatalities, trend='additive').fit()
                y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_fatalities = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['Fatalities'].values
                y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis=0)
                pred_data['ConfirmedCases_hat'] = y_hat_confirmed
                pred_data['Fatalities_hat'] = y_hat_fatalities
                pred_data_all = pred_data_all.append(pred_data)
        pbar.update(1)
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_2 = df_val.copy()
missing_countries = predictions[(predictions.ConfirmedCases.isnull())&(pd.isnull(predictions.Province_State)) ]['Country_Region'].unique().tolist() missing_states = predictions[(predictions.ConfirmedCases.isnull())&(pd.notnull(predictions.Province_State)) ]['Province_State'].unique().tolist()
COVID19 Global Forecasting (Week 3)
8,813,914
feature_day = [1, 20, 50, 100, 200, 500, 1000]

def CreateInput(data):
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if data['Date'].iloc[i] > fromday:
                day_delta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]

pred_data_all = pd.DataFrame()
with tqdm(total=len(train['Country_Region'].unique())) as pbar:
    for country in train['Country_Region'].unique():
        for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
                df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                X_train = CreateInput(df_train)
                y_train_confirmed = df_train['ConfirmedCases'].ravel()
                y_train_fatalities = df_train['Fatalities'].ravel()
                X_pred = CreateInput(df_test)
                feature_use = X_pred.columns[0]
                for i in range(X_pred.shape[1] - 1, 0, -1):
                    if X_pred.iloc[0, i] > 0:
                        feature_use = X_pred.columns[i]
                        break
                idx = X_train[X_train[feature_use] == 0].shape[0]
                adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
                adjusted_y_train_confirmed = y_train_confirmed[idx:]
                adjusted_y_train_fatalities = y_train_fatalities[idx:]
                idx = X_pred[X_pred[feature_use] == 0].shape[0]
                adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)  # unused below; kept from the polynomial variant
                pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                max_train_date = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].max()
                min_test_date = pred_data['Date'].min()
                model = SARIMAX(adjusted_y_train_confirmed, order=(1, 1, 0), measurement_error=True).fit(disp=False)
                y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_confirmed = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['ConfirmedCases'].values
                y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis=0)
                model = SARIMAX(adjusted_y_train_fatalities, order=(1, 1, 0), measurement_error=True).fit(disp=False)
                y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_fatalities = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['Fatalities'].values
                y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis=0)
                pred_data['ConfirmedCases_hat'] = y_hat_confirmed
                pred_data['Fatalities_hat'] = y_hat_fatalities
                pred_data_all = pred_data_all.append(pred_data)
        pbar.update(1)
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_3 = df_val.copy()
def fill_excl_pred(predictions, training_data_original, test_data):
    for country in missing_countries:
        print(country)
        pred_loc_df = predictions[predictions['Country_Region'] == country]
        train_loc_df = training_data_original[training_data_original['Country_Region'] == country]
        series_comf_cases = train_loc_df['ConfirmedCases'].values
        series_fatal = train_loc_df['Fatalities'].values
        test_start = test_data[test_data['Country_Region'] == country]['Date'].values[0]
        series_comf_cases_test = training_data_original[(training_data_original['Country_Region'] == country) & (training_data_original['Date'] >= test_start)]['ConfirmedCases']
        series_fatal_test = training_data_original[(training_data_original['Country_Region'] == country) & (training_data_original['Date'] >= test_start)]['Fatalities']
        if len(series_comf_cases) > 0:
            regressor = LinearRegression()
            regressor.fit(np.arange(len(series_comf_cases_test)).reshape(-1, 1), series_comf_cases_test)
            comf_cases_pred = regressor.predict(np.arange(13, 43).reshape(-1, 1))
            regressor.fit(np.arange(len(series_fatal_test)).reshape(-1, 1), series_fatal_test)
            fatal_pred = regressor.predict(np.arange(13, 43).reshape(-1, 1))
        else:
            comf_cases_pred = []
            fatal_pred = []
        conf_cases_loc = np.concatenate((series_comf_cases_test, comf_cases_pred), axis=0)
        fatal_loc = np.concatenate((series_fatal_test, fatal_pred), axis=0)
        predictions.loc[predictions['Country_Region'] == country, 'ConfirmedCases'] = conf_cases_loc.astype(int)
        predictions.loc[predictions['Country_Region'] == country, 'Fatalities'] = fatal_loc.astype(int)
    for state in missing_states:
        print(state)
        pred_loc_df = predictions[predictions['Province_State'] == state]
        train_loc_df = training_data_original[training_data_original['Province_State'] == state]
        series_comf_cases = train_loc_df['ConfirmedCases'].values
        series_fatal = train_loc_df['Fatalities'].values
        test_start = test_data[test_data['Province_State'] == state]['Date'].values[0]
        series_comf_cases_test = training_data_original[(training_data_original['Province_State'] == state) & (training_data_original['Date'] >= test_start)]['ConfirmedCases']
        # Bug fix: the original indexed `training_data` with a mask built from
        # `training_data_original`, which misaligns the two frames.
        series_fatal_test = training_data_original[(training_data_original['Province_State'] == state) & (training_data_original['Date'] >= test_start)]['Fatalities']
        regressor = LinearRegression()
        regressor.fit(np.arange(len(series_comf_cases_test)).reshape(-1, 1), series_comf_cases_test)
        comf_cases_pred = regressor.predict(np.arange(13, 43).reshape(-1, 1))
        regressor.fit(np.arange(len(series_fatal_test)).reshape(-1, 1), series_fatal_test)
        fatal_pred = regressor.predict(np.arange(13, 43).reshape(-1, 1))
        conf_cases_loc = np.concatenate((series_comf_cases_test, comf_cases_pred), axis=0)
        fatal_loc = np.concatenate((series_fatal_test, fatal_pred), axis=0)
        predictions.loc[predictions['Province_State'] == state, 'ConfirmedCases'] = conf_cases_loc.astype(int)
        predictions.loc[predictions['Province_State'] == state, 'Fatalities'] = fatal_loc.astype(int)
    return predictions
COVID19 Global Forecasting (Week 3)
8,813,914
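The linear-regression fallback in fill_excl_pred, reduced to its core: regress cases on the day index within the test window, then extrapolate over the remaining horizon (toy series; the real cell predicts day indices 13..42):

import numpy as np
from sklearn.linear_model import LinearRegression

y = np.array([0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 8], dtype=float)  # toy cases
reg = LinearRegression().fit(np.arange(len(y)).reshape(-1, 1), y)
print(reg.predict(np.arange(13, 43).reshape(-1, 1))[:5].round(1))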
method_list = ['Poly Bayesian Ridge','Exponential Smoothing','SARIMA'] method_val = [df_val_1,df_val_2,df_val_3] for i in range(0,3): df_val = method_val[i] method_score = [method_list[i]] + [RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)] + [RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)] print(method_score )<save_to_csv>
prediction_final = fill_excl_pred(predictions, training_data_original, test_data )
COVID19 Global Forecasting (Week 3)
8,813,914
df_val = df_val_3 submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']] submission.columns = ['ForecastId','ConfirmedCases','Fatalities'] submission = submission.round({'ConfirmedCases': 0, 'Fatalities': 0}) submission.to_csv('submission.csv', index=False) submission<filter>
prediction_final[prediction_final['Country_Region'] == 'Italy']
COVID19 Global Forecasting (Week 3)
8,813,914
df_worldinfor[df_worldinfor['Country'] == 'Vietnam']<import_modules>
predictions_csv = pd.DataFrame() predictions_csv['ForecastId'] = prediction_final['ForecastId'].astype(int) predictions_csv['ConfirmedCases'] = prediction_final['ConfirmedCases'].astype(float) predictions_csv['Fatalities'] = prediction_final['Fatalities'].astype(float )
COVID19 Global Forecasting (Week 3)
8,813,914
<load_from_csv><EOS>
predictions_csv.to_csv('submission.csv', index = False )
COVID19 Global Forecasting (Week 3)
8,819,836
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<drop_column>
warnings.filterwarnings('ignore' )
COVID19 Global Forecasting (Week 3)
8,819,836
print("fill blanks and add region for counting")
df.fillna(' ', inplace=True)
df['Lat'] = df['Province_State'] + df['Country_Region']
df.drop('Province_State', axis=1, inplace=True)
df.drop('Country_Region', axis=1, inplace=True)<load_from_csv>
dftrain = pd.read_csv('.. /input/covid19-global-forecasting-week-3/train.csv', parse_dates=['Date'] ).sort_values(by=['Country_Region', 'Date'] ).fillna('None') dftest = pd.read_csv('.. /input/covid19-global-forecasting-week-3/test.csv', parse_dates=['Date'] ).sort_values(by=['Country_Region', 'Date'] ).fillna('None') dfsubm = pd.read_csv('.. /input/covid19-global-forecasting-week-3/submission.csv' )
COVID19 Global Forecasting (Week 3)
8,819,836
countries_list = df.Lat.unique()
df1 = []
for i in countries_list:
    df1.append(df[df['Lat'] == i])
print("we have " + str(len(df1)) + " regions in our dataset")
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/test.csv")<choose_model_class>
confirmed = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv' ).sort_values(by='Country/Region') deaths = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv') recovered = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv' )
COVID19 Global Forecasting (Week 3)