Dataset columns:
kernel_id: int64, values 24.2k to 23.3M
prompt: string, lengths 8 to 1.85M
completion: string, lengths 1 to 182k
comp_name: string, lengths 5 to 57
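Each row below is one notebook cell: a kernel_id, the prompt code, the tagged completion code, and the competition name. A minimal sketch of loading such a table with pandas, assuming a hypothetical parquet export named kernels.parquet (the actual storage format is not shown here):

import pandas as pd

# 'kernels.parquet' is an assumed file name, not part of the source.
df = pd.read_parquet('kernels.parquet')
print(df[['kernel_id', 'comp_name']].head())
print(df['completion'].str.len().describe())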
8,819,836
submit_confirmed = []
submit_fatal = []
for i in df1:
    data = i.ConfirmedCases.astype('int32').tolist()
    try:
        model = SARIMAX(data, order=(1, 1, 0), seasonal_order=(1, 1, 0, 12), measurement_error=True)
        model_fit = model.fit(disp=False)
        predicted = model_fit.predict(len(data), len(data) + 34)
        new = np.concatenate((np.array(data), np.array([int(num) for num in predicted])), axis=0)
        submit_confirmed.extend(list(new[-43:]))
    except:
        submit_confirmed.extend(list(data[-10:-1]))
        for j in range(34):
            submit_confirmed.append(data[-1] * 2)
    data = i.Fatalities.astype('int32').tolist()
    try:
        model = SARIMAX(data, order=(1, 1, 0), seasonal_order=(1, 1, 0, 12), measurement_error=True)
        model_fit = model.fit(disp=False)
        predicted = model_fit.predict(len(data), len(data) + 34)
        new = np.concatenate((np.array(data), np.array([int(num) for num in predicted])), axis=0)
        submit_fatal.extend(list(new[-43:]))
    except:
        submit_fatal.extend(list(data[-10:-1]))
        for j in range(34):
            submit_fatal.append(data[-1] * 2)
<data_type_conversions>
def change_countryprovince(df):
    df['Country_Region'] = df['Country/Region']
    df['Province_State'] = df['Province/State']
    df = df.drop(['Country/Region', 'Province/State'], axis=1)
    return df
COVID19 Global Forecasting (Week 3)
8,819,836
df_submit = pd.concat([pd.Series(np.arange(1, 1 + len(submit_confirmed))), pd.Series(submit_confirmed), pd.Series(submit_fatal)], axis=1)
df_submit = df_submit.fillna(method='pad').astype(int)
<merge>
confirmed = change_countryprovince(confirmed)
deaths = change_countryprovince(deaths)
recovered = change_countryprovince(recovered)
csse_data = pd.DataFrame(confirmed.set_index(['Province_State', 'Country_Region']).drop(['Lat', 'Long'], axis=1).stack()).reset_index()
csse_data.columns = ['Province_State', 'Country_Region', 'Date', 'ConfirmedCases']
csse_data['Fatalities'] = deaths.set_index(['Province_State', 'Country_Region']).drop(['Lat', 'Long'], axis=1).stack().tolist()
csse_data['Date'] = pd.to_datetime(csse_data['Date'])
csse_data.head()
COVID19 Global Forecasting (Week 3)
8,819,836
df_submit.rename(columns={0: 'ForecastId', 1: 'ConfirmedCases', 2: 'Fatalities'}, inplace=True)
complete_test = pd.merge(test, df_submit, how="left", on="ForecastId")
<save_to_csv>
len(csse_data['Province_State'].unique() )
COVID19 Global Forecasting (Week 3)
8,819,836
df_submit.to_csv('submission.csv', header=['ForecastId', 'ConfirmedCases', 'Fatalities'], index=False)
complete_test.to_csv('complete_test.csv', index=False)
<import_modules>
len(dftrain['Province_State'].unique() )
COVID19 Global Forecasting (Week 3)
8,819,836
plotly.offline.init_notebook_mode()
%matplotlib inline

def RMSLE(pred, actual):
    return np.sqrt(np.mean(np.power((np.log(pred + 1) - np.log(actual + 1)), 2)))
<load_from_csv>
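RMSLE penalizes relative rather than absolute error, which suits counts spanning several orders of magnitude. A quick sanity check of the metric on made-up numbers:

import numpy as np

# Illustrative values only, not competition data.
pred = np.array([100.0, 1000.0, 10000.0])
actual = np.array([110.0, 900.0, 12000.0])
rmsle = np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))
print(rmsle)  # ~0.13: a 10-20% relative miss costs roughly the same at any scale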
dftrain = dftrain.join(confirmed[['Country_Region', 'Province_State', 'Lat', 'Long']].set_index(['Province_State', 'Country_Region']), on=['Province_State', 'Country_Region'] )
COVID19 Global Forecasting (Week 3)
8,819,836
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("../input/covid19-global-forecasting-week-2/test.csv")
train = pd.read_csv("../input/covid19-global-forecasting-week-2/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region', 'Province_State', 'Date'])
test = test.sort_values(['Country_Region', 'Province_State', 'Date'])
<categorify>
dftrain['Dayofyear'] = dftrain['Date'].dt.dayofyear
dftest['Dayofyear'] = dftest['Date'].dt.dayofyear
COVID19 Global Forecasting (Week 3)
8,819,836
train[['ConfirmedCases', 'Fatalities']] = train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']].transform('cummax') <feature_engineering>
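The cummax transform enforces monotonically non-decreasing cumulative counts per region, flattening days where a source revised figures downward. A small illustration with toy numbers:

import pandas as pd

# Toy series, not competition data: day 3 was revised below day 2.
s = pd.Series([10, 25, 20, 30])
print(s.cummax().tolist())  # [10, 25, 25, 30] - the dip is removed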
def transpose_df(df):
    df = df.drop(['Lat', 'Long'], axis=1).groupby('Country_Region').sum().T
    df.index = pd.to_datetime(df.index)
    return df
COVID19 Global Forecasting (Week 3)
8,819,836
feature_day = [1, 2, 5, 20, 50, 100, 200, 500, 1000]

def CreateInput(data):
    # For each threshold, count days elapsed since the region first exceeded it.
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if data['Date'].iloc[i] > fromday:
                day_denta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_denta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]

pred_data_all = pd.DataFrame()
with tqdm(total=len(train['Country_Region'].unique())) as pbar:
    for country in train['Country_Region'].unique():
        for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
            df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
            df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
            X_train = CreateInput(df_train)
            y_train_confirmed = df_train['ConfirmedCases'].ravel()
            y_train_fatalities = df_train['Fatalities'].ravel()
            X_pred = CreateInput(df_test)
            # Use the highest case-count threshold already reached as the single feature.
            feature_use = X_pred.columns[0]
            for i in range(X_pred.shape[1] - 1, 0, -1):
                if X_pred.iloc[0, i] > 0:
                    feature_use = X_pred.columns[i]
                    break
            idx = X_train[X_train[feature_use] == 0].shape[0]
            adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
            adjusted_y_train_confirmed = y_train_confirmed[idx:]
            adjusted_y_train_fatalities = y_train_fatalities[idx:]
            adjusted_X_pred = X_pred[feature_use].values.reshape(-1, 1)
            model = make_pipeline(PolynomialFeatures(2), BayesianRidge())
            model.fit(adjusted_X_train, adjusted_y_train_confirmed)
            y_hat_confirmed = model.predict(adjusted_X_pred)
            model.fit(adjusted_X_train, adjusted_y_train_fatalities)
            y_hat_fatalities = model.predict(adjusted_X_pred)
            pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
            pred_data['ConfirmedCases_hat'] = y_hat_confirmed
            pred_data['Fatalities_hat'] = y_hat_fatalities
            pred_data_all = pred_data_all.append(pred_data)
        pbar.update(1)
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_1 = df_val.copy()
<compute_test_metric>
confirmedT = transpose_df(confirmed)
deathsT = transpose_df(deaths)
recoveredT = transpose_df(recovered)
mortalityT = deathsT / confirmedT
COVID19 Global Forecasting (Week 3)
8,819,836
RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values )<compute_test_metric>
def add_day(df):
    df['Date'] = df.index
    df['Dayofyear'] = df['Date'].dt.dayofyear
    return df
COVID19 Global Forecasting (Week 3)
8,819,836
RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values )<groupby>
confirmedT, deathsT, recoveredT, mortalityT = add_day(confirmedT), add_day(deathsT), add_day(recoveredT), add_day(mortalityT )
COVID19 Global Forecasting (Week 3)
8,819,836
country = "Vietnam"
df_val = df_val_1
df_val[df_val['Country_Region'] == country].groupby(['Date', 'Country_Region']).sum().reset_index()
<save_model>
allcountries_ordered = confirmed.set_index(['Country_Region'] ).iloc[:,-2].sort_values(ascending=False ).index.tolist()
COVID19 Global Forecasting (Week 3)
8,819,836
animator.save('confirm_animation.gif', writer='imagemagick', fps=2)
display(Image(url='confirm_animation.gif'))
<feature_engineering>
confirmed.set_index(['Country_Region'] ).iloc[:,-2].sort_values(ascending=False ).to_csv('confirmed_countries.csv' )
COVID19 Global Forecasting (Week 3)
8,819,836
feature_day = [1, 2, 5, 20, 50, 100, 200, 500, 1000]

def CreateInput(data):
    # Same threshold-based day-count features as the Bayesian Ridge cell above.
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if data['Date'].iloc[i] > fromday:
                day_denta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_denta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]

pred_data_all = pd.DataFrame()
with tqdm(total=len(train['Country_Region'].unique())) as pbar:
    for country in train['Country_Region'].unique():
        for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
                df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                X_train = CreateInput(df_train)
                y_train_confirmed = df_train['ConfirmedCases'].ravel()
                y_train_fatalities = df_train['Fatalities'].ravel()
                X_pred = CreateInput(df_test)
                feature_use = X_pred.columns[0]
                for i in range(X_pred.shape[1] - 1, 0, -1):
                    if X_pred.iloc[0, i] > 0:
                        feature_use = X_pred.columns[i]
                        break
                idx = X_train[X_train[feature_use] == 0].shape[0]
                adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
                adjusted_y_train_confirmed = y_train_confirmed[idx:]
                adjusted_y_train_fatalities = y_train_fatalities[idx:]
                pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                max_train_date = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].max()
                min_test_date = pred_data['Date'].min()
                # Forecast only the dates beyond the training range; backfill the overlap with known values.
                model = ExponentialSmoothing(adjusted_y_train_confirmed, trend='additive').fit()
                y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_confirmed = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['ConfirmedCases'].values
                y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis=0)
                model = ExponentialSmoothing(adjusted_y_train_fatalities, trend='additive').fit()
                y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_fatalities = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['Fatalities'].values
                y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis=0)
                pred_data['ConfirmedCases_hat'] = y_hat_confirmed
                pred_data['Fatalities_hat'] = y_hat_fatalities
                pred_data_all = pred_data_all.append(pred_data)
        pbar.update(1)
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_2 = df_val.copy()
<feature_engineering>
def df_day1(df, confirmed):
    df_day1 = pd.DataFrame({'Days since 100 cases': np.arange(1000)}).set_index('Days since 100 cases')
    countries_df = df.columns.tolist()[:-2]
    countries_conf = confirmed.columns.tolist()[:-2]
    for ic, country in enumerate(countries_df):
        for ic2, country2 in enumerate(countries_conf):
            if country == country2:
                dfsub = df[confirmed[country] > 100.][country]
                df_day1[country] = np.nan
                df_day1.loc[:len(dfsub) - 1, country] = dfsub.tolist()
    df_day1 = df_day1.dropna(how='all')
    return df_day1
COVID19 Global Forecasting (Week 3)
8,819,836
feature_day = [1, 2, 5, 20, 50, 100, 200, 500, 1000]

def CreateInput(data):
    # Same threshold-based day-count features as in the previous model cells.
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if data['Date'].iloc[i] > fromday:
                day_denta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_denta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]

pred_data_all = pd.DataFrame()
with tqdm(total=len(train['Country_Region'].unique())) as pbar:
    for country in train['Country_Region'].unique():
        for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
                df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                X_train = CreateInput(df_train)
                y_train_confirmed = df_train['ConfirmedCases'].ravel()
                y_train_fatalities = df_train['Fatalities'].ravel()
                X_pred = CreateInput(df_test)
                feature_use = X_pred.columns[0]
                for i in range(X_pred.shape[1] - 1, 0, -1):
                    if X_pred.iloc[0, i] > 0:
                        feature_use = X_pred.columns[i]
                        break
                idx = X_train[X_train[feature_use] == 0].shape[0]
                adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
                adjusted_y_train_confirmed = y_train_confirmed[idx:]
                adjusted_y_train_fatalities = y_train_fatalities[idx:]
                idx = X_pred[X_pred[feature_use] == 0].shape[0]
                adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
                pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                max_train_date = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].max()
                min_test_date = pred_data['Date'].min()
                # SARIMA forecast beyond the training range; overlap is backfilled with known values.
                model = SARIMAX(adjusted_y_train_confirmed, order=(1, 1, 0), measurement_error=True).fit(disp=False)
                y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_confirmed = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['ConfirmedCases'].values
                y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis=0)
                model = SARIMAX(adjusted_y_train_fatalities, order=(1, 1, 0), measurement_error=True).fit(disp=False)
                y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_fatalities = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['Fatalities'].values
                y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis=0)
                pred_data['ConfirmedCases_hat'] = y_hat_confirmed
                pred_data['Fatalities_hat'] = y_hat_fatalities
                pred_data_all = pred_data_all.append(pred_data)
        pbar.update(1)
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_3 = df_val.copy()
<compute_test_metric>
confirmed_day1 = df_day1(confirmedT, confirmedT)
deaths_day1 = df_day1(deathsT, confirmedT)
recovered_day1 = df_day1(recoveredT, confirmedT)
mortality_day1 = df_day1(mortalityT, confirmedT)
confirmednorm_day1 = confirmed_day1 / confirmed_day1.loc[0, :]
maxday = confirmed_day1.shape[0]
COVID19 Global Forecasting (Week 3)
8,819,836
method_list = ['Poly Bayesian Ridge', 'Exponential Smoothing', 'SARIMA']
method_val = [df_val_1, df_val_2, df_val_3]
for i in range(0, 3):
    df_val = method_val[i]
    method_score = [method_list[i]] \
        + [RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values, df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)] \
        + [RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values, df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)]
    print(method_score)
<save_to_csv>
date_day1 = confirmedT.copy()
for column in date_day1:
    date_day1[column] = confirmedT.index.tolist()
date_day1 = df_day1(date_day1, confirmedT)
COVID19 Global Forecasting (Week 3)
8,819,836
df_val = df_val_3
df_val['ConfirmedCases_hat'] = (df_val_1['ConfirmedCases_hat'] + df_val_2['ConfirmedCases_hat'] + df_val_3['ConfirmedCases_hat']) / 3
df_val['Fatalities_hat'] = (df_val_1['Fatalities_hat'] + df_val_2['Fatalities_hat'] + df_val_3['Fatalities_hat']) / 3
submission = df_val[['ForecastId', 'ConfirmedCases_hat', 'Fatalities_hat']]
submission.columns = ['ForecastId', 'ConfirmedCases', 'Fatalities']
submission = submission.round({'ConfirmedCases': 0, 'Fatalities': 0})
submission.to_csv('submission.csv', index=False)
submission
<filter>
def logistic_curve(x, k, x_0, ymax): return ymax /(1 + np.exp(-k*(x-x_0)) )
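logistic_curve models saturating growth with rate k, midpoint x_0, and ceiling ymax. A sketch of fitting it to synthetic counts with scipy, mirroring the curve_fit call used further down (the data here is made up for illustration):

import numpy as np
from scipy.optimize import curve_fit

def logistic_curve(x, k, x_0, ymax):
    return ymax / (1 + np.exp(-k * (x - x_0)))

# Synthetic, noise-free counts for illustration only.
x = np.arange(60)
y = logistic_curve(x, 0.25, 30, 5000)
popt, _ = curve_fit(logistic_curve, x, y, p0=[0.1, 20, y.max()], bounds=([0, 0, 0], np.inf))
print(popt)  # recovers approximately [0.25, 30, 5000]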
COVID19 Global Forecasting (Week 3)
8,819,836
df_worldinfor[df_worldinfor['Country'] == 'Vietnam']<import_modules>
def logistic_curve2(x, k1, k2, x_0, ymax): return ymax /(1 + np.exp(-k1*(x-x_0)) + np.exp(-k2*(x-x_0)) )
COVID19 Global Forecasting (Week 3)
8,819,836
print("Read in libraries") <load_from_csv>
list_countries = dftrain[dftrain['Date'] == '2020-01-22']['Country_Region'].tolist()
list_states = dftrain[dftrain['Date'] == '2020-01-22']['Province_State'].tolist()
datenow = datetime.now()
COVID19 Global Forecasting (Week 3)
8,819,836
train = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/train.csv")
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/test.csv")
sub = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/submission.csv")
print("read in files")
test.head()
<concatenate>
list_date_pand = []
list_maxcases = []
list_maxfat = []
for country, state in list(zip(list_countries, list_states)):
    df2 = dftrain.loc[(dftrain['Country_Region'] == country) & (dftrain['Province_State'] == state)].fillna('None')
    maxcases, maxfat = df2['ConfirmedCases'].max(), df2['Fatalities'].max()
    date_pand = df2[df2['ConfirmedCases'] > 100.]['Date'].tolist()
    try:
        list_date_pand.append(pd.to_datetime(date_pand[0]))
    except:
        list_date_pand.append(pd.to_datetime(datenow))
    list_maxcases.append(maxcases)
    list_maxfat.append(maxfat)
dfstartpand = pd.DataFrame(np.array([list_countries, list_states, list_date_pand, list_maxcases, list_maxfat]).T, columns=['Country_Region', 'Province_State', 'Start_Pandemic', 'ConfirmedCases', 'Fatalities'])
dfstartpand['Start_Pandemic'] = dfstartpand['Start_Pandemic'].dt.date
COVID19 Global Forecasting (Week 3)
8,819,836
train = train.append(test[test['Date']>'2020-03-31'] )<feature_engineering>
dfstartpand_ordered = dfstartpand.sort_values(by=['Start_Pandemic', 'ConfirmedCases', 'Fatalities'], ascending=[True, False, False])
country_state_ordered = list(zip(dfstartpand_ordered['Country_Region'].tolist(), dfstartpand_ordered['Province_State']))
datetrain = dftrain['Date'].unique()
datetest = dftest['Date'].unique()
COVID19 Global Forecasting (Week 3)
8,819,836
train['Date'] = pd.to_datetime(train['Date'], format='%Y-%m-%d')
train['day_dist'] = train['Date'] - train['Date'].min()
train['day_dist'] = train['day_dist'].dt.days
print(train['Date'].max())
print(test['Date'].min())
print(test['Date'].max())
cat_cols = train.dtypes[train.dtypes == 'object'].keys()
for cat_col in cat_cols:
    train[cat_col].fillna('no_value', inplace=True)
train['place'] = train['Province_State'] + '_' + train['Country_Region']
<categorify>
dftest['ConfirmedCases_logreg'] = 0.0
dftrain['ConfirmedCases_logreg'] = 0.0
dftest['Fatalities_logreg'] = 0.0
dftrain['Fatalities_logreg'] = 0.0
for country, state in country_state_ordered:
    masktrain = (dftrain['Country_Region'] == country) & (dftrain['Province_State'] == state)
    masktrain2 = (dftrain['Country_Region'] == country) & (dftrain['Province_State'] == state) & (dftrain['Date'] <= '2020-04-07') & (dftrain['Date'] >= starttest)
    masktest = (dftest['Country_Region'] == country) & (dftest['Province_State'] == state)
    masktest2 = (dftest['Country_Region'] == country) & (dftest['Province_State'] == state) & (dftest['Date'] <= '2020-04-07')
    df2plot = dftrain[masktrain].set_index('Date')
    X = np.arange(len(df2plot))
    X_test = (np.timedelta64(datetest[0] - datetrain[0], 'D')).astype(float) + np.arange(0, len(datetest))
    # Fit a logistic curve per region; fall back to the last observed value when the fit fails.
    try:
        y = df2plot['ConfirmedCases']
        p0_cases = [1 / (len(X) / 2.), X[-1], y.max()]
        popt, pcov = curve_fit(logistic_curve, X, y, p0=p0_cases, bounds=([0, 0, 0], np.inf), maxfev=1000)
        k_cases, x_0_cases, ymax_cases = popt
        cases_train_fc = pd.Series(logistic_curve(X, k_cases, x_0_cases, ymax_cases), index=df2plot.index)
        cases_test_fc = pd.Series(logistic_curve(X_test, k_cases, x_0_cases, ymax_cases), index=datetest)
        dftest.loc[masktest, 'ConfirmedCases_logreg'] = cases_test_fc.tolist()
        dftrain.loc[masktrain, 'ConfirmedCases_logreg'] = cases_train_fc.tolist()
    except:
        print(country + ' ' + state + ' Unable to fit the confirmed cases')
        dftest.loc[masktest, 'ConfirmedCases_logreg'] = dftrain.loc[masktrain, 'ConfirmedCases'].iloc[-1]
        dftrain.loc[masktrain, 'ConfirmedCases_logreg'] = dftrain.loc[masktrain, 'ConfirmedCases']
    try:
        y = df2plot['Fatalities']
        p0_deaths = [1 / (len(X) / 2.), X[-1], y.max()]
        popt, pcov = curve_fit(logistic_curve, X, y, p0=p0_deaths, bounds=([0, 0, 0], np.inf), maxfev=1000)
        k_deaths, x_0_deaths, ymax_deaths = popt
        deaths_train_fc = pd.Series(logistic_curve(X, k_deaths, x_0_deaths, ymax_deaths), index=datetrain)
        deaths_test_fc = pd.Series(logistic_curve(X_test, k_deaths, x_0_deaths, ymax_deaths), index=datetest)
        dftest.loc[masktest, 'Fatalities_logreg'] = deaths_test_fc.tolist()
        dftrain.loc[masktrain, 'Fatalities_logreg'] = deaths_train_fc.tolist()
    except:
        print(country + ' ' + state + ' Unable to fit the fatalities')
        dftest.loc[masktest, 'Fatalities_logreg'] = dftrain.loc[masktrain, 'Fatalities'].iloc[-1]
        dftrain.loc[masktrain, 'Fatalities_logreg'] = dftrain.loc[masktrain, 'Fatalities']
    # Where test dates overlap training, use the actuals instead of the fit.
    dftest.loc[masktest2, 'ConfirmedCases_logreg'] = dftrain.loc[masktrain2, 'ConfirmedCases'].tolist()
    dftest.loc[masktest2, 'Fatalities_logreg'] = dftrain.loc[masktrain2, 'Fatalities'].tolist()
COVID19 Global Forecasting (Week 3)
8,819,836
cat_cols = train.dtypes[train.dtypes == 'object'].keys()
for cat_col in ['place']:
    le = preprocessing.LabelEncoder()
    le.fit(train[cat_col])
    train[cat_col] = le.transform(train[cat_col])
drop_cols = ['Id', 'ConfirmedCases', 'Date', 'ForecastId', 'Fatalities', 'day_dist', 'Province_State', 'Country_Region']
<filter>
dfsubm['ConfirmedCases'] = dftest['ConfirmedCases_logreg']
dfsubm['Fatalities'] = dftest['Fatalities_logreg']
COVID19 Global Forecasting (Week 3)
8,819,836
val = train[(train['Date'] >= '2020-03-19') & (train['Id'].isnull() == False)]
val
<prepare_x_and_y>
dfsubm.to_csv('submission.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,775,443
y_ft = train["Fatalities"]
y_val_ft = val["Fatalities"]
y_cc = train["ConfirmedCases"]
y_val_cc = val["ConfirmedCases"]
<compute_test_metric>
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv', parse_dates=['Date'])
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv', parse_dates=['Date'])
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/submission.csv')
COVID19 Global Forecasting (Week 3)
8,775,443
def rmsle(y_true, y_pred):
    return np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2)))

def mape(y_true, y_pred):
    return np.mean(np.abs(y_pred - y_true) * 100 / (y_true + 1))
<init_hyperparams>
train.shape
COVID19 Global Forecasting (Week 3)
8,775,443
params = { "objective": "regression", "boosting": 'gbdt', "num_leaves": 1280, "learning_rate": 0.05, "feature_fraction": 0.9, "reg_lambda": 2, "metric": "rmse", 'min_data_in_leaf':20 } <drop_column>
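These are LightGBM regression hyperparameters (gbdt boosting, rmse metric). A sketch of how they would typically be consumed, assuming a feature matrix X and target y that are built elsewhere in the notebook:

import lightgbm as lgb

# X and y are placeholders for the engineered features and target;
# they are not defined in this cell.
dtrain = lgb.Dataset(X, label=y)
booster = lgb.train(params, dtrain, num_boost_round=500)
preds = booster.predict(X)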
train.columns = train.columns.str.lower()
test.columns = test.columns.str.lower()
COVID19 Global Forecasting (Week 3)
8,775,443
dates = dates[dates > '2020-03-31']
dates
<count_missing_values>
train.fillna(' ', inplace=True)
test.fillna(' ', inplace=True)
train_id = train.pop('id')
test_id = test.pop('forecastid')
train['cp'] = train['country_region'] + train['province_state']
test['cp'] = test['country_region'] + test['province_state']
train.drop(['province_state', 'country_region'], axis=1, inplace=True)
test.drop(['province_state', 'country_region'], axis=1, inplace=True)
COVID19 Global Forecasting (Week 3)
8,775,443
train.isnull().sum()<count_missing_values>
train.cp.nunique() , test.cp.nunique()
COVID19 Global Forecasting (Week 3)
8,775,443
train.isnull().sum()<filter>
df = pd.DataFrame()
def create_time_feat(data):
    df['date'] = data['date']
    df['hour'] = df['date'].dt.hour
    df['weekofyear'] = df['date'].dt.weekofyear
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['dayofyear'] = df['date'].dt.dayofyear
    x = df[['hour', 'weekofyear', 'quarter', 'month', 'dayofyear']]
    return x

cr_tr = create_time_feat(train)
cr_te = create_time_feat(test)
COVID19 Global Forecasting (Week 3)
8,775,443
test[test['Country_Region']=='Italy']<filter>
train_df = pd.concat([train, cr_tr], axis=1)
test_df = pd.concat([test, cr_te], axis=1)
train_df.shape, test_df.shape, train_df.cp.nunique(), test_df.cp.nunique(), test.shape
COVID19 Global Forecasting (Week 3)
8,775,443
test[test['Country_Region']=='Italy']<load_from_csv>
test_df.dropna(inplace=True )
COVID19 Global Forecasting (Week 3)
8,775,443
train_sub = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/train.csv" )<feature_engineering>
le = LabelEncoder()
train_df['cp_le'] = le.fit_transform(train_df['cp'])
test_df['cp_le'] = le.transform(test_df['cp'])
train_df.drop(['cp'], axis=1, inplace=True)
test_df.drop(['cp'], axis=1, inplace=True)
COVID19 Global Forecasting (Week 3)
8,775,443
<merge>
COVID19 Global Forecasting (Week 3)
8,775,443
test = pd.merge(test,train_sub[['Province_State','Country_Region','Date','ConfirmedCases','Fatalities']], on=['Province_State','Country_Region','Date'], how='left' )<filter>
COVID19 Global Forecasting (Week 3)
8,775,443
test.loc[test['ConfirmedCases_x'].isnull() ==True]<feature_engineering>
def create_date_feat(data, cf, ft):
    for d in data['date'].drop_duplicates():
        for i in data['cp_le'].drop_duplicates():
            org_mask = (data['date'] == d) & (data['cp_le'] == i)
            for lag in range(1, 15):
                mask_loc = (data['date'] == (d - pd.Timedelta(days=lag))) & (data['cp_le'] == i)
                try:
                    data.loc[org_mask, 'cf_' + str(lag)] = data.loc[mask_loc, cf].values
                    data.loc[org_mask, 'ft_' + str(lag)] = data.loc[mask_loc, ft].values
                except:
                    data.loc[org_mask, 'cf_' + str(lag)] = 0.0
                    data.loc[org_mask, 'ft_' + str(lag)] = 0.0

create_date_feat(train_df, 'confirmedcases', 'fatalities')
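The nested date-by-region loop above is O(dates x regions x lags). A sketch of an equivalent vectorized approach using groupby().shift(), assuming the same train_df layout (one row per region per day, sorted by date within each cp_le):

# Assumes train_df is sorted by ['cp_le', 'date'] with daily continuity per region.
for lag in range(1, 15):
    train_df[f'cf_{lag}'] = train_df.groupby('cp_le')['confirmedcases'].shift(lag).fillna(0.0)
    train_df[f'ft_{lag}'] = train_df.groupby('cp_le')['fatalities'].shift(lag).fillna(0.0)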
COVID19 Global Forecasting (Week 3)
8,775,443
test.loc[test['ConfirmedCases_x'].isnull() ==True, 'ConfirmedCases_x'] = test.loc[test['ConfirmedCases_x'].isnull() ==True, 'ConfirmedCases_y']<feature_engineering>
cf_feat = ['cp_le', 'weekofyear', 'quarter', 'month', 'dayofyear', 'cf_1', 'cf_2', 'cf_3', 'cf_4', 'cf_5', 'cf_6', 'cf_7', 'cf_8', 'cf_9', 'cf_10', 'cf_11', 'cf_12', 'cf_13', 'cf_14']
ft_feat = ['cp_le', 'weekofyear', 'quarter', 'month', 'dayofyear', 'ft_1', 'ft_2', 'ft_3', 'ft_4', 'ft_5', 'ft_6', 'ft_7', 'ft_8', 'ft_9', 'ft_10', 'ft_11', 'ft_12', 'ft_13', 'ft_14']
train_x_cf = train_df[cf_feat]
print(train_x_cf.shape)
train_x_ft = train_df[ft_feat]
print(train_x_ft.shape)
train_x_cf_reshape = train_x_cf.values.reshape(train_x_cf.shape[0], 1, train_x_cf.shape[1])
train_x_ft_reshape = train_x_ft.values.reshape(train_x_ft.shape[0], 1, train_x_ft.shape[1])
train_y_cf = train_df['confirmedcases']
train_y_ft = train_df['fatalities']
train_y_cf_reshape = train_y_cf.values.reshape(-1, 1)
train_y_ft_reshape = train_y_ft.values.reshape(-1, 1)
tr_x_cf, val_x_cf, tr_y_cf, val_y_cf = train_test_split(train_x_cf_reshape, train_y_cf_reshape, test_size=0.2, random_state=0)
tr_x_ft, val_x_ft, tr_y_ft, val_y_ft = train_test_split(train_x_ft_reshape, train_y_ft_reshape, test_size=0.2, random_state=0)
COVID19 Global Forecasting (Week 3)
8,775,443
test.loc[test['Fatalities_x'].isnull() ==True, 'Fatalities_x'] = test.loc[test['Fatalities_x'].isnull() ==True, 'Fatalities_y']<feature_engineering>
def rmsle(pred, true):
    assert pred.shape[0] == true.shape[0]
    return K.sqrt(K.mean(K.square(K.log(pred + 1) - K.log(true + 1))))

es = EarlyStopping(monitor='val_loss', min_delta=0, verbose=0, patience=10, mode='auto')
mc_cf = ModelCheckpoint('model_cf.h5', monitor='val_loss', verbose=0, save_best_only=True)
mc_ft = ModelCheckpoint('model_ft.h5', monitor='val_loss', verbose=0, save_best_only=True)

def lstm_model(hidden_nodes, second_dim, third_dim):
    model = Sequential([LSTM(hidden_nodes, input_shape=(second_dim, third_dim), activation='relu'),
                        Dense(64, activation='relu'),
                        Dense(32, activation='relu'),
                        Dense(1, activation='relu')])
    model.compile(loss=rmsle, optimizer='adam')
    return model

model_cf = lstm_model(10, tr_x_cf.shape[1], tr_x_cf.shape[2])
model_ft = lstm_model(10, tr_x_ft.shape[1], tr_x_ft.shape[2])
history_cf = model_cf.fit(tr_x_cf, tr_y_cf, epochs=200, batch_size=512, validation_data=(val_x_cf, val_y_cf), callbacks=[es, mc_cf])
history_ft = model_ft.fit(tr_x_ft, tr_y_ft, epochs=200, batch_size=512, validation_data=(val_x_ft, val_y_ft), callbacks=[es, mc_ft])
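ModelCheckpoint persists the best weights to model_cf.h5 / model_ft.h5, but the custom rmsle loss is not serializable by name, so reloading would need custom_objects. A sketch, assuming the checkpoints above have been written:

from tensorflow.keras.models import load_model

# rmsle here is the Keras-backend loss defined in the cell above.
best_cf = load_model('model_cf.h5', custom_objects={'rmsle': rmsle})
best_ft = load_model('model_ft.h5', custom_objects={'rmsle': rmsle})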
COVID19 Global Forecasting (Week 3)
8,775,443
<prepare_output>
feat = ['confirmedcases', 'fatalities', 'cf_1', 'ft_1', 'cf_2', 'ft_2', 'cf_3', 'ft_3', 'cf_4', 'ft_4', 'cf_5', 'ft_5', 'cf_6', 'ft_6', 'cf_7', 'ft_7', 'cf_8', 'ft_8', 'cf_9', 'ft_9', 'cf_10', 'ft_10', 'cf_11', 'ft_11', 'cf_12', 'ft_12', 'cf_13', 'ft_13', 'cf_14', 'ft_14']
c_feat = ['cp_le', 'weekofyear', 'quarter', 'month', 'dayofyear', 'cf_1', 'cf_2', 'cf_3', 'cf_4', 'cf_5', 'cf_6', 'cf_7', 'cf_8', 'cf_9', 'cf_10', 'cf_11', 'cf_12', 'cf_13', 'cf_14']
f_feat = ['cp_le', 'weekofyear', 'quarter', 'month', 'dayofyear', 'ft_1', 'ft_2', 'ft_3', 'ft_4', 'ft_5', 'ft_6', 'ft_7', 'ft_8', 'ft_9', 'ft_10', 'ft_11', 'ft_12', 'ft_13', 'ft_14']
tot_feat = ['cp_le', 'weekofyear', 'quarter', 'month', 'dayofyear', 'cf_1', 'ft_1', 'cf_2', 'ft_2', 'cf_3', 'ft_3', 'cf_4', 'ft_4', 'cf_5', 'ft_5', 'cf_6', 'ft_6', 'cf_7', 'ft_7', 'cf_8', 'ft_8', 'cf_9', 'ft_9', 'cf_10', 'ft_10', 'cf_11', 'ft_11', 'cf_12', 'ft_12', 'cf_13', 'ft_13', 'cf_14', 'ft_14']
test_new = test_df.copy().join(pd.DataFrame(columns=feat))
test_mask = (test_df['date'] <= train_df['date'].max())
train_mask = (train_df['date'] >= test_df['date'].min())
test_new.loc[test_mask, feat] = train_df.loc[train_mask, feat].values
future_df = pd.date_range(start=train_df['date'].max() + pd.Timedelta(days=1), end=test_df['date'].max(), freq='1D')

def create_add_trend_pred(data, cf, ft):
    for d in future_df:
        for i in data['cp_le'].drop_duplicates():
            org_mask = (data['date'] == d) & (data['cp_le'] == i)
            for lag in range(1, 15):
                mask_loc = (data['date'] == (d - pd.Timedelta(days=lag))) & (data['cp_le'] == i)
                try:
                    data.loc[org_mask, 'cf_' + str(lag)] = data.loc[mask_loc, cf].values
                    data.loc[org_mask, 'ft_' + str(lag)] = data.loc[mask_loc, ft].values
                except:
                    data.loc[org_mask, 'cf_' + str(lag)] = 0.0
                    data.loc[org_mask, 'ft_' + str(lag)] = 0.0
            test_x = data.loc[org_mask, tot_feat]
            test_x_cf = test_x[c_feat].to_numpy().reshape(1, -1)
            test_x_cf_reshape = test_x_cf.reshape(test_x_cf.shape[0], 1, test_x_cf.shape[1])
            test_x_ft = test_x[f_feat].to_numpy().reshape(1, -1)
            test_x_ft_reshape = test_x_ft.reshape(test_x_ft.shape[0], 1, test_x_ft.shape[1])
            data.loc[org_mask, cf] = model_cf.predict(test_x_cf_reshape)
            data.loc[org_mask, ft] = model_ft.predict(test_x_ft_reshape)

create_add_trend_pred(test_new, 'confirmedcases', 'fatalities')
COVID19 Global Forecasting (Week 3)
8,775,443
sub = test[['ForecastId', 'ConfirmedCases_x','Fatalities_x']]<rename_columns>
sub_pred = pd.DataFrame({'ForecastId': test_id, 'ConfirmedCases': test_new['confirmedcases'], 'Fatalities': test_new['fatalities']})
sub_pred.to_csv('submission.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,800,787
sub.columns = ['ForecastId', 'ConfirmedCases', 'Fatalities']<feature_engineering>
plt.style.use('ggplot')
font = {'family': 'meiryo'}
plt.rc('font', **font)
COVID19 Global Forecasting (Week 3)
8,800,787
sub.loc[sub['ConfirmedCases']<0, 'ConfirmedCases'] = 0<feature_engineering>
train_df = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv")
COVID19 Global Forecasting (Week 3)
8,800,787
sub.loc[sub['Fatalities']<0, 'Fatalities'] = 0<drop_column>
train_df = train_df[train_df["Date"] < "2020-03-27"]
train_df = train_df.fillna("No State")
COVID19 Global Forecasting (Week 3)
8,800,787
<load_from_csv>
test_rate = 0.05
maxlen = 13
train_date_count = len(set(train_df["Date"]))
X, Y = [], []
scaler = StandardScaler()
train_df["ConfirmedCases_std"] = scaler.fit_transform(train_df["ConfirmedCases"].values.reshape(len(train_df["ConfirmedCases"].values), 1))
confirmedCase_std_min = train_df["ConfirmedCases_std"].min()
train_df["ConfirmedCases_std"] = train_df["ConfirmedCases_std"] + abs(confirmedCase_std_min)
for state, country in train_df.groupby(["Province_State", "Country_Region"]).sum().index:
    df = train_df[(train_df["Country_Region"] == country) & (train_df["Province_State"] == state)]
    if df["ConfirmedCases"].sum() != 0:
        for i in range(len(df) - maxlen):
            X.append(df[['ConfirmedCases_std']].iloc[i:(i + maxlen)].values)
            Y.append(df[['ConfirmedCases_std']].iloc[i + maxlen].values)
X = np.array(X)
Y = np.array(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_rate, shuffle=True, random_state=0)
COVID19 Global Forecasting (Week 3)
8,800,787
print("Read in libraries")
print("read in train file")
df = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/train.csv", usecols=['Province_State', 'Country_Region', 'Date', 'ConfirmedCases', 'Fatalities'])
print("fill blanks and add region for counting")
df.fillna(' ', inplace=True)
df['Lat'] = df['Province_State'] + df['Country_Region']
df.drop('Province_State', axis=1, inplace=True)
df.drop('Country_Region', axis=1, inplace=True)
countries_list = df.Lat.unique()
df1 = []
for i in countries_list:
    df1.append(df[df['Lat'] == i])
print("we have " + str(len(df1)) + " regions in our dataset")
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/test.csv")
submit_confirmed = []
submit_fatal = []
for i in df1:
    data = i.ConfirmedCases.astype('int32').tolist()
    try:
        model = SARIMAX(data, order=(1, 1, 0), seasonal_order=(1, 1, 0, 12), measurement_error=True)
        model_fit = model.fit(disp=False)
        predicted = model_fit.predict(len(data), len(data) + 34)
        new = np.concatenate((np.array(data), np.array([int(num) for num in predicted])), axis=0)
        submit_confirmed.extend(list(new[-43:]))
    except:
        submit_confirmed.extend(list(data[-10:-1]))
        for j in range(34):
            submit_confirmed.append(data[-1] * 2)
    data = i.Fatalities.astype('int32').tolist()
    try:
        model = SARIMAX(data, order=(1, 1, 0), seasonal_order=(1, 1, 0, 12), measurement_error=True)
        model_fit = model.fit(disp=False)
        predicted = model_fit.predict(len(data), len(data) + 34)
        new = np.concatenate((np.array(data), np.array([int(num) for num in predicted])), axis=0)
        submit_fatal.extend(list(new[-43:]))
    except:
        submit_fatal.extend(list(data[-10:-1]))
        for j in range(34):
            submit_fatal.append(data[-1] * 2)
df_submit = pd.concat([pd.Series(np.arange(1, 1 + len(submit_confirmed))), pd.Series(submit_confirmed), pd.Series(submit_fatal)], axis=1)
df_submit = df_submit.fillna(method='pad').astype(int)
df_submit.head()
df_submit.rename(columns={0: 'ForecastId', 1: 'ConfirmedCases', 2: 'Fatalities'}, inplace=True)
<data_type_conversions>
def huber_loss(y_true, y_pred, clip_delta=1.0):
    error = y_true - y_pred
    cond = tf.keras.backend.abs(error) < clip_delta
    squared_loss = 0.5 * tf.keras.backend.square(error)
    linear_loss = clip_delta * (tf.keras.backend.abs(error) - 0.5 * clip_delta)
    return tf.where(cond, squared_loss, linear_loss)

def huber_loss_mean(y_true, y_pred, clip_delta=1.0):
    return tf.keras.backend.mean(huber_loss(y_true, y_pred, clip_delta))

def rmsle(y, y_pred):
    assert len(y) == len(y_pred)
    terms_to_sum = [(math.log(y_pred[i] + 1) - math.log(y[i] + 1)) ** 2.0 for i, pred in enumerate(y_pred)]
    return (sum(terms_to_sum) * (1.0 / len(y))) ** 0.5
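Huber loss is quadratic for errors below clip_delta and linear beyond it, so outliers pull the GRU less hard than with plain MSE. A numeric check with toy tensors (values made up for illustration):

import tensorflow as tf

# Errors of 0.5 (inside delta) and 3.0 (outside delta).
y_true = tf.constant([1.0, 1.0])
y_pred = tf.constant([0.5, 4.0])
print(huber_loss(y_true, y_pred).numpy())  # [0.125, 2.5]: 0.5*0.5^2 and 1.0*(3.0-0.5)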
COVID19 Global Forecasting (Week 3)
8,800,787
sub.rename(columns={'ForecastId': 'ForecastId1', "ConfirmedCases": 'Confirmed_lgbt', "Fatalities": 'Fatalities_lgbt'}, inplace=True)
df_submit.rename(columns={'ForecastId': 'ForecastId2', "ConfirmedCases": 'Confirmed_arima', "Fatalities": 'Fatalities_arima'}, inplace=True)
df_combine = pd.concat([sub, df_submit], axis=1, join='inner')
cols = ['Confirmed_lgbt', 'Confirmed_arima']
df_combine['ConfirmedCases'] = df_combine[cols].astype(float).mean(axis=1)
cols = ['Fatalities_lgbt', 'Fatalities_arima']
df_combine['Fatalities'] = df_combine[cols].astype(float).mean(axis=1)
del df_combine['ForecastId2']
del df_combine['Confirmed_lgbt']
del df_combine['Fatalities_lgbt']
del df_combine['Confirmed_arima']
del df_combine['Fatalities_arima']
df_combine.rename(columns={'ForecastId1': 'ForecastId'}, inplace=True)
df_combine.to_csv('submission.csv', index=False)
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/test.csv")
complete_test = pd.merge(test, df_combine, how="left", on="ForecastId")
complete_test.to_csv('complete_test.csv', index=False)
<set_options>
epochs_num = 10
n_hidden = 200
n_in = 1
model = Sequential()
model.add(GRU(n_hidden, batch_input_shape=(None, maxlen, n_in), kernel_initializer='random_uniform', return_sequences=False))
model.add(Dense(n_in, kernel_initializer='random_uniform'))
model.add(Activation("linear"))
opt = Adagrad(lr=0.01, epsilon=1e-08, decay=1e-4)
model.compile(loss=huber_loss_mean, optimizer=opt)
COVID19 Global Forecasting (Week 3)
8,800,787
warnings.filterwarnings("ignore") <load_from_csv>
early_stopping = EarlyStopping(monitor='loss', patience=5, verbose=1)
hist = model.fit(X_train, Y_train, batch_size=10, epochs=epochs_num, callbacks=[early_stopping], shuffle=False)
COVID19 Global Forecasting (Week 3)
8,800,787
datapath = '../input/covid19-global-forecasting-week-2/'
train = pd.read_csv(datapath + 'train.csv')
test = pd.read_csv(datapath + 'test.csv')
<data_type_conversions>
predicted_std = model.predict(X_test)
result_std = pd.DataFrame(predicted_std)
result_std.columns = ['predict']
result_std['actual'] = Y_test
COVID19 Global Forecasting (Week 3)
8,800,787
train['Date'] = train['Date'].astype('datetime64[ns]')
test['Date'] = test['Date'].astype('datetime64[ns]')
print("Train Date type: ", train['Date'].dtype)
print("Test Date type: ", test['Date'].dtype)
<rename_columns>
predicted = scaler.inverse_transform(predicted_std - abs(confirmedCase_std_min))
Y_test = scaler.inverse_transform(Y_test - abs(confirmedCase_std_min))
COVID19 Global Forecasting (Week 3)
8,800,787
train.columns = ['id', 'state', 'country', 'date', 'ConfirmedCases', 'Fatalities']
test.columns = ['ForecastId', 'state', 'country', 'date']
<feature_engineering>
np.sqrt(mean_squared_log_error(predicted, Y_test))
COVID19 Global Forecasting (Week 3)
8,800,787
train['place'] = train['state'].fillna('') + '_' + train['country']
test['place'] = test['state'].fillna('') + '_' + test['country']
<count_unique_values>
result = pd.DataFrame(predicted)
result.columns = ['predict']
result['actual'] = Y_test
result.plot(figsize=(25, 6))
plt.show()
COVID19 Global Forecasting (Week 3)
8,800,787
print('How many places?: ', 'Train: ', len(train['place'].unique()), 'Test: ', len(test['place'].unique()))
print('Unique place similar as test?: ', (train['place'].unique() == test['place'].unique()).sum())
<drop_column>
test_df = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv")
test_df
COVID19 Global Forecasting (Week 3)
8,800,787
china_cases = train[train['place'].str.contains('China')][['date', 'ConfirmedCases', 'Fatalities']].reset_index(drop=True)
restworld_cases = train[~train['place'].str.contains('China')][['date', 'ConfirmedCases', 'Fatalities']].reset_index(drop=True)
<drop_column>
submission_c = pd.read_csv("../input/covid19-global-forecasting-week-3/submission.csv")
COVID19 Global Forecasting (Week 3)
8,800,787
us_cases = train[train['place'].str.contains('US')][['date','place', 'ConfirmedCases', 'Fatalities']].reset_index(drop=True )<compute_test_metric>
temp = (datetime.datetime.strptime("2020-03-25", '%Y-%m-%d') - datetime.timedelta(days=maxlen)).strftime('%Y-%m-%d')
test_df = train_df[train_df["Date"] > temp]
COVID19 Global Forecasting (Week 3)
8,800,787
def RMSLE(predicted, actual): return np.sqrt(np.mean(np.power(( np.log(predicted+1)-np.log(actual+1)) ,2)) )<feature_engineering>
check_df = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv").query("Date > '2020-03-25' and Date <= '2020-04-07'")
check_df["ConfirmedCases_std"] = scaler.transform(check_df["ConfirmedCases"].values.reshape(len(check_df["ConfirmedCases"].values), 1)) + abs(confirmedCase_std_min)
COVID19 Global Forecasting (Week 3)
8,800,787
train_sub = train[['id', 'place', 'date', 'ConfirmedCases', 'Fatalities']]
train_sub['logConfirmedCases'] = np.log(train_sub['ConfirmedCases'])
train_sub = train_sub.set_index('date')
<feature_engineering>
confirmedCases_pred = []
for i in range(0, 306 * maxlen, maxlen):
    temp_array = np.array(test_df["ConfirmedCases_std"][i:i + maxlen])
    for j in range(43):
        if j < 13:
            temp_array = np.append(temp_array, np.array(check_df["ConfirmedCases_std"])[int(i * 13 / maxlen) + j])
        elif np.array(test_df["ConfirmedCases"][i:i + maxlen]).sum() == 0:
            temp_array = np.append(temp_array, temp_array[-1])
        else:
            pred = model.predict(temp_array[-maxlen:].reshape(1, maxlen, 1))
            pred = np.where(float(pred) < temp_array[-1], temp_array[-1], pred)
            temp_array = np.append(temp_array, pred)
    confirmedCases_pred.append(temp_array[-43:])
COVID19 Global Forecasting (Week 3)
8,800,787
frames = []
for place in train_sub.place.unique():
    a = train_sub[train_sub['place'] == place]
    a['z_cases'] = (a['logConfirmedCases'] - a['logConfirmedCases'].rolling(window=3).mean()) / a['logConfirmedCases'].rolling(window=3).std()
    a['zp_cases'] = a['z_cases'] - a['z_cases'].shift(3)
    a['z_death'] = (a['Fatalities'] - a['Fatalities'].rolling(window=3).mean()) / a['Fatalities'].rolling(window=3).std()
    a['zp_death'] = a['z_death'] - a['z_death'].shift(3)
    frames.append(a)
rolling_df = pd.concat(frames)
<statistical_test>
submission_c["ConfirmedCases"] = np.abs(scaler.inverse_transform(np.array(confirmedCases_pred).reshape(306 * 43) - abs(confirmedCase_std_min)))
submission_c["ConfirmedCases_std"] = np.array(confirmedCases_pred).reshape(306 * 43)
submission_c
COVID19 Global Forecasting (Week 3)
8,800,787
stationary_data = []
for place in train_sub.place.unique():
    a = rolling_df[(rolling_df['place'] == place) & (rolling_df['logConfirmedCases'] > 0)]['logConfirmedCases'].dropna()
    try:
        dftest = adfuller(a, autolag='AIC')
        if dftest[1] < 0.001:
            stationary_data.append(place)
    except:
        pass
print(len(stationary_data))
<count_unique_values>
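adfuller returns (test statistic, p-value, lags, nobs, critical values, icbest); the p-value threshold here treats a series as stationary when the unit-root null is rejected. A sketch on synthetic data showing the two outcomes:

import numpy as np
from statsmodels.tsa.stattools import adfuller

# White noise is stationary; a random walk is not. Synthetic data only.
rng = np.random.default_rng(0)
noise = rng.normal(size=200)
walk = noise.cumsum()
print(adfuller(noise, autolag='AIC')[1])  # tiny p-value: reject the unit root
print(adfuller(walk, autolag='AIC')[1])   # large p-value: cannot reject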
submission_c.to_csv('./submission_c.csv')
submission_c.to_csv('../output/kaggle/working/submission_c.csv')
COVID19 Global Forecasting (Week 3)
8,800,787
station_death_data = []
for place in train_sub.place.unique():
    dftest = adfuller(rolling_df[rolling_df['place'] == place]['Fatalities'], autolag='AIC')
    if dftest[1] < 0.001:
        station_death_data.append(place)
print(len(station_death_data))
<define_variables>
test_rate = 0.05
maxlen = 13
train_date_count = len(set(train_df["Date"]))
X, Y = [], []
scaler = StandardScaler()
train_df["Fatalities_std"] = scaler.fit_transform(train_df["Fatalities"].values.reshape(len(train_df["Fatalities"].values), 1))
fatalities_std_min = train_df["Fatalities_std"].min()
train_df["Fatalities_std"] = train_df["Fatalities_std"] + abs(fatalities_std_min)
ss = StandardScaler()
train_df["ConfirmedCases_std"] = ss.fit_transform(train_df["ConfirmedCases"].values.reshape(len(train_df["ConfirmedCases"].values), 1))
confirmedCase_std_min = train_df["ConfirmedCases_std"].min()
train_df["ConfirmedCases_std"] = train_df["ConfirmedCases_std"] + abs(confirmedCase_std_min)
for state, country in train_df.groupby(["Province_State", "Country_Region"]).sum().index:
    df = train_df[(train_df["Country_Region"] == country) & (train_df["Province_State"] == state)]
    if df["Fatalities"].sum() != 0 or df["ConfirmedCases"].sum() != 0:
        for i in range(len(df) - maxlen):
            X.append(df[['Fatalities_std', 'ConfirmedCases_std']].iloc[i:(i + maxlen)].values)
            Y.append(df[['Fatalities_std']].iloc[i + maxlen].values)
X = np.array(X)
Y = np.array(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_rate, shuffle=True, random_state=0)
COVID19 Global Forecasting (Week 3)
8,800,787
confirmedc_lag = ['Anhui_China', 'Chongqing_China','Guangdong_China', 'Guizhou_China', 'Hainan_China', 'Hebei_China','Hubei_China', 'Ningxia_China','Shandong_China','Shanxi_China', 'Sichuan_China']<define_variables>
epochs_num = 30
n_hidden = 200
n_in = 2
model = Sequential()
model.add(GRU(n_hidden, batch_input_shape=(None, maxlen, n_in), kernel_initializer='random_uniform', return_sequences=False))
model.add(Dense(1, kernel_initializer='random_uniform'))
model.add(Activation("linear"))
opt = Adagrad(lr=0.01, epsilon=1e-08, decay=1e-4)
model.compile(loss=huber_loss_mean, optimizer=opt)
COVID19 Global Forecasting (Week 3)
8,800,787
fatalities_lag = ['Hubei_China']<drop_column>
early_stopping = EarlyStopping(monitor='loss', patience=5, verbose=1)
hist = model.fit(X_train, Y_train, batch_size=8, epochs=epochs_num, callbacks=[early_stopping], shuffle=False)
COVID19 Global Forecasting (Week 3)
8,800,787
non_stationary_death_data = [ele for ele in allplaces]
for place in fatalities_lag:
    if place in allplaces:
        non_stationary_death_data.remove(place)
print(len(non_stationary_death_data))
<feature_engineering>
predicted_std = model.predict(X_test)
result_std = pd.DataFrame(predicted_std)
result_std.columns = ['predict']
result_std['actual'] = Y_test
COVID19 Global Forecasting (Week 3)
8,800,787
train_sub['logConfirmedCases']= train_sub['logConfirmedCases'].replace(to_replace=-inf, value=0 )<merge>
predicted = scaler.inverse_transform(predicted_std - abs(fatalities_std_min))
Y_test = scaler.inverse_transform(Y_test - abs(fatalities_std_min))
COVID19 Global Forecasting (Week 3)
8,800,787
poly_data = train[['date', 'place', 'ConfirmedCases', 'Fatalities']].merge(test[['date', 'place']], how='outer', on=['date', 'place']).sort_values(['place', 'date'])
print(poly_data.date.min(), test.date.min(), train.date.max(), poly_data.date.max())
<feature_engineering>
X_test_ = scaler.inverse_transform(X_test - abs(fatalities_std_min))
COVID19 Global Forecasting (Week 3)
8,800,787
label = []
for place in poly_data.place.unique():
    labelrange = range(1, len(poly_data[poly_data['place'] == place]) + 1)
    label.append([i for i in labelrange])
lab = [item for lab in label for item in lab]
poly_data['label'] = lab
poly_data.head()
<create_dataframe>
submission_df = submission_c
COVID19 Global Forecasting (Week 3)
8,800,787
XYtrain['intercept'] = -1
result = pd.DataFrame()
for place in poly_data.place.unique():
    for degree in [2, 3, 4, 5, 6]:
        features = XYtrain[XYtrain['place'] == place][['label', 'intercept']]
        target = XYtrain[XYtrain['place'] == place]['ConfirmedCases']
        model = make_pipeline(PolynomialFeatures(degree), Ridge())
        model.fit(np.array(features), target)
        y_pred = model.predict(np.array(features))
        rmsle = RMSLE(y_pred, target)
        result = result.append(pd.DataFrame({'place': [place], 'degree': [degree], 'RMSLE': [rmsle]}))
<count_unique_values>
temp = (datetime.datetime.strptime("2020-03-25", '%Y-%m-%d') - datetime.timedelta(days=maxlen)).strftime('%Y-%m-%d')
test_df = train_df[train_df["Date"] > temp]
COVID19 Global Forecasting (Week 3)
8,800,787
best_degree = pd.DataFrame()
for place in result.place.unique():
    a = result[result['place'] == place]
    best_degree = best_degree.append(a[a['RMSLE'] == a['RMSLE'].min()])
print(best_degree.groupby('degree')['place'].nunique())
print('Zero polynomial(no fit): ', best_degree[best_degree['RMSLE'] < 0.00001]['place'].unique())
<count_unique_values>
check_df["Fatalities_std"] = scaler.transform(check_df["Fatalities"].values.reshape(len(check_df["Fatalities"].values), 1)) + abs(fatalities_std_min)
check_df
COVID19 Global Forecasting (Week 3)
8,800,787
fit_best_degree = best_degree[best_degree['RMSLE'] > 0.00001]
twodeg_places = fit_best_degree[fit_best_degree['degree'] == 2]['place'].unique()
threedeg_places = fit_best_degree[fit_best_degree['degree'] == 3]['place'].unique()
fourdeg_places = fit_best_degree[fit_best_degree['degree'] == 4]['place'].unique()
fivedeg_places = fit_best_degree[fit_best_degree['degree'] == 5]['place'].unique()
sdeg_places = fit_best_degree[fit_best_degree['degree'] == 6]['place'].unique()
nofit_places1 = best_degree[best_degree['RMSLE'] < 0.00001]['place'].unique()
print(fit_best_degree.nunique())
print(len(twodeg_places), len(threedeg_places), len(fourdeg_places), len(fivedeg_places), len(sdeg_places), len(nofit_places1))
<feature_engineering>
fatalities_pred = []
for i in range(0, 306 * maxlen, maxlen):
    temp_array = np.array(test_df[["Fatalities_std", "ConfirmedCases_std"]][i:i + maxlen])
    for j in range(43):
        if j < 13:
            temp_array = np.append(temp_array, np.append(np.array(check_df["Fatalities_std"])[int(i * 13 / maxlen) + j], np.array(check_df["ConfirmedCases_std"])[int(i * 13 / maxlen) + j]).reshape(1, 2), axis=0)
        elif np.array(test_df[["Fatalities", "ConfirmedCases"]][i:i + maxlen]).sum() == 0:
            temp_array = np.append(temp_array, np.array(temp_array[-1]).reshape(1, 2), axis=0)
        else:
            pred = model.predict(temp_array[-maxlen:].reshape(1, maxlen, 2))
            pred = np.where(pred < temp_array[-1][0], temp_array[-1][0], pred)
            temp_array = np.append(temp_array, np.append(pred, submission_df["ConfirmedCases_std"][int(i / maxlen) * 43 + j]).reshape(1, 2), axis=0)
    fatalities_pred.append(temp_array[-43:])
COVID19 Global Forecasting (Week 3)
8,800,787
XYtest = XYtest.reset_index(drop=True)
XYtest['intercept'] = -1
<choose_model_class>
submission_df["Fatalities"] = np.abs(scaler.inverse_transform([i[0] - abs(fatalities_std_min) for i in np.array(fatalities_pred).reshape(306 * 43, 2)]))
submission_df
COVID19 Global Forecasting (Week 3)
8,800,787
degree_places = {2: twodeg_places, 3: threedeg_places, 4: fourdeg_places, 5: fivedeg_places, 6: sdeg_places}
poly_predicted_confirmedcases = pd.DataFrame()
for degree, places in degree_places.items():
    for place in places:
        features = XYtrain[XYtrain['place'] == place][['label', 'intercept']]
        target = XYtrain[XYtrain['place'] == place]['ConfirmedCases']
        Xtest = XYtest[XYtest['place'] == place][['label', 'intercept']]
        model = make_pipeline(PolynomialFeatures(degree), Ridge())
        model.fit(np.array(features), target)
        y_pred = model.predict(np.array(Xtest))
        a = pd.DataFrame(zip(XYtrain[XYtrain['place'] == place]['place'], y_pred.tolist()), columns=['place', 'ConfirmedCases'])
        poly_predicted_confirmedcases = poly_predicted_confirmedcases.append(a)
<statistical_test>
submission_df[["ConfirmedCases", "Fatalities"]] = submission_df[["ConfirmedCases", "Fatalities"]].round().astype(int)
submission_df
COVID19 Global Forecasting (Week 3)
8,800,787
fatalities_result = pd.DataFrame()
for place in poly_data.place.unique():
    for degree in [2, 3, 4, 5, 6]:
        features = XYtrain[XYtrain['place'] == place][['label', 'intercept']]
        target = XYtrain[XYtrain['place'] == place]['Fatalities']
        model = make_pipeline(PolynomialFeatures(degree), Ridge())
        model.fit(np.array(features), target)
        y_pred = model.predict(np.array(features))
        rmsle = RMSLE(y_pred, target)
        fatalities_result = fatalities_result.append(pd.DataFrame({'place': [place], 'degree': [degree], 'RMSLE': [rmsle]}))
<count_unique_values>
submission_df = submission_df.drop("ConfirmedCases_std",axis=1 )
COVID19 Global Forecasting (Week 3)
8,800,787
fat_best_degree = pd.DataFrame()
for place in fatalities_result.place.unique():
    a = fatalities_result[fatalities_result['place'] == place]
    fat_best_degree = fat_best_degree.append(a[a['RMSLE'] == a['RMSLE'].min()])
print(fat_best_degree.groupby('degree')['place'].nunique())
print('Zero polynomial(no fit): ', fat_best_degree[fat_best_degree['RMSLE'] < 0.000001]['place'].unique())
<count_unique_values>
submission_df = submission_df.set_index('ForecastId' )
COVID19 Global Forecasting (Week 3)
8,800,787
fit_best_degree = fat_best_degree[fat_best_degree['RMSLE'] > 0.000001]
twodeg_places = fit_best_degree[fit_best_degree['degree'] == 2]['place'].unique()
threedeg_places = fit_best_degree[fit_best_degree['degree'] == 3]['place'].unique()
fourdeg_places = fit_best_degree[fit_best_degree['degree'] == 4]['place'].unique()
fivedeg_places = fit_best_degree[fit_best_degree['degree'] == 5]['place'].unique()
sevdeg_places = fit_best_degree[fit_best_degree['degree'] == 6]['place'].unique()
nofit_places2 = fat_best_degree[fat_best_degree['RMSLE'] < 0.000001]['place'].unique()
print(fit_best_degree.nunique())
print(len(twodeg_places), len(threedeg_places), len(fourdeg_places), len(fivedeg_places), len(sevdeg_places), len(nofit_places2))
<choose_model_class>
submission_df.to_csv('submission.csv' )
COVID19 Global Forecasting (Week 3)
8,781,854
degree_places = {2: twodeg_places, 3: threedeg_places, 4: fourdeg_places, 5: fivedeg_places, 6: sevdeg_places}
poly_predicted_fatalities = pd.DataFrame()
for degree, places in degree_places.items():
    for place in places:
        features = XYtrain[XYtrain['place'] == place][['label', 'intercept']]
        target = XYtrain[XYtrain['place'] == place]['Fatalities']
        Xtest = XYtest[XYtest['place'] == place][['label', 'intercept']]
        model = make_pipeline(PolynomialFeatures(degree), Ridge())
        model.fit(np.array(features), target)
        y_pred = model.predict(np.array(Xtest))
        a = pd.DataFrame(zip(XYtrain[XYtrain['place'] == place]['place'], y_pred.tolist()), columns=['place', 'Fatalities'])
        poly_predicted_fatalities = poly_predicted_fatalities.append(a)
<create_dataframe>
train_data = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/train.csv")
test_data = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/test.csv")
COVID19 Global Forecasting (Week 3)
8,781,854
for place in nofit_places1:
    e = poly_data[(poly_data['place'] == place) & (poly_data['date'] > '2020-03-18')]
    f = e['ConfirmedCases'].fillna(method='ffill')
    g = pd.DataFrame(zip(e['place'], f), columns=['place', 'ConfirmedCases'])
    poly_predicted_confirmedcases = poly_predicted_confirmedcases.append(g)
for place in nofit_places2:
    h = poly_data[(poly_data['place'] == place) & (poly_data['date'] > '2020-03-18')]
    i = h['Fatalities'].fillna(method='ffill')
    j = pd.DataFrame(zip(h['place'], i), columns=['place', 'Fatalities'])
    poly_predicted_fatalities = poly_predicted_fatalities.append(j)<prepare_output>
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
COVID19 Global Forecasting (Week 3)
8,781,854
poly_predicted_confirmedcases2 = pd.DataFrame({'date': XYtest.date,
                                               'place': poly_predicted_confirmedcases['place'].tolist(),
                                               'ConfirmedCases': poly_predicted_confirmedcases['ConfirmedCases'].tolist()})
poly_predicted_confirmedcases2.head()<prepare_output>
train = train_data.drop(["Province_State"], axis=1)
test = test_data.drop(["Province_State"], axis=1)
COVID19 Global Forecasting (Week 3)
8,781,854
poly_predicted_fatalities2 = pd.DataFrame({'date': XYtest.date,
                                           'place': poly_predicted_fatalities['place'].tolist(),
                                           'Fatalities': poly_predicted_fatalities['Fatalities'].tolist()})
poly_predicted_fatalities2.head()<merge>
train = train[train["ConfirmedCases"] != 0]
COVID19 Global Forecasting (Week 3)
8,781,854
poly_compiled = poly_predicted_confirmedcases2.merge(poly_predicted_fatalities2, how='inner', on=['place', 'date'])<merge>
df = train.fillna('NA').groupby(['Country_Region', 'Date'])['ConfirmedCases'].sum() \
          .groupby(['Country_Region']).max().sort_values() \
          .groupby(['Country_Region']).sum().sort_values(ascending=False)
top10 = pd.DataFrame(df).head(10).reset_index()
top10
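After the second groupby the index is already one row per country, so the final groupby-sum is a no-op; the chain reduces to a per-country maximum of the cumulative series. A simpler equivalent sketch:

top10_alt = (train.fillna('NA')
                  .groupby(['Country_Region', 'Date'])['ConfirmedCases'].sum()
                  .groupby('Country_Region').max()
                  .sort_values(ascending=False)
                  .head(10)
                  .reset_index())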
COVID19 Global Forecasting (Week 3)
8,781,854
test_poly_compiled = test.merge(poly_compiled, how='inner', on=['place', 'date'])
test_poly_compiled = test_poly_compiled.set_index('date')
test_poly_compiled<prepare_output>
deaths = train.fillna('NA').groupby(['Country_Region', 'Date'])['Fatalities'].sum() \
              .groupby(['Country_Region']).max().sort_values() \
              .groupby(['Country_Region']).sum().sort_values(ascending=False)
deaths10 = pd.DataFrame(deaths).head(10).reset_index()
deaths10
COVID19 Global Forecasting (Week 3)
8,781,854
df_compiled = pd.DataFrame()
for place in test_poly_compiled.place.unique():
    a = test_poly_compiled[test_poly_compiled['place'] == place]
    # cap each cumulative series at its peak so the forecast never decreases after the maximum;
    # idxmax() returns the date label of the peak (the original relied on np.argmax's old
    # label-returning behaviour on a Series)
    ind_max_confirmedcases = a['ConfirmedCases'].idxmax()
    a = a.replace(to_replace=a.loc[(a.index > ind_max_confirmedcases), 'ConfirmedCases'].tolist(),
                  value=a.loc[ind_max_confirmedcases, 'ConfirmedCases'])
    ind_max_fatalities = a['Fatalities'].idxmax()
    a = a.replace(to_replace=a.loc[(a.index > ind_max_fatalities), 'Fatalities'].tolist(),
                  value=a.loc[ind_max_fatalities, 'Fatalities'])
    df_compiled = df_compiled.append(a)
df_compiled[df_compiled['place'] == '_Zimbabwe'].tail()<load_from_csv>
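The same peak cap can be written without replace(); a minimal vectorized sketch (our rewrite, assuming rows are date-sorted within each place):

def cap_after_peak(s):
    # hold the series flat at its peak value from the peak onward
    peak_pos = int(np.argmax(s.values))
    out = s.copy()
    out.iloc[peak_pos:] = s.iloc[peak_pos]
    return out

capped = test_poly_compiled.groupby('place')[['ConfirmedCases', 'Fatalities']].transform(cap_after_peak)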
data_by_date = pd.DataFrame(train.groupby(["Country_Region", "Date"])["ConfirmedCases"].sum())
data_by_date = data_by_date.sort_values("ConfirmedCases", axis=0, ascending=True).reset_index()
COVID19 Global Forecasting (Week 3)
8,781,854
submission = pd.read_csv(datapath + 'submission.csv')<merge>
death_by_date = pd.DataFrame(train.groupby(["Country_Region", "Date"])["Fatalities"].sum())
death_by_date = death_by_date.sort_values("Fatalities", axis=0, ascending=False).reset_index()
COVID19 Global Forecasting (Week 3)
8,781,854
sub2 = submission[['ForecastId']].merge(df_compiled[['ForecastId', 'ConfirmedCases', 'Fatalities']], how='left', on='ForecastId')<feature_engineering>
df = data_by_date.loc[(data_by_date['Country_Region'] == 'US') & (data_by_date.Date >= '2020-03-01')]
df = df.sort_values('ConfirmedCases', ascending=True)
COVID19 Global Forecasting (Week 3)
8,781,854
sub2['ConfirmedCases'] = sub2['ConfirmedCases'].round(1)
sub2['Fatalities'] = sub2['Fatalities'].round(1).abs()<save_to_csv>
death_US = death_by_date.loc[(death_by_date['Country_Region'] == 'US') & (death_by_date.Date >= '2020-03-01')]
death_US = death_US.sort_values('Fatalities', ascending=True)
COVID19 Global Forecasting (Week 3)
8,781,854
sub2.to_csv('submission.csv', index=False)<compute_test_metric>
data_china = data_by_date.loc[(data_by_date["Country_Region"] == "China") & (data_by_date["Date"] >= "2020-01-01")]
COVID19 Global Forecasting (Week 3)
8,781,854
def RMSLE(pred, actual):
    return np.sqrt(np.mean(np.power((np.log(pred + 1) - np.log(actual + 1)), 2)))<load_from_csv>
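A quick sanity check of the metric on toy values (not competition data). Because it compares logs, RMSLE penalizes relative rather than absolute error, and it is symmetric in its two arguments:

pred = np.array([10.0, 100.0, 1000.0])
actual = np.array([12.0, 120.0, 1200.0])
print(RMSLE(pred, actual))   # ~0.18: a constant 20% relative error at every scale
print(RMSLE(actual, pred))   # same value; swapping the arguments changes nothing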
death_china = death_by_date.loc[(death_by_date["Country_Region"] == "China") & (death_by_date["Date"] >= "2020-01-01")]
death_china.sort_values("Fatalities", ascending=True)
COVID19 Global Forecasting (Week 3)
8,781,854
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("../input/covid19-global-forecasting-week-2/test.csv")
train = pd.read_csv("../input/covid19-global-forecasting-week-2/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region', 'Province_State', 'Date'])
test = test.sort_values(['Country_Region', 'Province_State', 'Date'])<groupby>
xtrain = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv')
xtest = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv')
xsubmission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/submission.csv')
COVID19 Global Forecasting (Week 3)
8,781,854
# enforce monotone cumulative counts: a running maximum removes dips caused by reporting corrections
train[['ConfirmedCases', 'Fatalities']] = train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']].transform('cummax')<feature_engineering>
xtrain.rename(columns={'Country_Region': 'Country', 'Province_State': 'State'}, inplace=True)
xtest.rename(columns={'Country_Region': 'Country', 'Province_State': 'State'}, inplace=True)
xtrain['Date'] = pd.to_datetime(xtrain['Date'], infer_datetime_format=True)
xtest['Date'] = pd.to_datetime(xtest['Date'], infer_datetime_format=True)
xtrain.info()
xtest.info()
y1_xTrain = xtrain.iloc[:, -2]  # ConfirmedCases
y1_xTrain.head()
y2_xTrain = xtrain.iloc[:, -1]  # Fatalities
y2_xTrain.head()

EMPTY_VAL = "EMPTY_VAL"

def fillState(state, country):
    # fall back to the country name when the province/state is missing
    if state == EMPTY_VAL:
        return country
    return state
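A quick illustration of fillState on toy inputs, just to show the fallback behaviour:

print(fillState('Hubei', 'China'))     # 'Hubei'  — a real province is kept as-is
print(fillState(EMPTY_VAL, 'Italy'))   # 'Italy'  — a missing state falls back to the country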
COVID19 Global Forecasting (Week 3)
8,781,854
feature_day = [1, 20, 50, 100, 200, 500, 1000]

def CreateInput(data):
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if data['Date'].iloc[i] > fromday:
                day_delta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]

pred_data_all = pd.DataFrame()
with tqdm(total=len(train['Country_Region'].unique())) as pbar:
    for country in train['Country_Region'].unique():
        for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
            df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
            df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
            X_train = CreateInput(df_train)
            y_train_confirmed = df_train['ConfirmedCases'].ravel()
            y_train_fatalities = df_train['Fatalities'].ravel()
            X_pred = CreateInput(df_test)
            # pick the most recent milestone feature already active at the start of the test window
            feature_use = X_pred.columns[0]
            for i in range(X_pred.shape[1] - 1, 0, -1):
                if X_pred.iloc[0, i] > 0:
                    feature_use = X_pred.columns[i]
                    break
            idx = X_train[X_train[feature_use] == 0].shape[0]
            adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
            adjusted_y_train_confirmed = y_train_confirmed[idx:]
            adjusted_y_train_fatalities = y_train_fatalities[idx:]
            adjusted_X_pred = X_pred[feature_use].values.reshape(-1, 1)

            model = make_pipeline(PolynomialFeatures(2), BayesianRidge())
            model.fit(adjusted_X_train, adjusted_y_train_confirmed)
            y_hat_confirmed = model.predict(adjusted_X_pred)
            model.fit(adjusted_X_train, adjusted_y_train_fatalities)
            y_hat_fatalities = model.predict(adjusted_X_pred)

            pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
            pred_data['ConfirmedCases_hat'] = y_hat_confirmed
            pred_data['Fatalities_hat'] = y_hat_fatalities
            pred_data_all = pred_data_all.append(pred_data)
        pbar.update(1)

df_val = pd.merge(pred_data_all,
                  train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']],
                  on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_1 = df_val.copy()<compute_test_metric>
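To make the feature concrete: each 'Number day from N case' column counts the days elapsed since the location last had fewer than N confirmed cases. A toy illustration of that computation with made-up numbers:

dates = pd.date_range('2020-03-01', periods=6)
cases = pd.Series([0, 5, 25, 60, 150, 400], index=dates)
for day in [1, 20, 50, 100]:
    fromday = cases[cases < day].index.max()            # last date still below the milestone
    days_since = np.maximum((dates - fromday).days, 0)  # stays 0 until the milestone is crossed
    print(day, list(days_since))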
X_xTrain = xtrain.copy()
COVID19 Global Forecasting (Week 3)
8,781,854
RMSLE(df_val[df_val['ConfirmedCases'].notnull()]['ConfirmedCases'].values,
      df_val[df_val['ConfirmedCases'].notnull()]['ConfirmedCases_hat'].values)<compute_test_metric>
X_xTrain['State'].fillna(EMPTY_VAL, inplace=True)
X_xTrain['State'] = X_xTrain.loc[:, ['State', 'Country']].apply(lambda x: fillState(x['State'], x['Country']), axis=1)
X_xTrain.loc[:, 'Date'] = X_xTrain.Date.dt.strftime("%m%d")
X_xTrain["Date"] = X_xTrain["Date"].astype(int)
X_xTrain.head()

X_xTest = xtest.copy()
X_xTest['State'].fillna(EMPTY_VAL, inplace=True)
X_xTest['State'] = X_xTest.loc[:, ['State', 'Country']].apply(lambda x: fillState(x['State'], x['Country']), axis=1)
X_xTest.loc[:, 'Date'] = X_xTest.Date.dt.strftime("%m%d")
X_xTest["Date"] = X_xTest["Date"].astype(int)
X_xTest.head()<import_modules>
COVID19 Global Forecasting (Week 3)
8,781,854
RMSLE(df_val[df_val['Fatalities'].notnull()]['Fatalities'].values,
      df_val[df_val['Fatalities'].notnull()]['Fatalities_hat'].values)<compute_test_metric>
le = preprocessing.LabelEncoder()
X_xTrain.Country = le.fit_transform(X_xTrain.Country)
X_xTrain['State'] = le.fit_transform(X_xTrain['State'])
X_xTrain.head()
# caution: refitting the encoder on the test frame only yields consistent codes
# because train and test share the same sets of countries and states here
X_xTest.Country = le.fit_transform(X_xTest.Country)
X_xTest['State'] = le.fit_transform(X_xTest['State'])
X_xTest.head()
xtrain.head()
xtrain.loc[xtrain.Country == 'Afghanistan', :]
xtest.tail()<compute_test_metric>
COVID19 Global Forecasting (Week 3)
8,781,854
method_list = ['Poly Bayesian Ridge', 'SARIMA']
method_val = [df_val_1, df_val_3]  # df_val_3 is the SARIMA validation frame produced earlier in the kernel
for i in range(0, 2):
    df_val = method_val[i]
    method_score = [method_list[i]] + \
        [RMSLE(df_val[df_val['ConfirmedCases'].notnull()]['ConfirmedCases'].values,
               df_val[df_val['ConfirmedCases'].notnull()]['ConfirmedCases_hat'].values)] + \
        [RMSLE(df_val[df_val['Fatalities'].notnull()]['Fatalities'].values,
               df_val[df_val['Fatalities'].notnull()]['Fatalities_hat'].values)]
    print(method_score)<save_to_csv>
filterwarnings('ignore')
le = preprocessing.LabelEncoder()
countries = X_xTrain.Country.unique()
COVID19 Global Forecasting (Week 3)
8,781,854
df_val = df_val_1
submission = df_val[['ForecastId', 'ConfirmedCases_hat', 'Fatalities_hat']]
submission.columns = ['ForecastId', 'ConfirmedCases', 'Fatalities']
submission.to_csv('submission.csv', index=False)
submission<set_options>
xout = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
COVID19 Global Forecasting (Week 3)
8,781,854
pd.set_option('display.max_columns', 500)
filterwarnings('ignore')<load_from_csv>
for country in countries:
    states = X_xTrain.loc[X_xTrain.Country == country, :].State.unique()
    for state in states:
        # fit one model per (country, state) series, for cases and fatalities separately
        X_xTrain_CS = X_xTrain.loc[(X_xTrain.Country == country) & (X_xTrain.State == state),
                                   ['State', 'Country', 'Date', 'ConfirmedCases', 'Fatalities']]
        y1_xTrain_CS = X_xTrain_CS.loc[:, 'ConfirmedCases']
        y2_xTrain_CS = X_xTrain_CS.loc[:, 'Fatalities']
        X_xTrain_CS = X_xTrain_CS.loc[:, ['State', 'Country', 'Date']]
        X_xTrain_CS.Country = le.fit_transform(X_xTrain_CS.Country)
        X_xTrain_CS['State'] = le.fit_transform(X_xTrain_CS['State'])

        X_xTest_CS = X_xTest.loc[(X_xTest.Country == country) & (X_xTest.State == state),
                                 ['State', 'Country', 'Date', 'ForecastId']]
        X_xTest_CS_Id = X_xTest_CS.loc[:, 'ForecastId']
        X_xTest_CS = X_xTest_CS.loc[:, ['State', 'Country', 'Date']]
        X_xTest_CS.Country = le.fit_transform(X_xTest_CS.Country)
        X_xTest_CS['State'] = le.fit_transform(X_xTest_CS['State'])

        xmodel1 = XGBRegressor(n_estimators=1000)
        xmodel1.fit(X_xTrain_CS, y1_xTrain_CS)
        y1_xpred = xmodel1.predict(X_xTest_CS)
        xmodel2 = XGBRegressor(n_estimators=1000)
        xmodel2.fit(X_xTrain_CS, y2_xTrain_CS)
        y2_xpred = xmodel2.predict(X_xTest_CS)

        xdata = pd.DataFrame({'ForecastId': X_xTest_CS_Id, 'ConfirmedCases': y1_xpred, 'Fatalities': y2_xpred})
        xout = pd.concat([xout, xdata], axis=0)
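A design caveat worth noting (our observation, not part of the kernel): gradient-boosted trees predict constant leaf values outside the training feature range, so with Date as the only time feature the forecasts flatten out beyond the last training day. A toy demonstration with made-up data:

import numpy as np
from xgboost import XGBRegressor

days = np.arange(60).reshape(-1, 1)
cases = np.exp(days.ravel() / 10.0)               # toy exponential growth
m = XGBRegressor(n_estimators=100).fit(days, cases)
future = np.array([[59], [70], [90]])
print(m.predict(future))                           # ~same value for all three: trees cannot extrapolate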
COVID19 Global Forecasting (Week 3)
8,781,854
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/train.csv', parse_dates=['Date'])
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/test.csv', parse_dates=['Date'])<rename_columns>
xout.ForecastId = xout.ForecastId.astype('int')
xout.tail()
xout.to_csv('submission.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,811,204
train = train.rename(columns={'Country_Region': 'Country', 'Province_State': 'State'})
test = test.rename(columns={'Country_Region': 'Country', 'Province_State': 'State'})<data_type_conversions>
import plotly.graph_objects as go
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from datetime import datetime
from pathlib import Path
from sklearn import preprocessing
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, LSTM, RNN, Dropout
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import optimizers
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder
from sklearn.model_selection import train_test_split
COVID19 Global Forecasting (Week 3)
8,811,204
train.State = train.State.fillna('Empty')
test.State = test.State.fillna('Empty')<feature_engineering>
train = pd.read_csv(r'/kaggle/input/covid19-global-forecasting-week-3/train.csv', parse_dates=['Date'])
test = pd.read_csv(r'/kaggle/input/covid19-global-forecasting-week-3/test.csv', parse_dates=['Date'])
submission = pd.read_csv(r'/kaggle/input/covid19-global-forecasting-week-3/submission.csv')
train.tail()
COVID19 Global Forecasting (Week 3)
8,811,204
train['day'] = train['Date'].dt.day
train['month'] = train['Date'].dt.month
train['dayofweek'] = train['Date'].dt.dayofweek
train['dayofyear'] = train['Date'].dt.dayofyear
train['quarter'] = train['Date'].dt.quarter
train['weekofyear'] = train['Date'].dt.weekofyear
test['day'] = test['Date'].dt.day
test['month'] = test['Date'].dt.month
test['dayofweek'] = test['Date'].dt.dayofweek
test['dayofyear'] = test['Date'].dt.dayofyear
test['quarter'] = test['Date'].dt.quarter
test['weekofyear'] = test['Date'].dt.weekofyear<import_modules>
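For a concrete sense of what these calendar features evaluate to, a quick check for one date:

d = pd.Timestamp('2020-04-01')
print(d.day, d.month, d.dayofweek, d.dayofyear, d.quarter, d.weekofyear)
# 1 4 2 92 2 14 — a Wednesday, day 92 of the year, ISO week 14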
mask = train['Date'].max()
world_cum_confirmed = sum(train[train['Date'] == mask].ConfirmedCases)
world_cum_fatal = sum(train[train['Date'] == mask].Fatalities)
print('Number of Countries are: ', len(train['Country_Region'].unique()))
print('Training dataset ends at: ', mask)
print('Number of cumulative confirmed cases worldwide are: ', world_cum_confirmed)
print('Number of cumulative fatal cases worldwide are: ', world_cum_fatal)
COVID19 Global Forecasting (Week 3)
8,811,204
from xgboost.sklearn import XGBRegressor<choose_model_class>
cum_per_country = train[train['Date'] == mask].groupby(['Date', 'Country_Region']).sum().sort_values(['ConfirmedCases'], ascending=False)
cum_per_country[:10]
COVID19 Global Forecasting (Week 3)
8,811,204
model = XGBRegressor(n_estimators=1000)<prepare_x_and_y>
date = train['Date'].unique()
cc_us = train[train['Country_Region'] == 'US'].groupby(['Date']).sum().ConfirmedCases
ft_us = train[train['Country_Region'] == 'US'].groupby(['Date']).sum().Fatalities
cc_ity = train[train['Country_Region'] == 'Italy'].groupby(['Date']).sum().ConfirmedCases
ft_ity = train[train['Country_Region'] == 'Italy'].groupby(['Date']).sum().Fatalities
cc_spn = train[train['Country_Region'] == 'Spain'].groupby(['Date']).sum().ConfirmedCases
ft_spn = train[train['Country_Region'] == 'Spain'].groupby(['Date']).sum().Fatalities
cc_gmn = train[train['Country_Region'] == 'Germany'].groupby(['Date']).sum().ConfirmedCases
ft_gmn = train[train['Country_Region'] == 'Germany'].groupby(['Date']).sum().Fatalities
cc_frc = train[train['Country_Region'] == 'France'].groupby(['Date']).sum().ConfirmedCases
ft_frc = train[train['Country_Region'] == 'France'].groupby(['Date']).sum().Fatalities

fig = go.Figure()
fig.add_trace(go.Scatter(x=date, y=cc_us, name='US'))
fig.add_trace(go.Scatter(x=date, y=cc_ity, name='Italy'))
fig.add_trace(go.Scatter(x=date, y=cc_spn, name='Spain'))
fig.add_trace(go.Scatter(x=date, y=cc_gmn, name='Germany'))
fig.add_trace(go.Scatter(x=date, y=cc_frc, name='France'))
fig.update_layout(title="Plot of Cumulative Cases for Top 5 countries (except China)",
                  xaxis_title="Date", yaxis_title="Cases")
fig.update_xaxes(nticks=30)
fig.show()
COVID19 Global Forecasting (Week 3)
8,811,204
countries = train.Country.unique().tolist()
results_df = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
for c in countries:
    states = train.loc[train.Country == c, :].State.unique().tolist()
    for state in states:
        X_train = train.loc[(train.Country == c) & (train.State == state), :]
        y1 = X_train[['ConfirmedCases']]
        y2 = X_train[['Fatalities']]
        X_train = X_train.drop(['Id', 'ConfirmedCases', 'Fatalities', 'State', 'Country', 'Date'], axis=1)
        X_test = test.loc[(test.Country == c) & (test.State == state), :]
        results_temp = X_test[['ForecastId']]
        X_test = X_test.drop(['ForecastId', 'State', 'Country', 'Date'], axis=1)
        # note: model_confirmed and model_fatalities alias the same shared XGBRegressor instance;
        # this only works because predict() is called before the refit on the second target
        model_confirmed = model.fit(X_train, y1)
        prediction_confirmed = model_confirmed.predict(X_test)
        model_fatalities = model.fit(X_train, y2)
        prediction_fatalities = model_fatalities.predict(X_test)
        results_temp['ConfirmedCases'] = prediction_confirmed
        results_temp['Fatalities'] = prediction_fatalities
        results_df = pd.concat([results_df, results_temp], axis=0)<prepare_output>
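A safer variant of the fitting step (our suggestion, not part of the original cell): give each target its own estimator, so a later refit cannot silently invalidate an earlier model. Reusing the names from the loop above:

model_confirmed = XGBRegressor(n_estimators=1000).fit(X_train, y1)
prediction_confirmed = model_confirmed.predict(X_test)
model_fatalities = XGBRegressor(n_estimators=1000).fit(X_train, y2)
prediction_fatalities = model_fatalities.predict(X_test)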
train.columns = train.columns.str.lower()
test.columns = test.columns.str.lower()
COVID19 Global Forecasting (Week 3)
8,811,204
sub = results_df.copy()<data_type_conversions>
train.fillna(' ', inplace=True)
test.fillna(' ', inplace=True)
train_id = train.pop('id')
test_id = test.pop('forecastid')
train['cp'] = train['country_region'] + train['province_state']
test['cp'] = test['country_region'] + test['province_state']
train.drop(['province_state', 'country_region'], axis=1, inplace=True)
test.drop(['province_state', 'country_region'], axis=1, inplace=True)
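The cp column is a single location key formed by concatenating country and province; a toy check with illustrative values:

sample = pd.DataFrame({'country_region': ['US', 'France'], 'province_state': ['New York', ' ']})
print((sample['country_region'] + sample['province_state']).tolist())
# ['USNew York', 'France '] — countries without a province keep the ' ' filler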
COVID19 Global Forecasting (Week 3)