Dataset columns: kernel_id (int64, values 24.2k to 23.3M), prompt (string, lengths 8 to 1.85M), completetion (string, lengths 1 to 182k), comp_name (string, lengths 5 to 57).
8,824,396
train_data['Nationality'].value_counts()<count_values>
def eval1(y, p):
    val_len = y.shape[1] - TRAIN_N
    return np.sqrt(mean_squared_error(y[:, TRAIN_N:TRAIN_N+val_len].flatten(),
                                      p[:, TRAIN_N:TRAIN_N+val_len].flatten()))

def run_c(params, X, test_size=50):
    gr_base = []
    gr_base_factor = []
    x_min = np.ma.MaskedArray(X, X < 1)
    x_min = x_min.argmin(axis=1)
    for i in range(X.shape[0]):
        temp = X[i, :]
        threshold = np.log(1 + params['min cases for growth rate'])
        num_days = params['last N days']
        if (temp > threshold).sum() > num_days:
            d = np.diff(temp[temp > threshold])[-num_days:]
            w = np.arange(len(d)) + 1
            w = w ** 5
            w = w / np.sum(w)
            gr_base.append(np.clip(np.average(d, weights=w), 0, params['growth rate max']))
            d2 = np.diff(d)
            w = np.arange(len(d2)) + 1
            w = w ** 10
            w = w / np.sum(w)
            gr_base_factor.append(np.clip(np.average(d2, weights=w), -0.5, params["growth rate factor max"]))
        else:
            gr_base.append(params['growth rate default'])
            gr_base_factor.append(params['growth rate factor'])
    gr_base = np.array(gr_base)
    gr_base_factor = np.array(gr_base_factor)
    preds = X.copy()
    for i in range(test_size):
        delta = np.clip(preds[:, -1], np.log(2), None) + gr_base * (1 + params['growth rate factor'] * (1 + params['growth rate factor factor']) ** i) ** np.log1p(i)
        preds = np.hstack((preds, delta.reshape(-1, 1)))
    return preds

params = {
    "min cases for growth rate": 0,
    "last N days": 15,
    "growth rate default": 0.2,
    "growth rate max": 0.3,
    "growth rate factor max": -0.1,
    "growth rate factor": -0.3,
    "growth rate factor factor": 0.01,
}

x = train_p_c
preds_c = run_c(params, np.log(1 + x.values)[:, :TRAIN_N])
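run_c extrapolates each region's log1p case curve by adding a weighted recent growth rate that is damped over the forecast horizon. A minimal sketch of the same idea on a single toy series (all values below are illustrative, not taken from the notebook):

import numpy as np

# Toy log1p(cumulative cases) series for one region (illustrative values only).
log_cases = np.log1p(np.array([10, 20, 35, 60, 90, 130, 170], dtype=float))

# Weighted recent daily log-deltas; the latest days dominate (w ~ rank**5).
d = np.diff(log_cases)[-5:]
w = (np.arange(len(d)) + 1.0) ** 5
growth = np.clip(np.average(d, weights=w / w.sum()), 0, 0.3)

# Each future day adds a growth term that shrinks as the horizon grows.
preds = list(log_cases)
for i in range(10):
    preds.append(preds[-1] + growth * (1 - 0.3 * 1.01 ** i) ** np.log1p(i))
print(np.expm1(preds[-1]))  # forecast cases 10 days ahead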
COVID19 Global Forecasting (Week 3)
8,824,396
test_data['Nationality'].value_counts()<data_type_conversions>
for i in range(N_AREAS):
    if 'China' in AREAS[i] and preds_c[i, TRAIN_N-1] < np.log(31):
        preds_c[i, TRAIN_N:] = preds_c[i, TRAIN_N-1]
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['Nationality'].fillna(value = 'Nationality_Unknown', inplace = True) test_data['Nationality'].fillna(value = 'Nationality_Unknown', inplace = True )<count_values>
def lin_w(sz):
    res = np.linspace(0, 1, sz+1, endpoint=False)[1:]
    return np.append(res, np.append([1], res[::-1]))

def run_f(params, X_c, X_f, X_f_r, test_size=50):
    X_f_r = np.array(np.ma.mean(np.ma.masked_outside(X_f_r, 0.03, 0.5)[:, :], axis=1))
    X_f_r = np.clip(X_f_r, params['fatality_rate_lower'], params['fatality_rate_upper'])
    X_c = np.clip(np.exp(X_c) - 1, 0, None)
    preds = X_f.copy()
    train_size = X_f.shape[1] - 1
    for i in range(test_size):
        t_lag = train_size + i - params['length']
        t_wsize = 5
        d = np.diff(X_c, axis=1)[:, t_lag-t_wsize:t_lag+1+t_wsize]
        delta = np.average(d, axis=1)
        delta = params['absolute growth'] + delta * X_f_r
        preds = np.hstack((preds, preds[:, -1].reshape(-1, 1) + delta.reshape(-1, 1)))
    return preds

params = {
    "length": 7,
    "absolute growth": 0.02,
    "fatality_rate_lower": 0.02,
    "fatality_rate_upper": 0.3,
}

preds_f_1 = run_f(params, preds_c, X_f, f_rate.values[:, :TRAIN_N])
preds_f_1 = np.log(1 + preds_f_1)
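run_f builds the fatality forecast as a clipped historical fatality rate applied to lagged case growth, plus a small absolute term per day. A minimal sketch of that relationship for one region and one day (numbers are illustrative):

import numpy as np

fatality_rate = np.clip(0.045, 0.02, 0.3)  # per-region rate, clipped to [2%, 30%]
lagged_case_growth = 250.0                 # average daily new cases about 7 days earlier
new_deaths_per_day = 0.02 + lagged_case_growth * fatality_rate
print(new_deaths_per_day)                  # fatalities added to the running total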
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['Size'].value_counts()<count_values>
class ZDatasetF(Dataset):
    def __init__(self, X_c, X_f=None, hist_len=10):
        self.X_c = X_c
        self.X_f = X_f
        self.hist_len = hist_len
        self.is_test = X_f is None

    def __len__(self):
        return self.X_c.shape[1]

    def __getitem__(self, idx):
        if self.is_test:
            return {'x_c': self.X_c[:, idx-self.hist_len:idx]}
        else:
            return {'x_c': self.X_c[:, idx-self.hist_len:idx],
                    'x_f': self.X_f[:, idx-1],
                    'y': np.log(1 + self.X_f[:, idx])}

class PrLayer2(nn.Module):
    def __init__(self, in_features1, in_features2):
        super(PrLayer2, self).__init__()
        self.weight0 = Parameter(torch.Tensor(1, 1, in_features2))
        self.weight1 = Parameter(torch.Tensor(1, in_features1, in_features2))
        self.reset_parameters()

    def reset_parameters(self):
        init.kaiming_uniform_(self.weight0, a=math.sqrt(5))
        init.kaiming_uniform_(self.weight1, a=math.sqrt(5))

    def forward(self, input):
        return input * torch.sigmoid(self.weight0 + self.weight1)

class ZModelF(nn.Module):
    def __init__(self, hist_len):
        super(ZModelF, self).__init__()
        self.l_conv = PrLayer2(len(X_c), hist_len-1)

    def forward(self, x_c, x_f):
        x = x_c[:, :, 1:] - x_c[:, :, :-1]
        res = torch.sum(self.l_conv(x), 2)
        return {'preds': torch.log(1 + x_f + res)}

class DummySampler(torch.utils.data.sampler.Sampler):
    def __init__(self, idx):
        self.idx = idx

    def __iter__(self):
        return iter(self.idx)

    def __len__(self):
        return len(self.idx)

def _smooth_l1_loss(target):
    t = torch.abs(target)
    t = torch.where(t < 1, 0.5 * t ** 2, t - 0.5)
    return torch.mean(t)

n_epochs = 5000
lr = 0.18
bag_size = 4
device = 'cpu'
hist_len = 14
loss_func = torch.nn.MSELoss()
reg_loss_func = _smooth_l1_loss
reg_factor = 0.035

train_dataset = ZDatasetF(np.exp(X_c) - 1, X_f, hist_len=hist_len)
test_dataset = ZDatasetF(np.exp(preds_c) - 1, hist_len=hist_len)

trn_idx = np.arange(hist_len + 1, len(train_dataset))
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(trn_idx)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=len(trn_idx),
                                           sampler=train_sampler, num_workers=0, pin_memory=True)

test_idx = np.arange(TRAIN_N, len(test_dataset))
test_sampler = DummySampler(test_idx)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1,
                                          sampler=test_sampler, num_workers=0, pin_memory=True)

gradient_accumulation = 1
preds_f = 0

for m_i in range(bag_size):
    model_f = ZModelF(hist_len=hist_len).to(device)
    optimizer_f = torch.optim.Adam(model_f.parameters(), lr=lr)
    model_f.train()
    start_time = time.time()
    for epoch in range(n_epochs):
        s = time.time()
        avg_train_loss = 0
        optimizer_f.zero_grad()
        for idx, data in enumerate(train_loader):
            X1 = data['x_c'].to(device).float()
            X2 = data['x_f'].to(device).float()
            y = data['y'].to(device).float()
            preds = model_f(X1, X2)['preds'].float()
            cond = X2 > np.log(10)
            preds = preds[cond]
            y = y[cond]
            loss = loss_func(preds, y)
            loss += reg_factor * reg_loss_func(model_f.l_conv.weight1)
            avg_train_loss += loss / len(train_loader)
            loss.backward()
            if (idx + 1) % gradient_accumulation == 0 or idx == len(train_loader) - 1:
                optimizer_f.step()
                optimizer_f.zero_grad()
        if epoch % 1000 == 0:
            model_f.eval()
            preds_f_delta = train_p_f.values[:, :TRAIN_N]
            for idx, data in enumerate(test_loader):
                X1 = data['x_c'].to(device).float()
                temp = model_f(X1, torch.Tensor(preds_f_delta[:, -1]).unsqueeze(0))['preds']
                temp = np.exp(temp.detach().cpu().numpy().reshape(-1, 1)) - 1
                preds_f_delta = np.hstack((preds_f_delta, temp))
            preds_f_delta = np.log(1 + preds_f_delta)
            model_f.train()
    model_f.eval()
    preds_f_delta = train_p_f.values[:, :TRAIN_N]
    for idx, data in enumerate(test_loader):
        X1 = data['x_c'].to(device).float()
        temp = model_f(X1, torch.Tensor(preds_f_delta[:, -1]).unsqueeze(0))['preds']
        temp = np.exp(temp.detach().cpu().numpy().reshape(-1, 1)) - 1
        preds_f_delta = np.hstack((preds_f_delta, temp))
    preds_f += preds_f_delta / bag_size

preds_f_2 = np.log(1 + preds_f)
print("Done")
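The learned layer above is essentially a sigmoid-gated, per-region weighting of the last hist_len-1 daily case deltas, summed into a single expected fatality increment. A minimal sketch of that gating idea (shapes and values below are illustrative, not the notebook's):

import torch

n_regions, hist = 3, 13
case_deltas = torch.rand(1, n_regions, hist)           # recent daily new cases per region
w_shared = torch.zeros(1, 1, hist)                     # weight shared across regions
w_region = torch.zeros(1, n_regions, hist)             # per-region weight
gate = torch.sigmoid(w_shared + w_region)              # gate values in (0, 1)
expected_new_deaths = (case_deltas * gate).sum(dim=2)  # one increment per region
print(expected_new_deaths.shape)                       # torch.Size([1, 3])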
COVID19 Global Forecasting (Week 3)
8,824,396
test_data['Size'].value_counts()<data_type_conversions>
preds_f = np.mean([preds_f_1, preds_f_2], axis=0 )
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['Size'].fillna(value = 'Size_Unknown', inplace = True) test_data['Size'].fillna(value = 'Size_Unknown', inplace = True )<count_values>
if False:
    val_len = train_p_c.values.shape[1] - TRAIN_N
    for i in range(val_len):
        d = i + TRAIN_N
        m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c_raw.values[:, d]), preds_c[:, d]))
        m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, d]), preds_f[:, d]))
        print(f"{d}: {(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]")
    print()
    m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c_raw.values[:, TRAIN_N:TRAIN_N+val_len]).flatten(),
                                    preds_c[:, TRAIN_N:TRAIN_N+val_len].flatten()))
    m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, TRAIN_N:TRAIN_N+val_len]).flatten(),
                                    preds_f[:, TRAIN_N:TRAIN_N+val_len].flatten()))
    print(f"{(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]")
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['TopThreeAmericanName'].value_counts()<count_values>
def get_cpmp_sub(save_oof=False, save_public_test=False):
    train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv')
    train['Province_State'].fillna('', inplace=True)
    train['Date'] = pd.to_datetime(train['Date'])
    train['day'] = train.Date.dt.dayofyear
    train['geo'] = ['_'.join(x) for x in zip(train['Country_Region'], train['Province_State'])]

    test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv')
    test['Province_State'].fillna('', inplace=True)
    test['Date'] = pd.to_datetime(test['Date'])
    test['day'] = test.Date.dt.dayofyear
    test['geo'] = ['_'.join(x) for x in zip(test['Country_Region'], test['Province_State'])]

    day_min = train['day'].min()
    train['day'] -= day_min
    test['day'] -= day_min
    train = train[train.day < TRAIN_N]

    min_test_val_day = test.day.min()
    max_test_val_day = train.day.max()
    max_test_day = test.day.max()
    num_days = max_test_day + 1

    train['ForecastId'] = -1
    test['Id'] = -1
    test['ConfirmedCases'] = 0
    test['Fatalities'] = 0

    debug = False
    data = pd.concat([train, test[test.day > max_test_val_day][train.columns]]).reset_index(drop=True)
    if debug:
        data = data[data['geo'] >= 'France_'].reset_index(drop=True)
    gc.collect()
    dates = data[data['geo'] == 'France_'].Date.values

    if 0:
        gr = data.groupby('geo')
        data['ConfirmedCases'] = gr.ConfirmedCases.transform('cummax')
        data['Fatalities'] = gr.Fatalities.transform('cummax')

    geo_data = data.pivot(index='geo', columns='day', values='ForecastId')
    num_geo = geo_data.shape[0]
    geo_id = {}
    for i, g in enumerate(geo_data.index):
        geo_id[g] = i

    ConfirmedCases = data.pivot(index='geo', columns='day', values='ConfirmedCases')
    Fatalities = data.pivot(index='geo', columns='day', values='Fatalities')

    if debug:
        cases = ConfirmedCases.values
        deaths = Fatalities.values
    else:
        cases = np.log1p(ConfirmedCases.values)
        deaths = np.log1p(Fatalities.values)

    def get_dataset(start_pred, num_train, lag_period):
        days = np.arange(start_pred - num_train + 1, start_pred + 1)
        lag_cases = np.vstack([cases[:, d - lag_period: d] for d in days])
        lag_deaths = np.vstack([deaths[:, d - lag_period: d] for d in days])
        target_cases = np.vstack([cases[:, d: d + 1] for d in days])
        target_deaths = np.vstack([deaths[:, d: d + 1] for d in days])
        geo_ids = np.vstack([geo_ids_base for d in days])
        country_ids = np.vstack([country_ids_base for d in days])
        return lag_cases, lag_deaths, target_cases, target_deaths, geo_ids, country_ids, days

    def update_valid_dataset(data, pred_death, pred_case):
        lag_cases, lag_deaths, target_cases, target_deaths, geo_ids, country_ids, days = data
        day = days[-1] + 1
        new_lag_cases = np.hstack([lag_cases[:, 1:], pred_case])
        new_lag_deaths = np.hstack([lag_deaths[:, 1:], pred_death])
        new_target_cases = cases[:, day:day+1]
        new_target_deaths = deaths[:, day:day+1]
        new_geo_ids = geo_ids
        new_country_ids = country_ids
        new_days = 1 + days
        return (new_lag_cases, new_lag_deaths, new_target_cases, new_target_deaths,
                new_geo_ids, new_country_ids, new_days)

    def fit_eval(lr_death, lr_case, data, start_lag_death, end_lag_death, num_lag_case, fit, score):
        lag_cases, lag_deaths, target_cases, target_deaths, geo_ids, country_ids, days = data
        X_death = np.hstack([lag_cases[:, -start_lag_death:-end_lag_death], country_ids])
        X_death = np.hstack([lag_deaths[:, -num_lag_case:], country_ids])
        X_death = np.hstack([lag_cases[:, -start_lag_death:-end_lag_death],
                             lag_deaths[:, -num_lag_case:], country_ids])
        y_death = target_deaths
        y_death_prev = lag_deaths[:, -1:]
        if fit:
            if 0:
                keep = (y_death > 0).ravel()
                X_death = X_death[keep]
                y_death = y_death[keep]
                y_death_prev = y_death_prev[keep]
            lr_death.fit(X_death, y_death)
        y_pred_death = lr_death.predict(X_death)
        y_pred_death = np.maximum(y_pred_death, y_death_prev)

        X_case = np.hstack([lag_cases[:, -num_lag_case:], geo_ids])
        X_case = lag_cases[:, -num_lag_case:]
        y_case = target_cases
        y_case_prev = lag_cases[:, -1:]
        if fit:
            lr_case.fit(X_case, y_case)
        y_pred_case = lr_case.predict(X_case)
        y_pred_case = np.maximum(y_pred_case, y_case_prev)

        if score:
            death_score = val_score(y_death, y_pred_death)
            case_score = val_score(y_case, y_pred_case)
        else:
            death_score = 0
            case_score = 0
        return death_score, case_score, y_pred_death, y_pred_case

    def train_model(train, valid, start_lag_death, end_lag_death, num_lag_case, num_val, score=True):
        alpha = 3
        lr_death = Ridge(alpha=alpha, fit_intercept=False)
        lr_case = Ridge(alpha=alpha, fit_intercept=True)
        (train_death_score, train_case_score, train_pred_death, train_pred_case,
         ) = fit_eval(lr_death, lr_case, train, start_lag_death, end_lag_death, num_lag_case,
                      fit=True, score=score)
        death_scores = []
        case_scores = []
        death_pred = []
        case_pred = []
        for i in range(num_val):
            (valid_death_score, valid_case_score, valid_pred_death, valid_pred_case,
             ) = fit_eval(lr_death, lr_case, valid, start_lag_death, end_lag_death, num_lag_case,
                          fit=False, score=score)
            death_scores.append(valid_death_score)
            case_scores.append(valid_case_score)
            death_pred.append(valid_pred_death)
            case_pred.append(valid_pred_case)
            if 0:
                print('val death: %0.3f' % valid_death_score,
                      'val case: %0.3f' % valid_case_score,
                      'val : %0.3f' % np.mean([valid_death_score, valid_case_score]), flush=True)
            valid = update_valid_dataset(valid, valid_pred_death, valid_pred_case)
        if score:
            death_scores = np.sqrt(np.mean([s**2 for s in death_scores]))
            case_scores = np.sqrt(np.mean([s**2 for s in case_scores]))
            if 0:
                print('train death: %0.3f' % train_death_score,
                      'train case: %0.3f' % train_case_score,
                      'val death: %0.3f' % death_scores,
                      'val case: %0.3f' % case_scores,
                      'val : %0.3f' % ((death_scores + case_scores) / 2), flush=True)
            else:
                print('%0.4f' % case_scores, ', %0.4f' % death_scores,
                      '= %0.4f' % ((death_scores + case_scores) / 2), flush=True)
        death_pred = np.hstack(death_pred)
        case_pred = np.hstack(case_pred)
        return death_scores, case_scores, death_pred, case_pred

    countries = [g.split('_')[0] for g in geo_data.index]
    countries = pd.factorize(countries)[0]
    country_ids_base = countries.reshape((-1, 1))
    ohe = OneHotEncoder(sparse=False)
    country_ids_base = 0.2 * ohe.fit_transform(country_ids_base)

    geo_ids_base = np.arange(num_geo).reshape((-1, 1))
    ohe = OneHotEncoder(sparse=False)
    geo_ids_base = 0.1 * ohe.fit_transform(geo_ids_base)

    def val_score(true, pred):
        pred = np.log1p(np.round(np.expm1(pred) - 0.2))
        return np.sqrt(mean_squared_error(true.ravel(), pred.ravel()))

    def val_score(true, pred):
        return np.sqrt(mean_squared_error(true.ravel(), pred.ravel()))

    start_lag_death, end_lag_death = 14, 6
    num_train = 5
    num_lag_case = 14
    lag_period = max(start_lag_death, num_lag_case)

    def get_oof(start_val_delta=0):
        start_val = min_test_val_day + start_val_delta
        last_train = start_val - 1
        num_val = max_test_val_day - start_val + 1
        print(dates[start_val], start_val, num_val)
        train_data = get_dataset(last_train, num_train, lag_period)
        valid_data = get_dataset(start_val, 1, lag_period)
        _, _, val_death_preds, val_case_preds = train_model(train_data, valid_data, start_lag_death,
                                                            end_lag_death, num_lag_case, num_val)
        pred_deaths = Fatalities.iloc[:, start_val:start_val+num_val].copy()
        pred_deaths.iloc[:, :] = np.expm1(val_death_preds)
        pred_deaths = pred_deaths.stack().reset_index()
        pred_deaths.columns = ['geo', 'day', 'Fatalities']
        pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy()
        pred_cases.iloc[:, :] = np.expm1(val_case_preds)
        pred_cases = pred_cases.stack().reset_index()
        pred_cases.columns = ['geo', 'day', 'ConfirmedCases']
        sub = train[['Date', 'Id', 'geo', 'day']]
        sub = sub.merge(pred_cases, how='left', on=['geo', 'day'])
        sub = sub.merge(pred_deaths, how='left', on=['geo', 'day'])
        sub = sub[sub.day >= start_val]
        sub = sub[['Id', 'ConfirmedCases', 'Fatalities']].copy()
        return sub

    if save_oof:
        for start_val_delta, date in zip(range(3, -8, -3),
                                         ['2020-03-22', '2020-03-19', '2020-03-16', '2020-03-13']):
            print(date, end=' ')
            oof = get_oof(start_val_delta)
            oof.to_csv('../submissions/cpmp-%s.csv' % date, index=None)

    def get_sub(start_val_delta=0):
        start_val = min_test_val_day + start_val_delta
        last_train = start_val - 1
        num_val = max_test_val_day - start_val + 1
        print(dates[last_train], start_val, num_val)
        num_lag_case = 14
        train_data = get_dataset(last_train, num_train, lag_period)
        valid_data = get_dataset(start_val, 1, lag_period)
        _, _, val_death_preds, val_case_preds = train_model(train_data, valid_data, start_lag_death,
                                                            end_lag_death, num_lag_case, num_val)
        pred_deaths = Fatalities.iloc[:, start_val:start_val+num_val].copy()
        pred_deaths.iloc[:, :] = np.expm1(val_death_preds)
        pred_deaths = pred_deaths.stack().reset_index()
        pred_deaths.columns = ['geo', 'day', 'Fatalities']
        pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy()
        pred_cases.iloc[:, :] = np.expm1(val_case_preds)
        pred_cases = pred_cases.stack().reset_index()
        pred_cases.columns = ['geo', 'day', 'ConfirmedCases']
        sub = test[['Date', 'ForecastId', 'geo', 'day']]
        sub = sub.merge(pred_cases, how='left', on=['geo', 'day'])
        sub = sub.merge(pred_deaths, how='left', on=['geo', 'day'])
        sub = sub.fillna(0)
        sub = sub[['ForecastId', 'ConfirmedCases', 'Fatalities']]
        return sub

    known_test = train[['geo', 'day', 'ConfirmedCases', 'Fatalities']].merge(
        test[['geo', 'day', 'ForecastId']], how='left', on=['geo', 'day'])
    known_test = known_test[['ForecastId', 'ConfirmedCases', 'Fatalities']][known_test.ForecastId.notnull()].copy()

    unknow_test = test[test.day > max_test_val_day]

    def get_final_sub():
        start_val = max_test_val_day + 1
        last_train = start_val - 1
        num_val = max_test_day - start_val + 1
        print(dates[last_train], start_val, num_val)
        num_lag_case = num_val + 3
        train_data = get_dataset(last_train, num_train, lag_period)
        valid_data = get_dataset(start_val, 1, lag_period)
        (_, _, val_death_preds, val_case_preds) = train_model(train_data, valid_data, start_lag_death,
                                                              end_lag_death, num_lag_case, num_val,
                                                              score=False)
        pred_deaths = Fatalities.iloc[:, start_val:start_val+num_val].copy()
        pred_deaths.iloc[:, :] = np.expm1(val_death_preds)
        pred_deaths = pred_deaths.stack().reset_index()
        pred_deaths.columns = ['geo', 'day', 'Fatalities']
        pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy()
        pred_cases.iloc[:, :] = np.expm1(val_case_preds)
        pred_cases = pred_cases.stack().reset_index()
        pred_cases.columns = ['geo', 'day', 'ConfirmedCases']
        print(unknow_test.shape, pred_deaths.shape, pred_cases.shape)
        sub = unknow_test[['Date', 'ForecastId', 'geo', 'day']]
        sub = sub.merge(pred_cases, how='left', on=['geo', 'day'])
        sub = sub.merge(pred_deaths, how='left', on=['geo', 'day'])
        sub = sub[['ForecastId', 'ConfirmedCases', 'Fatalities']]
        sub = pd.concat([known_test, sub])
        return sub

    if save_public_test:
        sub = get_sub()
    else:
        sub = get_final_sub()
    return sub
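The CPMP component is a pair of Ridge regressions on lagged log1p counts applied recursively: each predicted day is appended to the lag window and fed back in for the next day, with predictions clipped to stay monotone. A minimal sketch of that recursion for a single toy series (all names and numbers below are illustrative, not the notebook's):

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
series = np.log1p(np.cumsum(rng.integers(1, 20, size=60)).astype(float))  # toy log1p cases
lag = 14

# One training row per day: the previous `lag` values predict the next value.
X = np.stack([series[i-lag:i] for i in range(lag, len(series))])
y = series[lag:]
model = Ridge(alpha=3).fit(X, y)

# Recursive multi-day forecast: feed each prediction back into the lag window.
window = list(series[-lag:])
for _ in range(7):
    nxt = model.predict(np.array(window[-lag:]).reshape(1, -1))[0]
    nxt = max(nxt, window[-1])  # keep cumulative counts non-decreasing
    window.append(nxt)
print(np.expm1(window[-7:]))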
COVID19 Global Forecasting (Week 3)
8,824,396
test_data['TopThreeAmericanName'].value_counts()<data_type_conversions>
if False:
    val_len = train_p_c.values.shape[1] - TRAIN_N
    m1s = []
    m2s = []
    for i in range(val_len):
        d = i + TRAIN_N
        m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c_raw.values[:, d]), preds_c_cpmp[:, d-START_PUBLIC]))
        m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, d]), preds_f_cpmp[:, d-START_PUBLIC]))
        print(f"{d}: {(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]")
        m1s += [m1]
        m2s += [m2]
    print()
    m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c_raw.values[:, TRAIN_N:TRAIN_N+val_len]).flatten(),
                                    preds_c_cpmp[:, TRAIN_N-START_PUBLIC:TRAIN_N-START_PUBLIC+val_len].flatten()))
    m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, TRAIN_N:TRAIN_N+val_len]).flatten(),
                                    preds_f_cpmp[:, TRAIN_N-START_PUBLIC:TRAIN_N-START_PUBLIC+val_len].flatten()))
    print(f"{(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]")
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['TopThreeAmericanName'].fillna(value = 'TopThreeAmericanName_Unknown', inplace = True) test_data['TopThreeAmericanName'].fillna(value = 'TopThreeAmericanName_Unknown', inplace = True )<count_values>
COMP = '../input/covid19-global-forecasting-week-3'
DATEFORMAT = '%Y-%m-%d'

def get_comp_data(COMP):
    train = pd.read_csv(f'{COMP}/train.csv')
    test = pd.read_csv(f'{COMP}/test.csv')
    submission = pd.read_csv(f'{COMP}/submission.csv')
    print(train.shape, test.shape, submission.shape)
    train['Country_Region'] = train['Country_Region'].str.replace(',', '').fillna('N/A')
    test['Country_Region'] = test['Country_Region'].str.replace(',', '').fillna('N/A')
    train['Location'] = train['Country_Region'].fillna('') + '-' + train['Province_State'].fillna('N/A')
    test['Location'] = test['Country_Region'].fillna('') + '-' + test['Province_State'].fillna('N/A')
    train['LogConfirmed'] = to_log(train.ConfirmedCases)
    train['LogFatalities'] = to_log(train.Fatalities)
    train = train.drop(columns=['Province_State'])
    test = test.drop(columns=['Province_State'])
    country_codes = pd.read_csv('../input/covid19-metadata/country_codes.csv', keep_default_na=False)
    train = train.merge(country_codes, on='Country_Region', how='left')
    test = test.merge(country_codes, on='Country_Region', how='left')
    train['continent'] = train['continent'].fillna('')
    test['continent'] = test['continent'].fillna('')
    train['DateTime'] = pd.to_datetime(train['Date'])
    test['DateTime'] = pd.to_datetime(test['Date'])
    return train, test, submission

def process_each_location(df):
    dfs = []
    for loc, df in tqdm(df.groupby('Location')):
        df = df.sort_values(by='Date')
        df['Fatalities'] = df['Fatalities'].cummax()
        df['ConfirmedCases'] = df['ConfirmedCases'].cummax()
        df['LogFatalities'] = df['LogFatalities'].cummax()
        df['LogConfirmed'] = df['LogConfirmed'].cummax()
        df['LogConfirmedNextDay'] = df['LogConfirmed'].shift(-1)
        df['ConfirmedNextDay'] = df['ConfirmedCases'].shift(-1)
        df['DateNextDay'] = df['Date'].shift(-1)
        df['LogFatalitiesNextDay'] = df['LogFatalities'].shift(-1)
        df['FatalitiesNextDay'] = df['Fatalities'].shift(-1)
        df['LogConfirmedDelta'] = df['LogConfirmedNextDay'] - df['LogConfirmed']
        df['ConfirmedDelta'] = df['ConfirmedNextDay'] - df['ConfirmedCases']
        df['LogFatalitiesDelta'] = df['LogFatalitiesNextDay'] - df['LogFatalities']
        df['FatalitiesDelta'] = df['FatalitiesNextDay'] - df['Fatalities']
        dfs.append(df)
    return pd.concat(dfs)

def add_days(d, k):
    return dt.datetime.strptime(d, DATEFORMAT) + dt.timedelta(days=k)

def to_log(x):
    return np.log(x + 1)

def to_exp(x):
    return np.exp(x) - 1

def get_beluga_sub():
    train, test, submission = get_comp_data(COMP)
    TRAIN_START = train.Date.min()
    TEST_START = test.Date.min()
    TRAIN_END = train.Date.max()
    TEST_END = test.Date.max()
    train_clean = process_each_location(train)
    # train_clean['Geo ...  (statement truncated in the source)
    DECAY = 0.93

    confirmed_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[['Id']].count().reset_index()
    GLOBAL_DELTA = 0.11
    confirmed_deltas['DELTA'] = GLOBAL_DELTA
    confirmed_deltas.loc[confirmed_deltas.continent == 'Africa', 'DELTA'] = 0.14
    confirmed_deltas.loc[confirmed_deltas.continent == 'Oceania', 'DELTA'] = 0.06
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Korea South', 'DELTA'] = 0.011
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'US', 'DELTA'] = 0.15
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'China', 'DELTA'] = 0.01
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Japan', 'DELTA'] = 0.05
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Singapore', 'DELTA'] = 0.05
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Taiwan*', 'DELTA'] = 0.05
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Switzerland', 'DELTA'] = 0.05
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Norway', 'DELTA'] = 0.05
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Iceland', 'DELTA'] = 0.05
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Austria', 'DELTA'] = 0.06
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Italy', 'DELTA'] = 0.04
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Spain', 'DELTA'] = 0.08
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Portugal', 'DELTA'] = 0.12
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Israel', 'DELTA'] = 0.12
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Iran', 'DELTA'] = 0.08
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Germany', 'DELTA'] = 0.07
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Malaysia', 'DELTA'] = 0.06
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Russia', 'DELTA'] = 0.18
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Ukraine', 'DELTA'] = 0.18
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Brazil', 'DELTA'] = 0.12
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Turkey', 'DELTA'] = 0.18
    confirmed_deltas.loc[confirmed_deltas.Country_Region == 'Philippines', 'DELTA'] = 0.18
    confirmed_deltas.loc[confirmed_deltas.Location == 'France-N/A', 'DELTA'] = 0.1
    confirmed_deltas.loc[confirmed_deltas.Location == 'United Kingdom-N/A', 'DELTA'] = 0.12
    confirmed_deltas.loc[confirmed_deltas.Location == 'Diamond Princess-N/A', 'DELTA'] = 0.00
    confirmed_deltas.loc[confirmed_deltas.Location == 'China-Hong Kong', 'DELTA'] = 0.08
    confirmed_deltas.loc[confirmed_deltas.Location == 'San Marino-N/A', 'DELTA'] = 0.03

    daily_log_confirmed = train_clean.pivot('Location', 'Date', 'LogConfirmed').reset_index()
    daily_log_confirmed = daily_log_confirmed.sort_values(TRAIN_END, ascending=False)
    for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))):
        new_day = str(d).split(' ')[0]
        last_day = dt.datetime.strptime(new_day, DATEFORMAT) - dt.timedelta(days=1)
        last_day = last_day.strftime(DATEFORMAT)
        for loc in confirmed_deltas.Location.values:
            confirmed_delta = confirmed_deltas.loc[confirmed_deltas.Location == loc, 'DELTA'].values[0]
            daily_log_confirmed.loc[daily_log_confirmed.Location == loc, new_day] = \
                daily_log_confirmed.loc[daily_log_confirmed.Location == loc, last_day] + \
                confirmed_delta * DECAY ** i
    confirmed_prediciton = pd.melt(daily_log_confirmed, id_vars='Location')
    confirmed_prediciton['ConfirmedCases'] = to_exp(confirmed_prediciton['value'])
    confirmed_prediciton_cases = confirmed_prediciton.copy()

    # latest = train_clean[train_clean.Date == TRAIN_END][['Geo ...                         (truncated in the source)
    # daily_death_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot('Geo ...     (truncated in the source)
    # daily_death_deltas = latest.merge(daily_death_deltas, on='Geo ...                     (truncated in the source)

    death_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[['Id']].count().reset_index()
    GLOBAL_DELTA = 0.11
    death_deltas['DELTA'] = GLOBAL_DELTA
    death_deltas.loc[death_deltas.Country_Region == 'China', 'DELTA'] = 0.005
    death_deltas.loc[death_deltas.continent == 'Oceania', 'DELTA'] = 0.08
    death_deltas.loc[death_deltas.Country_Region == 'Korea South', 'DELTA'] = 0.04
    death_deltas.loc[death_deltas.Country_Region == 'Japan', 'DELTA'] = 0.04
    death_deltas.loc[death_deltas.Country_Region == 'Singapore', 'DELTA'] = 0.05
    death_deltas.loc[death_deltas.Country_Region == 'Taiwan*', 'DELTA'] = 0.06
    death_deltas.loc[death_deltas.Country_Region == 'US', 'DELTA'] = 0.17
    death_deltas.loc[death_deltas.Country_Region == 'Switzerland', 'DELTA'] = 0.15
    death_deltas.loc[death_deltas.Country_Region == 'Norway', 'DELTA'] = 0.15
    death_deltas.loc[death_deltas.Country_Region == 'Iceland', 'DELTA'] = 0.01
    death_deltas.loc[death_deltas.Country_Region == 'Austria', 'DELTA'] = 0.14
    death_deltas.loc[death_deltas.Country_Region == 'Italy', 'DELTA'] = 0.07
    death_deltas.loc[death_deltas.Country_Region == 'Spain', 'DELTA'] = 0.1
    death_deltas.loc[death_deltas.Country_Region == 'Portugal', 'DELTA'] = 0.13
    death_deltas.loc[death_deltas.Country_Region == 'Israel', 'DELTA'] = 0.16
    death_deltas.loc[death_deltas.Country_Region == 'Iran', 'DELTA'] = 0.06
    death_deltas.loc[death_deltas.Country_Region == 'Germany', 'DELTA'] = 0.14
    death_deltas.loc[death_deltas.Country_Region == 'Malaysia', 'DELTA'] = 0.14
    death_deltas.loc[death_deltas.Country_Region == 'Russia', 'DELTA'] = 0.2
    death_deltas.loc[death_deltas.Country_Region == 'Ukraine', 'DELTA'] = 0.2
    death_deltas.loc[death_deltas.Country_Region == 'Brazil', 'DELTA'] = 0.2
    death_deltas.loc[death_deltas.Country_Region == 'Turkey', 'DELTA'] = 0.22
    death_deltas.loc[death_deltas.Country_Region == 'Philippines', 'DELTA'] = 0.12
    death_deltas.loc[death_deltas.Location == 'France-N/A', 'DELTA'] = 0.14
    death_deltas.loc[death_deltas.Location == 'United Kingdom-N/A', 'DELTA'] = 0.14
    death_deltas.loc[death_deltas.Location == 'Diamond Princess-N/A', 'DELTA'] = 0.00
    death_deltas.loc[death_deltas.Location == 'China-Hong Kong', 'DELTA'] = 0.01
    death_deltas.loc[death_deltas.Location == 'San Marino-N/A', 'DELTA'] = 0.05

    daily_log_deaths = train_clean.pivot('Location', 'Date', 'LogFatalities').reset_index()
    daily_log_deaths = daily_log_deaths.sort_values(TRAIN_END, ascending=False)
    for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))):
        new_day = str(d).split(' ')[0]
        last_day = dt.datetime.strptime(new_day, DATEFORMAT) - dt.timedelta(days=1)
        last_day = last_day.strftime(DATEFORMAT)
        for loc in death_deltas.Location:
            death_delta = death_deltas.loc[death_deltas.Location == loc, 'DELTA'].values[0]
            daily_log_deaths.loc[daily_log_deaths.Location == loc, new_day] = \
                daily_log_deaths.loc[daily_log_deaths.Location == loc, last_day] + \
                death_delta * DECAY ** i
    confirmed_prediciton = pd.melt(daily_log_deaths, id_vars='Location')
    confirmed_prediciton['Fatalities'] = to_exp(confirmed_prediciton['value'])
    confirmed_prediciton_fatalities = confirmed_prediciton.copy()
    return confirmed_prediciton_cases, confirmed_prediciton_fatalities

preds_beluga_cases, preds_beluga_fatalities = get_beluga_sub()
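The Beluga component extrapolates each location's log1p curve with a hand-tuned daily delta that decays geometrically over the horizon, i.e. LogConfirmed[t+i+1] = LogConfirmed[t+i] + DELTA * DECAY**i. A minimal sketch of that update rule for one location (values are illustrative):

import numpy as np

last_log = np.log1p(5000.0)  # last observed log1p(ConfirmedCases) for one location
DELTA, DECAY = 0.11, 0.93    # per-location daily log-delta and global decay

curve = [last_log]
for i in range(30):
    curve.append(curve[-1] + DELTA * DECAY ** i)
print(np.expm1(curve[-1]))   # forecast cases 30 days out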
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['PRIMEUNIT'].value_counts()<count_values>
locs = p_f_beluga.index.values
COVID19 Global Forecasting (Week 3)
8,824,396
test_data['PRIMEUNIT'].value_counts()<data_type_conversions>
warnings.filterwarnings('ignore' )
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['PRIMEUNIT'].fillna(value = 'PRIMEUNIT_Unknown', inplace = True) test_data['PRIMEUNIT'].fillna(value = 'PRIMEUNIT_Unknown', inplace = True )<count_values>
FIRST_TEST = test_orig['Date'].apply(lambda x: x.dayofyear ).min()
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['AUCGUART'].value_counts()<count_values>
COVID19 Global Forecasting (Week 3)
8,824,396
test_data['AUCGUART'].value_counts()<data_type_conversions>
def do_aggregation(df, col, mean_range):
    df_new = copy.deepcopy(df)
    col_new = '{}_({}-{})'.format(col, mean_range[0], mean_range[1])
    df_new[col_new] = 0
    tmp = df_new[col].rolling(mean_range[1] - mean_range[0] + 1).mean()
    df_new[col_new][mean_range[0]:] = tmp[:-(mean_range[0])]
    df_new[col_new][pd.isna(df_new[col_new])] = 0
    return df_new[[col_new]].reset_index(drop=True)

def do_aggregations(df):
    df = pd.concat([df, do_aggregation(df, 'cases/day', [1, 1]).reset_index(drop=True)], axis=1)
    df = pd.concat([df, do_aggregation(df, 'cases/day', [1, 7]).reset_index(drop=True)], axis=1)
    df = pd.concat([df, do_aggregation(df, 'cases/day', [8, 14]).reset_index(drop=True)], axis=1)
    df = pd.concat([df, do_aggregation(df, 'cases/day', [15, 21]).reset_index(drop=True)], axis=1)
    df = pd.concat([df, do_aggregation(df, 'fatal/day', [1, 1]).reset_index(drop=True)], axis=1)
    df = pd.concat([df, do_aggregation(df, 'fatal/day', [1, 7]).reset_index(drop=True)], axis=1)
    df = pd.concat([df, do_aggregation(df, 'fatal/day', [8, 14]).reset_index(drop=True)], axis=1)
    df = pd.concat([df, do_aggregation(df, 'fatal/day', [15, 21]).reset_index(drop=True)], axis=1)
    for threshold in [1, 10, 100]:
        days_under_threshold = (df['ConfirmedCases'] < threshold).sum()
        tmp = df['day'].values - 22 - days_under_threshold
        tmp[tmp <= 0] = 0
        df['days_since_{}cases'.format(threshold)] = tmp
    for threshold in [1, 10, 100]:
        days_under_threshold = (df['Fatalities'] < threshold).sum()
        tmp = df['day'].values - 22 - days_under_threshold
        tmp[tmp <= 0] = 0
        df['days_since_{}fatal'.format(threshold)] = tmp
    if df['place_id'][0] == 'China/Hubei':
        df['days_since_1cases'] += 35
        df['days_since_10cases'] += 35 - 13
        df['days_since_100cases'] += 4
        df['days_since_1fatal'] += 13
    return df
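do_aggregation builds lagged rolling means of the daily counts, shifted so that a feature such as cases/day_(1-7) is the mean over days 1 through 7 before the current row. A minimal sketch of that shift-then-roll idea on a toy series (values illustrative):

import pandas as pd

daily = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], name='cases/day')
lo, hi = 1, 7  # "1 to 7 days ago" window
# rolling mean over (hi - lo + 1) days, then shifted forward by lo days
feat = daily.rolling(hi - lo + 1).mean().shift(lo).fillna(0)
print(feat.tolist())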
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['AUCGUART'].fillna(value = 'AUCGUART_Unknown', inplace = True) test_data['AUCGUART'].fillna(value = 'AUCGUART_Unknown', inplace = True )<drop_column>
def feature_engineering_oscii():
    def fix_area(x):
        try:
            x_new = x['Country_Region'] + "/" + x['Province_State']
        except:
            x_new = x['Country_Region']
        return x_new

    def fix_area2(x):
        try:
            x_new = x['Country/Region'] + "/" + x['Province/State']
        except:
            x_new = x['Country/Region']
        return x_new

    def encode_label(df, col, freq_limit=0):
        df[col][pd.isna(df[col])] = 'nan'
        tmp = df[col].value_counts()
        cols = tmp.index.values
        freq = tmp.values
        num_cols = (freq >= freq_limit).sum()
        print("col: {}, num_cat: {}, num_reduced: {}".format(col, len(cols), num_cols))
        col_new = '{}_le'.format(col)
        df_new = pd.DataFrame(np.ones(len(df), np.int16) * (num_cols - 1), columns=[col_new])
        for i, item in enumerate(cols[:num_cols]):
            df_new[col_new][df[col] == item] = i
        return df_new

    def get_df_le(df, col_index, col_cat):
        df_new = df[[col_index]]
        for col in col_cat:
            df_tmp = encode_label(df, col)
            df_new = pd.concat([df_new, df_tmp], axis=1)
        return df_new

    def to_float(x):
        x_new = 0
        try:
            x_new = float(x.replace(",", ""))
        except:
            x_new = np.nan
        return x_new

    df_train = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv")
    df_test = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv")
    df_traintest = pd.concat([df_train, df_test])

    print('process_date')
    df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
    df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
    day_min = df_traintest['day'].min()
    df_traintest['days'] = df_traintest['day'] - day_min
    df_traintest.loc[df_traintest['Province_State'].isnull(), 'Province_State'] = 'N/A'
    df_traintest['place_id'] = df_traintest['Country_Region'] + '_' + df_traintest['Province_State']

    print('add lat and long')
    df_latlong = pd.read_csv("../input/smokingstats/df_Latlong.csv")
    df_latlong.loc[df_latlong['Province/State'].isnull(), 'Province_State'] = 'N/A'
    df_latlong['place_id'] = df_latlong['Country/Region'] + '_' + df_latlong['Province/State']
    df_latlong = df_latlong[df_latlong['place_id'].duplicated() == False]
    df_traintest = pd.merge(df_traintest, df_latlong[['place_id', 'Lat', 'Long']], on='place_id', how='left')
    places = np.sort(df_traintest['place_id'].unique())

    print('calc cases, fatalities per day')
    df_traintest2 = copy.deepcopy(df_traintest)
    df_traintest2['cases/day'] = 0
    df_traintest2['fatal/day'] = 0
    tmp_list = np.zeros(len(df_traintest2))
    for place in places:
        tmp = df_traintest2['ConfirmedCases'][df_traintest2['place_id'] == place].values
        tmp[1:] -= tmp[:-1]
        df_traintest2['cases/day'][df_traintest2['place_id'] == place] = tmp
        tmp = df_traintest2['Fatalities'][df_traintest2['place_id'] == place].values
        tmp[1:] -= tmp[:-1]
        df_traintest2['fatal/day'][df_traintest2['place_id'] == place] = tmp

    print('do aggregation')
    df_traintest3 = []
    for place in places[:]:
        df_tmp = df_traintest2[df_traintest2['place_id'] == place].reset_index(drop=True)
        df_tmp = do_aggregations(df_tmp)
        df_traintest3.append(df_tmp)
    df_traintest3 = pd.concat(df_traintest3).reset_index(drop=True)

    print('add smoking')
    df_smoking = pd.read_csv("../input/smokingstats/share-of-adults-who-smoke.csv")
    df_smoking_recent = df_smoking.sort_values('Year', ascending=False).reset_index(drop=True)
    df_smoking_recent = df_smoking_recent[df_smoking_recent['Entity'].duplicated() == False]
    df_smoking_recent['Country_Region'] = df_smoking_recent['Entity']
    df_smoking_recent['SmokingRate'] = df_smoking_recent['Smoking prevalence, total(ages 15+ )(% of adults)']
    df_traintest4 = pd.merge(df_traintest3, df_smoking_recent[['Country_Region', 'SmokingRate']],
                             on='Country_Region', how='left')
    SmokingRate = df_smoking_recent['SmokingRate'][df_smoking_recent['Entity'] == 'World'].values[0]
    df_traintest4['SmokingRate'][pd.isna(df_traintest4['SmokingRate'])] = SmokingRate

    print('add data from World Economic Outlook Database')
    df_weo = pd.read_csv("../input/smokingstats/WEO.csv")
    subs = df_weo['Subject Descriptor'].unique()[:-1]
    df_weo_agg = df_weo[['Country']][df_weo['Country'].duplicated() == False].reset_index(drop=True)
    for sub in subs[:]:
        df_tmp = df_weo[['Country', '2019']][df_weo['Subject Descriptor'] == sub].reset_index(drop=True)
        df_tmp = df_tmp[df_tmp['Country'].duplicated() == False].reset_index(drop=True)
        df_tmp.columns = ['Country', sub]
        df_weo_agg = df_weo_agg.merge(df_tmp, on='Country', how='left')
    df_weo_agg.columns = ["".join(c if c.isalnum() else "_" for c in str(x)) for x in df_weo_agg.columns]
    df_weo_agg['Country_Region'] = df_weo_agg['Country']
    df_traintest5 = pd.merge(df_traintest4, df_weo_agg, on='Country_Region', how='left')

    print('add Life expectancy')
    df_life = pd.read_csv("../input/smokingstats/Life expectancy at birth.csv")
    tmp = df_life.iloc[:, 1].values.tolist()
    df_life = df_life[['Country', '2018']]

    def func(x):
        x_new = 0
        try:
            x_new = float(x.replace(",", ""))
        except:
            x_new = np.nan
        return x_new

    df_life['2018'] = df_life['2018'].apply(lambda x: func(x))
    df_life = df_life[['Country', '2018']]
    df_life.columns = ['Country_Region', 'LifeExpectancy']
    df_traintest6 = pd.merge(df_traintest5, df_life, on='Country_Region', how='left')

    print("add additional info from countryinfo dataset")
    df_country = pd.read_csv("../input/countryinfo/covid19countryinfo.csv")
    df_country['Country_Region'] = df_country['country']
    df_country = df_country[df_country['country'].duplicated() == False]
    df_traintest7 = pd.merge(df_traintest6,
                             df_country.drop(['tests', 'testpop', 'country'], axis=1),
                             on=['Country_Region'], how='left')
    df_traintest7['id'] = np.arange(len(df_traintest7))
    df_le = get_df_le(df_traintest7, 'id', ['Country_Region', 'Province_State'])
    df_traintest8 = pd.merge(df_traintest7, df_le, on='id', how='left')

    print('convert object type to float')
    cols = [
        'Gross_domestic_product__constant_prices',
        'Gross_domestic_product__current_prices',
        'Gross_domestic_product__deflator',
        'Gross_domestic_product_per_capita__constant_prices',
        'Gross_domestic_product_per_capita__current_prices',
        'Output_gap_in_percent_of_potential_GDP',
        'Gross_domestic_product_based_on_purchasing_power_parity__PPP__valuation_of_country_GDP',
        'Gross_domestic_product_based_on_purchasing_power_parity__PPP__per_capita_GDP',
        'Gross_domestic_product_based_on_purchasing_power_parity__PPP__share_of_world_total',
        'Implied_PPP_conversion_rate',
        'Total_investment',
        'Gross_national_savings',
        'Inflation__average_consumer_prices',
        'Inflation__end_of_period_consumer_prices',
        'Six_month_London_interbank_offered_rate__LIBOR_',
        'Volume_of_imports_of_goods_and_services',
        'Volume_of_Imports_of_goods',
        'Volume_of_exports_of_goods_and_services',
        'Volume_of_exports_of_goods',
        'Unemployment_rate',
        'Employment',
        'Population',
        'General_government_revenue',
        'General_government_total_expenditure',
        'General_government_net_lending_borrowing',
        'General_government_structural_balance',
        'General_government_primary_net_lending_borrowing',
        'General_government_net_debt',
        'General_government_gross_debt',
        'Gross_domestic_product_corresponding_to_fiscal_year__current_prices',
        'Current_account_balance',
        'pop',
    ]
    df_traintest8['cases/day'] = df_traintest8['cases/day'].astype(np.float)
    df_traintest8['fatal/day'] = df_traintest8['fatal/day'].astype(np.float)
    for col in cols:
        df_traintest8[col] = df_traintest8[col].apply(lambda x: to_float(x))
    return df_traintest8
COVID19 Global Forecasting (Week 3)
8,824,396
train_data.drop(['MMRAcquisitionAuctionAveragePrice','MMRAcquisitionAuctionCleanPrice', 'MMRAcquisitionRetailAveragePrice','MMRAcquisitonRetailCleanPrice', 'MMRCurrentAuctionAveragePrice','MMRCurrentAuctionCleanPrice', 'MMRCurrentRetailAveragePrice','MMRCurrentRetailCleanPrice'], inplace=True,axis=1) test_data.drop(['MMRAcquisitionAuctionAveragePrice','MMRAcquisitionAuctionCleanPrice', 'MMRAcquisitionRetailAveragePrice','MMRAcquisitonRetailCleanPrice', 'MMRCurrentAuctionAveragePrice','MMRCurrentAuctionCleanPrice', 'MMRCurrentRetailAveragePrice','MMRCurrentRetailCleanPrice'], inplace=True,axis=1 )<count_values>
def calc_score(y_true, y_pred):
    y_true[y_true < 0] = 0
    score = mean_squared_error(np.log(y_true.clip(0, 1e10) + 1), np.log(y_pred[:] + 1)) ** 0.5
    return score
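calc_score is the RMSLE used for local validation: the square root of the mean squared error of log1p-transformed values, the same transform applied throughout this notebook. A small self-contained check of the computation (toy numbers):

import numpy as np
from sklearn.metrics import mean_squared_error

y_true = np.array([0, 10, 100])
y_pred = np.array([1, 12, 90])
rmsle = mean_squared_error(np.log1p(y_true), np.log1p(y_pred)) ** 0.5
print(round(rmsle, 4))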
COVID19 Global Forecasting (Week 3)
8,824,396
train_data['PurchDate'].value_counts()<drop_column>
SEED = 42
params = {
    'num_leaves': 8,
    'min_data_in_leaf': 5,
    'objective': 'regression',
    'max_depth': 8,
    'learning_rate': 0.02,
    'boosting': 'gbdt',
    'bagging_freq': 5,
    'bagging_fraction': 0.8,
    'feature_fraction': 0.8201,
    'bagging_seed': SEED,
    'reg_alpha': 1,
    'reg_lambda': 4.9847051755586085,
    'random_state': SEED,
    'metric': 'mse',
    'verbosity': 100,
    'min_gain_to_split': 0.02,
    'min_child_weight': 5,
    'num_threads': 6,
}
COVID19 Global Forecasting (Week 3)
8,824,396
train_data.drop('PurchDate', axis = 1, inplace = True) test_data.drop('PurchDate', axis = 1, inplace = True )<drop_column>
col_target = 'fatal/day'
col_var = [
    'Lat', 'Long',
    'cases/day_(1-1)', 'cases/day_(1-7)',
    'fatal/day_(1-7)', 'fatal/day_(8-14)', 'fatal/day_(15-21)',
    'SmokingRate', 'density',
]
col_cat = []

df_train = df_traintest[(pd.isna(df_traintest['ForecastId'])) & (df_traintest['days'] < TRAIN_N)]
df_valid = df_traintest[(pd.isna(df_traintest['ForecastId'])) & (df_traintest['days'] < TRAIN_N)]
X_train = df_train[col_var]
X_valid = df_valid[col_var]
y_train = np.log(df_train[col_target].values.clip(0, 1e10) + 1)
y_valid = np.log(df_valid[col_target].values.clip(0, 1e10) + 1)

train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=col_cat)
valid_data = lgb.Dataset(X_valid, label=y_valid, categorical_feature=col_cat)
num_round = 340
model = lgb.train(params, train_data, num_round,
                  valid_sets=[train_data, valid_data],
                  verbose_eval=100, early_stopping_rounds=150)
best_itr = model.best_iteration
COVID19 Global Forecasting (Week 3)
8,824,396
train_data.drop(['RefId', 'IsBadBuy'], axis = 1 ).dtypes != 'object'<drop_column>
col_target2 = 'cases/day' col_var2 = [ 'Lat', 'Long', 'days_since_10cases', 'cases/day_(1-1)', 'cases/day_(1-7)', 'cases/day_(8-14)', 'cases/day_(15-21)', ]
COVID19 Global Forecasting (Week 3)
8,824,396
not_categorical=train_data.drop(['RefId','IsBadBuy'],axis=1 ).columns[train_data.drop(['RefId','IsBadBuy'],axis=1 ).dtypes!='object']<feature_engineering>
df_train = df_traintest[(pd.isna(df_traintest['ForecastId'])) & (df_traintest['days'] < TRAIN_N)]
df_valid = df_traintest[(pd.isna(df_traintest['ForecastId'])) & (df_traintest['days'] < TRAIN_N)]
X_train = df_train[col_var2]
X_valid = df_valid[col_var2]
y_train = np.log(df_train[col_target2].values.clip(0, 1e10) + 1)
y_valid = np.log(df_valid[col_target2].values.clip(0, 1e10) + 1)

train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=col_cat)
valid_data = lgb.Dataset(X_valid, label=y_valid, categorical_feature=col_cat)
model2 = lgb.train(params, train_data, num_round,
                   valid_sets=[train_data, valid_data],
                   verbose_eval=100, early_stopping_rounds=150)
best_itr2 = model2.best_iteration
COVID19 Global Forecasting (Week 3)
8,824,396
for i in not_categorical: maximum=np.max(train_data[i]) train_data[i]=train_data[i]/maximum maximum_test=np.max(test_data[i]) test_data[i]=test_data[i]/maximum_test<drop_column>
places = AREAS.copy()
COVID19 Global Forecasting (Week 3)
8,824,396
categorical=train_data.drop(['RefId','IsBadBuy'],axis=1 ).columns[train_data.drop(['RefId','IsBadBuy'],axis=1 ).dtypes=='object']<filter>
df_tmp = df_traintest[
    ((df_traintest['days'] < TRAIN_N) & (pd.isna(df_traintest['ForecastId'])))
    | ((TRAIN_N <= df_traintest['days']) & (pd.isna(df_traintest['ForecastId']) == False))
].reset_index(drop=True)
df_tmp = df_tmp.drop([
    'cases/day_(1-1)', 'cases/day_(1-7)', 'cases/day_(8-14)', 'cases/day_(15-21)',
    'fatal/day_(1-1)', 'fatal/day_(1-7)', 'fatal/day_(8-14)', 'fatal/day_(15-21)',
    'days_since_1cases', 'days_since_10cases', 'days_since_100cases',
    'days_since_1fatal', 'days_since_10fatal', 'days_since_100fatal',
], axis=1)
df_traintest9 = []
for i, place in tqdm(enumerate(places[:])):
    df_tmp2 = df_tmp[df_tmp['place_id'] == place].reset_index(drop=True)
    df_tmp2 = do_aggregations(df_tmp2)
    df_traintest9.append(df_tmp2)
df_traintest9 = pd.concat(df_traintest9).reset_index(drop=True)
COVID19 Global Forecasting (Week 3)
8,824,396
train_data[categorical[0]]<categorify>
df_preds = []
for i, place in tqdm(enumerate(places[:])):
    df_interest = copy.deepcopy(df_traintest9[df_traintest9['place_id'] == place].reset_index(drop=True))
    df_interest['cases/day'][(pd.isna(df_interest['ForecastId'])) == False] = -1
    df_interest['fatal/day'][(pd.isna(df_interest['ForecastId'])) == False] = -1
    len_known = (df_interest['days'] < TRAIN_N).sum()
    len_unknown = 30
    for j in range(len_unknown):
        X_valid = df_interest[col_var].iloc[j+len_known]
        X_valid2 = df_interest[col_var2].iloc[j+len_known]
        pred_f = model.predict(X_valid)
        pred_c = model2.predict(X_valid2)
        pred_c = (np.exp(pred_c) - 1).clip(0, 1e10)
        pred_f = (np.exp(pred_f) - 1).clip(0, 1e10)
        df_interest['fatal/day'][j+len_known] = pred_f
        df_interest['cases/day'][j+len_known] = pred_c
        df_interest['Fatalities'][j+len_known] = df_interest['Fatalities'][j+len_known-1] + pred_f
        df_interest['ConfirmedCases'][j+len_known] = df_interest['ConfirmedCases'][j+len_known-1] + pred_c
        df_interest = df_interest.drop([
            'cases/day_(1-1)', 'cases/day_(1-7)', 'cases/day_(8-14)', 'cases/day_(15-21)',
            'fatal/day_(1-1)', 'fatal/day_(1-7)', 'fatal/day_(8-14)', 'fatal/day_(15-21)',
            'days_since_1cases', 'days_since_10cases', 'days_since_100cases',
            'days_since_1fatal', 'days_since_10fatal', 'days_since_100fatal',
        ], axis=1)
        df_interest = do_aggregations(df_interest)
    if (i+1) % 10 == 0:
        print("{:3d}/{} {}, len known: {}, len unknown: {}".format(
            i+1, len(places), place, len_known, len_unknown), df_interest.shape)
    df_interest['fatal_pred'] = np.cumsum(df_interest['fatal/day'].values)
    df_interest['cases_pred'] = np.cumsum(df_interest['cases/day'].values)
    df_preds.append(df_interest)
df_preds = pd.concat(df_preds)
COVID19 Global Forecasting (Week 3)
8,824,396
pd.get_dummies(train_data[categorical[0]] )<categorify>
preds_f_oscii = np.log1p(p_f_oscii.values[:].copy())
preds_c_oscii = np.log1p(p_c_oscii.values[:].copy())
preds_f_oscii.shape, preds_c_oscii.shape
COVID19 Global Forecasting (Week 3)
8,824,396
for i in categorical: dummies=pd.get_dummies(train_data[i]) dummies.columns=str(i)+'_'+dummies.columns train_data=pd.concat([train_data,dummies],axis=1) train_data.drop(i,inplace=True,axis=1) dummies=pd.get_dummies(test_data[i]) dummies.columns=str(i)+'_'+dummies.columns test_data=pd.concat([test_data,dummies],axis=1) test_data.drop(i,inplace=True,axis=1 )<feature_engineering>
if False:
    val_len = train_p_c.values.shape[1] - TRAIN_N
    m1s = []
    m2s = []
    for i in range(val_len):
        d = i + TRAIN_N
        m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c_raw.values[:, d]), preds_c_oscii[:, d-START_PUBLIC]))
        m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, d]), preds_f_oscii[:, d-START_PUBLIC]))
        print(f"{d}: {(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]")
        m1s += [m1]
        m2s += [m2]
    print()
    m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c_raw.values[:, TRAIN_N:TRAIN_N+val_len]).flatten(),
                                    preds_c_oscii[:, TRAIN_N-START_PUBLIC:TRAIN_N-START_PUBLIC+val_len].flatten()))
    m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, TRAIN_N:TRAIN_N+val_len]).flatten(),
                                    preds_f_oscii[:, TRAIN_N-START_PUBLIC:TRAIN_N-START_PUBLIC+val_len].flatten()))
    print(f"{(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]")
COVID19 Global Forecasting (Week 3)
8,824,396
for i in train_data.drop('IsBadBuy',axis=1 ).columns: if i not in test_data.columns: test_data[i]=np.zeros(len(test_data))<feature_engineering>
preds_c_blend = np.log1p(np.average(
    [np.expm1(preds_c_oscii[:, 64:107]), np.expm1(preds_c_cpmp[:]),
     np.expm1(p_c_beluga[:, 64:107]), np.expm1(preds_c[:, 64:107])],
    axis=0, weights=[8, 1, 1, 8]))
preds_f_blend = np.log1p(np.average(
    [np.expm1(preds_f_oscii[:, 64:107]), np.expm1(preds_f_cpmp[:]),
     np.expm1(p_f_beluga[:, 64:107]), np.expm1(preds_f[:, 64:107])],
    axis=0, weights=[8, 1, 1, 8]))
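The blend is a weighted arithmetic mean taken in count space (after expm1) and then mapped back to log1p space; the oscii and base models get weight 8 each, CPMP and Beluga weight 1 each. A minimal sketch of the same operation for one region and one day (numbers illustrative):

import numpy as np

log_preds = np.array([6.2, 5.9, 6.0, 6.3])   # log1p predictions from four models
weights = np.array([8, 1, 1, 8])
blend_counts = np.average(np.expm1(log_preds), weights=weights)
blend_log = np.log1p(blend_counts)
print(blend_counts, blend_log)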
COVID19 Global Forecasting (Week 3)
8,824,396
for i in test_data.columns: if i not in train_data.columns: train_data[i]=np.zeros(len(train_data))<drop_column>
val_len = 13
m1s = []
m2s = []
for i in range(val_len):
    d = i + 64
    m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c_raw.values[:, d]), preds_c_blend[:, i]))
    m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, d]), preds_f_blend[:, i]))
    print(f"{d}: {(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]")
    m1s += [m1]
    m2s += [m2]
print()
m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c_raw.values[:, 64:64+val_len]).flatten(),
                                preds_c_blend[:, :val_len].flatten()))
m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f_raw.values[:, 64:64+val_len]).flatten(),
                                preds_f_blend[:, :val_len].flatten()))
print(f"PUBLIC LB {(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]")
COVID19 Global Forecasting (Week 3)
8,824,396
test_data = test_data[train_data.drop("IsBadBuy",axis=1 ).columns]<prepare_x_and_y>
EU_COUNTRIES = ['Austria', 'Italy', 'Belgium', 'Latvia', 'Bulgaria', 'Lithuania', 'Croatia', 'Luxembourg', 'Cyprus', 'Malta', 'Czechia', 'Netherlands', 'Denmark', 'Poland', 'Estonia', 'Portugal', 'Finland', 'Romania', 'France', 'Slovakia', 'Germany', 'Slovenia', 'Greece', 'Spain', 'Hungary', 'Sweden', 'Ireland']
EUROPE_OTHER = ['Albania', 'Andorra', 'Bosnia and Herzegovina', 'Liechtenstein', 'Monaco', 'Montenegro', 'North Macedonia', 'Norway', 'San Marino', 'Serbia', 'Switzerland', 'Turkey', 'United Kingdom']
AFRICA = ['Algeria', 'Burkina Faso', 'Cameroon', 'Congo(Kinshasa)', "Cote d'Ivoire", 'Egypt', 'Ghana', 'Kenya', 'Madagascar', 'Morocco', 'Nigeria', 'Rwanda', 'Senegal', 'South Africa', 'Togo', 'Tunisia', 'Uganda', 'Zambia']
NORTH_AMERICA = ['US', 'Canada', 'Mexico']
SOUTH_AMERICA = ['Argentina', 'Bolivia', 'Brazil', 'Chile', 'Colombia', 'Ecuador', 'Paraguay', 'Peru', 'Uruguay', 'Venezuela']
MIDDLE_EAST = ['Afghanistan', 'Bahrain', 'Iran', 'Iraq', 'Israel', 'Jordan', 'Kuwait', 'Lebanon', 'Oman', 'Qatar', 'Saudi Arabia', 'United Arab Emirates']
ASIA = ['Bangladesh', 'Brunei', 'Cambodia', 'India', 'Indonesia', 'Japan', 'Kazakhstan', 'Korea, South', 'Kyrgyzstan', 'Malaysia', 'Pakistan', 'Singapore', 'Sri Lanka', 'Taiwan*', 'Thailand', 'Uzbekistan', 'Vietnam']
COVID19 Global Forecasting (Week 3)
8,824,396
X=train_data.drop(['RefId','IsBadBuy'],axis=1) y=train_data['IsBadBuy']<split>
non_china_mask = np.array(['China' not in a for a in AREAS]).astype(bool)
non_china_mask.shape
COVID19 Global Forecasting (Week 3)
8,824,396
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=42 )<import_modules>
preds_c2 = preds_c.copy()
preds_f2 = preds_f.copy()
preds_c2[non_china_mask, 64:107] = preds_c_blend[non_china_mask]
preds_f2[non_china_mask, 64:107] = preds_f_blend[non_china_mask]
COVID19 Global Forecasting (Week 3)
8,824,396
from sklearn.neighbors import KNeighborsClassifier<import_modules>
temp = pd.DataFrame(np.clip(np.exp(preds_c2) - 1, 0, None))
temp['Area'] = AREAS
temp = temp.melt(id_vars='Area', var_name='days', value_name="ConfirmedCases")
test = test_orig.merge(temp, how='left', left_on=['Area', 'days'], right_on=['Area', 'days'])

temp = pd.DataFrame(np.clip(np.exp(preds_f2) - 1, 0, None))
temp['Area'] = AREAS
temp = temp.melt(id_vars='Area', var_name='days', value_name="Fatalities")
test = test.merge(temp, how='left', left_on=['Area', 'days'], right_on=['Area', 'days'])
test.head()
COVID19 Global Forecasting (Week 3)
8,824,396
from sklearn.neighbors import KNeighborsClassifier<import_modules>
test.to_csv("submission.csv", index=False, columns=["ForecastId", "ConfirmedCases", "Fatalities"] )
COVID19 Global Forecasting (Week 3)
8,824,396
from sklearn.neighbors import KNeighborsClassifier<train_model>
test.days.nunique()
COVID19 Global Forecasting (Week 3)
8,824,396
KNN = KNeighborsClassifier(n_neighbors = 11, n_jobs = -1) KNN.fit(X_train,y_train )<compute_test_metric>
for i, rec in test.groupby('Area').last().sort_values("ConfirmedCases", ascending=False).iterrows():
    print(f"{rec['ConfirmedCases']:10.1f} {rec['Fatalities']:10.1f} {rec['Country_Region']}, {rec['Province_State']}")
COVID19 Global Forecasting (Week 3)
8,822,284
KNN.score(X_test,y_test )<predict_on_test>
import pandas as pd
from pathlib import Path
from pandas_profiling import ProfileReport
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import LabelEncoder
import datetime
from sklearn.model_selection import GridSearchCV
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
COVID19 Global Forecasting (Week 3)
8,822,284
predict=KNN.predict(test_data.drop('RefId',axis=1))<prepare_output>
base_dir = '/kaggle/input/covid19-global-forecasting-week-3/'
train_file = 'train.csv'
test_file = 'test.csv'
submit_file = 'submission.csv'
COVID19 Global Forecasting (Week 3)
8,822,284
Submission=pd.DataFrame(data=predict,columns=['IsBadBuy']) Submission.head()<prepare_output>
train_df = pd.read_csv(base_dir + 'train.csv')
test_df = pd.read_csv(base_dir + 'test.csv')
submission = pd.read_csv(base_dir + 'submission.csv')
COVID19 Global Forecasting (Week 3)
8,822,284
Submission['RefId']=test_data['RefId'] Submission.set_index('RefId',inplace=True )<save_to_csv>
train = train_df.copy()
test = test_df.copy()
COVID19 Global Forecasting (Week 3)
8,822,284
Submission.head() Submission.to_csv('Submission.csv' )<load_from_csv>
train['Province_State'][train.Province_State.isna()] = train['Country_Region'][train.Province_State.isna()]
test['Province_State'][test.Province_State.isna()] = test['Country_Region'][test.Province_State.isna()]
COVID19 Global Forecasting (Week 3)
8,822,284
train_data=pd.read_csv('/kaggle/input/DontGetKicked/training.csv') train_data.head()<load_from_csv>
def decomposedate(df):
    df['Date'] = pd.to_datetime(df['Date'], infer_datetime_format=True)
    df['Day_of_Week'] = df['Date'].dt.dayofweek
    df['Month'] = df['Date'].dt.month
    df['Day'] = df['Date'].dt.day
    df['Day_of_Year'] = df['Date'].dt.dayofyear
    df['Week_of_Year'] = df['Date'].dt.weekofyear
    df['Quarter'] = df['Date'].dt.quarter
    df.drop('Date', 1, inplace=True)
    return df
COVID19 Global Forecasting (Week 3)
8,822,284
test_data=pd.read_csv('/kaggle/input/DontGetKicked/test.csv') test_data.head()<count_missing_values>
train = decomposedate(train)
test = decomposedate(test)
COVID19 Global Forecasting (Week 3)
8,822,284
train_data.isnull().sum()<count_missing_values>
submission=pd.DataFrame(columns=submission.columns )
COVID19 Global Forecasting (Week 3)
8,822,284
test_data.isnull().sum()<count_values>
l1 = LabelEncoder()
l2 = LabelEncoder()
COVID19 Global Forecasting (Week 3)
8,822,284
train_data['IsBadBuy'].value_counts()<count_values>
train['Country_Region'] = l1.fit_transform(train['Country_Region'])
train['Province_State'] = l2.fit_transform(train['Province_State'])
COVID19 Global Forecasting (Week 3)
8,822,284
train_data['Model'].value_counts()<drop_column>
test['Country_Region'] = l1.fit_transform(test['Country_Region'])
test['Province_State'] = l2.fit_transform(test['Province_State'])
COVID19 Global Forecasting (Week 3)
8,822,284
train_data.drop('Model',axis=1,inplace=True) test_data.drop("Model",axis=1,inplace=True )<count_values>
y1 = train[['ConfirmedCases']]
y2 = train[['Fatalities']]
test_id = test['ForecastId'].values.tolist()
COVID19 Global Forecasting (Week 3)
8,822,284
train_data['Trim'].value_counts()<drop_column>
train.pop('Id')
test.pop('ForecastId')
COVID19 Global Forecasting (Week 3)
8,822,284
train_data.drop('Trim',inplace=True,axis=1) test_data.drop('Trim',inplace=True,axis=1 )<count_values>
features_cols=['Province_State','Country_Region','Day_of_Week','Month','Day','Day_of_Year','Week_of_Year','Quarter']
COVID19 Global Forecasting (Week 3)
8,822,284
train_data['SubModel'].value_counts()<drop_column>
train_x = train[features_cols]
test_x = test[features_cols]
COVID19 Global Forecasting (Week 3)
8,822,284
train_data.drop('SubModel',inplace=True,axis=1) test_data.drop('SubModel',inplace=True,axis=1 )<count_values>
model_1=DecisionTreeClassifier() model_2=DecisionTreeClassifier() model_1.fit(train_x,y1) model_2.fit(train_x,y2 )
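Since ConfirmedCases and Fatalities are cumulative counts, DecisionTreeClassifier treats every distinct count as a separate class; a regression tree is the more conventional choice for such targets. A minimal alternative sketch, shown only as an illustration and not the kernel's actual code:
from sklearn.tree import DecisionTreeRegressor
reg_1 = DecisionTreeRegressor()  # regression counterpart of the classifier above
reg_2 = DecisionTreeRegressor()
reg_1.fit(train_x, y1)
reg_2.fit(train_x, y2)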
COVID19 Global Forecasting (Week 3)
8,822,284
train_data['Color'].value_counts()<count_values>
test_y1=model_1.predict(test_x) test_y2=model_2.predict(test_x) submission=pd.DataFrame(columns=submission.columns) submission['ForecastId']=test_id submission['ConfirmedCases']=test_y1 submission['Fatalities']=test_y2
COVID19 Global Forecasting (Week 3)
8,822,284
test_data['Color'].value_counts()<data_type_conversions>
submission.to_csv('submission.csv',index=False )
COVID19 Global Forecasting (Week 3)
8,761,948
train_data['Color'].fillna(value='Color_Unknown',inplace=True) test_data['Color'].fillna(value='Color_Unknown',inplace=True )<count_missing_values>
df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv') df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv') df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/submission.csv' )
COVID19 Global Forecasting (Week 3)
8,761,948
print("Number of null values in Color column "+str(train_data['Color'].isnull().sum())) print("Number of null values in Color column "+str(test_data['Color'].isnull().sum()))<count_values>
df_train['Date'] = pd.to_datetime(df_train['Date'], infer_datetime_format=True) df_test['Date'] = pd.to_datetime(df_test['Date'], infer_datetime_format=True) df_train.loc[:, 'Date'] = df_train.Date.dt.strftime("%m%d") df_train["Date"] = df_train["Date"].astype(int) df_test.loc[:, 'Date'] = df_test.Date.dt.strftime("%m%d") df_test["Date"] = df_test["Date"].astype(int )
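The "%m%d" trick encodes each date as an integer such as 325 for 2020-03-25 (pd.Timestamp('2020-03-25').strftime("%m%d") gives '0325', and int('0325') is 325); within the January–May window covered by this competition the encoding stays monotonically increasing, so the XGBoost models below still see the dates in chronological order.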
COVID19 Global Forecasting (Week 3)
8,761,948
train_data['Transmission'].value_counts()<count_values>
df_train['ConfirmedCases'] = df_train['ConfirmedCases'].apply(int) df_train['Fatalities'] = df_train['Fatalities'].apply(int )
COVID19 Global Forecasting (Week 3)
8,761,948
test_data['Transmission'].value_counts()<filter>
df_train['Province_State'] = df_train['Province_State'].fillna('unknown') df_test['Province_State'] = df_test['Province_State'].fillna('unknown' )
COVID19 Global Forecasting (Week 3)
8,761,948
train_data[train_data['Transmission']=='Manual']<rename_columns>
import xgboost as xgb from xgboost import XGBRegressor
COVID19 Global Forecasting (Week 3)
8,761,948
train_data['Transmission'].replace("Manual","MANUAL",inplace=True )<count_values>
features = ['Date'] submission = pd.DataFrame(columns=['ForecastId', 'ConfirmedCases', 'Fatalities']) for i in tqdm(df_train.Country_Region.unique()): c_train = df_train[df_train['Country_Region'] == i] c_test = df_test[df_test['Country_Region'] == i] for j in c_train.Province_State.unique() : p_train = c_train[c_train['Province_State'] == j] p_test = c_test[c_test['Province_State'] == j] x_train = p_train[features] y_train_cc = p_train['ConfirmedCases'] y_train_ft = p_train['Fatalities'] model = xgb.XGBRegressor(n_estimators=1000) model.fit(x_train, y_train_cc) y_pred_cc = model.predict(p_test[features]) model.fit(x_train, y_train_ft) y_pred_ft = model.predict(p_test[features]) p_test['ConfirmedCases'] = y_pred_cc p_test['Fatalities'] = y_pred_ft submission = pd.concat([submission, p_test[['ForecastId', 'ConfirmedCases', 'Fatalities']]], axis=0 )
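One caveat with the loop above: p_train and p_test are filtered slices of the original frames, so assigning the prediction columns on p_test triggers pandas' SettingWithCopyWarning (the values still land in the local slice that is concatenated into the submission). A hedged variant of that one line, purely illustrative:
p_test = c_test[c_test['Province_State'] == j].copy()  # explicit copy avoids the warning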
COVID19 Global Forecasting (Week 3)
8,761,948
train_data['Transmission'].value_counts()<data_type_conversions>
submission.to_csv('submission.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,761,948
train_data['Transmission'].fillna(value="Transmission_unk",inplace=True) test_data['Transmission'].fillna(value="Transmission_unk",inplace=True )<count_values>
submission.to_csv('submission.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,807,084
train_data['WheelTypeID'].value_counts()<drop_column>
%matplotlib inline def sigmoid_sqrt_func(x, a, b, c, d, e): return c + d /(1.0 + np.exp(-a*x+b)) + e*x**0.5 def sigmoid_linear_func(x, a, b, c, d, e): return c + d /(1.0 + np.exp(-a*x+b)) + e*0.1*x def sigmoid_quad_func(x, a, b, c, d, e, f): return c + d /(1.0 + np.exp(-a*x+b)) + e*0.1*x + f*0.001*x*x def sigmoid_func(x, a, b, c, d): return c + d /(1.0 + np.exp(-a*x+b)) def exp_func(x, a, b, c, d): return c + d * np.exp(a*x+b) def func_fitting(y, func=sigmoid_func, x_scale=50.0, y_scale=10000.0, start_pred=8, AN=0, MAXN=60, PN=15, b=5): x = range(len(y)) x_real = np.array(x)/x_scale y_real = np.array(y)/y_scale x_train = x_real y_train = y_real def next_day_pred(AN, BN): x_train = x_real[AN:BN] y_train = y_real[AN:BN] popt, pcov = curve_fit(func, x_train, y_train, method='trf', maxfev=20000, p0=(1, 0, 0, 1), bounds=[(-b, -np.inf, -np.inf, -b),(b, np.inf, np.inf, b)], ) x_pred = np.array(range(MAXN)) /x_scale y_pred = func(x_pred, *popt) return x_pred, y_pred NP = start_pred y_pred = [np.nan]*NP y_pred_list = [] for BN in range(NP, len(y_real)) : x_pred, y_pred_ = next_day_pred(BN-PN, BN) y_pred.append(y_pred_[BN]) y_pred_list.append(y_pred_) for BN in range(len(y_real), len(y_pred_)) : y_pred.append(y_pred_[BN]) y_pred = np.array(y_pred) y_pred_list = np.array(y_pred_list) y_pred_std = np.std(y_pred_list[-2:], axis=0) return x_real*x_scale, y_real*y_scale, x_train*x_scale, y_train*y_scale, \ x_pred*x_scale, y_pred*y_scale, y_pred_std*y_scale def draw_figure(start_date, title, x_real, y_real, x_train, y_train, x_pred, y_pred, y_pred_std): def to_date(idx): idx = np.round(idx) return datetime.datetime.strptime(start_date, '%m/%d/%Y' ).date() + datetime.timedelta(days=idx) fig, ax1 = plt.subplots(figsize=[14, 7]) plot1 = ax1.plot(list(map(to_date, x_real)) , y_real, 'gs',label='original') plot2 = ax1.plot(list(map(to_date, x_pred)) , y_pred, 'r',label='predict') plot3 = ax1.fill_between(list(map(to_date, x_pred)) , np.maximum(0,(y_pred-y_pred_std)) , (y_pred+y_pred_std), alpha=0.2) plot0 = ax1.plot(list(map(to_date, x_train)) , y_train, 'y.',label='history') ax2=ax1.twinx() ax2.plot(list(map(to_date, x_real)) [1:],(y_real[1:]-y_real[:-1]), '-s',label='original add') ax2.plot(list(map(to_date, x_pred)) [1:],(y_pred[1:]-y_pred[:-1]), '-',label='pred add') plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) plt.gca().xaxis.set_major_locator(mdates.DayLocator()) plt.gcf().autofmt_xdate() plt.xlabel('x') plt.ylabel('y') fig.legend(loc=2) plt.title(title) plt.savefig('{}.pdf'.format(title)) plt.show() date = list(map(to_date, x_pred)) pred = y_pred real = y_real for i in range(len(pred)) : if i < len(real): print('{}\t{:.0f}\t{:.0f}\t{:.3f}'.format(date[i], real[i], pred[i], np.abs(pred[i]-real[i])/real[i]*100)) else: print('{}\t-\t{:.0f}'.format(date[i], pred[i])) return pred
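For reference, sigmoid_func above is a plain four-parameter logistic c + d/(1 + exp(-a*x + b)); the fill_between call originally carried an explicit edgecolor whose value was lost in extraction, so the shaded band is drawn with default colors here. A tiny sanity check of the curve with made-up parameter values (assumes numpy is imported as np, as elsewhere in this kernel):
xs = np.linspace(0.0, 2.0, 5)
print(sigmoid_func(xs, a=5.0, b=5.0, c=0.0, d=1.0))  # climbs from about 0.007 towards d=1.0 as the curve saturates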
COVID19 Global Forecasting (Week 3)
8,807,084
train_data.drop('WheelTypeID',inplace=True,axis=1) test_data.drop('WheelTypeID',inplace=True,axis=1 )<count_values>
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv') test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv') pred_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/submission.csv') train_data = train_data.fillna(value='NULL') test_data = test_data.fillna(value='NULL' )
COVID19 Global Forecasting (Week 3)
8,807,084
train_data['WheelType'].value_counts()<count_values>
train_date_list = train_data.iloc[:, 3].unique() print(len(train_date_list)) print(train_date_list) test_date_list = test_data.iloc[:, 3].unique() print(len(test_date_list)) print(test_date_list) len(train_data.groupby(['Province_State', 'Country_Region'])) len(test_data.groupby(['Province_State', 'Country_Region']))
COVID19 Global Forecasting (Week 3)
8,807,084
test_data['WheelType'].value_counts()<data_type_conversions>
start_date = '01/22/2020' start_pred = 74 start_submit = 64 len_pred = 30 test_date_list = test_data.iloc[:, 3].unique() test_data_filled = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv') test_data_filled = test_data_filled.fillna(value='NULL') test_data_filled['ConfirmedCases'] = pred_data['ConfirmedCases'] test_data_filled['Fatalities'] = pred_data['Fatalities'] for idx,(k, v)in enumerate(train_data.groupby(['Province_State', 'Country_Region'])) : print(idx, k, end=' ') b_cc, b_f = 5, 3 if k[1] == 'China': b_cc = 1 b_f = 1 if k[0] == 'Hong Kong': b_cc = 3 elif k[1] == 'Italy': b_cc = 10 b_f = 3 elif k[1] == 'US': b_cc = 5 b_f = 3 elif k[1] == 'Spain': b_cc = 4 b_f = 3 hist_num = v.loc[:,'ConfirmedCases'].tolist() ret = func_fitting(hist_num, y_scale=max(1000, np.max(hist_num)) , b=b_cc, start_pred=start_pred, PN=10, MAXN=len(hist_num)+len_pred) ret = list(ret) real_cc = np.round(np.array(ret[1])) pred_cc = np.round(np.array(ret[5])) for i in range(len(real_cc)) : pred_cc[i] = real_cc[i] pred_cc = pred_cc[start_submit:] print(pred_cc) hist_num = v.loc[:,'Fatalities'].tolist() ret = func_fitting(hist_num, y_scale=max(1000, np.max(hist_num)) , b=b_f, start_pred=start_pred, PN=10, MAXN=len(hist_num)+len_pred) ret = list(ret) real_f = np.round(np.array(ret[1])) pred_f = np.round(np.array(ret[5])) for i in range(len(real_f)) : pred_f[i] = real_f[i] pred_f = pred_f[start_submit:] print(pred_f) for i in range(14, len(pred_cc)) : if pred_f[i] < 20 and pred_cc[i] > 200 and k[1] != 'China': pred_f[i] = pred_cc[i] * 0.01 * np.log10(pred_cc[i]) print(pred_f) print(pred_cc[-1], pred_f[-1]) for i in range(len(pred_cc)) : index =(test_data_filled['Province_State'] == k[0])& \ (test_data_filled['Country_Region'] == k[1])& \ (test_data_filled['Date'] == test_date_list[i]) test_data_filled.loc[index, 'ConfirmedCases'] = pred_cc[i] test_data_filled.loc[index, 'Fatalities'] = pred_f[i]
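The post-processing rule above floors the fatality curve at pred_cc * 0.01 * log10(pred_cc) for non-China regions once predicted cases exceed 200 while predicted deaths stay below 20; for example, at 10,000 predicted cases the floor is 10000 * 0.01 * log10(10000) = 100 * 4 = 400 fatalities.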
COVID19 Global Forecasting (Week 3)
8,807,084
train_data['WheelType'].fillna(value='WheelType_unk',inplace=True) test_data['WheelType'].fillna(value='WheelType_unk',inplace=True )<count_values>
submission = test_data_filled.loc[:,['ForecastId', 'ConfirmedCases', 'Fatalities']]
COVID19 Global Forecasting (Week 3)
8,807,084
<count_values><EOS>
submission.to_csv("submission.csv", index=False) submission.head(500 )
COVID19 Global Forecasting (Week 3)
8,824,741
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<count_values>
pd.options.display.max_rows = 500 pd.options.display.max_columns = 500 %matplotlib inline def rmse(yt, yp): return np.sqrt(np.mean(( yt-yp)**2)) class CovidModel: def __init__(self): pass def predict_first_day(self, date): return None def predict_next_day(self, yesterday_pred_df): return None class CovidModelAhmet(CovidModel): def preprocess(self, df, meta_df): df["Date"] = pd.to_datetime(df['Date']) df = df.merge(meta_df, on=self.loc_group, how="left") df["lat"] =(df["lat"] // 30 ).astype(np.float32 ).fillna(0) df["lon"] =(df["lon"] // 60 ).astype(np.float32 ).fillna(0) df["population"] = np.log1p(df["population"] ).fillna(-1) df["area"] = np.log1p(df["area"] ).fillna(-1) for col in self.loc_group: df[col].fillna("", inplace=True) df['day'] = df.Date.dt.dayofyear df['geo'] = ['_'.join(x)for x in zip(df['Country_Region'], df['Province_State'])] return df def get_model(self): def nn_block(input_layer, size, dropout_rate, activation): out_layer = KL.Dense(size, activation=None )(input_layer) out_layer = KL.Activation(activation )(out_layer) out_layer = KL.Dropout(dropout_rate )(out_layer) return out_layer ts_inp = KL.Input(shape=(len(self.ts_features),)) global_inp = KL.Input(shape=(len(self.global_features),)) inp = KL.concatenate([global_inp, ts_inp]) hidden_layer = nn_block(inp, 64, 0.0, "relu") gate_layer = nn_block(hidden_layer, 32, 0.0, "sigmoid") hidden_layer = nn_block(hidden_layer, 32, 0.0, "relu") hidden_layer = KL.multiply([hidden_layer, gate_layer]) out = KL.Dense(len(self.TARGETS), activation="linear" )(hidden_layer) model = tf.keras.models.Model(inputs=[global_inp, ts_inp], outputs=out) return model def get_input(self, df): return [df[self.global_features], df[self.ts_features]] def train_models(self, df, num_models=20, save=False): def custom_loss(y_true, y_pred): return K.sum(K.sqrt(K.sum(K.square(y_true - y_pred), axis=0, keepdims=True)))/len(self.TARGETS) models = [] for i in range(num_models): model = self.get_model() model.compile(loss=custom_loss, optimizer=Nadam(lr=1e-4)) hist = model.fit(self.get_input(df), df[self.TARGETS], batch_size=2048, epochs=200, verbose=0, shuffle=True) if save: model.save_weights("model{}.h5".format(i)) models.append(model) return models def predict_one(self, df): pred = np.zeros(( df.shape[0], 2)) for model in self.models: pred += model.predict(self.get_input(df)) /len(self.models) pred = np.maximum(pred, df[self.prev_targets].values) pred[:, 0] = np.log1p(np.expm1(pred[:, 0])+ 0.1) pred[:, 1] = np.log1p(np.expm1(pred[:, 1])+ 0.01) return np.clip(pred, None, 15) def __init__(self): df = pd.read_csv(".. /input/covid19-global-forecasting-week-3/train.csv") sub_df = pd.read_csv(".. /input/covid19-global-forecasting-week-3/test.csv") meta_df = pd.read_csv(".. 
/input/covid19-forecasting-metadata/region_metadata.csv") self.loc_group = ["Province_State", "Country_Region"] df = self.preprocess(df, meta_df) sub_df = self.preprocess(sub_df, meta_df) df = df.merge(sub_df[["ForecastId", "Date", "geo"]], how="left", on=["Date", "geo"]) df = df.append(sub_df[sub_df["Date"] > df["Date"].max() ], sort=False) df["day"] = df["day"] - df["day"].min() self.TARGETS = ["ConfirmedCases", "Fatalities"] self.prev_targets = ['prev_ConfirmedCases_1', 'prev_Fatalities_1'] for col in self.TARGETS: df[col] = np.log1p(df[col]) self.NUM_SHIFT = 7 self.global_features = ["lat", "lon", "population", "area"] self.ts_features = [] for s in range(1, self.NUM_SHIFT+1): for col in self.TARGETS: df["prev_{}_{}".format(col, s)] = df.groupby(self.loc_group)[col].shift(s) self.ts_features.append("prev_{}_{}".format(col, s)) self.df = df[df["Date"] >= df["Date"].min() + timedelta(days=self.NUM_SHIFT)].copy() def predict_first_day(self, day): self.models = self.train_models(self.df[self.df["day"] < day]) temp_df = self.df.loc[self.df["day"] == day].copy() y_pred = self.predict_one(temp_df) self.y_prevs = [None]*self.NUM_SHIFT for i in range(1, self.NUM_SHIFT): self.y_prevs[i] = temp_df[['prev_ConfirmedCases_{}'.format(i), 'prev_Fatalities_{}'.format(i)]].values temp_df[self.TARGETS] = y_pred self.day = day return temp_df[["geo", "day"] + self.TARGETS] def predict_next_day(self, yesterday_pred_df): self.day = self.day + 1 temp_df = self.df.loc[self.df["day"] == self.day].copy() yesterday_pred_df = temp_df[["geo"]].merge(yesterday_pred_df[["geo"] + self.TARGETS], on="geo", how="left") temp_df[self.prev_targets] = yesterday_pred_df[self.TARGETS].values for i in range(2, self.NUM_SHIFT+1): temp_df[['prev_ConfirmedCases_{}'.format(i), 'prev_Fatalities_{}'.format(i)]] = self.y_prevs[i-1] y_pred, self.y_prevs = self.predict_one(temp_df), [None, temp_df[self.prev_targets].values] + self.y_prevs[1:-1] temp_df[self.TARGETS] = y_pred return temp_df[["geo", "day"] + self.TARGETS]
COVID19 Global Forecasting (Week 3)
8,824,741
test_data['Nationality'].value_counts()<data_type_conversions>
class CovidModelCPMP(CovidModel): def __init__(self): train = pd.read_csv('.. /input/covid19-global-forecasting-week-3/train.csv') train['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) train['day'] = train.Date.dt.dayofyear train['geo'] = ['_'.join(x)for x in zip(train['Country_Region'], train['Province_State'])] test = pd.read_csv('.. /input/covid19-global-forecasting-week-3/test.csv') test['Province_State'].fillna('', inplace=True) test['Date'] = pd.to_datetime(test['Date']) test['day'] = test.Date.dt.dayofyear test['geo'] = ['_'.join(x)for x in zip(test['Country_Region'], test['Province_State'])] day_min = train['day'].min() train['day'] -= day_min test['day'] -= day_min self.min_test_val_day = test.day.min() self.max_test_val_day = train.day.max() self.max_test_day = test.day.max() train['ForecastId'] = -1 test['Id'] = -1 test['ConfirmedCases'] = 0 test['Fatalities'] = 0 data = pd.concat([train, test[test.day > self.max_test_val_day][train.columns] ] ).reset_index(drop=True) self.data = data self.train = train self.test = test self.dates = data[data['geo'] == 'France_'].Date.values region_meta = pd.read_csv('.. /input/covid19-forecasting-metadata/region_metadata.csv') region_meta['Province_State'].fillna('', inplace=True) region_meta['geo'] = ['_'.join(x)for x in zip(region_meta['Country_Region'], region_meta['Province_State'],)] population = data[['geo']].merge(region_meta, how='left', on='geo' ).fillna(0) population = population.groupby('geo')[['population']].first() population['population'] = np.log1p(population['population']) self.population = population[['population']].values continents = region_meta['continent'] continents = pd.factorize(continents)[0] continents_ids_base = continents.reshape(( -1, 1)) ohe = OneHotEncoder(sparse=False) self.continents_ids_base = ohe.fit_transform(continents_ids_base) self.geo_data = data.pivot(index='geo', columns='day', values='ForecastId') self.num_geo = self.geo_data.shape[0] self.ConfirmedCases = data.pivot(index='geo', columns='day', values='ConfirmedCases') self.Fatalities = data.pivot(index='geo', columns='day', values='Fatalities') self.cases = np.log1p(self.ConfirmedCases.values) self.deaths = np.log1p(self.Fatalities.values) self.case_threshold = 30 self.c_case = 10 self.t_case = 100 self.c_death = 10 self.t_death = 5 time_cases = self.c_case *(self.cases >= np.log1p(self.t_case)) time_cases = np.cumsum(time_cases, axis=1) self.time_cases = 1 * np.log1p(time_cases) time_deaths = self.c_death *(self.deaths >= np.log1p(self.t_death)) time_deaths = np.cumsum(time_deaths, axis=1) self.time_deaths = 1 *np.log1p(time_deaths) countries = [g.split('_')[0] for g in self.geo_data.index] countries = pd.factorize(countries)[0] country_ids_base = countries.reshape(( -1, 1)) ohe = OneHotEncoder(sparse=False) self.country_ids_base = 0.2 * ohe.fit_transform(country_ids_base) self.start_lag_death = 13 self.end_lag_death = 5 self.num_train = 5 self.num_lag_case = 14 self.lag_period = max(self.start_lag_death, self.num_lag_case) self.df = train[['geo', 'day', 'ConfirmedCases', 'Fatalities']].copy() self.df.ConfirmedCases = np.log1p(self.df.ConfirmedCases) self.df.Fatalities = np.log1p(self.df.Fatalities) def get_country_ids(self): countries = [g.split('_')[0] for g in self.geo_data.index] countries = pd.factorize(countries)[0] countries[self.cases[:, :self.last_train+1].max(axis=1)< np.log1p(self.case_threshold)] = -1 countries = pd.factorize(countries)[0] country_ids_base = countries.reshape(( -1, 1)) ohe = 
OneHotEncoder(sparse=False) country_ids_base = 0.2 * ohe.fit_transform(country_ids_base) return country_ids_base def val_score(self, true, pred): return np.sqrt(mean_squared_error(true.ravel() , pred.ravel())) def get_dataset(self, start_pred, num_train): days = np.arange(start_pred - num_train + 1, start_pred + 1) lag_cases = np.vstack([self.cases[:, d - self.lag_period : d] for d in days]) lag_deaths = np.vstack([self.deaths[:, d - self.lag_period : d] for d in days]) target_cases = np.vstack([self.cases[:, d : d + 1] for d in days]) target_deaths = np.vstack([self.deaths[:, d : d + 1] for d in days]) continents_ids = np.vstack([self.continents_ids_base for d in days]) country_ids = np.vstack([self.country_ids_base for d in days]) population = np.vstack([self.population for d in days]) time_case = np.vstack([self.time_cases[:, d - 1: d ] for d in days]) time_death = np.vstack([self.time_deaths[:, d - 1 : d ] for d in days]) return(lag_cases, lag_deaths, target_cases, target_deaths, continents_ids, country_ids, population, time_case, time_death, days) def update_time(self, time_death, time_case, pred_death, pred_case): new_time_death = np.expm1(time_death)+ self.c_death *(pred_death >= np.log1p(self.t_death)) new_time_death = 1 *np.log1p(new_time_death) new_time_case = np.expm1(time_case)+ self.c_case *(pred_case >= np.log1p(self.t_case)) new_time_case = 1 *np.log1p(new_time_case) return new_time_death, new_time_case def update_valid_dataset(self, dataset, pred_death, pred_case, pred_day): (lag_cases, lag_deaths, target_cases, target_deaths, continents_ids, country_ids, population, time_case, time_death, days)= dataset if pred_day != days[-1]: print('error', pred_day, days[-1]) return None day = days[-1] + 1 new_lag_cases = np.hstack([lag_cases[:, 1:], pred_case]) new_lag_deaths = np.hstack([lag_deaths[:, 1:], pred_death]) new_target_cases = self.cases[:, day:day+1] new_target_deaths = self.deaths[:, day:day+1] new_continents_ids = continents_ids new_country_ids = country_ids new_population = population new_time_death, new_time_case = self.update_time(time_death, time_case, pred_death, pred_case) new_days = 1 + days return(new_lag_cases, new_lag_deaths, new_target_cases, new_target_deaths, new_continents_ids, new_country_ids, new_population, new_time_case, new_time_death, new_days) def fit_eval(self, dataset, fit): (lag_cases, lag_deaths, target_cases, target_deaths, continents_ids, country_ids, population, time_case, time_death, days)= dataset X_death = np.hstack([lag_cases[:, -self.start_lag_death:-self.end_lag_death], lag_deaths[:, -self.num_lag_case:], country_ids, continents_ids, population, time_case, time_death, ]) y_death = target_deaths y_death_prev = lag_deaths[:, -1:] if fit: self.lr_death.fit(X_death, y_death) y_pred_death = self.lr_death.predict(X_death) y_pred_death = np.maximum(y_pred_death, y_death_prev) X_case = np.hstack([lag_cases[:, -self.num_lag_case:], country_ids, continents_ids, population, time_case, ]) y_case = target_cases y_case_prev = lag_cases[:, -1:] if fit: self.lr_case.fit(X_case, y_case) y_pred_case = self.lr_case.predict(X_case) y_pred_case = np.maximum(y_pred_case, y_case_prev) return y_pred_death, y_pred_case def get_pred_df(self, val_death_preds, val_case_preds,): pred_deaths = self.Fatalities.iloc[:, self.start_val:self.start_val+self.num_val].copy() pred_deaths.iloc[:, :] = val_death_preds pred_deaths = pred_deaths.stack().reset_index() pred_deaths.columns = ['geo', 'day', 'Fatalities'] pred_deaths pred_cases = self.ConfirmedCases.iloc[:, 
self.start_val:self.start_val+self.num_val].copy() pred_cases.iloc[:, :] = val_case_preds pred_cases = pred_cases.stack().reset_index() pred_cases.columns = ['geo', 'day', 'ConfirmedCases'] pred_cases sub = self.data[['geo', 'day']] sub = sub[sub.day == self.start_val] sub = sub.merge(pred_cases, how='left', on=['geo', 'day']) sub = sub.merge(pred_deaths, how='left', on=['geo', 'day']) sub = sub[(sub.day >= self.start_val)&(sub.day <= self.end_val)] return sub def predict_first_day(self, day): self.start_val = day self.end_val = day + 1 self.num_val = self.end_val - self.start_val + 1 score = True self.last_train = self.start_val - 1 print(self.dates[self.last_train], self.start_val, self.num_val) self.country_ids_base = self.get_country_ids() train_data = self.get_dataset(self.last_train, self.num_train) alpha = 3 self.lr_death = Ridge(alpha=alpha, fit_intercept=True) self.lr_case = Ridge(alpha=alpha, fit_intercept=True) _ = self.fit_eval(train_data, fit=True) self.valid_data = self.get_dataset(self.start_val, 1) val_death_preds, val_case_preds = self.fit_eval(self.valid_data, fit=False) df = self.get_pred_df(val_death_preds, val_case_preds) return df def predict_next_day(self, yesterday_pred_df): yesterday_pred_df = yesterday_pred_df.sort_values(by='geo' ).reset_index(drop=True) if yesterday_pred_df.day.nunique() != 1: print('error', yesterday_pred_df.day.unique()) return None pred_death = yesterday_pred_df[['Fatalities']].values pred_case = yesterday_pred_df[['ConfirmedCases']].values pred_day = yesterday_pred_df.day.unique() [0] new_valid_data = self.update_valid_dataset(self.valid_data, pred_death, pred_case, pred_day) if len(new_valid_data)> 0: self.valid_data = new_valid_data self.start_val = pred_day + 1 self.end_val = pred_day + 2 val_death_preds, val_case_preds = self.fit_eval(self.valid_data, fit=False) df = self.get_pred_df(val_death_preds, val_case_preds) return df
COVID19 Global Forecasting (Week 3)
8,824,741
train_data['Nationality'].fillna(value='Nationality_unk',inplace=True) test_data['Nationality'].fillna(value='Nationality_unk',inplace=True )<count_values>
class CovidModelGIBA(CovidModel): def __init__(self, lag=1, seed=1): self.lag = lag self.seed = seed print('Lag:', lag, 'Seed:', seed) train = pd.read_csv('.. /input/covid19-global-forecasting-week-3/train.csv') train['Date'] = pd.to_datetime(train['Date']) self.maxdate = str(train['Date'].max())[:10] self.testdate = str(train['Date'].max() + pd.Timedelta(days=1)) [:10] print('Last Date in Train:',self.maxdate, 'Test first Date:',self.testdate) train['Province_State'].fillna('', inplace=True) train['day'] = train.Date.dt.dayofyear self.day_min = train['day'].min() train['day'] -= self.day_min train['geo'] = ['_'.join(x)for x in zip(train['Country_Region'], train['Province_State'])] test = pd.read_csv('.. /input/covid19-global-forecasting-week-3/test.csv') test['Date'] = pd.to_datetime(test['Date']) test['Province_State'].fillna('', inplace=True) test['day'] = test.Date.dt.dayofyear test['day'] -= self.day_min test['geo'] = ['_'.join(x)for x in zip(test['Country_Region'], test['Province_State'])] test['Id'] = -1 test['ConfirmedCases'] = 0 test['Fatalities'] = 0 self.trainmaxday = train['day'].max() self.testday1 = train['day'].max() + 1 self.testdayN = test['day'].max() publictest = test.loc[ test.Date > train.Date.max() ].copy() train = pd.concat(( train, publictest), sort=False) train.sort_values(['Country_Region','Province_State','Date'], inplace=True) train = train.reset_index(drop=True) train['ForecastId'] = pd.merge(train, test, on=['Country_Region','Province_State','Date'], how='left')['ForecastId_y'].values train['cid'] = train['Country_Region'] + '_' + train['Province_State'] train['log0'] = np.log1p(train['ConfirmedCases']) train['log1'] = np.log1p(train['Fatalities']) train = train.loc[(train.log0 > 0)|(train.ForecastId.notnull())|(train.Date >= '2020-03-17')].copy() train = train.reset_index(drop=True) train['days_since_1case'] = train.groupby('cid')['Id'].cumcount() dt = pd.read_csv('.. 
/input/covid19-lockdown-dates-by-country/countryLockdowndates.csv') dt.columns = ['Country_Region','Province_State','Date','Type','Reference'] dt = dt.loc[ dt.Date == dt.Date ] dt['Province_State'] = dt['Province_State'].fillna('') dt['Date'] = pd.to_datetime(dt['Date']) dt['Date'] = dt['Date'] + pd.Timedelta(days=8) dt['Type'] = pd.factorize(dt['Type'])[0] dt['cid'] = dt['Country_Region'] + '_' + dt['Province_State'] del dt['Reference'], dt['Country_Region'], dt['Province_State'] train = pd.merge(train, dt, on=['cid','Date'], how='left') train['Type'] = train.groupby('cid')['Type'].fillna(method='ffill') train['target0'] = np.log1p(train['ConfirmedCases']) train['target1'] = np.log1p(train['Fatalities']) self.train = train.copy() def create_features(self, df, valid_day): df['lag0_1'] = df.groupby('cid')['target0'].shift(self.lag) df['lag1_1'] = df.groupby('cid')['target1'].shift(self.lag) df['lag0_1'] = df.groupby('cid')['lag0_1'].fillna(method='bfill') df['lag1_1'] = df.groupby('cid')['lag1_1'].fillna(method='bfill') df['m0'] = df.groupby('cid')['lag0_1'].rolling(2 ).mean().values df['m1'] = df.groupby('cid')['lag0_1'].rolling(3 ).mean().values df['m2'] = df.groupby('cid')['lag0_1'].rolling(4 ).mean().values df['m3'] = df.groupby('cid')['lag0_1'].rolling(5 ).mean().values df['m4'] = df.groupby('cid')['lag0_1'].rolling(7 ).mean().values df['m5'] = df.groupby('cid')['lag0_1'].rolling(10 ).mean().values df['m6'] = df.groupby('cid')['lag0_1'].rolling(12 ).mean().values df['m7'] = df.groupby('cid')['lag0_1'].rolling(16 ).mean().values df['m8'] = df.groupby('cid')['lag0_1'].rolling(20 ).mean().values df['n0'] = df.groupby('cid')['lag1_1'].rolling(2 ).mean().values df['n1'] = df.groupby('cid')['lag1_1'].rolling(3 ).mean().values df['n2'] = df.groupby('cid')['lag1_1'].rolling(4 ).mean().values df['n3'] = df.groupby('cid')['lag1_1'].rolling(5 ).mean().values df['n4'] = df.groupby('cid')['lag1_1'].rolling(7 ).mean().values df['n5'] = df.groupby('cid')['lag1_1'].rolling(10 ).mean().values df['n6'] = df.groupby('cid')['lag1_1'].rolling(12 ).mean().values df['n7'] = df.groupby('cid')['lag1_1'].rolling(16 ).mean().values df['n8'] = df.groupby('cid')['lag1_1'].rolling(20 ).mean().values df['m0'] = df.groupby('cid')['m0'].fillna(method='bfill') df['m1'] = df.groupby('cid')['m1'].fillna(method='bfill') df['m2'] = df.groupby('cid')['m2'].fillna(method='bfill') df['m3'] = df.groupby('cid')['m3'].fillna(method='bfill') df['m4'] = df.groupby('cid')['m4'].fillna(method='bfill') df['m5'] = df.groupby('cid')['m5'].fillna(method='bfill') df['m6'] = df.groupby('cid')['m6'].fillna(method='bfill') df['m7'] = df.groupby('cid')['m7'].fillna(method='bfill') df['m8'] = df.groupby('cid')['m8'].fillna(method='bfill') df['n0'] = df.groupby('cid')['n0'].fillna(method='bfill') df['n1'] = df.groupby('cid')['n1'].fillna(method='bfill') df['n2'] = df.groupby('cid')['n2'].fillna(method='bfill') df['n3'] = df.groupby('cid')['n3'].fillna(method='bfill') df['n4'] = df.groupby('cid')['n4'].fillna(method='bfill') df['n5'] = df.groupby('cid')['n5'].fillna(method='bfill') df['n6'] = df.groupby('cid')['n6'].fillna(method='bfill') df['n7'] = df.groupby('cid')['n7'].fillna(method='bfill') df['n8'] = df.groupby('cid')['n8'].fillna(method='bfill') df['flag_China'] = 1*(df['Country_Region'] == 'China') df['flag_US'] = 1*(df['Country_Region'] == 'US') df['flag_Kosovo_'] = 1*(df['cid'] == 'Kosovo_') df['flag_Korea'] = 1*(df['cid'] == 'Korea, South_') df['flag_Nepal_'] = 1*(df['cid'] == 'Nepal_') df['flag_Holy See_'] = 1*(df['cid'] == 'Holy 
See_') df['flag_Suriname_'] = 1*(df['cid'] == 'Suriname_') df['flag_Ghana_'] = 1*(df['cid'] == 'Ghana_') df['flag_Togo_'] = 1*(df['cid'] == 'Togo_') df['flag_Malaysia_'] = 1*(df['cid'] == 'Malaysia_') df['flag_US_Rhode'] = 1*(df['cid'] == 'US_Rhode Island') df['flag_Bolivia_'] = 1*(df['cid'] == 'Bolivia_') df['flag_China_Tib'] = 1*(df['cid'] == 'China_Tibet') df['flag_Bahrain_'] = 1*(df['cid'] == 'Bahrain_') df['flag_Honduras_'] = 1*(df['cid'] == 'Honduras_') df['flag_Bangladesh']= 1*(df['cid'] == 'Bangladesh_') df['flag_Paraguay_'] = 1*(df['cid'] == 'Paraguay_') tr = df.loc[ df.day < valid_day ].copy() vl = df.loc[ df.day == valid_day ].copy() tr = tr.loc[ tr.lag0_1 > 0 ].copy() maptarget0 = tr.groupby('cid')['target0'].agg(log0_max='max' ).reset_index() maptarget1 = tr.groupby('cid')['target1'].agg(log1_max='max' ).reset_index() vl['log0_max'] = pd.merge(vl, maptarget0, on='cid' , how='left')['log0_max'].values vl['log1_max'] = pd.merge(vl, maptarget1, on='cid' , how='left')['log1_max'].values vl['log0_max'] = vl['log0_max'].fillna(0) vl['log1_max'] = vl['log1_max'].fillna(0) return tr, vl def train_models(self, valid_day = 10): train = self.train.copy() train.loc[(train.cid=='China_Guizhou')&(train.Date=='2020-03-17'), 'target0' ] = np.log1p(146) train.loc[(train.cid=='Guyana_')&(train.Date>='2020-03-22')&(train.Date<='2020-03-30'), 'target0' ] = np.log1p(12) train.loc[(train.cid=='US_Virgin Islands')&(train.Date>='2020-03-29')&(train.Date<='2020-03-29'), 'target0' ] = np.log1p(24) train.loc[(train.cid=='US_Virgin Islands')&(train.Date>='2020-03-30')&(train.Date<='2020-03-30'), 'target0' ] = np.log1p(27) train.loc[(train.cid=='Iceland_')&(train.Date>='2020-03-15')&(train.Date<='2020-03-15'), 'target1' ] = np.log1p(0) train.loc[(train.cid=='Kazakhstan_')&(train.Date>='2020-03-20')&(train.Date<='2020-03-20'), 'target1' ] = np.log1p(0) train.loc[(train.cid=='Serbia_')&(train.Date>='2020-03-26')&(train.Date<='2020-03-26'), 'target1' ] = np.log1p(5) train.loc[(train.cid=='Serbia_')&(train.Date>='2020-03-27')&(train.Date<='2020-03-27'), 'target1' ] = np.log1p(6) train.loc[(train.cid=='Slovakia_')&(train.Date>='2020-03-22')&(train.Date<='2020-03-31'), 'target1' ] = np.log1p(1) train.loc[(train.cid=='US_Hawaii')&(train.Date>='2020-03-25')&(train.Date<='2020-03-31'), 'target1' ] = np.log1p(1) param = { 'subsample': 0.9850, 'colsample_bytree': 0.850, 'max_depth': 6, 'gamma': 0.000, 'learning_rate': 0.010, 'min_child_weight': 5.00, 'reg_alpha': 0.000, 'reg_lambda': 0.400, 'silent':1, 'objective':'reg:squarederror', 'nthread': 12, 'seed': self.seed } tr, vl = self.create_features(train.copy() , valid_day) features = [f for f in tr.columns if f not in [ 'Id','ConfirmedCases','Fatalities','log0','log1','target0','target1','ypred0','ypred1','Province_State','Country_Region','Date','ForecastId','cid','geo','day', 'GDP_region','TRUE POPULATION','pct_in_largest_city',' TFR ',' Avg_age ','latitude','longitude','abs_latitude','temperature', 'humidity', 'Personality_pdi','Personality_idv','Personality_mas','Personality_uai','Personality_ltowvs','Personality_assertive','personality_perform','personality_agreeableness', 'murder','High_rises','max_high_rises','AIR_CITIES','AIR_AVG','continent_gdp_pc','continent_happiness','continent_generosity','continent_corruption','continent_Life_expectancy' ] ] self.features = features nrounds0 = 630 nrounds1 = 630 dtrain = xgb.DMatrix(tr[features], tr['target0']) param['seed'] = self.seed self.model0 = xgb.train(param, dtrain, nrounds0, verbose_eval=0) param['seed'] = 
self.seed+1 self.model1 = xgb.train(param, dtrain, nrounds0, verbose_eval=0) dtrain = xgb.DMatrix(tr[features], tr['target1']) param['seed'] = self.seed self.model2 = xgb.train(param, dtrain, nrounds1, verbose_eval=0) param['seed'] = self.seed+1 self.model3 = xgb.train(param, dtrain, nrounds1, verbose_eval=0) self.vl = vl return 1 def predict_first_day(self, day): self.day = day self.train_models(day) dvalid = xgb.DMatrix(self.vl[self.features]) ypred0 =(self.model0.predict(dvalid)+ self.model1.predict(dvalid)) / 2 ypred1 =(self.model2.predict(dvalid)+ self.model3.predict(dvalid)) / 2 self.vl['ypred0'] = ypred0 self.vl['ypred1'] = ypred1 self.vl.loc[ self.vl.ypred0<self.vl.log0_max, 'ypred0'] = self.vl.loc[ self.vl.ypred0<self.vl.log0_max, 'log0_max'] self.vl.loc[ self.vl.ypred1<self.vl.log1_max, 'ypred1'] = self.vl.loc[ self.vl.ypred1<self.vl.log1_max, 'log1_max'] VALID = self.vl[["geo", "day", 'ypred0', 'ypred1']].copy() VALID.columns = ["geo", "day", 'ConfirmedCases', 'Fatalities'] return VALID.reset_index(drop=True) def predict_next_day(self, yesterday): self.day += 1 feats = ['geo','day'] self.train[ 'ypred0' ] = pd.merge(self.train[feats], yesterday[feats+['ConfirmedCases']], on=feats, how='left')['ConfirmedCases'].values self.train.loc[ self.train.ypred0.notnull() , 'target0'] = self.train.loc[ self.train.ypred0.notnull() , 'ypred0'] self.train[ 'ypred1' ] = pd.merge(self.train[feats], yesterday[feats+['Fatalities']], on=feats, how='left')['Fatalities'].values self.train.loc[ self.train.ypred1.notnull() , 'target1'] = self.train.loc[ self.train.ypred1.notnull() , 'ypred1'] del self.train['ypred0'], self.train['ypred1'] tr, vl = self.create_features(self.train.copy() , self.day) dvalid = xgb.DMatrix(vl[self.features]) ypred0 =(self.model0.predict(dvalid)+ self.model1.predict(dvalid)) /2 ypred1 =(self.model2.predict(dvalid)+ self.model3.predict(dvalid)) /2 vl['ypred0'] = ypred0 vl['ypred1'] = ypred1 vl.loc[ vl.ypred0<vl.log0_max, 'ypred0'] = vl.loc[ vl.ypred0<vl.log0_max, 'log0_max'] vl.loc[ vl.ypred1<vl.log1_max, 'ypred1'] = vl.loc[ vl.ypred1<vl.log1_max, 'log1_max'] self.vl = vl VALID = vl[["geo", "day", 'ypred0', 'ypred1']].copy() VALID.columns = ["geo", "day", 'ConfirmedCases', 'Fatalities'] return VALID.reset_index(drop=True )
COVID19 Global Forecasting (Week 3)
8,824,741
train_data['Size'].value_counts()<count_values>
TARGETS = ["ConfirmedCases", "Fatalities"] def rmse(y_true, y_pred): return np.sqrt(mean_squared_error(y_true, y_pred)) df = pd.read_csv(".. /input/covid19-global-forecasting-week-3/train.csv") df[TARGETS] = np.log1p(df[TARGETS].values) sub_df = pd.read_csv(".. /input/covid19-global-forecasting-week-3/test.csv") def preprocess(df): for col in ["Country_Region", "Province_State"]: df[col].fillna("", inplace=True) df["Date"] = pd.to_datetime(df['Date']) df['day'] = df.Date.dt.dayofyear df['geo'] = ['_'.join(x)for x in zip(df['Country_Region'], df['Province_State'])] return df df = preprocess(df) sub_df = preprocess(sub_df) sub_df["day"] -= df["day"].min() df["day"] -= df["day"].min()
COVID19 Global Forecasting (Week 3)
8,824,741
test_data['Size'].value_counts()<data_type_conversions>
TEST_FIRST = sub_df[sub_df["Date"] > df["Date"].max() ]["Date"].min() print(TEST_FIRST) TEST_DAYS =(sub_df["Date"].max() - TEST_FIRST ).days + 1 TEST_FIRST =(TEST_FIRST - df["Date"].min() ).days print(TEST_FIRST, TEST_DAYS) def get_blend(pred_dfs, weights, verbose=True): if verbose: for n1, n2 in [("cpmp", "giba"),("cpmp", "ahmet"),("giba", "ahmet")]: for t in TARGETS: print(n1, n2, t, np.round(rmse(pred_dfs[n1][t], pred_dfs[n2][t]), 4)) blend_df = pred_dfs["cpmp"].copy() blend_df[TARGETS] = 0 for name, pred_df in pred_dfs.items() : blend_df[TARGETS] += weights[name]*pred_df[TARGETS].values return blend_df cov_models = {"ahmet": CovidModelAhmet() , "cpmp": CovidModelCPMP() , 'giba': CovidModelGIBA() } weights = {"ahmet": 0.35, "cpmp": 0.30, "giba": 0.35} pred_dfs = {name: cm.predict_first_day(TEST_FIRST ).sort_values("geo")for name, cm in cov_models.items() } blend_df = get_blend(pred_dfs, weights) eval_df = blend_df.copy() for d in range(1, TEST_DAYS): pred_dfs = {name: cm.predict_next_day(blend_df ).sort_values("geo")for name, cm in cov_models.items() } blend_df = get_blend(pred_dfs, weights) eval_df = eval_df.append(blend_df) print(d, eval_df.shape, flush=True )
COVID19 Global Forecasting (Week 3)
8,824,741
train_data['Size'].fillna(value='Size_unk',inplace=True) test_data['Size'].fillna(value="Size_unk",inplace=True )<count_values>
print(sub_df.shape) sub_df = sub_df.merge(df.append(eval_df, sort=False), on=["geo", "day"], how="left") print(sub_df.shape) print(sub_df[TARGETS].isnull().mean() )
COVID19 Global Forecasting (Week 3)
8,824,741
<count_values><EOS>
sub_df[TARGETS] = np.expm1(sub_df[TARGETS].values) sub_df.to_csv("submission.csv", index=False, columns=["ForecastId"] + TARGETS) sub_df.head()
COVID19 Global Forecasting (Week 3)
8,827,057
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<data_type_conversions>
pd.options.display.max_rows = 500 pd.options.display.max_columns = 500 %matplotlib inline def rmse(yt, yp): return np.sqrt(np.mean(( yt-yp)**2)) class CovidModel: def __init__(self): pass def predict_first_day(self, date): return None def predict_next_day(self, yesterday_pred_df): return None class CovidModelAhmet(CovidModel): def preprocess(self, df, meta_df): df["Date"] = pd.to_datetime(df['Date']) df = df.merge(meta_df, on=self.loc_group, how="left") df["lat"] =(df["lat"] // 30 ).astype(np.float32 ).fillna(0) df["lon"] =(df["lon"] // 60 ).astype(np.float32 ).fillna(0) df["population"] = np.log1p(df["population"] ).fillna(-1) df["area"] = np.log1p(df["area"] ).fillna(-1) for col in self.loc_group: df[col].fillna("", inplace=True) df['day'] = df.Date.dt.dayofyear df['geo'] = ['_'.join(x)for x in zip(df['Country_Region'], df['Province_State'])] return df def get_model(self): def nn_block(input_layer, size, dropout_rate, activation): out_layer = KL.Dense(size, activation=None )(input_layer) out_layer = KL.Activation(activation )(out_layer) out_layer = KL.Dropout(dropout_rate )(out_layer) return out_layer ts_inp = KL.Input(shape=(len(self.ts_features),)) global_inp = KL.Input(shape=(len(self.global_features),)) inp = KL.concatenate([global_inp, ts_inp]) hidden_layer = nn_block(inp, 64, 0.0, "relu") gate_layer = nn_block(hidden_layer, 32, 0.0, "sigmoid") hidden_layer = nn_block(hidden_layer, 32, 0.0, "relu") hidden_layer = KL.multiply([hidden_layer, gate_layer]) out = KL.Dense(len(self.TARGETS), activation="linear" )(hidden_layer) model = tf.keras.models.Model(inputs=[global_inp, ts_inp], outputs=out) return model def get_input(self, df): return [df[self.global_features], df[self.ts_features]] def train_models(self, df, num_models=20, save=False): def custom_loss(y_true, y_pred): return K.sum(K.sqrt(K.sum(K.square(y_true - y_pred), axis=0, keepdims=True)))/len(self.TARGETS) models = [] for i in range(num_models): model = self.get_model() model.compile(loss=custom_loss, optimizer=Nadam(lr=1e-4)) hist = model.fit(self.get_input(df), df[self.TARGETS], batch_size=2048, epochs=200, verbose=0, shuffle=True) if save: model.save_weights("model{}.h5".format(i)) models.append(model) return models def predict_one(self, df): pred = np.zeros(( df.shape[0], 2)) for model in self.models: pred += model.predict(self.get_input(df)) /len(self.models) pred = np.maximum(pred, df[self.prev_targets].values) pred[:, 0] = np.log1p(np.expm1(pred[:, 0])+ 0.1) pred[:, 1] = np.log1p(np.expm1(pred[:, 1])+ 0.01) return np.clip(pred, None, 15) def __init__(self): df = pd.read_csv(".. /input/covid19-global-forecasting-week-3/train.csv") sub_df = pd.read_csv(".. /input/covid19-global-forecasting-week-3/test.csv") meta_df = pd.read_csv(".. 
/input/covid19-forecasting-metadata/region_metadata.csv") self.loc_group = ["Province_State", "Country_Region"] df = self.preprocess(df, meta_df) sub_df = self.preprocess(sub_df, meta_df) df = df.merge(sub_df[["ForecastId", "Date", "geo"]], how="left", on=["Date", "geo"]) df = df.append(sub_df[sub_df["Date"] > df["Date"].max() ], sort=False) df["day"] = df["day"] - df["day"].min() self.TARGETS = ["ConfirmedCases", "Fatalities"] self.prev_targets = ['prev_ConfirmedCases_1', 'prev_Fatalities_1'] for col in self.TARGETS: df[col] = np.log1p(df[col]) self.NUM_SHIFT = 7 self.global_features = ["lat", "lon", "population", "area"] self.ts_features = [] for s in range(1, self.NUM_SHIFT+1): for col in self.TARGETS: df["prev_{}_{}".format(col, s)] = df.groupby(self.loc_group)[col].shift(s) self.ts_features.append("prev_{}_{}".format(col, s)) self.df = df[df["Date"] >= df["Date"].min() + timedelta(days=self.NUM_SHIFT)].copy() def predict_first_day(self, day): self.models = self.train_models(self.df[self.df["day"] < day]) temp_df = self.df.loc[self.df["day"] == day].copy() y_pred = self.predict_one(temp_df) self.y_prevs = [None]*self.NUM_SHIFT for i in range(1, self.NUM_SHIFT): self.y_prevs[i] = temp_df[['prev_ConfirmedCases_{}'.format(i), 'prev_Fatalities_{}'.format(i)]].values temp_df[self.TARGETS] = y_pred self.day = day return temp_df[["geo", "day"] + self.TARGETS] def predict_next_day(self, yesterday_pred_df): self.day = self.day + 1 temp_df = self.df.loc[self.df["day"] == self.day].copy() yesterday_pred_df = temp_df[["geo"]].merge(yesterday_pred_df[["geo"] + self.TARGETS], on="geo", how="left") temp_df[self.prev_targets] = yesterday_pred_df[self.TARGETS].values for i in range(2, self.NUM_SHIFT+1): temp_df[['prev_ConfirmedCases_{}'.format(i), 'prev_Fatalities_{}'.format(i)]] = self.y_prevs[i-1] y_pred, self.y_prevs = self.predict_one(temp_df), [None, temp_df[self.prev_targets].values] + self.y_prevs[1:-1] temp_df[self.TARGETS] = y_pred return temp_df[["geo", "day"] + self.TARGETS]
COVID19 Global Forecasting (Week 3)
8,827,057
train_data['TopThreeAmericanName'].fillna(value='Top_unk',inplace=True) test_data['TopThreeAmericanName'].fillna(value='Top_unk',inplace=True )<count_values>
class CovidModelCPMP(CovidModel): def __init__(self): train = pd.read_csv('.. /input/covid19-global-forecasting-week-3/train.csv') train['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) train['day'] = train.Date.dt.dayofyear train['geo'] = ['_'.join(x)for x in zip(train['Country_Region'], train['Province_State'])] test = pd.read_csv('.. /input/covid19-global-forecasting-week-3/test.csv') test['Province_State'].fillna('', inplace=True) test['Date'] = pd.to_datetime(test['Date']) test['day'] = test.Date.dt.dayofyear test['geo'] = ['_'.join(x)for x in zip(test['Country_Region'], test['Province_State'])] day_min = train['day'].min() train['day'] -= day_min test['day'] -= day_min self.min_test_val_day = test.day.min() self.max_test_val_day = train.day.max() self.max_test_day = test.day.max() train['ForecastId'] = -1 test['Id'] = -1 test['ConfirmedCases'] = 0 test['Fatalities'] = 0 data = pd.concat([train, test[test.day > self.max_test_val_day][train.columns] ] ).reset_index(drop=True) self.data = data self.train = train self.test = test self.dates = data[data['geo'] == 'France_'].Date.values region_meta = pd.read_csv('.. /input/covid19-forecasting-metadata/region_metadata.csv') region_meta['Province_State'].fillna('', inplace=True) region_meta['geo'] = ['_'.join(x)for x in zip(region_meta['Country_Region'], region_meta['Province_State'],)] population = data[['geo']].merge(region_meta, how='left', on='geo' ).fillna(0) population = population.groupby('geo')[['population']].first() population['population'] = np.log1p(population['population']) self.population = population[['population']].values continents = region_meta['continent'] continents = pd.factorize(continents)[0] continents_ids_base = continents.reshape(( -1, 1)) ohe = OneHotEncoder(sparse=False) self.continents_ids_base = ohe.fit_transform(continents_ids_base) self.geo_data = data.pivot(index='geo', columns='day', values='ForecastId') self.num_geo = self.geo_data.shape[0] self.ConfirmedCases = data.pivot(index='geo', columns='day', values='ConfirmedCases') self.Fatalities = data.pivot(index='geo', columns='day', values='Fatalities') self.cases = np.log1p(self.ConfirmedCases.values) self.deaths = np.log1p(self.Fatalities.values) self.case_threshold = 30 self.c_case = 10 self.t_case = 100 self.c_death = 10 self.t_death = 5 time_cases = self.c_case *(self.cases >= np.log1p(self.t_case)) time_cases = np.cumsum(time_cases, axis=1) self.time_cases = 1 * np.log1p(time_cases) time_deaths = self.c_death *(self.deaths >= np.log1p(self.t_death)) time_deaths = np.cumsum(time_deaths, axis=1) self.time_deaths = 1 *np.log1p(time_deaths) countries = [g.split('_')[0] for g in self.geo_data.index] countries = pd.factorize(countries)[0] country_ids_base = countries.reshape(( -1, 1)) ohe = OneHotEncoder(sparse=False) self.country_ids_base = 0.2 * ohe.fit_transform(country_ids_base) self.start_lag_death = 13 self.end_lag_death = 5 self.num_train = 5 self.num_lag_case = 14 self.lag_period = max(self.start_lag_death, self.num_lag_case) self.df = train[['geo', 'day', 'ConfirmedCases', 'Fatalities']].copy() self.df.ConfirmedCases = np.log1p(self.df.ConfirmedCases) self.df.Fatalities = np.log1p(self.df.Fatalities) def get_country_ids(self): countries = [g.split('_')[0] for g in self.geo_data.index] countries = pd.factorize(countries)[0] countries[self.cases[:, :self.last_train+1].max(axis=1)< np.log1p(self.case_threshold)] = -1 countries = pd.factorize(countries)[0] country_ids_base = countries.reshape(( -1, 1)) ohe = 
OneHotEncoder(sparse=False) country_ids_base = 0.2 * ohe.fit_transform(country_ids_base) return country_ids_base def val_score(self, true, pred): return np.sqrt(mean_squared_error(true.ravel() , pred.ravel())) def get_dataset(self, start_pred, num_train): days = np.arange(start_pred - num_train + 1, start_pred + 1) lag_cases = np.vstack([self.cases[:, d - self.lag_period : d] for d in days]) lag_deaths = np.vstack([self.deaths[:, d - self.lag_period : d] for d in days]) target_cases = np.vstack([self.cases[:, d : d + 1] for d in days]) target_deaths = np.vstack([self.deaths[:, d : d + 1] for d in days]) continents_ids = np.vstack([self.continents_ids_base for d in days]) country_ids = np.vstack([self.country_ids_base for d in days]) population = np.vstack([self.population for d in days]) time_case = np.vstack([self.time_cases[:, d - 1: d ] for d in days]) time_death = np.vstack([self.time_deaths[:, d - 1 : d ] for d in days]) return(lag_cases, lag_deaths, target_cases, target_deaths, continents_ids, country_ids, population, time_case, time_death, days) def update_time(self, time_death, time_case, pred_death, pred_case): new_time_death = np.expm1(time_death)+ self.c_death *(pred_death >= np.log1p(self.t_death)) new_time_death = 1 *np.log1p(new_time_death) new_time_case = np.expm1(time_case)+ self.c_case *(pred_case >= np.log1p(self.t_case)) new_time_case = 1 *np.log1p(new_time_case) return new_time_death, new_time_case def update_valid_dataset(self, dataset, pred_death, pred_case, pred_day): (lag_cases, lag_deaths, target_cases, target_deaths, continents_ids, country_ids, population, time_case, time_death, days)= dataset if pred_day != days[-1]: print('error', pred_day, days[-1]) return None day = days[-1] + 1 new_lag_cases = np.hstack([lag_cases[:, 1:], pred_case]) new_lag_deaths = np.hstack([lag_deaths[:, 1:], pred_death]) new_target_cases = self.cases[:, day:day+1] new_target_deaths = self.deaths[:, day:day+1] new_continents_ids = continents_ids new_country_ids = country_ids new_population = population new_time_death, new_time_case = self.update_time(time_death, time_case, pred_death, pred_case) new_days = 1 + days return(new_lag_cases, new_lag_deaths, new_target_cases, new_target_deaths, new_continents_ids, new_country_ids, new_population, new_time_case, new_time_death, new_days) def fit_eval(self, dataset, fit): (lag_cases, lag_deaths, target_cases, target_deaths, continents_ids, country_ids, population, time_case, time_death, days)= dataset X_death = np.hstack([lag_cases[:, -self.start_lag_death:-self.end_lag_death], lag_deaths[:, -self.num_lag_case:], country_ids, continents_ids, population, time_case, time_death, ]) y_death = target_deaths y_death_prev = lag_deaths[:, -1:] if fit: self.lr_death.fit(X_death, y_death) y_pred_death = self.lr_death.predict(X_death) y_pred_death = np.maximum(y_pred_death, y_death_prev) X_case = np.hstack([lag_cases[:, -self.num_lag_case:], country_ids, continents_ids, population, time_case, ]) y_case = target_cases y_case_prev = lag_cases[:, -1:] if fit: self.lr_case.fit(X_case, y_case) y_pred_case = self.lr_case.predict(X_case) y_pred_case = np.maximum(y_pred_case, y_case_prev) return y_pred_death, y_pred_case def get_pred_df(self, val_death_preds, val_case_preds,): pred_deaths = self.Fatalities.iloc[:, self.start_val:self.start_val+self.num_val].copy() pred_deaths.iloc[:, :] = val_death_preds pred_deaths = pred_deaths.stack().reset_index() pred_deaths.columns = ['geo', 'day', 'Fatalities'] pred_deaths pred_cases = self.ConfirmedCases.iloc[:, 
self.start_val:self.start_val+self.num_val].copy() pred_cases.iloc[:, :] = val_case_preds pred_cases = pred_cases.stack().reset_index() pred_cases.columns = ['geo', 'day', 'ConfirmedCases'] pred_cases sub = self.data[['geo', 'day']] sub = sub[sub.day == self.start_val] sub = sub.merge(pred_cases, how='left', on=['geo', 'day']) sub = sub.merge(pred_deaths, how='left', on=['geo', 'day']) sub = sub[(sub.day >= self.start_val)&(sub.day <= self.end_val)] return sub def predict_first_day(self, day): self.start_val = day self.end_val = day + 1 self.num_val = self.end_val - self.start_val + 1 score = True self.last_train = self.start_val - 1 print(self.dates[self.last_train], self.start_val, self.num_val) self.country_ids_base = self.get_country_ids() train_data = self.get_dataset(self.last_train, self.num_train) alpha = 3 self.lr_death = Ridge(alpha=alpha, fit_intercept=True) self.lr_case = Ridge(alpha=alpha, fit_intercept=True) _ = self.fit_eval(train_data, fit=True) self.valid_data = self.get_dataset(self.start_val, 1) val_death_preds, val_case_preds = self.fit_eval(self.valid_data, fit=False) df = self.get_pred_df(val_death_preds, val_case_preds) return df def predict_next_day(self, yesterday_pred_df): yesterday_pred_df = yesterday_pred_df.sort_values(by='geo' ).reset_index(drop=True) if yesterday_pred_df.day.nunique() != 1: print('error', yesterday_pred_df.day.unique()) return None pred_death = yesterday_pred_df[['Fatalities']].values pred_case = yesterday_pred_df[['ConfirmedCases']].values pred_day = yesterday_pred_df.day.unique() [0] new_valid_data = self.update_valid_dataset(self.valid_data, pred_death, pred_case, pred_day) if len(new_valid_data)> 0: self.valid_data = new_valid_data self.start_val = pred_day + 1 self.end_val = pred_day + 2 val_death_preds, val_case_preds = self.fit_eval(self.valid_data, fit=False) df = self.get_pred_df(val_death_preds, val_case_preds) return df
COVID19 Global Forecasting (Week 3)
8,827,057
train_data['PRIMEUNIT'].value_counts()<count_values>
class CovidModel: def __init__(self): pass def predict_first_day(self, date): return None def predict_next_day(self, yesterday_pred_df): return None class CovidModelGIBA(CovidModel): def __init__(self, lag=1, seed=1): self.lag = lag self.seed = seed print('Lag:', lag, 'Seed:', seed) train = pd.read_csv('.. /input/covid19-global-forecasting-week-3/train.csv') train['Date'] = pd.to_datetime(train['Date']) self.maxdate = str(train['Date'].max())[:10] self.testdate = str(train['Date'].max() + pd.Timedelta(days=1)) [:10] print('Last Date in Train:',self.maxdate, 'Test first Date:',self.testdate) train['Province_State'].fillna('', inplace=True) train['day'] = train.Date.dt.dayofyear self.day_min = train['day'].min() train['day'] -= self.day_min train['geo'] = ['_'.join(x)for x in zip(train['Country_Region'], train['Province_State'])] test = pd.read_csv('.. /input/covid19-global-forecasting-week-3/test.csv') test['Date'] = pd.to_datetime(test['Date']) test['Province_State'].fillna('', inplace=True) test['day'] = test.Date.dt.dayofyear test['day'] -= self.day_min test['geo'] = ['_'.join(x)for x in zip(test['Country_Region'], test['Province_State'])] test['Id'] = -1 test['ConfirmedCases'] = 0 test['Fatalities'] = 0 self.trainmaxday = train['day'].max() self.testday1 = train['day'].max() + 1 self.testdayN = test['day'].max() publictest = test.loc[ test.Date > train.Date.max() ].copy() train = pd.concat(( train, publictest), sort=False) train.sort_values(['Country_Region','Province_State','Date'], inplace=True) train = train.reset_index(drop=True) train['ForecastId'] = pd.merge(train, test, on=['Country_Region','Province_State','Date'], how='left')['ForecastId_y'].values train['cid'] = train['Country_Region'] + '_' + train['Province_State'] train['log0'] = np.log1p(train['ConfirmedCases']) train['log1'] = np.log1p(train['Fatalities']) train = train.loc[(train.log0 > 0)|(train.ForecastId.notnull())|(train.Date >= '2020-03-17')].copy() train = train.reset_index(drop=True) train['days_since_1case'] = train.groupby('cid')['Id'].cumcount() dt = pd.read_csv('.. 
/input/covid19-lockdown-dates-by-country/countryLockdowndates.csv') dt.columns = ['Country_Region','Province_State','Date','Type','Reference'] dt = dt.loc[ dt.Date == dt.Date ] dt['Province_State'] = dt['Province_State'].fillna('') dt['Date'] = pd.to_datetime(dt['Date']) dt['Date'] = dt['Date'] + pd.Timedelta(days=8) dt['Type'] = pd.factorize(dt['Type'])[0] dt['cid'] = dt['Country_Region'] + '_' + dt['Province_State'] del dt['Reference'], dt['Country_Region'], dt['Province_State'] train = pd.merge(train, dt, on=['cid','Date'], how='left') train['Type'] = train.groupby('cid')['Type'].fillna(method='ffill') train['target0'] = np.log1p(train['ConfirmedCases']) train['target1'] = np.log1p(train['Fatalities']) self.train = train.copy() def create_features(self, df, valid_day): df = df.loc[ df.day>=(valid_day-50)].copy() df['lag0_1'] = df.groupby('cid')['target0'].shift(self.lag) df['lag0_1'] = df.groupby('cid')['lag0_1'].fillna(method='bfill') df['lag0_8'] = df.groupby('cid')['target0'].shift(8) df['lag0_8'] = df.groupby('cid')['lag0_8'].fillna(method='bfill') df['lag1_1'] = df.groupby('cid')['target1'].shift(self.lag) df['lag1_1'] = df.groupby('cid')['lag1_1'].fillna(method='bfill') df['m0'] = df.groupby('cid')['lag0_1'].rolling(2 ).mean().values df['m1'] = df.groupby('cid')['lag0_1'].rolling(3 ).mean().values df['m2'] = df.groupby('cid')['lag0_1'].rolling(4 ).mean().values df['m3'] = df.groupby('cid')['lag0_1'].rolling(5 ).mean().values df['m4'] = df.groupby('cid')['lag0_1'].rolling(7 ).mean().values df['m5'] = df.groupby('cid')['lag0_1'].rolling(10 ).mean().values df['m6'] = df.groupby('cid')['lag0_1'].rolling(12 ).mean().values df['m7'] = df.groupby('cid')['lag0_1'].rolling(16 ).mean().values df['m8'] = df.groupby('cid')['lag0_1'].rolling(20 ).mean().values df['m9'] = df.groupby('cid')['lag0_1'].rolling(25 ).mean().values df['n0'] = df.groupby('cid')['lag1_1'].rolling(2 ).mean().values df['n1'] = df.groupby('cid')['lag1_1'].rolling(3 ).mean().values df['n2'] = df.groupby('cid')['lag1_1'].rolling(4 ).mean().values df['n3'] = df.groupby('cid')['lag1_1'].rolling(5 ).mean().values df['n4'] = df.groupby('cid')['lag1_1'].rolling(7 ).mean().values df['n5'] = df.groupby('cid')['lag1_1'].rolling(10 ).mean().values df['n6'] = df.groupby('cid')['lag1_1'].rolling(12 ).mean().values df['n7'] = df.groupby('cid')['lag1_1'].rolling(16 ).mean().values df['n8'] = df.groupby('cid')['lag1_1'].rolling(20 ).mean().values df['m0'] = df.groupby('cid')['m0'].fillna(method='bfill') df['m1'] = df.groupby('cid')['m1'].fillna(method='bfill') df['m2'] = df.groupby('cid')['m2'].fillna(method='bfill') df['m3'] = df.groupby('cid')['m3'].fillna(method='bfill') df['m4'] = df.groupby('cid')['m4'].fillna(method='bfill') df['m5'] = df.groupby('cid')['m5'].fillna(method='bfill') df['m6'] = df.groupby('cid')['m6'].fillna(method='bfill') df['m7'] = df.groupby('cid')['m7'].fillna(method='bfill') df['m8'] = df.groupby('cid')['m8'].fillna(method='bfill') df['m9'] = df.groupby('cid')['m9'].fillna(method='bfill') df['n0'] = df.groupby('cid')['n0'].fillna(method='bfill') df['n1'] = df.groupby('cid')['n1'].fillna(method='bfill') df['n2'] = df.groupby('cid')['n2'].fillna(method='bfill') df['n3'] = df.groupby('cid')['n3'].fillna(method='bfill') df['n4'] = df.groupby('cid')['n4'].fillna(method='bfill') df['n5'] = df.groupby('cid')['n5'].fillna(method='bfill') df['n6'] = df.groupby('cid')['n6'].fillna(method='bfill') df['n7'] = df.groupby('cid')['n7'].fillna(method='bfill') df['n8'] = df.groupby('cid')['n8'].fillna(method='bfill') 
df['flag_China'] = 1*(df['Country_Region'] == 'China') df['flag_US'] = 1*(df['Country_Region'] == 'US') df['flag_Kosovo_'] = 1*(df['cid'] == 'Kosovo_') df['flag_Korea'] = 1*(df['cid'] == 'Korea, South_') df['flag_Nepal_'] = 1*(df['cid'] == 'Nepal_') df['flag_Holy See_'] = 1*(df['cid'] == 'Holy See_') df['flag_Suriname_'] = 1*(df['cid'] == 'Suriname_') df['flag_Ghana_'] = 1*(df['cid'] == 'Ghana_') df['flag_Togo_'] = 1*(df['cid'] == 'Togo_') df['flag_Malaysia_'] = 1*(df['cid'] == 'Malaysia_') df['flag_US_Rhode'] = 1*(df['cid'] == 'US_Rhode Island') df['flag_Bolivia_'] = 1*(df['cid'] == 'Bolivia_') df['flag_China_Tib'] = 1*(df['cid'] == 'China_Tibet') df['flag_Bahrain_'] = 1*(df['cid'] == 'Bahrain_') df['flag_Honduras_'] = 1*(df['cid'] == 'Honduras_') df['flag_Bangladesh']= 1*(df['cid'] == 'Bangladesh_') df['flag_Paraguay_'] = 1*(df['cid'] == 'Paraguay_') tr = df.loc[ df.day < valid_day ].copy() vl = df.loc[ df.day == valid_day ].copy() tr = tr.loc[ tr.lag0_1 > 0 ].copy() maptarget0 = tr.groupby('cid')['target0'].agg(log0_max='max' ).reset_index() maptarget1 = tr.groupby('cid')['target1'].agg(log1_max='max' ).reset_index() vl['log0_max'] = pd.merge(vl, maptarget0, on='cid' , how='left')['log0_max'].values vl['log1_max'] = pd.merge(vl, maptarget1, on='cid' , how='left')['log1_max'].values vl['log0_max'] = vl['log0_max'].fillna(0) vl['log1_max'] = vl['log1_max'].fillna(0) return tr, vl def train_models(self, valid_day = 10): train = self.train.copy() train.loc[(train.cid=='China_Guizhou')&(train.Date=='2020-03-17'), 'target0' ] = np.log1p(146) train.loc[(train.cid=='Guyana_')&(train.Date>='2020-03-22')&(train.Date<='2020-03-30'), 'target0' ] = np.log1p(12) train.loc[(train.cid=='US_Virgin Islands')&(train.Date>='2020-03-29')&(train.Date<='2020-03-29'), 'target0' ] = np.log1p(24) train.loc[(train.cid=='US_Virgin Islands')&(train.Date>='2020-03-30')&(train.Date<='2020-03-30'), 'target0' ] = np.log1p(27) train.loc[(train.cid=='Iceland_')&(train.Date>='2020-03-15')&(train.Date<='2020-03-15'), 'target1' ] = np.log1p(0) train.loc[(train.cid=='Kazakhstan_')&(train.Date>='2020-03-20')&(train.Date<='2020-03-20'), 'target1' ] = np.log1p(0) train.loc[(train.cid=='Serbia_')&(train.Date>='2020-03-26')&(train.Date<='2020-03-26'), 'target1' ] = np.log1p(5) train.loc[(train.cid=='Serbia_')&(train.Date>='2020-03-27')&(train.Date<='2020-03-27'), 'target1' ] = np.log1p(6) train.loc[(train.cid=='Slovakia_')&(train.Date>='2020-03-22')&(train.Date<='2020-03-31'), 'target1' ] = np.log1p(1) train.loc[(train.cid=='US_Hawaii')&(train.Date>='2020-03-25')&(train.Date<='2020-03-31'), 'target1' ] = np.log1p(1) param = { 'subsample': 1.000, 'colsample_bytree': 0.85, 'max_depth': 6, 'gamma': 0.000, 'learning_rate': 0.010, 'min_child_weight': 5.00, 'reg_alpha': 0.000, 'reg_lambda': 0.400, 'silent':1, 'objective':'reg:squarederror', 'nthread': 12, 'seed': self.seed } tr, vl = self.create_features(train.copy() , valid_day) features = [f for f in tr.columns if f not in [ 'lag0_8', 'Id','ConfirmedCases','Fatalities','log0','log1','target0','target1','ypred0','ypred1','Province_State','Country_Region','Date','ForecastId','cid','geo','day', 'GDP_region','TRUE POPULATION','pct_in_largest_city',' TFR ',' Avg_age ','latitude','longitude','abs_latitude','temperature', 'humidity', 'Personality_pdi','Personality_idv','Personality_mas','Personality_uai','Personality_ltowvs','Personality_assertive','personality_perform','personality_agreeableness', 
'murder','High_rises','max_high_rises','AIR_CITIES','AIR_AVG','continent_gdp_pc','continent_happiness','continent_generosity','continent_corruption','continent_Life_expectancy' ] ] self.features0 = features features = [f for f in tr.columns if f not in [ 'm0','m1','m2','m3', 'Id','ConfirmedCases','Fatalities','log0','log1','target0','target1','ypred0','ypred1','Province_State','Country_Region','Date','ForecastId','cid','geo','day', 'GDP_region','TRUE POPULATION','pct_in_largest_city',' TFR ',' Avg_age ','latitude','longitude','abs_latitude','temperature', 'humidity', 'Personality_pdi','Personality_idv','Personality_mas','Personality_uai','Personality_ltowvs','Personality_assertive','personality_perform','personality_agreeableness', 'murder','High_rises','max_high_rises','AIR_CITIES','AIR_AVG','continent_gdp_pc','continent_happiness','continent_generosity','continent_corruption','continent_Life_expectancy' ] ] self.features1 = features nrounds0 = 680 nrounds1 = 630 dtrain = xgb.DMatrix(tr[self.features0], tr['target0']) param['seed'] = self.seed self.model0 = xgb.train(param, dtrain, nrounds0, verbose_eval=0) param['seed'] = self.seed+1 self.model1 = xgb.train(param, dtrain, nrounds0, verbose_eval=0) dtrain = xgb.DMatrix(tr[self.features1], tr['target1']) param['seed'] = self.seed self.model2 = xgb.train(param, dtrain, nrounds1, verbose_eval=0) param['seed'] = self.seed+1 self.model3 = xgb.train(param, dtrain, nrounds1, verbose_eval=0) self.vl = vl return 1 def predict_first_day(self, day): self.day = day self.train_models(day) dvalid = xgb.DMatrix(self.vl[self.features0]) ypred0 =(self.model0.predict(dvalid)+ self.model1.predict(dvalid)) / 2 dvalid = xgb.DMatrix(self.vl[self.features1]) ypred1 =(self.model2.predict(dvalid)+ self.model3.predict(dvalid)) / 2 self.vl['ypred0'] = ypred0 self.vl['ypred1'] = ypred1 self.vl.loc[ self.vl.ypred0<self.vl.log0_max, 'ypred0'] = self.vl.loc[ self.vl.ypred0<self.vl.log0_max, 'log0_max'] self.vl.loc[ self.vl.ypred1<self.vl.log1_max, 'ypred1'] = self.vl.loc[ self.vl.ypred1<self.vl.log1_max, 'log1_max'] VALID = self.vl[["geo", "day", 'ypred0', 'ypred1']].copy() VALID.columns = ["geo", "day", 'ConfirmedCases', 'Fatalities'] return VALID.reset_index(drop=True) def predict_next_day(self, yesterday): self.day += 1 feats = ['geo','day'] self.train[ 'ypred0' ] = pd.merge(self.train[feats], yesterday[feats+['ConfirmedCases']], on=feats, how='left')['ConfirmedCases'].values self.train.loc[ self.train.ypred0.notnull() , 'target0'] = self.train.loc[ self.train.ypred0.notnull() , 'ypred0'] self.train[ 'ypred1' ] = pd.merge(self.train[feats], yesterday[feats+['Fatalities']], on=feats, how='left')['Fatalities'].values self.train.loc[ self.train.ypred1.notnull() , 'target1'] = self.train.loc[ self.train.ypred1.notnull() , 'ypred1'] del self.train['ypred0'], self.train['ypred1'] tr, vl = self.create_features(self.train.copy() , self.day) dvalid = xgb.DMatrix(vl[self.features0]) ypred0 =(self.model0.predict(dvalid)+ self.model1.predict(dvalid)) /2 dvalid = xgb.DMatrix(vl[self.features1]) ypred1 =(self.model2.predict(dvalid)+ self.model3.predict(dvalid)) /2 vl['ypred0'] = ypred0 vl['ypred1'] = ypred1 vl.loc[ vl.ypred0<vl.log0_max, 'ypred0'] = vl.loc[ vl.ypred0<vl.log0_max, 'log0_max'] vl.loc[ vl.ypred1<vl.log1_max, 'ypred1'] = vl.loc[ vl.ypred1<vl.log1_max, 'log1_max'] self.vl = vl VALID = vl[["geo", "day", 'ypred0', 'ypred1']].copy() VALID.columns = ["geo", "day", 'ConfirmedCases', 'Fatalities'] return VALID.reset_index(drop=True )
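A minimal sketch (not from the original notebook) of how the CovidModel interface above is intended to be driven: predict_first_day trains the XGBoost models up to the first test day and returns that day's predictions, after which predict_next_day is called once per remaining day, with each day's output fed back in as the new "yesterday". DummyModel and the day range are hypothetical stand-ins so the loop is runnable on its own.

import pandas as pd

class DummyModel:
    # hypothetical stand-in exposing the same interface as CovidModelGIBA
    def predict_first_day(self, day):
        return pd.DataFrame({"geo": ["A_", "B_"], "day": day,
                             "ConfirmedCases": [1.0, 2.0], "Fatalities": [0.1, 0.2]})
    def predict_next_day(self, yesterday):
        nxt = yesterday.copy()
        nxt["day"] += 1
        nxt[["ConfirmedCases", "Fatalities"]] += 0.05  # pretend growth in log space
        return nxt

model = DummyModel()
first_test_day, last_test_day = 75, 80   # hypothetical day indices
preds = model.predict_first_day(first_test_day)
all_preds = [preds]
for _ in range(first_test_day + 1, last_test_day + 1):
    preds = model.predict_next_day(preds)  # yesterday's predictions become today's input
    all_preds.append(preds)
forecast = pd.concat(all_preds, ignore_index=True)
print(forecast.shape)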
COVID19 Global Forecasting (Week 3)
8,827,057
test_data['PRIMEUNIT'].value_counts()<data_type_conversions>
TARGETS = ["ConfirmedCases", "Fatalities"] def rmse(y_true, y_pred): return np.sqrt(mean_squared_error(y_true, y_pred)) df = pd.read_csv(".. /input/covid19-global-forecasting-week-3/train.csv") df[TARGETS] = np.log1p(df[TARGETS].values) sub_df = pd.read_csv(".. /input/covid19-global-forecasting-week-3/test.csv") def preprocess(df): for col in ["Country_Region", "Province_State"]: df[col].fillna("", inplace=True) df["Date"] = pd.to_datetime(df['Date']) df['day'] = df.Date.dt.dayofyear df['geo'] = ['_'.join(x)for x in zip(df['Country_Region'], df['Province_State'])] return df df = preprocess(df) sub_df = preprocess(sub_df) sub_df["day"] -= df["day"].min() df["day"] -= df["day"].min()
COVID19 Global Forecasting (Week 3)
8,827,057
train_data['PRIMEUNIT'].fillna(value="Prime_unk",inplace=True) test_data['PRIMEUNIT'].fillna(value="Prime_unk",inplace=True )<categorify>
print(sub_df.shape) sub_df = sub_df.merge(df.append(eval_df, sort=False), on=["geo", "day"], how="left") print(sub_df.shape) print(sub_df[TARGETS].isnull().mean() )
COVID19 Global Forecasting (Week 3)
8,827,057
train_data['AUCGUART'].replace("AGREEN","GREEN",inplace=True) test_data['AUCGUART'].replace("ARED","RED",inplace=True )<data_type_conversions>
flat = [ 'China_Anhui', 'China_Beijing', 'China_Chongqing', 'China_Fujian', 'China_Gansu', 'China_Guangdong', 'China_Guangxi', 'China_Guizhou', 'China_Hainan', 'China_Hebei', 'China_Heilongjiang', 'China_Henan', 'China_Hubei', 'China_Hunan', 'China_Jiangsu', 'China_Jiangxi', 'China_Jilin', 'China_Liaoning', 'China_Ningxia', 'China_Qinghai', 'China_Shaanxi', 'China_Shandong', 'China_Shanxi', 'China_Sichuan', 'China_Tibet', 'China_Xinjiang', 'China_Yunnan', 'China_Zhejiang', 'Diamond Princess_', 'Holy See_', ]
COVID19 Global Forecasting (Week 3)
8,827,057
train_data['AUCGUART'].fillna(value="AUC_unk",inplace=True) test_data['AUCGUART'].fillna(value="AUC_unk",inplace=True )<drop_column>
dt = sub_df.loc[ sub_df.Date_x == "2020-04-07" ].copy() dt = dt.loc[ dt.geo.isin(flat)].copy() dt = dt[['geo','Date_x','day','ConfirmedCases','Fatalities']].copy() dt = dt.reset_index(drop=True) dt
COVID19 Global Forecasting (Week 3)
8,827,057
train_data.drop(['MMRAcquisitionAuctionAveragePrice','MMRAcquisitionAuctionCleanPrice', 'MMRAcquisitionRetailAveragePrice','MMRAcquisitonRetailCleanPrice', 'MMRCurrentAuctionAveragePrice','MMRCurrentAuctionCleanPrice', 'MMRCurrentRetailAveragePrice','MMRCurrentRetailCleanPrice'], inplace=True,axis=1) test_data.drop(['MMRAcquisitionAuctionAveragePrice','MMRAcquisitionAuctionCleanPrice', 'MMRAcquisitionRetailAveragePrice','MMRAcquisitonRetailCleanPrice', 'MMRCurrentAuctionAveragePrice','MMRCurrentAuctionCleanPrice', 'MMRCurrentRetailAveragePrice','MMRCurrentRetailCleanPrice'], inplace=True,axis=1 )<drop_column>
sub_df['ow0'] = pd.merge(sub_df, dt, on='geo', how='left')['ConfirmedCases_y'].values sub_df['ow1'] = pd.merge(sub_df, dt, on='geo', how='left')['Fatalities_y'].values sub_df.tail(60 )
COVID19 Global Forecasting (Week 3)
8,827,057
train_data.drop('PurchDate',axis=1,inplace=True) test_data.drop("PurchDate",axis=1,inplace=True )<drop_column>
sub_df.loc[ sub_df.geo.isin(flat)]
COVID19 Global Forecasting (Week 3)
8,827,057
train_data.drop(['RefId','IsBadBuy'],axis=1 ).dtypes!='object'<drop_column>
sub_df.loc[ sub_df.ow0.notnull() &(sub_df.Date_x >= '2020-04-08'), 'ConfirmedCases' ] = sub_df.loc[ sub_df.ow0.notnull() &(sub_df.Date_x >= '2020-04-08'), 'ow0' ] sub_df.loc[ sub_df.ow1.notnull() &(sub_df.Date_x >= '2020-04-08'), 'Fatalities' ] = sub_df.loc[ sub_df.ow1.notnull() &(sub_df.Date_x >= '2020-04-08'), 'ow1' ]
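The two .loc assignments above freeze the "flat" geographies at their merged 2020-04-07 values for every later test date. The same conditional-overwrite pattern in isolation, on a toy frame (column names mirror the cell above; the numbers are made up):

import pandas as pd

df = pd.DataFrame({
    "geo": ["China_Hubei"] * 4,
    "Date": pd.to_datetime(["2020-04-06", "2020-04-07", "2020-04-08", "2020-04-09"]),
    "ConfirmedCases": [11.0, 11.1, 11.3, 11.6],  # model output (log scale)
    "ow0": [None, None, 11.1, 11.1],             # value carried over from 2020-04-07
})
mask = df["ow0"].notnull() & (df["Date"] >= "2020-04-08")
df.loc[mask, "ConfirmedCases"] = df.loc[mask, "ow0"]
print(df)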
COVID19 Global Forecasting (Week 3)
8,827,057
not_categorical=train_data.drop(['RefId','IsBadBuy'],axis=1 ).columns[train_data.drop(['RefId','IsBadBuy'],axis=1 ).dtypes!='object']<feature_engineering>
sub_df.loc[ sub_df.geo.isin(flat)]
COVID19 Global Forecasting (Week 3)
8,827,057
for i in not_categorical: maximum=np.max(train_data[i]) train_data[i]=train_data[i]/maximum maximum_test=np.max(test_data[i]) test_data[i]=test_data[i]/maximum_test<drop_column>
sub_df[TARGETS] = np.expm1(sub_df[TARGETS].values) sub_df.to_csv("submission.csv", index=False, columns=["ForecastId"] + TARGETS) sub_df.head()
COVID19 Global Forecasting (Week 3)
8,825,239
categorical=train_data.drop(['RefId','IsBadBuy'],axis=1 ).columns[train_data.drop(['RefId','IsBadBuy'],axis=1 ).dtypes=='object']<filter>
def RMSLE(pred,actual): return np.sqrt(np.mean(np.power(( np.log(pred+1)-np.log(actual+1)) ,2))) pd.set_option('mode.chained_assignment', None) test = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv") train = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv") train['Province_State'].fillna('', inplace=True) test['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train = train.sort_values(['Country_Region','Province_State','Date']) test = test.sort_values(['Country_Region','Province_State','Date'] )
COVID19 Global Forecasting (Week 3)
8,825,239
train_data[categorical[0]]<categorify>
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature]
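CreateInput builds one feature per threshold in feature_day: the number of days elapsed since the geography last had fewer than that many confirmed cases. A compact, self-contained version of the same idea on a toy series (names are illustrative only):

import pandas as pd

toy = pd.DataFrame({
    "Date": pd.date_range("2020-03-01", periods=8),
    "ConfirmedCases": [0, 0, 2, 5, 30, 80, 150, 400],
})
for threshold in [1, 20, 100]:
    below = toy.loc[toy["ConfirmedCases"] < threshold, "Date"]
    fromday = below.max() if len(below) else toy["Date"].min()  # last day still below the threshold
    toy[f"days_from_{threshold}_cases"] = (toy["Date"] - fromday).dt.days.clip(lower=0)
print(toy)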
COVID19 Global Forecasting (Week 3)
8,825,239
pd.get_dummies(train_data[categorical[0]] )<categorify>
pred_data_all = pd.DataFrame() for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) for day in sorted(feature_day,reverse = True): feature_use = 'Number day from ' + str(day)+ ' case' idx = X_train[X_train[feature_use] == 0].shape[0] if(X_train[X_train[feature_use] > 0].shape[0] >= 20): break adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data )
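Each geography above is fitted with a SARIMAX(1,1,0) model with measurement error and then asked to forecast the test days that lie beyond the last training date. A minimal illustration of that fit/forecast call pattern on a synthetic cumulative series (the order and the measurement_error flag mirror the loop above; the data and horizon are invented):

import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

y = np.cumsum(np.exp(np.linspace(0, 4, 30)))  # synthetic, roughly exponential cumulative curve

model = SARIMAX(y, order=(1, 1, 0), measurement_error=True).fit(disp=False)
forecast = model.forecast(7)  # 7 days past the training window
print(np.round(forecast, 1))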
COVID19 Global Forecasting (Week 3)
8,825,239
<feature_engineering><EOS>
df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_3 = df_val.copy() submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']] submission.columns = ['ForecastId','ConfirmedCases','Fatalities'] submission.to_csv('submission.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,820,802
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<feature_engineering>
import os from typing import Dict, List, Tuple from joblib import Parallel, delayed import pandas as pd import numpy as np from scipy.optimize import curve_fit from scipy.optimize import least_squares from xgboost import XGBRegressor
COVID19 Global Forecasting (Week 3)
8,820,802
for i in test_data.columns: if i not in train_data.columns: train_data[i]=np.zeros(len(train_data))<drop_column>
def load_kaggle_csv(dataset: str, datadir: str)-> pd.DataFrame: df = pd.read_csv( f"{os.path.join(datadir,dataset)}.csv", parse_dates=["Date"] ) df['country'] = df["Country_Region"] if "Province_State" in df: df["Country_Region"] = np.where( df["Province_State"].isnull() , df["Country_Region"], df["Country_Region"] + "_" + df["Province_State"], ) df.drop(columns="Province_State", inplace=True) if "ConfirmedCases" in df: df["ConfirmedCases"] = df.groupby("Country_Region")[ "ConfirmedCases" ].cummax() if "Fatalities" in df: df["Fatalities"] = df.groupby("Country_Region")["Fatalities"].cummax() if not "DayOfYear" in df: df["DayOfYear"] = df["Date"].dt.dayofyear df["Date"] = df["Date"].dt.date return df def RMSLE(actual: np.ndarray, prediction: np.ndarray)-> float: return np.sqrt( np.mean( np.power(np.log1p(np.maximum(0, prediction)) - np.log1p(actual), 2) ) ) def get_extra_features(df): df['school_closure_status_daily'] = np.where(df['school_closure'] < df['Date'], 1, 0) df['school_closure_first_fatality'] = np.where(df['school_closure'] < df['first_1Fatalities'], 1, 0) df['school_closure_first_10cases'] = np.where(df['school_closure'] < df['first_10ConfirmedCases'], 1, 0) df['case_delta1_10'] =(df['first_10ConfirmedCases'] - df['first_1ConfirmedCases'] ).dt.days df['case_death_delta1'] =(df['first_1Fatalities'] - df['first_1ConfirmedCases'] ).dt.days df['case_delta1_100'] =(df['first_100ConfirmedCases'] - df['first_1ConfirmedCases'] ).dt.days df['days_since'] = df['DayOfYear']-df['case1_DayOfYear'] df['weekday'] = pd.to_datetime(df['Date'] ).dt.weekday col = df.isnull().mean() rm_null_col = col[col > 0.2].index.tolist() return df def dateparse(x): try: return pd.datetime.strptime(x, '%Y-%m-%d') except: return pd.NaT def prepare_lat_long(df): df["Country_Region"] = np.where( df["Province/State"].isnull() , df["Country/Region"], df["Country/Region"] + "_" + df["Province/State"], ) return df[['Country_Region', 'Lat', 'Long']].drop_duplicates()
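load_kaggle_csv applies a per-country cummax so the reported cumulative counts never decrease (guarding against downward revisions in the raw files). A tiny demonstration of that cleanup step:

import pandas as pd

df = pd.DataFrame({
    "Country_Region": ["X", "X", "X", "Y", "Y"],
    "ConfirmedCases": [5, 9, 7, 1, 3],  # note the dip from 9 to 7
})
df["ConfirmedCases"] = df.groupby("Country_Region")["ConfirmedCases"].cummax()
print(df["ConfirmedCases"].tolist())  # [5, 9, 9, 1, 3]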
COVID19 Global Forecasting (Week 3)
8,820,802
test_data=test_data[train_data.drop("IsBadBuy",axis=1 ).columns]<prepare_x_and_y>
df_lat = prepare_lat_long(pd.read_csv("/kaggle/input/inputlat-long/lat_long.csv")) train = load_kaggle_csv("train", "/kaggle/input/covid19-global-forecasting-week-3") country_health_indicators =( (pd.read_csv("/kaggle/input/country-health-indicators/country_health_indicators_v3.csv", parse_dates=['first_1ConfirmedCases', 'first_10ConfirmedCases', 'first_50ConfirmedCases', 'first_100ConfirmedCases', 'first_1Fatalities', 'school_closure'], date_parser=dateparse)).rename( columns ={'Country_Region':'country'})) train =(pd.merge(train, country_health_indicators, on="country", how="left")).merge(df_lat, on='Country_Region', how='left') train = get_extra_features(train) train.head(3 )
COVID19 Global Forecasting (Week 3)
8,820,802
X=train_data.drop(['RefId','IsBadBuy'],axis=1) y=train_data['IsBadBuy']<split>
test = load_kaggle_csv("test", "/kaggle/input/covid19-global-forecasting-week-3") test =(pd.merge( test, country_health_indicators, on="country", how="left")).merge( df_lat, on ='Country_Region', how='left') test = get_extra_features(test) del country_health_indicators
COVID19 Global Forecasting (Week 3)
8,820,802
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=42 )<import_modules>
def logistic(x: np.ndarray, x0: float, L: float, k: float)-> np.ndarray: return L /(1 + np.exp(-k *(x - x0))) def fit_single_logistic(x: np.ndarray, y: np.ndarray, maxfev: float)-> Tuple: p0 = [np.median(x), y[-1], 0.1] pn0 = p0 *(np.random.random(len(p0)) + [0.5, 1.0, 0.5]) try: params, pcov = curve_fit( logistic, x, y, p0=pn0, maxfev=maxfev, sigma=np.maximum(1, np.sqrt(y)) *(0.1 + 0.9 * np.random.random()), bounds=([0, y[-1], 0.01], [200, 1e6, 1.5]), ) pcov = pcov[np.triu_indices_from(pcov)] except(RuntimeError, ValueError): params = p0 pcov = np.zeros(len(p0)*(len(p0)- 1)) y_hat = logistic(x, *params) rmsle = RMSLE(y_hat, y) return(params, pcov, rmsle, y_hat) def fit_logistic( df: pd.DataFrame, n_jobs: int = 8, n_samples: int = 80, maxfev: int = 8000, x_col: str = "DayOfYear", y_cols: List[str] = ["ConfirmedCases", "Fatalities"], )-> pd.DataFrame: def fit_one(df: pd.DataFrame, y_col: str)-> Dict: best_rmsle = None best_params = None x = df[x_col].to_numpy() y = df[y_col].to_numpy() for(params, cov, rmsle, y_hat)in Parallel(n_jobs=n_jobs )( delayed(fit_single_logistic )(x, y, maxfev=maxfev) for i in range(n_samples) ): if rmsle >=(best_rmsle or rmsle): best_rmsle = rmsle best_params = params result = {f"{y_col}_rmsle": best_rmsle} result.update({f"{y_col}_p_{i}": p for i, p in enumerate(best_params)}) return result result = {} for y_col in y_cols: result.update(fit_one(df, y_col)) return pd.DataFrame([result]) def predict_logistic( df: pd.DataFrame, x_col: str = "DayOfYear", y_cols: List[str] = ["ConfirmedCases", "Fatalities"], ): def predict_one(col): df[f"yhat_logistic_{col}"] = logistic( df[x_col].to_numpy() , df[f"{col}_p_0"].to_numpy() , df[f"{col}_p_1"].to_numpy() , df[f"{col}_p_2"].to_numpy() , ) for y_col in y_cols: predict_one(y_col )
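fit_single_logistic wraps scipy's curve_fit around the three-parameter logistic defined above, with randomly perturbed starting points and weights. A minimal sketch of a single unweighted fit on synthetic data (the noise level and the absence of bounds here are illustrative simplifications, not the notebook's settings):

import numpy as np
from scipy.optimize import curve_fit

def logistic(x, x0, L, k):
    return L / (1 + np.exp(-k * (x - x0)))

rng = np.random.default_rng(0)
x = np.arange(60, dtype=float)
y = logistic(x, x0=30, L=5000, k=0.25) + rng.normal(0, 50, size=x.size)

params, _ = curve_fit(logistic, x, y, p0=[np.median(x), y[-1], 0.1], maxfev=8000)
print(np.round(params, 3))  # should recover roughly [30, 5000, 0.25]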
COVID19 Global Forecasting (Week 3)
8,820,802
from sklearn.neighbors import KNeighborsClassifier<import_modules>
train = pd.merge( train, train.groupby( ["Country_Region"], observed=True, sort=False ).apply(lambda x: fit_logistic(x, n_jobs=8, n_samples=80, maxfev=16000)).reset_index() , on=["Country_Region"], how="left") predict_logistic(train )
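The merge above attaches one row of fitted logistic parameters per Country_Region back onto every daily row of train. The groupby → apply → reset_index → merge pattern in isolation, with a trivial stand-in for fit_logistic:

import pandas as pd

df = pd.DataFrame({"Country_Region": ["A", "A", "B", "B", "B"],
                   "ConfirmedCases": [1.0, 2.0, 10.0, 20.0, 30.0]})

def fit_group(g):
    # stand-in for fit_logistic: returns one row of per-group parameters
    return pd.DataFrame([{"p_mean": g["ConfirmedCases"].mean()}])

params = df.groupby("Country_Region", sort=False).apply(fit_group).reset_index()
df = pd.merge(df, params, on="Country_Region", how="left")
print(df)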
COVID19 Global Forecasting (Week 3)
8,820,802
from sklearn.neighbors import KNeighborsClassifier<import_modules>
def apply_xgb_model(train, x_columns, y_column, xgb_params): X = train[x_columns].to_numpy() y = train[y_column].to_numpy() xgb_fit = XGBRegressor(**xgb_params ).fit(X, y) y_hat = xgb_fit.predict(X) train[f"yhat_xgb_{y_column}"] = y_hat return RMSLE(y, y_hat), xgb_fit
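apply_xgb_model fits an XGBRegressor on the selected columns and reports the in-sample RMSLE of its predictions. A self-contained miniature of that fit/predict cycle on random data (the hyperparameters here are arbitrary, not the tuned values used later):

import numpy as np
from xgboost import XGBRegressor

rng = np.random.default_rng(1)
X = rng.normal(size=(200, 5))
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=200)

model = XGBRegressor(n_estimators=50, max_depth=3, learning_rate=0.3,
                     objective="reg:squarederror")
model.fit(X, y)
y_hat = model.predict(X)
print(np.sqrt(np.mean((y - y_hat) ** 2)))  # in-sample RMSE, should be small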
COVID19 Global Forecasting (Week 3)
8,820,802
from sklearn.neighbors import KNeighborsClassifier<train_model>
xgb_params_c = dict( gamma=0.1, learning_rate=0.35, n_estimators=221, max_depth=15, min_child_weight=1, nthread=8, objective="reg:squarederror") xgb_params_f = dict( gamma=0.1022, learning_rate=0.338, n_estimators=292, max_depth=14, min_child_weight=1, nthread=8, objective="reg:squarederror") x_columns = ['DayOfYear', 'Diabetes, blood, & endocrine diseases(%)', 'Respiratory diseases(%)', 'Diarrhea & common infectious diseases(%)', 'Nutritional deficiencies(%)', 'obesity - adult prevalence rate', 'pneumonia-death-rates', 'animal_fats', 'animal_products', 'eggs', 'offals', 'treenuts', 'vegetable_oils', 'nbr_surgeons', 'nbr_anaesthesiologists', 'population', 'school_shutdown_1case', 'school_shutdown_10case', 'school_shutdown_50case', 'school_shutdown_1death', 'case1_DayOfYear', 'case10_DayOfYear', 'case50_DayOfYear', 'school_closure_status_daily', 'case_delta1_10', 'case_death_delta1', 'case_delta1_100', 'days_since','Lat','Long','weekday', 'yhat_logistic_ConfirmedCases', 'yhat_logistic_Fatalities' ] xgb_c_rmsle, xgb_c_fit = apply_xgb_model(train, x_columns, "ConfirmedCases", xgb_params_c) xgb_f_rmsle, xgb_f_fit = apply_xgb_model(train, x_columns, "Fatalities", xgb_params_f )
COVID19 Global Forecasting (Week 3)
8,820,802
KNN=KNeighborsClassifier(n_neighbors=11) KNN.fit(X_train,y_train )<choose_model_class>
def interpolate(alpha, x0, x1): return x0 * alpha + x1 *(1 - alpha) def RMSLE_interpolate(alpha, y, x0, x1): return RMSLE(y, interpolate(alpha, x0, x1)) def fit_hybrid( train: pd.DataFrame, y_cols: List[str] = ["ConfirmedCases", "Fatalities"] )-> pd.DataFrame: def fit_one(y_col: str): opt = least_squares( fun=RMSLE_interpolate, args=( train[y_col], train[f"yhat_logistic_{y_col}"], train[f"yhat_xgb_{y_col}"], ), x0=(0.5,), bounds=(( 0.0),(1.0,)) , ) return {f"{y_col}_alpha": opt.x[0], f"{y_col}_cost": opt.cost} result = {} for y_col in y_cols: result.update(fit_one(y_col)) return pd.DataFrame([result]) def predict_hybrid( df: pd.DataFrame, x_col: str = "DayOfYear", y_cols: List[str] = ["ConfirmedCases", "Fatalities"], ): def predict_one(col): df[f"yhat_hybrid_{col}"] = interpolate( df[f"{y_col}_alpha"].to_numpy() , df[f"yhat_logistic_{y_col}"].to_numpy() , df[f"yhat_xgb_{y_col}"].to_numpy() , ) for y_col in y_cols: predict_one(y_col )
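fit_hybrid searches for the blend weight alpha that minimises the RMSLE of an interpolation between the logistic and XGBoost predictions, using scipy's least_squares. A toy version with two synthetic predictors whose optimal blend is known by construction:

import numpy as np
from scipy.optimize import least_squares

def interpolate(alpha, x0, x1):
    return x0 * alpha + x1 * (1 - alpha)

def rmsle(actual, pred):
    return np.sqrt(np.mean((np.log1p(np.maximum(0, pred)) - np.log1p(actual)) ** 2))

y_true = np.array([10.0, 50.0, 200.0, 800.0])
pred_a = 1.3 * y_true  # one model overshoots
pred_b = 0.8 * y_true  # the other undershoots

opt = least_squares(lambda a: rmsle(y_true, interpolate(a, pred_a, pred_b)),
                    x0=(0.5,), bounds=((0.0,), (1.0,)))
print(opt.x[0])  # close to 0.4, since 0.8 + 0.5 * alpha = 1 recovers y_true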
COVID19 Global Forecasting (Week 3)
8,820,802
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=11, p=2, weights='uniform' )<compute_test_metric>
train = pd.merge( train, train.groupby(["Country_Region"], observed=True, sort=False) .apply(lambda x: fit_hybrid(x)) .reset_index() , on=["Country_Region"], how="left", )
COVID19 Global Forecasting (Week 3)
8,820,802
KNN.score(X_test,y_test )<predict_on_test>
predict_hybrid(train )
COVID19 Global Forecasting (Week 3)