kernel_id (int64: 24.2k–23.3M) | prompt (string: lengths 8–1.85M) | completion (string: lengths 1–182k) | comp_name (string: lengths 5–57)
---|---|---|---|
8,864,825 | df_test = pd.read_csv('../input/walmart-recruiting-store-sales-forecasting/test.csv.zip', sep=',')
test = df_test.merge(df_features_stores, how='inner', on=['Store','Date','IsHoliday'])
test.head()<data_type_conversions> | train = pd.read_csv('../input/covid19-global-forecasting-week-3/train.csv')
test = pd.read_csv('../input/covid19-global-forecasting-week-3/test.csv')
submission = pd.read_csv('../input/covid19-global-forecasting-week-3/submission.csv')
clean_train = pd.read_csv('../input/clean-data/clean_train.csv')
clean_test = pd.read_csv('../input/clean-data/clean_test.csv') | COVID19 Global Forecasting (Week 3) |
8,864,825 | train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train['Week'] = train['Date'].dt.isocalendar().week
test['Week'] = test['Date'].dt.isocalendar().week
train['Year'] = train['Date'].dt.isocalendar().year
test['Year'] = test['Date'].dt.isocalendar().year<data_type_conversions> | tr_p = train[train['Province_State'].notnull()]
tr_p['Country_Province'] = tr_p['Country_Region'] + '_' + tr_p['Province_State']
tr_np = train[train['Province_State'].isnull()]
tr_np['Country_Province'] = tr_np['Country_Region'] | COVID19 Global Forecasting (Week 3) |
8,864,825 | train['Date'] = pd.to_datetime(train['Date'])
train['Type'] = train['Type'].apply(lambda x: 3 if x == 'A' else(2 if x == 'B' else 1))
train['IsHoliday'] = train['IsHoliday'].apply(lambda x: 1 if x == True else 0)
cols = train.columns.drop(['Date'])
train[cols] = train[cols].apply(pd.to_numeric, errors='coerce')<data_type_conversions> | train1 = pd.concat([tr_np, tr_p])
train1.drop(['Province_State','Country_Region'], axis=1, inplace=True) | COVID19 Global Forecasting (Week 3) |
8,864,825 | test['Date'] = pd.to_datetime(test['Date'])
test['Type'] = test['Type'].apply(lambda x: 3 if x == 'A' else(2 if x == 'B' else 1))
test['IsHoliday'] = test['IsHoliday'].apply(lambda x: 1 if x == True else 0)
cols = test.columns.drop(['Date'])
test[cols] = test[cols].apply(pd.to_numeric, errors='coerce')<remove_duplicates> | te_p = test[test['Province_State'].notnull()]
te_p['Country_Province'] = te_p['Country_Region'] + '_' + te_p['Province_State']
te_np = test[test['Province_State'].isnull()]
te_np['Country_Province'] = te_np['Country_Region'] | COVID19 Global Forecasting (Week 3) |
8,864,825 | holiday_train = train[['Date','Week','Year','IsHoliday']]
holiday_train = holiday_train.loc[holiday_train['IsHoliday']==True].drop_duplicates()
holiday_test = test[['Date','Week','Year','IsHoliday']]
holiday_test = holiday_test.loc[holiday_test['IsHoliday']==True].drop_duplicates()
holidays = pd.concat([holiday_train, holiday_test])
holidays<categorify> | test1 = pd.concat([te_np,te_p])
test1.drop(['Province_State','Country_Region'], axis=1, inplace=True) | COVID19 Global Forecasting (Week 3) |
8,864,825 | def holiday_type(x):
    if (x['IsHoliday'] == 1) & (x['Week'] == 6):
        return 1
    elif (x['IsHoliday'] == 1) & (x['Week'] == 36):
        return 2
    elif (x['IsHoliday'] == 1) & (x['Week'] == 47):
        return 3
    elif (x['IsHoliday'] == 1) & (x['Week'] == 52):
        return 4
    else:
        return 0<feature_engineering> | train1['Date'] = pd.to_datetime(train1['Date'])
test1['Date'] = pd.to_datetime(test1['Date']) | COVID19 Global Forecasting (Week 3) |
8,864,825 | train['IsHoliday'] = train.apply(holiday_type, axis=1)
train['IsHoliday'].unique()<feature_engineering> | train1['Country_Province'] = train1['Country_Province'].astype('category')
test1['Country_Province'] = test1['Country_Province'].astype('category') | COVID19 Global Forecasting (Week 3) |
8,864,825 | test['IsHoliday'] = test.apply(holiday_type, axis=1)
test['IsHoliday'].unique()<categorify> | cl_tr1 = clean_train[['Country_Region','Province_State','Lat','Long','firstcase','density','medianage','urbanpop','hospibed','lung','avgtemp','avghumidity']] | COVID19 Global Forecasting (Week 3) |
8,864,825 | train = train.replace('None', np.nan)
train = train.replace('NaN', np.nan)
train = train.replace('NaT', np.nan)
train = train.replace('', np.nan)
train_nulls = (train.isnull().sum(axis=0)/len(train)) * 100
train_nulls<categorify> | cl_tr1.drop_duplicates(subset=None, keep='first', inplace=True) | COVID19 Global Forecasting (Week 3) |
8,864,825 | test = test.replace('None', np.nan)
test = test.replace('NaN', np.nan)
test = test.replace('NaT', np.nan)
test = test.replace('', np.nan)
test_nulls = (test.isnull().sum(axis=0)/len(test)) * 100
test_nulls<data_type_conversions> | cl_p = cl_tr1[cl_tr1['Province_State'].notnull()]
cl_p['Country_Province'] = cl_p['Country_Region'] + '_' + cl_p['Province_State']
cl_np = cl_tr1[cl_tr1['Province_State'].isnull()]
cl_np['Country_Province'] = cl_np['Country_Region'] | COVID19 Global Forecasting (Week 3) |
8,864,825 | train = train.fillna(0)
test = test.fillna(0)
train.isnull().sum()<groupby> | cl_tr = pd.concat([cl_p,cl_np])
cl_tr.drop(['Country_Region','Province_State'], axis=1, inplace=True) | COVID19 Global Forecasting (Week 3) |
8,864,825 | weekly_sales = train.groupby(['Year','Week']).agg({'Weekly_Sales': ['mean', 'median']})
weekly_sales2010 = train.loc[train['Year']==2010].groupby(['Week']).agg({'Weekly_Sales': ['mean', 'median']})
weekly_sales2011 = train.loc[train['Year']==2011].groupby(['Week']).agg({'Weekly_Sales': ['mean', 'median']})
weekly_sales2012 = train.loc[train['Year']==2012].groupby(['Week']).agg({'Weekly_Sales': ['mean', 'median']})<create_dataframe> | train_cl = pd.merge(train1, cl_tr)
test_cl = pd.merge(test1, cl_tr) | COVID19 Global Forecasting (Week 3) |
8,864,825 | sample_weight = train['IsHoliday'].apply(lambda x: 1 if x==0 else 5)
sample_weight_frame = pd.DataFrame(sample_weight, index=train.index)<compute_test_metric> | train_cl['Country_Province'] = train_cl['Country_Province'].astype('category')
test_cl['Country_Province'] = test_cl['Country_Province'].astype('category') | COVID19 Global Forecasting (Week 3) |
8,864,825 | def WMAE(y_test, y_pred):
    y_pred_df = pd.DataFrame(y_pred, index=y_test.index)
    weights_5 = sample_weight_frame.loc[(y_test.index)].loc[sample_weight_frame.IsHoliday==5].index
    weights_1 = sample_weight_frame.loc[(y_test.index)].loc[sample_weight_frame.IsHoliday==1].index
    sum_5 = np.sum(5*(abs(y_test.loc[weights_5].values - y_pred_df.loc[weights_5].values)))
    sum_1 = np.sum(abs(y_test.loc[weights_1].values - y_pred_df.loc[weights_1].values))
    return np.round((sum_5 + sum_1)/(5*len(weights_5) + len(weights_1)), 2)
my_score = make_scorer(WMAE, greater_is_better=False)<drop_column> | train_cl['firstcase'] = pd.to_datetime(train_cl['firstcase'])
test_cl['firstcase'] = pd.to_datetime(test_cl['firstcase']) | COVID19 Global Forecasting (Week 3) |
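For reference, the scorer above is the Walmart competition's weighted MAE, with weight 5 on holiday weeks and weight 1 otherwise. A minimal sanity check on hypothetical toy values, independent of the notebook's frames:

```python
import numpy as np

# WMAE = sum(w_i * |y_i - yhat_i|) / sum(w_i), with w_i = 5 on holiday weeks, else 1
y_true = np.array([100.0, 200.0, 300.0])   # actual weekly sales (toy values)
y_hat = np.array([110.0, 190.0, 330.0])    # predictions (toy values)
w = np.array([1, 5, 1])                    # the middle week is a holiday week

wmae = np.sum(w * np.abs(y_true - y_hat)) / np.sum(w)
print(round(wmae, 2))  # (10 + 5*10 + 30) / 7 = 12.86
```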
8,864,825 | train_all = train.drop(['Date'],axis=1)
train_all<prepare_x_and_y> | train_cl['jan'] = "2020-01-01"
train_cl['jan'] = pd.to_datetime(train_cl['jan'])
test_cl['jan'] = "2020-01-01"
test_cl['jan'] = pd.to_datetime(test_cl['jan'])
train_cl['days_since_jan1'] = train_cl['Date'] - train_cl['jan']
test_cl['days_since_jan1'] = test_cl['Date'] - test_cl['jan'] | COVID19 Global Forecasting (Week 3) |
8,864,825 | y_train_all = train_all.loc[:, ['Weekly_Sales']]
x_train_all = train_all.drop(['Weekly_Sales'], axis=1)<split> | for i in range(len(train_cl)):
    train_cl['days_since_jan1'][i] = train_cl['days_since_jan1'][i].days
for i in range(len(test_cl)):
    test_cl['days_since_jan1'][i] = test_cl['days_since_jan1'][i].days | COVID19 Global Forecasting (Week 3) |
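As an aside, the row-by-row `.days` loops above can be replaced by the vectorized timedelta accessor; a sketch under the same column names:

```python
# Equivalent vectorized form, assuming the columns hold timedelta64 values
train_cl['days_since_jan1'] = train_cl['days_since_jan1'].dt.days
test_cl['days_since_jan1'] = test_cl['days_since_jan1'].dt.days
```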
8,864,825 | x_train, x_test, y_train, y_test = train_test_split(x_train_all, y_train_all, test_size=0.2, random_state=0)
print(x_train.shape)
print(x_test.shape)<define_search_space> | train_cl['days_since_firstcase'] = train_cl['firstcase'] - train_cl['Date']
test_cl['days_since_firstcase'] = test_cl['firstcase'] - test_cl['Date'] | COVID19 Global Forecasting (Week 3) |
8,864,825 | clf = RandomForestRegressor(random_state=0)
pca = PCA()
pipe = Pipeline(steps=[('clf', clf)])
param_grid = [
    {
        'clf': [RandomForestRegressor()],
        'clf__n_estimators': [50, 100, 150],
        'clf__max_depth': [10, 20, 30]
    },
    {
        'clf': [ExtraTreesRegressor()],
        'clf__n_estimators': [50, 100, 150],
        'clf__max_depth': [10, 20, 30]
    },
    {
        'clf': [XGBRegressor()],
        'clf__learning_rate': [0.1, 0.05],
        'clf__min_samples_split': [5, 7, 9],
        'clf__max_depth': [10, 20, 30]
    }
]
rscv_all_tree = RandomizedSearchCV(pipe, param_grid, cv=3, scoring=my_score, n_jobs=-1)
model_all_tree = rscv_all_tree.fit(x_train, y_train)<find_best_params> | for i in range(len(train_cl)):
    train_cl['days_since_firstcase'][i] = train_cl['days_since_firstcase'][i].days
for i in range(len(test_cl)):
    test_cl['days_since_firstcase'][i] = test_cl['days_since_firstcase'][i].days | COVID19 Global Forecasting (Week 3) |
8,864,825 | rscv_all_tree.best_estimator_<predict_on_test> | cols = ['days_since_jan1','days_since_firstcase']
for col in cols:
train_cl[col] = train_cl[col].astype('int64')
test_cl[col] = test_cl[col].astype('int64') | COVID19 Global Forecasting (Week 3) |
8,864,825 | y_pred = rscv_all_tree.best_estimator_.predict(x_test)
print('WMAE:', WMAE(y_test, y_pred))<drop_column> | train_clean_cases = train_cl[['Lat', 'Long','density', 'medianage', 'urbanpop',
'hospibed','lung', 'avgtemp', 'avghumidity','days_since_jan1', 'days_since_firstcase']]
test_clean_cases = test_cl[['Lat', 'Long','density', 'medianage', 'urbanpop',
'hospibed','lung', 'avgtemp', 'avghumidity','days_since_jan1', 'days_since_firstcase']] | COVID19 Global Forecasting (Week 3) |
8,864,825 | train_relevant = train.drop(['Date','Temperature','Fuel_Price','MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5','CPI','Unemployment'],axis=1)
train_relevant<prepare_x_and_y> | train_clean_fatal = train_cl[['Lat', 'Long','density', 'medianage', 'urbanpop',
'hospibed','lung', 'avgtemp', 'avghumidity','days_since_jan1', 'days_since_firstcase','ConfirmedCases']]
test_clean_fatal = test_cl[['Lat', 'Long','density', 'medianage', 'urbanpop',
'hospibed','lung', 'avgtemp', 'avghumidity','days_since_jan1', 'days_since_firstcase']] | COVID19 Global Forecasting (Week 3) |
8,864,825 | y_relevant = train_relevant.loc[:, ['Weekly_Sales']]
x_relevant = train_relevant.drop(['Weekly_Sales'], axis=1)<split> | train_y1 = train_cl['ConfirmedCases']
train_y2 = train_cl['Fatalities'] | COVID19 Global Forecasting (Week 3) |
8,864,825 | x_train_relevant, x_test_relevant, y_train_relevant, y_test_relevant = train_test_split(x_relevant, y_relevant, test_size=0.2, random_state=0)
print(x_train_relevant.shape)
print(x_test_relevant.shape)<set_options> | dt_1 = DecisionTreeRegressor(max_depth=30, max_features=8, min_samples_split=2, min_samples_leaf=1)
dt_2 = DecisionTreeRegressor(max_depth=30, max_features=8, min_samples_split=2, min_samples_leaf=1)
dt_1.fit(train_clean_cases, train_y1)
dt_2.fit(train_clean_fatal, train_y2) | COVID19 Global Forecasting (Week 3) |
8,864,825 | clf = RandomForestRegressor(random_state=0)
pca = PCA()
pipe = Pipeline(steps=[('clf', clf)])
param_grid = [
    {
        'clf': [RandomForestRegressor()],
        'clf__n_estimators': [50, 100, 150],
        'clf__max_depth': [10, 20, 30]
    },
    {
        'clf': [ExtraTreesRegressor()],
        'clf__n_estimators': [50, 100, 150],
        'clf__max_depth': [10, 20, 30]
    },
    {
        'clf': [XGBRegressor()],
        'clf__learning_rate': [0.1, 0.05],
        'clf__min_samples_split': [5, 7, 9],
        'clf__max_depth': [10, 20, 30]
    }
]
rscv_relevant_tree = RandomizedSearchCV(pipe, param_grid, cv=3, scoring=my_score, n_jobs=-1)
model_relevant_tree = rscv_relevant_tree.fit(x_train_relevant, y_train_relevant)<find_best_params> | dt_train_cases_pred = dt_1.predict(train_clean_cases)
dt_train_fatal_pred = dt_2.predict(train_clean_fatal) | COVID19 Global Forecasting (Week 3) |
8,864,825 | rscv_relevant_tree.best_estimator_<predict_on_test> | dt_mse_train_cases = mean_squared_error(dt_train_cases_pred, train_y1)
dt_rmse_train_cases = np.sqrt(dt_mse_train_cases)
print("DT Regression MSE on train cases: %.4f" %dt_mse_train_cases)
print('DT Regression RMSE on train cases: %.4f' % dt_rmse_train_cases ) | COVID19 Global Forecasting (Week 3) |
8,864,825 | y_pred = rscv_relevant_tree.best_estimator_.predict(x_test_relevant)
print('WMAE:', WMAE(y_test_relevant, y_pred))<define_search_space> | dt_mse_train_fatalities = mean_squared_error(dt_train_fatal_pred, train_y2)
dt_rmse_train_fatalities = np.sqrt(dt_mse_train_fatalities)
print("DT Regression MSE on train fatalities: %.4f" % dt_mse_train_fatalities)
print('DT Regression RMSE on train fatalities: %.4f' % dt_rmse_train_fatalities) | COVID19 Global Forecasting (Week 3) |
8,864,825 | clf = RandomForestRegressor(random_state=0)
pipe = Pipeline(steps=[('clf', clf)])
param_grid_rf = [
    {
        'clf': [RandomForestRegressor()],
        'clf__n_estimators': [140, 150, 160],
        'clf__max_depth': [25, 30, 35],
        'clf__max_features': ['auto', 5, 6]
    }
]
gscv_rf1 = GridSearchCV(pipe, param_grid_rf, cv=3, scoring=my_score, n_jobs=-1)
model_rf1 = gscv_rf1.fit(x_train_relevant, y_train_relevant)<find_best_params> | dt_test_cases_pred = dt_1.predict(test_clean_cases)
dt_test_cases_pred = np.where(dt_test_cases_pred<0, 0, np.rint(dt_test_cases_pred)) | COVID19 Global Forecasting (Week 3) |
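The clipping-and-rounding idiom applied to the case predictions above can be verified on toy values:

```python
import numpy as np

preds = np.array([-3.2, 0.4, 17.6])
# Negatives become 0, everything else is rounded to the nearest integer
print(np.where(preds < 0, 0, np.rint(preds)))  # [ 0.  0. 18.]
```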
8,864,825 | gscv_rf1.best_estimator_<predict_on_test> | test_clean_fatal['ConfirmedCases']= dt_test_cases_pred | COVID19 Global Forecasting (Week 3) |
8,864,825 | y_pred_rf = gscv_rf1.best_estimator_.predict(x_test_relevant)
print('WMAE:', WMAE(y_test_relevant, y_pred_rf))<drop_column> | dt_test_fatal_pred = dt_2.predict(test_clean_fatal) | COVID19 Global Forecasting (Week 3) |
8,864,825 | date = test['Date']
test = test.drop(['Date'], axis=1)<predict_on_test> | submission['ForecastId'] = test_cl['ForecastId']
submission['ConfirmedCases'] = dt_test_cases_pred
submission['Fatalities'] = dt_test_fatal_pred | COVID19 Global Forecasting (Week 3) |
8,864,825 | test_relevant = test.drop(['Temperature','Fuel_Price','MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5','CPI', 'Unemployment'],axis=1)
test_relevant = test_relevant.sort_values(['Store', 'Dept'], ascending=[True, True])
y_pred_rf = gscv_rf1.best_estimator_.predict(test_relevant)<sort_values> | submission.to_csv('submission.csv', index=False) | COVID19 Global Forecasting (Week 3) |
8,808,306 | test_relevant['Date'] = date
test_relevant = test_relevant.sort_values(['Store', 'Dept'], ascending=[True, True])
test_relevant['Weekly_Sales'] = y_pred_rf
test_relevant<load_from_csv> | def RMSLE(pred, actual):
    return np.sqrt(np.mean(np.power((np.log(pred + 1) - np.log(actual + 1)), 2))) | COVID19 Global Forecasting (Week 3) |
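The RMSLE above is simply RMSE on log-shifted values; `np.log1p` expresses the same shift more robustly near zero. A sketch of the equivalent form:

```python
import numpy as np

def rmsle_log1p(pred, actual):
    # log1p(x) == log(x + 1), so this matches RMSLE(pred, actual) exactly
    return np.sqrt(np.mean((np.log1p(pred) - np.log1p(actual)) ** 2))

print(rmsle_log1p(np.array([10.0, 0.0]), np.array([12.0, 1.0])))
```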
8,808,306 | sampleSubmission = pd.read_csv('../input/walmart-recruiting-store-sales-forecasting/sampleSubmission.csv.zip', sep=',')<save_to_csv> | pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv")
train = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region','Province_State','Date'])
test = test.sort_values(['Country_Region','Province_State','Date']) | COVID19 Global Forecasting (Week 3) |
8,808,306 | sampleSubmission['Weekly_Sales'] = y_pred_rf
sampleSubmission.to_csv('submission.csv',index=False)
sampleSubmission<set_options> | feature_day = [1, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]
def CreateInput(data):
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        if train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0:
            fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if data['Date'].iloc[i] > fromday:
                day_delta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique():
    for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
        df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
        df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
        X_train = CreateInput(df_train)
        y_train_confirmed = df_train['ConfirmedCases'].ravel()
        y_train_fatalities = df_train['Fatalities'].ravel()
        X_pred = CreateInput(df_test)
        for day in sorted(feature_day, reverse=True):
            feature_use = 'Number day from ' + str(day) + ' case'
            idx = X_train[X_train[feature_use] == 0].shape[0]
            if X_train[X_train[feature_use] > 0].shape[0] >= 50:
                break
        adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
        adjusted_y_train_confirmed = y_train_confirmed[idx:]
        adjusted_y_train_fatalities = y_train_fatalities[idx:]
        idx = X_pred[X_pred[feature_use] == 0].shape[0]
        adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
        pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
        max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max()
        min_test_date = pred_data['Date'].min()
        model = ExponentialSmoothing(adjusted_y_train_confirmed, trend='additive').fit()
        y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
        y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values
        y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis=0)
        model = ExponentialSmoothing(adjusted_y_train_fatalities, trend='additive').fit()
        y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
        y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values
        y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis=0)
        pred_data['ConfirmedCases_hat'] = y_hat_confirmed
        pred_data['Fatalities_hat'] = y_hat_fatalities
        pred_data_all = pred_data_all.append(pred_data)
df_val = pd.merge(pred_data_all, train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']], on=['Date','Country_Region','Province_State'], how='left')
df_val | COVID19 Global Forecasting (Week 3) |
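The per-region forecaster above is statsmodels' Holt linear-trend model. A standalone sketch of the same call pattern on a toy series, assuming `statsmodels` is installed:

```python
import numpy as np
from statsmodels.tsa.api import ExponentialSmoothing

# Fit an additive-trend (Holt) model on a short toy series and forecast 5 steps ahead
history = np.array([1.0, 2.0, 4.0, 7.0, 11.0, 16.0, 22.0])
model = ExponentialSmoothing(history, trend='additive').fit()
print(model.forecast(5))
```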
8,808,306 | %matplotlib inline
<load_from_csv> | country = "Ukraine"
df_country = df_val[df_val['Country_Region'] == country].groupby(['Date','Country_Region']).sum().reset_index()
df_country | COVID19 Global Forecasting (Week 3) |
8,808,306 | <count_values><EOS> | submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']]
submission.columns = ['ForecastId','ConfirmedCases','Fatalities']
submission.to_csv('submission.csv', index=False) | COVID19 Global Forecasting (Week 3) |
8,803,205 | <SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<feature_engineering> | import pandas as pd
import datetime
import lightgbm as lgb
import numpy as np
from sklearn import preprocessing | COVID19 Global Forecasting (Week 3) |
8,803,205 | df_train['boilerplate'].replace(to_replace=r'"title":', value="",inplace=True,regex=True)
df_train['boilerplate'].replace(to_replace=r'"url":',value="",inplace=True,regex=True)
df_train['boilerplate'].replace(to_replace=r'{|}',value="",inplace=True,regex=True)
df_train['boilerplate']=df_train['boilerplate'].str.lower()
df_test['boilerplate'].replace(to_replace=r'"title":', value="",inplace=True,regex=True)
df_test['boilerplate'].replace(to_replace=r'"url":',value="",inplace=True,regex=True)
df_test['boilerplate'].replace(to_replace=r'{|}',value="",inplace=True,regex=True)
df_test['boilerplate'] = df_test['boilerplate'].str.lower()<load_pretrained> | train = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv")
test = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv")
sub = pd.read_csv("../input/covid19-global-forecasting-week-3/submission.csv") | COVID19 Global Forecasting (Week 3) |
8,803,205 | tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')<prepare_x_and_y> | train = train.append(test[test['Date']>'2020-04-07']) | COVID19 Global Forecasting (Week 3) |
8,803,205 | SEQ_length = 512
Xids = np.zeros((df_train.shape[0], SEQ_length))
Xmask = np.zeros((df_train.shape[0], SEQ_length))
y = np.zeros((df_train.shape[0], 1))
Xids_test = np.zeros((df_test.shape[0], SEQ_length))
Xmask_test = np.zeros((df_test.shape[0], SEQ_length))
Xids<categorify> | train['Date'] = pd.to_datetime(train['Date'], format='%Y-%m-%d') | COVID19 Global Forecasting (Week 3) |
8,803,205 | for i, sequence in enumerate(df_train['boilerplate']):
    tokens = tokenizer.encode_plus(sequence, max_length=SEQ_length, padding='max_length', add_special_tokens=True,
                                   truncation=True, return_token_type_ids=False, return_attention_mask=True,
                                   return_tensors='tf')
    Xids[i,:], Xmask[i,:], y[i,0] = tokens['input_ids'], tokens['attention_mask'], df_train.loc[i,'label']
for i, sequence in enumerate(df_test['boilerplate']):
    tokens = tokenizer.encode_plus(sequence, max_length=SEQ_length, padding='max_length', add_special_tokens=True,
                                   truncation=True, return_token_type_ids=False, return_attention_mask=True,
                                   return_tensors='tf')
    Xids_test[i,:], Xmask_test[i,:] = tokens['input_ids'], tokens['attention_mask']<set_options> | train['day_dist'] = train['Date']-train['Date'].min() | COVID19 Global Forecasting (Week 3) |
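For context, `encode_plus` returns a dict of padded ids and an attention mask; a standalone sketch of the call used above, assuming the `transformers` package and the same tokenizer:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
tokens = tokenizer.encode_plus("a short example sentence",
                               max_length=16, padding='max_length',
                               add_special_tokens=True, truncation=True,
                               return_token_type_ids=False,
                               return_attention_mask=True)
print(tokens['input_ids'])       # 16 ids, zero-padded on the right
print(tokens['attention_mask'])  # 1 for real tokens, 0 for padding
```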
8,803,205 | tf.config.get_visible_devices()<categorify> | train['day_dist'] = train['day_dist'].dt.days | COVID19 Global Forecasting (Week 3) |
8,803,205 | dataset = tf.data.Dataset.from_tensor_slices((Xids, Xmask, y))
def map_func(input_ids, mask, labels):
    return {'input_ids': input_ids, 'attention_mask': mask}, labels
dataset = dataset.map(map_func)
dataset = dataset.shuffle(100000).batch(32).prefetch(1000)
DS_size = len(list(dataset))
train = dataset.take(round(DS_size*0.85))
val = dataset.skip(round(DS_size*0.85))<categorify> | cat_cols = train.dtypes[train.dtypes=='object'].keys()
cat_cols | COVID19 Global Forecasting (Week 3) |
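One caveat with the split above: `shuffle` reshuffles on every epoch by default, so `take`/`skip` do not carve out a fixed train/validation partition. A sketch of a stable variant, assuming the same pipeline and hypothetical `train_ds`/`val_ds` names:

```python
# Pin the shuffle order so take/skip yield disjoint, reproducible subsets
dataset = dataset.shuffle(100000, reshuffle_each_iteration=False)
train_ds = dataset.take(round(DS_size * 0.85))
val_ds = dataset.skip(round(DS_size * 0.85))
```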
8,803,205 | dataset_test = tf.data.Dataset.from_tensor_slices((Xids_test, Xmask_test))
def map_func(input_ids, mask):
    return {'input_ids': input_ids, 'attention_mask': mask}
dataset_test = dataset_test.map(map_func)
dataset_test = dataset_test.batch(32).prefetch(1000)<choose_model_class> | for cat_col in cat_cols:
    train[cat_col].fillna('no_value', inplace=True) | COVID19 Global Forecasting (Week 3) |
8,803,205 | distil_bert = 'distilbert-base-uncased'
config = DistilBertConfig(dropout=0.2, attention_dropout=0.2)
config.output_hidden_states = False
transformer_model = TFDistilBertModel.from_pretrained(distil_bert, config=config)
input_ids_in = tf.keras.layers.Input(shape=(SEQ_length,), name='input_ids', dtype='int32')
input_masks_in = tf.keras.layers.Input(shape=(SEQ_length,), name='attention_mask', dtype='int32')
embedding_layer = transformer_model(input_ids_in, attention_mask=input_masks_in)[0]
X = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(50, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(embedding_layer)
X = tf.keras.layers.GlobalMaxPool1D()(X)
X = tf.keras.layers.Dense(50, activation='relu')(X)
X = tf.keras.layers.Dropout(0.2)(X)
X = tf.keras.layers.Dense(1, activation='sigmoid')(X)
model = tf.keras.Model(inputs=[input_ids_in, input_masks_in], outputs=X)
for layer in model.layers[:3]:
    layer.trainable = False<choose_model_class> | train['place'] = train['Province_State'] + '_' + train['Country_Region']
| COVID19 Global Forecasting (Week 3) |
8,803,205 | model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
              optimizer='adam',
              metrics=[tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])<train_model> | cat_cols = train.dtypes[train.dtypes=='object'].keys()
cat_cols | COVID19 Global Forecasting (Week 3) |
8,803,205 | history = model.fit(train, validation_data=val, epochs=3)<save_to_csv> | for cat_col in ['place']:
    le = preprocessing.LabelEncoder()
    le.fit(train[cat_col])
    train[cat_col] = le.transform(train[cat_col]) | COVID19 Global Forecasting (Week 3) |
8,803,205 | predictions=model.predict(dataset_test)
df_test['label']=predictions
df_test.to_csv('submission.csv', columns=['urlid','label'], index=False)<categorify> | drop_cols = ['Id','ForecastId', 'ConfirmedCases','Date', 'Fatalities',
'day_dist', 'Province_State', 'Country_Region'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | input_x = tf.data.Dataset.from_tensor_slices((Xids, Xmask, y))
def map_func(input_ids, mask, labels):
    return {'input_ids': input_ids, 'attention_mask': mask}
input_x = input_x.map(map_func)
input_x = input_x.shuffle(100000).batch(32).prefetch(1000)
y_true = y<predict_on_test> | val = train[(train['Date']>='2020-03-20')&(train['Id'].isnull() == False)]
| COVID19 Global Forecasting (Week 3) |
8,803,205 | y_pred=model.predict(dataset)
y_pred<prepare_output> | y_ft = train["Fatalities"]
y_val_ft = val["Fatalities"]
y_cc = train["ConfirmedCases"]
y_val_cc = val["ConfirmedCases"]
| COVID19 Global Forecasting (Week 3) |
8,803,205 | y_pred = np.round(y_pred)
y_pred<compute_test_metric> | def rmsle(y_true, y_pred):
    return np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))) | COVID19 Global Forecasting (Week 3) |
8,803,205 | print(metrics.classification_report(y_true, y_pred))<import_modules> | def mape(y_true, y_pred):
    return np.mean(np.abs(y_pred - y_true) * 100/(y_true + 1)) | COVID19 Global Forecasting (Week 3) |
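Both helpers above guard against zero actuals: `rmsle` via `log1p` and `mape` via the `+1` in the denominator. A quick check of the smoothed MAPE on hypothetical values:

```python
import numpy as np

y_true = np.array([0.0, 10.0])
y_pred = np.array([1.0, 12.0])
# |1-0|*100/(0+1) = 100; |12-10|*100/(10+1) ~= 18.18; mean ~= 59.09
print(np.mean(np.abs(y_pred - y_true) * 100 / (y_true + 1)))
```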
8,803,205 | import os
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_validate, GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score<define_variables> | dates = dates[dates>'2020-04-07'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | FILEDIR = Path('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament')<load_from_csv> | params = {
"objective": "regression",
"boosting": 'gbdt',
"num_leaves": 1280,
"learning_rate": 0.05,
"feature_fraction": 0.9,
"reg_lambda": 2,
"metric": "rmse",
'min_data_in_leaf':20
} | COVID19 Global Forecasting (Week 3) |
8,803,205 | sub = pd.read_csv(FILEDIR / 'MSampleSubmissionStage1_2020.csv', usecols=['ID'])
id_splited = sub['ID'].str.split('_', expand=True).astype(int).rename(columns={0: 'Season', 1: 'Team1', 2: 'Team2'})
sub = pd.concat([sub, id_splited], axis=1).set_index(['Season', 'Team1', 'Team2']).sort_index()<count_duplicates> | train[train['Date']==date] | COVID19 Global Forecasting (Week 3) |
8,803,205 | tourney_teams = {}
tourney_teams_all = set()
for season in sub.index.get_level_values('Season').drop_duplicates():
    tourney_teams[season] = set()
    tourney_teams[season].update(sub.loc[season].index.get_level_values('Team1'))
    tourney_teams[season].update(sub.loc[season].index.get_level_values('Team2'))
    tourney_teams_all.update(tourney_teams[season])
{k: len(v) for k, v in tourney_teams.items()}<load_from_csv> | test[test['Country_Region']=='Italy'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | conferences = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeamConferences.csv')
conferences = pd.concat(
    [conferences.query('Season == @season and TeamID in @teams') for season, teams in tourney_teams.items()])
conferences = conferences.set_index(['Season', 'TeamID']).sort_index()<load_from_csv> | test[(test['Country_Region']=='China')&(test['Province_State']=='Zhejiang')] | COVID19 Global Forecasting (Week 3) |
8,803,205 | coaches = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeamCoaches.csv')
coaches = pd.concat(
    [coaches.query('Season == @season and TeamID in @team') for season, team in tourney_teams.items()])
coaches = coaches[coaches['LastDayNum'] == 154].set_index(['Season', 'TeamID']).sort_index()[['CoachName']]<load_from_csv> | test[test['Country_Region']=='Italy'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | teams = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeams.csv', usecols=['TeamID', 'FirstD1Season'])
teams['FirstD1Season'] = 2020 - teams['FirstD1Season']
teams = pd.concat(
    [teams.query('TeamID in @team').assign(Season=season) for season, team in tourney_teams.items()])
teams = teams.set_index(['Season', 'TeamID']).sort_index()<load_from_csv> | train_sub = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv") | COVID19 Global Forecasting (Week 3) |
8,803,205 | seeds = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MNCAATourneySeeds.csv')
seeds = pd.concat(
    [seeds.query('Season == @season and TeamID in @teams') for season, teams in tourney_teams.items()])
seeds = seeds.set_index(['Season', 'TeamID']).sort_index()
seeds['Region'] = seeds['Seed'].str[0]
seeds['Number'] = seeds['Seed'].str[1:3].astype(int)
del seeds['Seed']<load_from_csv> | test = pd.merge(test, train_sub[['Province_State','Country_Region','Date','ConfirmedCases','Fatalities']],
                on=['Province_State','Country_Region','Date'], how='left') | COVID19 Global Forecasting (Week 3) |
8,803,205 | regular = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MRegularSeasonDetailedResults.csv')
regular = regular.drop(columns=['DayNum', 'LTeamID'])
regular = pd.concat(
    [regular.query('Season == @season and WTeamID in @teams') for season, teams in tourney_teams.items()])
regular = regular.groupby(['Season', 'WTeamID']).sum()
regular = regular.rename_axis(index=['Season', 'TeamID'])<concatenate> | test.loc[test['ConfirmedCases_x'].isnull() == True] | COVID19 Global Forecasting (Week 3) |
8,803,205 | ctcsr = pd.concat([coaches, teams, conferences, seeds, regular], axis=1)<load_from_csv> | test.loc[test['ConfirmedCases_x'].isnull() == True, 'ConfirmedCases_x'] = test.loc[test['ConfirmedCases_x'].isnull() == True, 'ConfirmedCases_y'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | result = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MNCAATourneyCompactResults.csv')
result = result[result['Season'] >= 2015].set_index(['Season', 'WTeamID', 'LTeamID'])<concatenate> | test.loc[test['Fatalities_x'].isnull() == True, 'Fatalities_x'] = test.loc[test['Fatalities_x'].isnull() == True, 'Fatalities_y'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | merged_teams = pd.concat(
    [ctcsr.loc[[(season, wteam), (season, lteam)], :] for season, wteam, lteam in result.index])
team1 = merged_teams.iloc[::2, :].reset_index('TeamID')
team2 = merged_teams.iloc[1::2, :].reset_index('TeamID')
merged_teams = pd.concat([
    pd.concat([team1.add_suffix('1'), team2.add_suffix('2')], axis=1).assign(Res=1),
    pd.concat([team2.add_suffix('1'), team1.add_suffix('2')], axis=1).assign(Res=0),
]).reset_index().set_index(['Season', 'TeamID1', 'TeamID2']).sort_index()<categorify> | last_amount = test.loc[(test['Country_Region']=='Italy')&(test['Date']=='2020-04-07'),'ConfirmedCases_x']
last_fat = test.loc[(test['Country_Region']=='Italy')&(test['Date']=='2020-04-07'),'Fatalities_x'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | x_columns = merged_teams.columns[merged_teams.columns != 'Res']
X = merged_teams[x_columns]
for column in X.select_dtypes(include='number'):
X[column] = MinMaxScaler().fit_transform(X[column].to_numpy().reshape(-1,1))
X = pd.get_dummies(X, columns=x_columns[X.dtypes == 'object'])<prepare_x_and_y> | i = 0
k = 30 | COVID19 Global Forecasting (Week 3) |
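The column-by-column scaling above can also be written as one transform over the numeric block; a sketch assuming the same `X`:

```python
from sklearn.preprocessing import MinMaxScaler

# Scale every numeric column of X to [0, 1] in a single fit_transform call
num_cols = X.select_dtypes(include='number').columns
X[num_cols] = MinMaxScaler().fit_transform(X[num_cols])
```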
8,803,205 | y = merged_teams['Res']<define_search_space> | test.loc[(test['Country_Region']=='Italy')] | COVID19 Global Forecasting (Week 3) |
8,803,205 | clfs = {}
clfs['SVC'] = {
'instance': SVC(probability=True),
'params': [
{'kernel': ['linear'], 'C': [0.01, 0.05, 0.1, 0.5, 1]},
{'kernel': ['rbf'], 'C': [1, 10, 50, 100, 250], 'gamma': [0.1, 0.2, 0.3]}
]
}
clfs['RandomForestClassifier'] = {
'instance': RandomForestClassifier(n_jobs=-1),
'params': {
'n_estimators': [25, 50, 100],
'criterion': ['gini', 'entropy'],
'max_depth': [10, 25, 50, None]
}
}
clfs['LogisticRegression'] = {
'instance': LogisticRegression(max_iter=500, n_jobs=-1),
'params': [
{'penalty': ['l2'], 'C': [0.1, 0.5, 1, 5, 10]},
{'penalty': ['l1'], 'solver': ['liblinear', 'saga'], 'C': [0.1, 0.5, 1, 5, 10]},
{'penalty': ['elasticnet'], 'C': [0.1, 0.5, 1, 5, 10], 'l1_ratio': [0.1, 0.3, 0.5, 0.7, 0.9]}
]
}<train_on_grid> | for date in dates:
k = k-1
i = i+1
test.loc[(test['Country_Region']=='Italy')&(test['Date']==date),
'ConfirmedCases_x']=last_amount.values[0] + i*(5000-(100*i))
test.loc[(test['Country_Region']=='Italy')&(test['Date']==date),
'Fatalities_x'] = last_fat.values[0]+i*(800-(10*i)) | COVID19 Global Forecasting (Week 3) |
8,803,205 | for clf_name, clf in clfs.items():
print('<{}>'.format(clf_name))
print(' training...'.format(clf_name))
gs = GridSearchCV(clf['instance'], param_grid=clf['params'], cv=5, n_jobs=-1)
gs.fit(X, y)
clfs[clf_name]['best_estimator'] = gs.best_estimator_
print(' best_score: {:.3f}'.format(gs.best_score_))
print(' best_params: {}'.format(gs.best_params_))<train_on_grid> | test.loc[(test['Country_Region']=='Spain')] | COVID19 Global Forecasting (Week 3) |
8,803,205 | vote = VotingClassifier(
    estimators=[(clf_name, clf['best_estimator']) for clf_name, clf in clfs.items()],
voting='soft',
n_jobs=-1
)
vote.fit(X, y)
vote.estimators_<compute_test_metric> | last_amount = test.loc[(test['Country_Region']=='Spain')&(test['Date']=='2020-04-07'),'ConfirmedCases_x']
last_fat = test.loc[(test['Country_Region']=='Spain')&(test['Date']=='2020-04-07'),'Fatalities_x']
i = 0
k = 30
for date in dates:
k = k-1
i = i+1
test.loc[(test['Country_Region']=='Spain')&(test['Date']==date),
'ConfirmedCases_x']=last_amount.values[0] + i*(5000-(100*i))
test.loc[(test['Country_Region']=='Spain')&(test['Date']==date),
'Fatalities_x'] = last_fat.values[0]+i*(800-(10*i)) | COVID19 Global Forecasting (Week 3) |
8,803,205 | for clf_name, clf in clfs.items():
    score = accuracy_score(y, clf['best_estimator'].predict(X))
    print(clf_name, score)
print('Vote', accuracy_score(y, vote.predict(X)))<predict_on_test> | last_amount = test.loc[(test['Country_Region']=='China')&(test['Province_State']!='Hubei')&(test['Date']=='2020-04-07'),'ConfirmedCases_x']
last_fat = test.loc[(test['Country_Region']=='China')&(test['Province_State']!='Hubei')&(test['Date']=='2020-04-07'),'Fatalities_x'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | predict_proba = pd.DataFrame(
    {clf_name: clf['best_estimator'].predict_proba(X)[:, 1] for clf_name, clf in clfs.items()},
index=X.index)
predict_proba['Vote'] = vote.predict_proba(X)[:, 1]
_ = predict_proba.plot(kind='hist', bins=50, grid=True, alpha=0.5, figsize=(16,8))<save_to_csv> | i = 0
k = 30
for date in dates:
k = k-1
i = i+1
test.loc[(test['Country_Region']=='China')&(test['Province_State']!='Hubei')&(test['Date']==date),
'Fatalities_x']= last_fat.values
test.loc[(test['Country_Region']=='China')&(test['Province_State']!='Hubei')&(test['Date']==date),
'ConfirmedCases_x']= last_amount.values + i | COVID19 Global Forecasting (Week 3) |
8,803,205 | columns = predict_proba.columns
for column in columns:
sub[column] = 0.5
mask = [idx for idx in sub.index if idx in X.index]
sub.loc[mask, columns] = predict_proba.loc[mask, columns]
for column in columns:
sub[['ID', column]].rename(columns={column: 'pred'}).to_csv('predict_proba-{}.csv'.format(column), index=False)<save_to_csv> | last_amount = test.loc[(test['Country_Region']=='China')&(test['Province_State']=='Hubei')&(test['Date']=='2020-04-07'),'ConfirmedCases_x']
last_fat = test.loc[(test['Country_Region']=='China')&(test['Province_State']=='Hubei')&(test['Date']=='2020-04-07'),'Fatalities_x'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | predict = pd.DataFrame(
    {clf_name: clf['best_estimator'].predict(X) for clf_name, clf in clfs.items()},
index=X.index)
predict['Vote'] = vote.predict(X)
columns = predict.columns
for column in columns:
sub[column] = 0.5
mask = [idx for idx in sub.index if idx in X.index]
sub.loc[mask, columns] = predict.loc[mask, columns]
for column in columns:
sub[['ID', column]].rename(columns={column: 'pred'}).to_csv('predict-{}.csv'.format(column), index=False)<load_from_csv> | k = 30
i=0
for date in dates:
k = k-1
i = i+1
test.loc[(test['Country_Region']=='China')&(test['Province_State']=='Hubei')&(test['Date']==date),'ConfirmedCases_x']= last_amount.values[0]
test.loc[(test['Country_Region']=='China')&(test['Province_State']=='Hubei')&(test['Date']==date),'Fatalities_x']= last_fat.values[0] + i | COVID19 Global Forecasting (Week 3) |
8,803,205 | import shutil
target_name = 'predict_proba-RandomForestClassifier.csv'
new_name = 'final-submission.csv'
shutil.copy(target_name, new_name)<import_modules> | sub.columns = ['ForecastId','ConfirmedCases','Fatalities'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
import lightgbm as lgb
import xgboost as xgb
from xgboost import XGBClassifier
import gc
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor<load_from_csv> | sub.columns=['ForecastId','ConfirmedCases','Fatalities'] | COVID19 Global Forecasting (Week 3) |
8,803,205 | Tourney_Compact_Results = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyCompactResults.csv')
Tourney_Seeds = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv')<load_from_csv> | sub.loc[sub['ConfirmedCases']<0,'ConfirmedCases'] = 0 | COVID19 Global Forecasting (Week 3) |
8,803,205 | RegularSeason_Compact_Results = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonCompactResults.csv')
MSeasons = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MSeasons.csv')
MTeams = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MTeams.csv')<merge> | sub.loc[sub['Fatalities']<0, 'Fatalities'] = 0 | COVID19 Global Forecasting (Week 3) |
8,803,205 | Tourney_Results_Compact=pd.merge(Tourney_Compact_Results, Tourney_Seeds, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
Tourney_Results_Compact.rename(columns={'Seed':'WinningSeed'},inplace=True)
Tourney_Results_Compact=Tourney_Results_Compact.drop(['TeamID'],axis=1)
Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, Tourney_Seeds, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
Tourney_Results_Compact.rename(columns={'Seed':'LoosingSeed'}, inplace=True)
Tourney_Results_Compact=Tourney_Results_Compact.drop(['TeamID','NumOT','WLoc'],axis=1)
Tourney_Results_Compact<drop_column> | sub.to_csv('submission.csv',index=False ) | COVID19 Global Forecasting (Week 3) |
8,757,927 | Tourney_Results_Compact=Tourney_Results_Compact.drop(['WScore','LScore'],axis=1)
Tourney_Results_Compact.head()<data_type_conversions> | X_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv')
X_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv')
X_train.rename(columns={'Country_Region':'Country'}, inplace=True)
X_test.rename(columns={'Country_Region':'Country'}, inplace=True)
X_train.rename(columns={'Province_State':'State'}, inplace=True)
X_test.rename(columns={'Province_State':'State'}, inplace=True)
X_train.Date = pd.to_datetime(X_train.Date, infer_datetime_format=True)
X_test.Date = pd.to_datetime(X_test.Date, infer_datetime_format=True)
EMPTY_VAL = "EMPTY_VAL"
def fillState(state, country):
if state == EMPTY_VAL: return country
return state | COVID19 Global Forecasting (Week 3) |
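`fillState` above substitutes the country for the placeholder row by row via `apply`; an equivalent vectorized form, assuming a frame with the renamed `State` and `Country` columns:

```python
import numpy as np

# Replace the EMPTY_VAL placeholder with the country name in one pass
X_train['State'] = np.where(X_train['State'] == EMPTY_VAL, X_train['Country'], X_train['State'])
```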
Tourney_Results_Compact['WinningSeed'] = Tourney_Results_Compact['WinningSeed'].str.extract(r'(\d+)', expand=True)
Tourney_Results_Compact['LoosingSeed'] = Tourney_Results_Compact['LoosingSeed'].str.extract(r'(\d+)', expand=True)
Tourney_Results_Compact.WinningSeed = pd.to_numeric(Tourney_Results_Compact.WinningSeed, errors='coerce')
Tourney_Results_Compact.LoosingSeed = pd.to_numeric(Tourney_Results_Compact.LoosingSeed, errors='coerce' )<rename_columns> | X_xTrain = X_train.copy()
X_xTrain.State.fillna(EMPTY_VAL, inplace=True)
X_xTrain.State = X_xTrain.loc[:, ['State', 'Country']].apply(lambda x : fillState(x['State'], x['Country']), axis=1)
X_xTrain.loc[:, 'Date'] = X_xTrain.Date.dt.strftime("%m%d")
X_xTrain.Date = X_xTrain.Date.astype(int)
X_xTest = X_test.copy()
X_xTest.State.fillna(EMPTY_VAL, inplace=True)
X_xTest.State = X_xTest.loc[:, ['State', 'Country']].apply(lambda x : fillState(x['State'], x['Country']), axis=1)
X_xTest.loc[:, 'Date'] = X_xTest.Date.dt.strftime("%m%d")
X_xTest.Date = X_xTest.Date.astype(int)
| COVID19 Global Forecasting (Week 3) |
8,757,927 | season_winning_team = RegularSeason_Compact_Results[['Season', 'WTeamID', 'WScore']]
season_losing_team = RegularSeason_Compact_Results[['Season', 'LTeamID', 'LScore']]
season_winning_team.rename(columns={'WTeamID':'TeamID','WScore':'Score'}, inplace=True)
season_losing_team.rename(columns={'LTeamID':'TeamID','LScore':'Score'}, inplace=True)
RegularSeason_Compact_Results = pd.concat((season_winning_team, season_losing_team)).reset_index(drop=True)
RegularSeason_Compact_Results<groupby> | le = preprocessing.LabelEncoder()
X_xTrain.Country = le.fit_transform(X_xTrain.Country)
X_xTrain.State = le.fit_transform(X_xTrain.State)
X_xTest.Country = le.fit_transform(X_xTest.Country)
X_xTest.State = le.fit_transform(X_xTest.State ) | COVID19 Global Forecasting (Week 3) |
8,757,927 | RegularSeason_Compact_Results_Final = RegularSeason_Compact_Results.groupby(['Season', 'TeamID'])['Score'].sum().reset_index()
RegularSeason_Compact_Results_Final<merge> | filterwarnings('ignore')
le = preprocessing.LabelEncoder()
countries = X_xTrain.Country.unique() | COVID19 Global Forecasting (Week 3) |
8,757,927 | Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, RegularSeason_Compact_Results_Final, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
Tourney_Results_Compact.rename(columns={'Score':'WScoreTotal'}, inplace=True)
Tourney_Results_Compact<save_to_csv> | xout = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
for country in countries:
states = X_xTrain.loc[X_xTrain.Country == country, :].State.unique()
for state in states:
X_xTrain_CS = X_xTrain.loc[(X_xTrain.Country == country)&(X_xTrain.State == state), ['State', 'Country', 'Date', 'ConfirmedCases', 'Fatalities']]
y1_xTrain_CS = X_xTrain_CS.loc[:, 'ConfirmedCases']
y2_xTrain_CS = X_xTrain_CS.loc[:, 'Fatalities']
X_xTrain_CS = X_xTrain_CS.loc[:, ['State', 'Country', 'Date']]
X_xTrain_CS.Country = le.fit_transform(X_xTrain_CS.Country)
X_xTrain_CS.State = le.fit_transform(X_xTrain_CS.State)
X_xTest_CS = X_xTest.loc[(X_xTest.Country == country)&(X_xTest.State == state), ['State', 'Country', 'Date', 'ForecastId']]
X_xTest_CS_Id = X_xTest_CS.loc[:, 'ForecastId']
X_xTest_CS = X_xTest_CS.loc[:, ['State', 'Country', 'Date']]
X_xTest_CS.Country = le.fit_transform(X_xTest_CS.Country)
X_xTest_CS.State = le.fit_transform(X_xTest_CS.State)
xmodel1 = DecisionTreeRegressor()
xmodel1.fit(X_xTrain_CS, y1_xTrain_CS)
y1_xpred = xmodel1.predict(X_xTest_CS)
xmodel2 = DecisionTreeRegressor()
xmodel2.fit(X_xTrain_CS, y2_xTrain_CS)
y2_xpred = xmodel2.predict(X_xTest_CS)
xdata = pd.DataFrame({'ForecastId': X_xTest_CS_Id, 'ConfirmedCases': y1_xpred, 'Fatalities': y2_xpred})
xout = pd.concat([xout, xdata], axis=0) | COVID19 Global Forecasting (Week 3) |
8,757,927 | <drop_column><EOS> | xout.ForecastId = xout.ForecastId.astype('int')
xout.tail()
xout.to_csv('submission.csv', index=False) | COVID19 Global Forecasting (Week 3) |
8,825,780 | <SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<rename_columns> | for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv')
| COVID19 Global Forecasting (Week 3) |
8,825,780 | Tourney_Win_Results.rename(columns={'WinningSeed':'Seed1', 'LoosingSeed':'Seed2', 'WScoreTotal':'ScoreT1', 'LScoreTotal':'ScoreT2'}, inplace=True )<prepare_output> | def fillState(state, country):
if state == "NA": return country
return state
def fixData(input_set):
input_set['Province_State'].fillna("NA", inplace=True)
input_set['Province_State'] = input_set.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1)
input_set['Date'] = pd.to_datetime(input_set['Date'], infer_datetime_format=True)
input_set.loc[:, 'Date'] = input_set.Date.dt.strftime("%m%d")
input_set["Date"] = input_set["Date"].astype(int)
return input_set
| COVID19 Global Forecasting (Week 3) |
8,825,780 | tourney_lose_result = Tourney_Win_Results.copy()
tourney_lose_result['Seed1'] = Tourney_Win_Results['Seed2']
tourney_lose_result['Seed2'] = Tourney_Win_Results['Seed1']
tourney_lose_result['ScoreT1'] = Tourney_Win_Results['ScoreT2']
tourney_lose_result['ScoreT2'] = Tourney_Win_Results['ScoreT1']
tourney_lose_result<feature_engineering> | X_train = df_train
X_test = df_test
X_train = fixData(X_train)
X_test = fixData(X_test)
X_train.head() | COVID19 Global Forecasting (Week 3) |
8,825,780 | Tourney_Win_Results['Seed_diff'] = Tourney_Win_Results['Seed1'] - Tourney_Win_Results['Seed2']
Tourney_Win_Results['ScoreT_diff'] = Tourney_Win_Results['ScoreT1'] - Tourney_Win_Results['ScoreT2']
tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2']
tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']<save_to_csv> | COVID19 Global Forecasting (Week 3) |
|
8,825,780 | Tourney_Win_Results['result'] = 1
tourney_lose_result['result'] = 0
tourney_result_Final = pd.concat((Tourney_Win_Results, tourney_lose_result)).reset_index(drop=True)
tourney_result_Final.to_csv('Tourneyvalidate.csv', index=False )<drop_column> | label_encoder = preprocessing.LabelEncoder()
countries = X_test.Country_Region.unique() | COVID19 Global Forecasting (Week 3) |
8,825,780 | tourney_result_Final1 = tourney_result_Final[[
'Seed1', 'Seed2', 'ScoreT1', 'ScoreT2', 'Seed_diff', 'ScoreT_diff', 'result']]<feature_engineering> | sub = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
sub = []
for country in countries:
province_list = X_train.loc[X_train['Country_Region'] == country].Province_State.unique()
for province in province_list:
X_train2 = X_train.loc[(X_train['Country_Region'] == country)&(X_train['Province_State'] == province),['Date']].astype('int')
Y_train21 = X_train.loc[(X_train['Country_Region'] == country)&(X_train['Province_State'] == province),['ConfirmedCases']]
Y_train22 = X_train.loc[(X_train['Country_Region'] == country)&(X_train['Province_State'] == province),['Fatalities']]
X_test2 = X_test.loc[(X_test['Country_Region'] == country)&(X_test['Province_State'] == province), ['Date']].astype('int')
X_forecastId2 = X_test.loc[(X_test['Country_Region'] == country)&(X_test['Province_State'] == province), ['ForecastId']]
X_forecastId2 = X_forecastId2.values.tolist()
X_forecastId2 = [v[0] for v in X_forecastId2]
model2 = XGBRegressor(n_estimators=1020)
model2.fit(X_train2, Y_train21)
Y_pred2 = model2.predict(X_test2)
model3 = XGBRegressor(n_estimators=1020)
model3.fit(X_train2, Y_train22)
Y_pred3 = model3.predict(X_test2)
for j in range(len(Y_pred2)) :
dic = { 'ForecastId': X_forecastId2[j], 'ConfirmedCases': Y_pred2[j], 'Fatalities': Y_pred3[j]}
sub.append(dic)
| COVID19 Global Forecasting (Week 3) |
8,825,780 | tourney_result_Final1.loc[lambda x: (x['Seed1'].isin([14,15,16])) & (x['Seed2'].isin([1,2,3])), 'result'] = 0<load_from_csv> | submission = pd.DataFrame(sub)
submission[['ForecastId','ConfirmedCases','Fatalities']].to_csv(path_or_buf='submission.csv', index=False) | COVID19 Global Forecasting (Week 3) |
8,825,780 | test_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv')<feature_engineering> | COVID19 Global Forecasting (Week 3) |
|
8,825,780 | test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4]))
test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9]))
test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14]))
test_df<merge> | COVID19 Global Forecasting (Week 3) |
|
8,791,119 | test_df = pd.merge(test_df, Tourney_Seeds, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Seed':'Seed1'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, Tourney_Seeds, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Seed':'Seed2'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)<save_to_csv> | from pandas_profiling import ProfileReport | COVID19 Global Forecasting (Week 3) |
8,791,119 | test_df = pd.merge(test_df, RegularSeason_Compact_Results_Final, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Score':'ScoreT1'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, RegularSeason_Compact_Results_Final, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Score':'ScoreT2'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df
test_df.to_csv('test_df_Test.csv', index=False )<data_type_conversions> | xtrain = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv')
xtest = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv')
xsubmission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/submission.csv') | COVID19 Global Forecasting (Week 3) |
8,791,119 | test_df['Seed1'] = test_df['Seed1'].str.extract(r'(\d+)', expand=True)
test_df['Seed2'] = test_df['Seed2'].str.extract(r'(\d+)', expand=True)
test_df.Seed1 = pd.to_numeric(test_df.Seed1, errors='coerce')
test_df.Seed2 = pd.to_numeric(test_df.Seed2, errors='coerce')<feature_engineering> | train_profile = ProfileReport(xtrain, title='Pandas Profiling Report', html={'style':{'full_width':True}})
train_profile | COVID19 Global Forecasting (Week 3) |
8,791,119 | test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2']
test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2']
test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1)
test_df<prepare_x_and_y> | xtrain.rename(columns={'Country_Region':'Country'}, inplace=True)
xtest.rename(columns={'Country_Region':'Country'}, inplace=True)
xtrain.rename(columns={'Province_State':'State'}, inplace=True)
xtest.rename(columns={'Province_State':'State'}, inplace=True)
xtrain['Date'] = pd.to_datetime(xtrain['Date'], infer_datetime_format=True)
xtest['Date'] = pd.to_datetime(xtest['Date'], infer_datetime_format=True)
xtrain.info()
xtest.info()
y1_xTrain = xtrain.iloc[:, -2]
y1_xTrain.head()
y2_xTrain = xtrain.iloc[:, -1]
y2_xTrain.head()
EMPTY_VAL = "EMPTY_VAL"
def fillState(state, country):
if state == EMPTY_VAL: return country
return state | COVID19 Global Forecasting (Week 3) |
8,791,119 | X = tourney_result_Final1.drop('result', axis=1)
y = tourney_result_Final1.result<normalization> | X_xTrain = xtrain.copy()
X_xTrain['State'].fillna(EMPTY_VAL, inplace=True)
X_xTrain['State'] = X_xTrain.loc[:, ['State', 'Country']].apply(lambda x : fillState(x['State'], x['Country']), axis=1)
X_xTrain.loc[:, 'Date'] = X_xTrain.Date.dt.strftime("%m%d")
X_xTrain["Date"] = X_xTrain["Date"].astype(int)
X_xTrain.head()
X_xTest = xtest.copy()
X_xTest['State'].fillna(EMPTY_VAL, inplace=True)
X_xTest['State'] = X_xTest.loc[:, ['State', 'Country']].apply(lambda x : fillState(x['State'], x['Country']), axis=1)
X_xTest.loc[:, 'Date'] = X_xTest.Date.dt.strftime("%m%d")
X_xTest["Date"] = X_xTest["Date"].astype(int)
X_xTest.head() | COVID19 Global Forecasting (Week 3) |
8,791,119 | df = pd.concat([X, test_df], axis=0, sort=False).reset_index(drop=True)
df_log = pd.DataFrame(
preprocessing.MinMaxScaler().fit_transform(df),
columns=df.columns,
index=df.index
)
train_log, test_log = df_log.iloc[:len(X), :], df_log.iloc[len(X):, :].reset_index(drop=True)<train_on_grid> | le = preprocessing.LabelEncoder()
X_xTrain.Country = le.fit_transform(X_xTrain.Country)
X_xTrain['State'] = le.fit_transform(X_xTrain['State'])
X_xTrain.head()
X_xTest.Country = le.fit_transform(X_xTest.Country)
X_xTest['State'] = le.fit_transform(X_xTest['State'])
X_xTest.head()
xtrain.head()
xtrain.loc[xtrain.Country == 'Afghanistan', :]
xtest.tail() | COVID19 Global Forecasting (Week 3) |
8,791,119 | logreg = LogisticRegression()
logreg.fit(train_log, y)
coeff_logreg = pd.DataFrame(train_log.columns.delete(0))
coeff_logreg.columns = ['feature']
coeff_logreg["score_logreg"] = pd.Series(logreg.coef_[0])
coeff_logreg.sort_values(by='score_logreg', ascending=False)<predict_on_test> | filterwarnings('ignore')
le = preprocessing.LabelEncoder()
countries = X_xTrain.Country.unique()
| COVID19 Global Forecasting (Week 3) |