kernel_id     int64           24.2k   23.3M
prompt        stringlengths   8       1.85M
completion    stringlengths   1       182k
comp_name     stringlengths   5       57
8,811,204
sub.ConfirmedCases = sub.ConfirmedCases.astype(int)
sub.Fatalities = sub.Fatalities.astype(int)
sub.ForecastId = sub.ForecastId.astype(int)<save_to_csv>
def create_time_feat(data):
    # Build calendar features from the 'date' column; use a local frame so the
    # train and test calls do not share state through a module-level DataFrame.
    df = pd.DataFrame()
    df['date'] = data['date']
    df['hour'] = df['date'].dt.hour
    df['weekofyear'] = df['date'].dt.weekofyear
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['dayofyear'] = df['date'].dt.dayofyear
    return df[['hour', 'weekofyear', 'quarter', 'month', 'dayofyear']]

cr_tr = create_time_feat(train)
cr_te = create_time_feat(test)
COVID19 Global Forecasting (Week 3)
8,811,204
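The create_time_feat cell above relies on Series.dt.weekofyear, which was deprecated and later removed from pandas. A minimal, self-contained sketch of the same calendar features using dt.isocalendar().week (available on pandas 1.1 and later); the toy date range and the compat function name are illustrative, not part of the original notebook.

import pandas as pd

dates = pd.DataFrame({'date': pd.date_range('2020-03-01', periods=5, freq='D')})

def create_time_feat_compat(data):
    # Same features as create_time_feat, but weekofyear comes from isocalendar().
    out = pd.DataFrame(index=data.index)
    d = pd.to_datetime(data['date'])
    out['hour'] = d.dt.hour
    out['weekofyear'] = d.dt.isocalendar().week.astype(int)
    out['quarter'] = d.dt.quarter
    out['month'] = d.dt.month
    out['dayofyear'] = d.dt.dayofyear
    return out

print(create_time_feat_compat(dates))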
sub.to_csv('submission.csv', index=False )<save_to_csv>
train_df = pd.concat([train, cr_tr], axis=1)
test_df = pd.concat([test, cr_te], axis=1)
test_df.dropna(inplace=True)
COVID19 Global Forecasting (Week 3)
8,811,204
sub.to_csv('submission.csv', index=False )<load_from_csv>
le = LabelEncoder()
train_df['cp_le'] = le.fit_transform(train_df['cp'])
test_df['cp_le'] = le.transform(test_df['cp'])
train_df.drop(['cp'], axis=1, inplace=True)
test_df.drop(['cp'], axis=1, inplace=True)
COVID19 Global Forecasting (Week 3)
8,811,204
df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/test.csv')
sample_submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/submission.csv')<define_variables>
def create_date_feat(data, cf, ft):
    # For every (date, encoded region) pair, copy the confirmed-case and
    # fatality values from each of the previous 14 calendar days into lag columns.
    for d in data['date'].drop_duplicates():
        for i in data['cp_le'].drop_duplicates():
            org_mask = (data['date'] == d) & (data['cp_le'] == i)
            for lag in range(1, 15):
                mask_loc = (data['date'] == (d - pd.Timedelta(days=lag))) & (data['cp_le'] == i)
                try:
                    data.loc[org_mask, 'cf_' + str(lag)] = data.loc[mask_loc, cf].values
                    data.loc[org_mask, 'ft_' + str(lag)] = data.loc[mask_loc, ft].values
                except:
                    data.loc[org_mask, 'cf_' + str(lag)] = 0.0
                    data.loc[org_mask, 'ft_' + str(lag)] = 0.0

create_date_feat(train_df, 'confirmedcases', 'fatalities')
COVID19 Global Forecasting (Week 3)
8,811,204
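The create_date_feat loop above does one row lookup per date, region and lag, which is slow on the full panel. A sketch of an equivalent lag construction with groupby().shift(), assuming (as in the notebook) one row per region per consecutive day and the lower-case 'confirmedcases'/'fatalities' column names; the toy frame below is made up for illustration.

import pandas as pd

df = pd.DataFrame({
    'cp_le': [0, 0, 0, 1, 1, 1],
    'date': pd.to_datetime(['2020-03-01', '2020-03-02', '2020-03-03'] * 2),
    'confirmedcases': [1, 3, 7, 2, 2, 5],
    'fatalities': [0, 0, 1, 0, 1, 1],
})
df = df.sort_values(['cp_le', 'date'])

for lag in range(1, 15):
    # shift(lag) pulls the value from `lag` rows earlier within each region;
    # missing history becomes NaN, filled with 0.0 as in the original loop.
    df[f'cf_{lag}'] = df.groupby('cp_le')['confirmedcases'].shift(lag).fillna(0.0)
    df[f'ft_{lag}'] = df.groupby('cp_le')['fatalities'].shift(lag).fillna(0.0)

print(df.head())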
df_train_original = df_train df_test_original = df_test<define_variables>
def rmsle(pred, true):
    # Root mean squared log error, used directly as the Keras loss.
    assert pred.shape[0] == true.shape[0]
    return K.sqrt(K.mean(K.square(K.log(pred + 1) - K.log(true + 1))))

es = EarlyStopping(monitor='val_loss', min_delta=0, verbose=0, patience=10, mode='auto')
mc_cf = ModelCheckpoint('model_cf.h5', monitor='val_loss', verbose=0, save_best_only=True)
mc_ft = ModelCheckpoint('model_ft.h5', monitor='val_loss', verbose=0, save_best_only=True)

def lstm_model(hidden_nodes, second_dim, third_dim):
    model = Sequential([
        LSTM(hidden_nodes, input_shape=(second_dim, third_dim), activation='relu'),
        Dense(64, activation='relu'),
        Dense(32, activation='relu'),
        Dense(1, activation='relu'),
    ])
    model.compile(loss=rmsle, optimizer='adam')
    return model

model_cf = lstm_model(10, tr_x_cf.shape[1], tr_x_cf.shape[2])
model_ft = lstm_model(10, tr_x_ft.shape[1], tr_x_ft.shape[2])
history_cf = model_cf.fit(tr_x_cf, tr_y_cf, epochs=200, batch_size=512,
                          validation_data=(val_x_cf, val_y_cf), callbacks=[es, mc_cf])
history_ft = model_ft.fit(tr_x_ft, tr_y_ft, epochs=200, batch_size=512,
                          validation_data=(val_x_ft, val_y_ft), callbacks=[es, mc_ft])
COVID19 Global Forecasting (Week 3)
8,811,204
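The LSTM cell above assumes its inputs are already reshaped to (samples, timesteps, features), which is what the tr_x_cf / tr_x_ft arrays are taken to be. A minimal NumPy-only sketch of that reshape for a single-timestep setup like this one; the array name and sizes are illustrative.

import numpy as np

# 100 samples, 19 tabular features per sample (e.g. cp_le + calendar + 14 lags).
flat = np.random.rand(100, 19).astype('float32')

# Keras LSTM layers consume 3-D input: (samples, timesteps, features).
# Each sample is treated as a sequence of length 1, matching input_shape=(1, 19).
seq = flat.reshape(flat.shape[0], 1, flat.shape[1])
print(flat.shape, '->', seq.shape)   # (100, 19) -> (100, 1, 19)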
df_train = df_train_original.replace(np.nan, '', regex=True)
df_test = df_test_original.replace(np.nan, '', regex=True)<feature_engineering>
feat = ['confirmedcases','fatalities','cf_1', 'ft_1', 'cf_2', 'ft_2', 'cf_3', 'ft_3', 'cf_4', 'ft_4', 'cf_5', 'ft_5', 'cf_6', 'ft_6', 'cf_7', 'ft_7', 'cf_8', 'ft_8', 'cf_9', 'ft_9', 'cf_10', 'ft_10', 'cf_11', 'ft_11', 'cf_12', 'ft_12', 'cf_13', 'ft_13', 'cf_14', 'ft_14'] c_feat = ['cp_le', 'weekofyear','quarter','month','dayofyear','cf_1', 'cf_2', 'cf_3', 'cf_4', 'cf_5', 'cf_6', 'cf_7', 'cf_8', 'cf_9','cf_10', 'cf_11', 'cf_12', 'cf_13', 'cf_14'] f_feat = ['cp_le', 'weekofyear','quarter','month','dayofyear','ft_1', 'ft_2', 'ft_3', 'ft_4', 'ft_5', 'ft_6', 'ft_7', 'ft_8', 'ft_9','ft_10', 'ft_11', 'ft_12', 'ft_13', 'ft_14'] tot_feat = ['cp_le', 'weekofyear','quarter','month','dayofyear','cf_1', 'ft_1', 'cf_2', 'ft_2', 'cf_3', 'ft_3', 'cf_4', 'ft_4', 'cf_5', 'ft_5', 'cf_6', 'ft_6', 'cf_7', 'ft_7', 'cf_8', 'ft_8', 'cf_9', 'ft_9', 'cf_10', 'ft_10', 'cf_11', 'ft_11', 'cf_12', 'ft_12', 'cf_13', 'ft_13', 'cf_14', 'ft_14'] test_new = test_df.copy().join(pd.DataFrame(columns=feat)) test_mask =(test_df['date'] <= train_df['date'].max()) train_mask =(train_df['date'] >= test_df['date'].min()) test_new.loc[test_mask,feat] = train_df.loc[train_mask, feat].values future_df = pd.date_range(start = train_df['date'].max() +pd.Timedelta(days=1),end=test_df['date'].max() , freq='1D') def create_add_trend_pred(data, cf, ft): for d in future_df: for i in data['cp_le'].drop_duplicates() : org_mask =(data['date']==d)&(data['cp_le']==i) for lag in range(1,15): mask_loc =(data['date']==(d-pd.Timedelta(days=lag)))&(data['cp_le']==i) try: data.loc[org_mask, 'cf_' + str(lag)]=data.loc[mask_loc,cf].values data.loc[org_mask, 'ft_' + str(lag)]=data.loc[mask_loc,ft].values except: data.loc[org_mask, 'cf_' + str(lag)]=0.0 data.loc[org_mask, 'ft_' + str(lag)]=0.0 test_x = data.loc[org_mask,tot_feat] test_x_cf = test_x[c_feat] test_x_cf = test_x_cf.to_numpy().reshape(1,-1) test_x_cf_reshape = test_x_cf.reshape(test_x_cf.shape[0],1,test_x_cf.shape[1]) test_x_ft = test_x[f_feat] test_x_ft = test_x_ft.to_numpy().reshape(1,-1) test_x_ft_reshape = test_x_ft.reshape(test_x_ft.shape[0],1,test_x_ft.shape[1]) data.loc[org_mask, cf] = model_cf.predict(test_x_cf_reshape) data.loc[org_mask, ft] = model_ft.predict(test_x_ft_reshape) create_add_trend_pred(test_new, 'confirmedcases', 'fatalities' )
COVID19 Global Forecasting (Week 3)
8,811,204
<filter><EOS>
sub_pred = pd.DataFrame({'ForecastId': test_id,
                         'ConfirmedCases': test_new['confirmedcases'],
                         'Fatalities': test_new['fatalities']})
sub_pred.to_csv('submission.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,797,635
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<groupby>
import os
from typing import Dict, List, Tuple

from joblib import Parallel, delayed
import pandas as pd
import numpy as np
from scipy.optimize import curve_fit  # public import; scipy.optimize.minpack is a private module
from scipy.optimize import least_squares
from xgboost import XGBRegressor
COVID19 Global Forecasting (Week 3)
8,797,635
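The evaluation metric named above, column-wise RMSLE (MCRMSLE), averages the root mean squared logarithmic error over the target columns. Written out in a standard form (not copied from the competition page):

\mathrm{RMSLE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\bigl(\log(\hat{y}_i + 1) - \log(y_i + 1)\bigr)^{2}},
\qquad
\mathrm{MCRMSLE} = \frac{1}{K}\sum_{k=1}^{K}\mathrm{RMSLE}_k

where \hat{y}_i is the prediction, y_i the ground truth, and K the number of target columns (here K = 2: ConfirmedCases and Fatalities).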
groups_train = df_train.groupby(['Country_Region', 'Province_State']) print(len(groups_train))<groupby>
def load_kaggle_csv(dataset: str, datadir: str) -> pd.DataFrame:
    df = pd.read_csv(f"{os.path.join(datadir, dataset)}.csv", parse_dates=["Date"])
    df['country'] = df["Country_Region"]
    if "Province_State" in df:
        df["Country_Region"] = np.where(
            df["Province_State"].isnull(),
            df["Country_Region"],
            df["Country_Region"] + "_" + df["Province_State"],
        )
        df.drop(columns="Province_State", inplace=True)
    if "ConfirmedCases" in df:
        df["ConfirmedCases"] = df.groupby("Country_Region")["ConfirmedCases"].cummax()
    if "Fatalities" in df:
        df["Fatalities"] = df.groupby("Country_Region")["Fatalities"].cummax()
    if "DayOfYear" not in df:
        df["DayOfYear"] = df["Date"].dt.dayofyear
    df["Date"] = df["Date"].dt.date
    return df


def RMSLE(actual: np.ndarray, prediction: np.ndarray) -> float:
    return np.sqrt(
        np.mean(np.power(np.log1p(np.maximum(0, prediction)) - np.log1p(actual), 2))
    )
COVID19 Global Forecasting (Week 3)
8,797,635
<sort_values>
train = load_kaggle_csv("train", "/kaggle/input/covid19-global-forecasting-week-3")
country_health_indicators = (
    pd.read_csv("/kaggle/input/country-health-indicators/country_health_indicators_v3.csv")
).rename(columns={'Country_Region': 'country'})
train = pd.merge(train, country_health_indicators, on="country", how="left")
COVID19 Global Forecasting (Week 3)
8,797,635
min_date_sorted = min_date.sort_values()<define_variables>
test = load_kaggle_csv("test", "/kaggle/input/covid19-global-forecasting-week-3")
test = pd.merge(test, country_health_indicators, on="country", how="left")
COVID19 Global Forecasting (Week 3)
8,797,635
for x,y in zip(min_date_sorted.index, min_date_sorted): print(x,y )<filter>
def logistic(x: np.ndarray, x0: float, L: float, k: float) -> np.ndarray:
    return L / (1 + np.exp(-k * (x - x0)))


def fit_single_logistic(x: np.ndarray, y: np.ndarray, maxfev: float) -> Tuple:
    # Randomly perturbed initial guess around (median day, last count, 0.1).
    p0 = [np.median(x), y[-1], 0.1]
    pn0 = p0 * (np.random.random(len(p0)) + [0.5, 1.0, 0.5])
    try:
        params, pcov = curve_fit(
            logistic,
            x,
            y,
            p0=pn0,
            maxfev=maxfev,
            sigma=np.maximum(1, np.sqrt(y)) * (0.1 + 0.9 * np.random.random()),
            bounds=([0, y[-1], 0.01], [200, 1e6, 1.5]),
        )
        pcov = pcov[np.triu_indices_from(pcov)]
    except (RuntimeError, ValueError):
        params = p0
        pcov = np.zeros(len(p0) * (len(p0) - 1))
    y_hat = logistic(x, *params)
    rmsle = RMSLE(y_hat, y)
    return (params, pcov, rmsle, y_hat)


def fit_logistic(
    df: pd.DataFrame,
    n_jobs: int = 8,
    n_samples: int = 80,
    maxfev: int = 8000,
    x_col: str = "DayOfYear",
    y_cols: List[str] = ["ConfirmedCases", "Fatalities"],
) -> pd.DataFrame:
    def fit_one(df: pd.DataFrame, y_col: str) -> Dict:
        best_rmsle = None
        best_params = None
        x = df[x_col].to_numpy()
        y = df[y_col].to_numpy()
        for (params, cov, rmsle, y_hat) in Parallel(n_jobs=n_jobs)(
            delayed(fit_single_logistic)(x, y, maxfev=maxfev) for i in range(n_samples)
        ):
            # Keep the random restart with the lowest error.
            if rmsle <= (best_rmsle or rmsle):
                best_rmsle = rmsle
                best_params = params
        result = {f"{y_col}_rmsle": best_rmsle}
        result.update({f"{y_col}_p_{i}": p for i, p in enumerate(best_params)})
        return result

    result = {}
    for y_col in y_cols:
        result.update(fit_one(df, y_col))
    return pd.DataFrame([result])


def predict_logistic(
    df: pd.DataFrame,
    x_col: str = "DayOfYear",
    y_cols: List[str] = ["ConfirmedCases", "Fatalities"],
):
    def predict_one(col):
        df[f"yhat_logistic_{col}"] = logistic(
            df[x_col].to_numpy(),
            df[f"{col}_p_0"].to_numpy(),
            df[f"{col}_p_1"].to_numpy(),
            df[f"{col}_p_2"].to_numpy(),
        )

    for y_col in y_cols:
        predict_one(y_col)
COVID19 Global Forecasting (Week 3)
8,797,635
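fit_single_logistic above fits the three-parameter logistic L / (1 + exp(-k (x - x0))) from randomly perturbed starting points. A self-contained sketch of one such fit on synthetic data; the "true" parameters and noise level below are chosen arbitrarily for illustration.

import numpy as np
from scipy.optimize import curve_fit

def logistic(x, x0, L, k):
    return L / (1 + np.exp(-k * (x - x0)))

rng = np.random.default_rng(0)
x = np.arange(60, dtype=float)                       # day-of-year style axis
y = logistic(x, x0=30.0, L=5000.0, k=0.25)           # toy epidemic curve
y = np.maximum(0, y + rng.normal(scale=50, size=x.size))  # noisy observations

p0 = [np.median(x), y[-1], 0.1]                      # same style of starting point as the notebook
params, _ = curve_fit(logistic, x, y, p0=p0, maxfev=8000,
                      bounds=([0, y[-1], 0.01], [200, 1e6, 1.5]))
print('x0=%.1f  L=%.0f  k=%.3f' % tuple(params))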
list(df_train[df_train['Country_Region'] == 'China']['Province_State'] )<filter>
train = pd.merge(
    train,
    train.groupby(["Country_Region"], observed=True, sort=False)
    .apply(lambda x: fit_logistic(x, n_jobs=8, n_samples=16, maxfev=16000))
    .reset_index(),
    on=["Country_Region"],
    how="left",
)
predict_logistic(train)
COVID19 Global Forecasting (Week 3)
8,797,635
df_train[(df_train['Country_Region'] == 'Pakistan')]<filter>
def apply_xgb_model(train, x_columns, y_column, xgb_params):
    X = train[x_columns].to_numpy()
    y = train[y_column].to_numpy()
    xgb_fit = XGBRegressor(**xgb_params).fit(X, y)
    y_hat = xgb_fit.predict(X)
    train[f"yhat_xgb_{y_column}"] = y_hat
    return RMSLE(y, y_hat), xgb_fit
COVID19 Global Forecasting (Week 3)
8,797,635
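apply_xgb_model above fits an XGBRegressor on the full training matrix and scores it on the same rows, so the RMSLE it returns measures fit rather than generalisation. A minimal sketch of the same call pattern on toy data; the feature matrix, target, and parameter values are made up.

import numpy as np
from xgboost import XGBRegressor

def rmsle(actual, prediction):
    return np.sqrt(np.mean(np.power(
        np.log1p(np.maximum(0, prediction)) - np.log1p(actual), 2)))

rng = np.random.default_rng(1)
X = rng.random((200, 5))
y = np.expm1(3 * X[:, 0] + rng.normal(scale=0.1, size=200))  # positive target

model = XGBRegressor(n_estimators=100, max_depth=4, learning_rate=0.15,
                     objective="reg:squarederror")
model.fit(X, y)
print("in-sample RMSLE:", rmsle(y, model.predict(X)))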
df_train[(df_train['Country_Region'] == 'US')&(df_train['Province_State'] == 'Washington')]<define_variables>
xgb_params = dict( gamma=0.2, learning_rate=0.15, n_estimators=100, max_depth=11, min_child_weight=1, nthread=8, objective="reg:squarederror") x_columns = [ 'DayOfYear', 'cases_growth', 'death_growth', 'Cardiovascular diseases(%)', 'Cancers(%)', 'Diabetes, blood, & endocrine diseases(%)', 'Respiratory diseases(%)', 'Liver disease(%)', 'Diarrhea & common infectious diseases(%)', 'Musculoskeletal disorders(%)', 'HIV/AIDS and tuberculosis(%)', 'Malaria & neglected tropical diseases(%)', 'Nutritional deficiencies(%)', 'pneumonia-death-rates', 'Share of deaths from smoking(%)', 'alcoholic_beverages', 'animal_fats', 'animal_products', 'aquatic_products,_other', 'cereals_-_excluding_beer', 'eggs', 'fish,_seafood', 'fruits_-_excluding_wine', 'meat', 'milk_-_excluding_butter', 'miscellaneous', 'offals', 'oilcrops', 'pulses', 'spices', 'starchy_roots', 'stimulants', 'sugar_&_sweeteners', 'treenuts', 'vegetable_oils', 'vegetables', 'vegetal_products', 'hospital_beds_per10k', 'hospital_density', 'nbr_surgeons', 'nbr_obstetricians', 'nbr_anaesthesiologists', 'medical_doctors_per10k', 'bcg_coverage', 'bcg_year_delta', 'population', 'median age', 'population growth rate', 'birth rate', 'death rate', 'net migration rate', 'maternal mortality rate', 'infant mortality rate', 'life expectancy at birth', 'total fertility rate', 'obesity - adult prevalence rate', 'school_shutdown_1case', 'school_shutdown_10case', 'school_shutdown_50case', 'school_shutdown_1death', 'FF_DayOfYear', 'case1_DayOfYear', 'case10_DayOfYear', 'case50_DayOfYear', 'yhat_logistic_ConfirmedCases', 'yhat_logistic_Fatalities'] xgb_c_rmsle, xgb_c_fit = apply_xgb_model( train, x_columns, "ConfirmedCases", xgb_params) xgb_f_rmsle, xgb_f_fit = apply_xgb_model( train, x_columns, "Fatalities", xgb_params )
COVID19 Global Forecasting (Week 3)
8,797,635
index = 0 for x,y in zip(min_date_sorted.index, min_date_sorted): print(index, x, y) index = index + 1<import_modules>
def interpolate(alpha, x0, x1):
    return x0 * alpha + x1 * (1 - alpha)


def RMSLE_interpolate(alpha, y, x0, x1):
    return RMSLE(y, interpolate(alpha, x0, x1))


def fit_hybrid(
    train: pd.DataFrame, y_cols: List[str] = ["ConfirmedCases", "Fatalities"]
) -> pd.DataFrame:
    def fit_one(y_col: str):
        # One blend weight per target column, bounded to [0, 1].
        opt = least_squares(
            fun=RMSLE_interpolate,
            args=(
                train[y_col],
                train[f"yhat_logistic_{y_col}"],
                train[f"yhat_xgb_{y_col}"],
            ),
            x0=(0.5,),
            bounds=(0.0, 1.0),
        )
        return {f"{y_col}_alpha": opt.x[0], f"{y_col}_cost": opt.cost}

    result = {}
    for y_col in y_cols:
        result.update(fit_one(y_col))
    return pd.DataFrame([result])


def predict_hybrid(
    df: pd.DataFrame,
    x_col: str = "DayOfYear",
    y_cols: List[str] = ["ConfirmedCases", "Fatalities"],
):
    def predict_one(col):
        df[f"yhat_hybrid_{col}"] = interpolate(
            df[f"{col}_alpha"].to_numpy(),
            df[f"yhat_logistic_{col}"].to_numpy(),
            df[f"yhat_xgb_{col}"].to_numpy(),
        )

    for y_col in y_cols:
        predict_one(y_col)
COVID19 Global Forecasting (Week 3)
8,797,635
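fit_hybrid above picks, per region and per target, a single weight alpha so that interpolate(alpha, yhat_logistic, yhat_xgb) minimises RMSLE against the training counts. A toy sketch of that one-parameter optimisation; the prediction vectors below are fabricated, and the residual function returns log-space residuals rather than the scalar RMSLE, which leads to the same minimiser.

import numpy as np
from scipy.optimize import least_squares

def interpolate(alpha, x0, x1):
    # Convex blend of two prediction vectors.
    return x0 * alpha + x1 * (1 - alpha)

y_true = np.array([10.0, 40.0, 90.0, 160.0])
y_logistic = np.array([12.0, 38.0, 95.0, 150.0])   # pretend curve-fit output
y_xgb = np.array([8.0, 45.0, 80.0, 180.0])         # pretend tree-model output

def residuals(alpha):
    blend = interpolate(alpha[0], y_logistic, y_xgb)
    return np.log1p(np.maximum(0, blend)) - np.log1p(y_true)

opt = least_squares(residuals, x0=[0.5], bounds=(0.0, 1.0))
print("alpha =", opt.x[0])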
import matplotlib.pyplot as plt<define_variables>
train = pd.merge(
    train,
    train.groupby(["Country_Region"], observed=True, sort=False)
    .apply(lambda x: fit_hybrid(x))
    .reset_index(),
    on=["Country_Region"],
    how="left",
)
COVID19 Global Forecasting (Week 3)
8,797,635
index = 34<filter>
predict_hybrid(train )
COVID19 Global Forecasting (Week 3)
8,797,635
record = df_train[(df_train['Country_Region'] == min_date_sorted.index[index][0])&(df_train['Province_State'] == min_date_sorted.index[index][1])]<train_model>
print(
    "Confirmed: "
    f'Logistic\t{RMSLE(train["ConfirmedCases"], train["yhat_logistic_ConfirmedCases"])} '
    f'XGBoost\t{RMSLE(train["ConfirmedCases"], train["yhat_xgb_ConfirmedCases"])} '
    f'Hybrid\t{RMSLE(train["ConfirmedCases"], train["yhat_hybrid_ConfirmedCases"])} '
    f"Fatalities: "
    f'Logistic\t{RMSLE(train["Fatalities"], train["yhat_logistic_Fatalities"])} '
    f'XGBoost\t{RMSLE(train["Fatalities"], train["yhat_xgb_Fatalities"])} '
    f'Hybrid\t{RMSLE(train["Fatalities"], train["yhat_hybrid_Fatalities"])} '
)
COVID19 Global Forecasting (Week 3)
8,797,635
x = np.linspace(0, 10, num = 40) y = 3.45 * np.sin(1.334 * x)+ np.random.normal(size = 40) def test(x, a, b): return a * np.sin(b * x) param, param_cov = curve_fit(test, x, y )<feature_engineering>
test = pd.merge(
    test,
    train[
        ["Country_Region"]
        + ['ConfirmedCases_p_0', 'ConfirmedCases_p_1', 'ConfirmedCases_p_2']
        + ['Fatalities_p_0', 'Fatalities_p_1', 'Fatalities_p_2']
        + ["Fatalities_alpha"]
        + ["ConfirmedCases_alpha"]
    ].groupby(['Country_Region']).head(1),
    on="Country_Region",
    how="left",
)
COVID19 Global Forecasting (Week 3)
8,797,635
record = df_train[(df_train['Country_Region'] == min_date_sorted.index[index][0])&(df_train['Province_State'] == min_date_sorted.index[index][1])] record = record[record['ConfirmedCases'] > 0] base_date_object = datetime.strptime('2020-01-22', "%Y-%m-%d" ).date() record['days'] = [(datetime.strptime(date, "%Y-%m-%d" ).date() - base_date_object ).days + 1 for date in record['Date']] <prepare_x_and_y>
predict_logistic(test)
test["yhat_xgb_ConfirmedCases"] = xgb_c_fit.predict(test[x_columns].to_numpy())
test["yhat_xgb_Fatalities"] = xgb_f_fit.predict(test[x_columns].to_numpy())
predict_hybrid(test)
COVID19 Global Forecasting (Week 3)
8,797,635
record2 = record[record['Fatalities'] > 0] x = record['days'].values x2 = record2['days'].values y1 = record['ConfirmedCases'].values y2 = record2['Fatalities'].values<normalization>
submission = (
    test[["ForecastId", "yhat_hybrid_ConfirmedCases", "yhat_hybrid_Fatalities"]]
    .round()
    .astype(int)
    .rename(
        columns={
            "yhat_hybrid_ConfirmedCases": "ConfirmedCases",
            "yhat_hybrid_Fatalities": "Fatalities",
        }
    )
)
submission["ConfirmedCases"] = np.maximum(0, submission["ConfirmedCases"])
submission["Fatalities"] = np.maximum(0, submission["Fatalities"])
COVID19 Global Forecasting (Week 3)
8,797,635
from numpy import exp, log  # element-wise versions needed when curve_fit passes arrays


def gaussian(x, amp, cen, wid):
    return amp * exp(-(x - cen) ** 2 / wid)


def test(x, a, b, c):
    # Plain logistic curve.
    return a * 1 / (1 + exp(-b * (x - c)))


def test_linear(x, a, b, c, d, e, f):
    # Logistic plus a softplus-style linear tail.
    return a * 1 / (1 + exp(-b * (x - c))) + d * log(1 + exp(x - e)) - f


def custom(x, a, b, c, d, e, f, g):
    # Product of two logistics (two-wave shape).
    return a * 1 / (1 + exp(-(x - b) / c)) * (d * 1 / (1 + exp(-(x - e) / f)) + g)<feature_engineering>
submission.to_csv("submission.csv", index=False )
COVID19 Global Forecasting (Week 3)
8,797,635
y_max_ = y1[-1] y1_prime = np.diff(y1) y1_prime2 = np.diff(y1_prime) if len(y1)>0 and len(y1_prime)> 0 and len(y1_prime2)> 0: max_slope_index = len(y1_prime)- 1 - list(y1_prime)[::-1].index(max(y1_prime)) max_slope_range =(max_slope_index+1)/len(y1_prime) y_max_ = y1[-1] if max_slope_range < 0.75: if y1_prime[max_slope_index] > 0 and max_slope_range < 0.75 and(((y1_prime[max_slope_index] - max(y1_prime[-2:])) /y1_prime[max_slope_index])< 0.5): y_max_ = y1[-1] pass else: y_max_ = y1[max_slope_index + 1] pass else: y_max_ = y1[-1]<find_best_params>
submission.to_csv("submission.csv", index=False )
COVID19 Global Forecasting (Week 3)
8,805,462
param<categorify>
warnings.filterwarnings("ignore" )
COVID19 Global Forecasting (Week 3)
8,805,462
y1_pred = test(x,param[0], param[1], param[2]) base_x = range(61,100,1) y1_pred_test = test(base_x, param[0], param[1], param[2]) <feature_engineering>
df_train = pd.read_csv("../input/covid19-global-forecasting-week-3/train.csv")
df_test = pd.read_csv("../input/covid19-global-forecasting-week-3/test.csv")
df_sub = pd.read_csv("../input/covid19-global-forecasting-week-3/submission.csv")
COVID19 Global Forecasting (Week 3)
8,805,462
( x2[-1] - x2[0])/2 + x2[0]<compute_test_metric>
print(df_train.shape) print(df_test.shape) print(df_sub.shape )
COVID19 Global Forecasting (Week 3)
8,805,462
y2_pred = test(x2,param2[0], param2[1], param2[2] )<prepare_x_and_y>
print(f"Unique Countries: {len(df_train.Country_Region.unique())}") train_dates=list(df_train.Date.unique()) print(f"Period : {len(df_train.Date.unique())} days") print(f"From : {df_train.Date.min() } To : {df_train.Date.max() }" )
COVID19 Global Forecasting (Week 3)
8,805,462
base_x = range(61,100,1) print(len(base_x)) base_y1 = test(base_x,param[0], param[1], param[2]) base_y2 = test(base_x,param2[0], param2[1], param2[2] )<define_variables>
print(f"Unique Regions: {df_train.shape[0]/75}") df_train.Country_Region.value_counts()
COVID19 Global Forecasting (Week 3)
8,805,462
day_index_pred = 0 diff1_list = [] diff2_list = [] for day in base_x: if day in x: day_index = np.where(x == day) diff1 = y1[day_index] - base_y1[day_index_pred] diff1_list.append(diff1) if day in x2: day_index = np.where(x2 == day) diff2 = y2[day_index] - base_y2[day_index_pred] diff2_list.append(diff2) day_index_pred = day_index_pred + 1 diff1_mean = np.max(diff1_list) diff2_mean = np.max(diff2_list) if np.isnan(diff1_mean): pass else: base_y1_mod = list(np.array(base_y1)+ diff1_mean) if np.isnan(diff2_mean): pass else: base_y2_mod = list(np.array(base_y2)+ diff2_mean) base_y1_pred = [int(n)for n in base_y1_mod] base_y2_pred = [int(m)for m in base_y2_mod]<groupby>
print(f"Number of rows without Country_Region : {df_train.Country_Region.isna().sum() }") df_train["UniqueRegion"]=df_train.Country_Region df_train.UniqueRegion[df_train.Province_State.isna() ==False]=df_train.Province_State+" , "+df_train.Country_Region df_train[df_train.Province_State.isna() ==False]
COVID19 Global Forecasting (Week 3)
8,805,462
test_groups = df_test.groupby(['Country_Region', 'Province_State'] )<feature_engineering>
df_train.drop(labels=["Id","Province_State","Country_Region"], axis=1, inplace=True) df_train
COVID19 Global Forecasting (Week 3)
8,805,462
index = 0 for key_,_ in zip(min_date_sorted.index, min_date_sorted): record = df_train[(df_train['Country_Region'] == key_[0])&(df_train['Province_State'] == key_[1])] record['days'] = [(datetime.strptime(date, "%Y-%m-%d" ).date() - base_date_object ).days + 1 for date in record['Date']] x = record['days'] y1 = record['ConfirmedCases'] y2 = record['Fatalities'] y1_prime = np.diff(y1) stage0 = False stage1 = False stage2 = False stage3 = False count1 = 0 count2 = 0 for start in range(len(y1_prime)-3): if sum(y1_prime[start:start+3])<=12: count1 = count1 + 1 count2 = 0 else: count2 = count2 + 1 count1 = 0 if not stage0 and count2 == 0 and count1 > 2: stage0 = True count1 = 0 if not stage1 and count1 == 0 and count2 > 5: stage0 = True stage1 = True count2 = 0 if stage1 and count2 == 0 and count1 > 3: stage2 = True count1 = 0 if stage2 and count1 == 0 and count2 > 2: stage3 = True count2 = 0 if stage3: print(index, key_) print(y1_prime) plt.plot(x, y1, label = "Confirmed Cases") plt.xlabel('Date') plt.ylabel('Label') plt.title(str(key_[0])+ " " + str(key_[1])+ ' - Confirmed Cases') plt.show() index = index + 1<define_variables>
only_train_dates=set(train_dates)-set(test_dates) print("Only train dates : ",len(only_train_dates)) intersection_dates=set(test_dates)&set(train_dates) print("Intersection dates : ",len(intersection_dates)) only_test_dates=set(test_dates)-set(train_dates) print("Only Test dates : ",len(only_test_dates))
COVID19 Global Forecasting (Week 3)
8,805,462
total_confirmed = 0 total_fatalities = 0 rate = [] max_y1 = [] max_y2 = [] details = [] for index, start_date in zip(min_date_sorted.index, min_date_sorted): print(index, start_date) record = df_train[(df_train['Country_Region'] == index[0])&(df_train['Province_State'] == index[1])] if len(record[record['ConfirmedCases'] > 0])!= 0: record = record[record['ConfirmedCases'] > 0] record2 = record if len(record[record['Fatalities'] > 0])!= 0: record2 = record[record['Fatalities'] > 0] y1 = record['ConfirmedCases'].values y2 = record2['Fatalities'].values b = -1 bad_index = 0 mod_count = 0 y1_copy = list(y1) for a in y1: if a < b: y1[bad_index] = b mod_count = mod_count + 1 else: b = a bad_index = bad_index + 1 b = -1 bad_index = 0 mod_count = 0 y2_copy = list(y2) for a in y2: if a < b: y2[bad_index] = b mod_count = mod_count + 1 else: b = a bad_index = bad_index + 1 y1_prime = np.diff(y1) y1_prime2 = np.diff(y1_prime) y_max_ = y1[-1]*2 + 1500 if len(y1)>0 and len(y1_prime)> 0 and len(y1_prime2)> 0: max_slope_index = len(y1_prime)- 1 - list(y1_prime)[::-1].index(max(y1_prime)) max_slope_range =(max_slope_index+1)/len(y1_prime) if max_slope_range < 0.75: if y1_prime[max_slope_index] > 0 and max_slope_range < 0.5 and(((y1_prime[max_slope_index] - max(y1_prime[-2:])) /y1_prime[max_slope_index])< 0.5): y_max_ = y1[-1]*2 + 1500 pass else: y_max_ = y1[max_slope_index + 1]*2 + 1500 pass else: y_max_ = y1[-1]*2 + 1500 ratio = 0 if y2[-1] > 0: ratio = y1[-1]/y2[-1] else: ratio = y1[-1] max_y1.append(y1[-1]) max_y2.append(y2[-1]) rate.append(ratio) details.append(" ".join([str(x)for x in [y1[-1], " ------- ", y2[-1], " ---- ", ratio, " --------------- ", record['Date'].values[-1], " ---- ", index, "----", list(min_date_sorted.index ).index(index)]])) total_confirmed = total_confirmed + y1[-1] total_fatalities = total_fatalities + y2[-1] print(total_confirmed/total_fatalities )<define_variables>
df_test_temp=pd.DataFrame() df_test_temp["Date"]=df_test.Date df_test_temp["ConfirmedCases"]=0.0 df_test_temp["Fatalities"]=0.0 df_test_temp["UniqueRegion"]=df_test.UniqueRegion df_test_temp["Delta"]=1.0
COVID19 Global Forecasting (Week 3)
8,805,462
for a1, a2, b, c in zip(max_y1, max_y2, rate, details): print(c )<categorify>
%%time final_df=pd.DataFrame(columns=["Date","ConfirmedCases","Fatalities","UniqueRegion"]) for region in df_train.UniqueRegion.unique() : df_temp=df_train[df_train.UniqueRegion==region].reset_index() df_temp["Delta"]=1.0 size_train=df_temp.shape[0] for i in range(1,df_temp.shape[0]): if(df_temp.ConfirmedCases[i-1]>0): df_temp.Delta[i]=df_temp.ConfirmedCases[i]/df_temp.ConfirmedCases[i-1] n=5 delta_avg=df_temp.tail(n ).Delta.mean() delta_list=df_temp.tail(n ).Delta death_rate=df_temp.tail(1 ).Fatalities.sum() /df_temp.tail(1 ).ConfirmedCases.sum() df_test_app=df_test_temp[df_test_temp.UniqueRegion==region] df_test_app=df_test_app[df_test_app.Date>df_temp.Date.max() ] X=np.arange(1,n+1 ).reshape(-1,1) Y=delta_list model=LinearRegression() model.fit(X,Y) df_temp=pd.concat([df_temp,df_test_app]) df_temp=df_temp.reset_index() for i in range(size_train, df_temp.shape[0]): n=n+1 df_temp.Delta[i]=max(1,model.predict(np.array([n] ).reshape(-1,1)) [0]) df_temp.ConfirmedCases[i]=round(df_temp.ConfirmedCases[i-1]*df_temp.Delta[i],0) df_temp.Fatalities[i]=round(death_rate*df_temp.ConfirmedCases[i],0) size_test=df_temp.shape[0]-df_test_temp[df_test_temp.UniqueRegion==region].shape[0] df_temp=df_temp.iloc[size_test:,:] df_temp=df_temp[["Date","ConfirmedCases","Fatalities","UniqueRegion"]] final_df=pd.concat([final_df,df_temp], ignore_index=True) final_df.shape
COVID19 Global Forecasting (Week 3)
8,805,462
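The loop above forecasts each region by modelling the day-over-day growth ratio (Delta), fitting a LinearRegression to the last n ratios, and rolling the last confirmed count forward with the predicted ratio floored at 1.0, with fatalities as a fixed death-rate fraction of cases. A compressed sketch of that recursion on a toy series; the counts, death rate, and horizon are made up.

import numpy as np
from sklearn.linear_model import LinearRegression

cases = np.array([10, 14, 20, 27, 35, 44], dtype=float)   # toy cumulative counts
death_rate = 0.03                                          # toy fatality ratio
n = 5

# Day-over-day growth ratios of the last n observations.
deltas = cases[-n:] / cases[-n - 1:-1]
X = np.arange(1, n + 1).reshape(-1, 1)
model = LinearRegression().fit(X, deltas)

preds_cases, preds_deaths = [], []
last = cases[-1]
for step in range(1, 8):                                   # forecast 7 days ahead
    delta = max(1.0, model.predict(np.array([[n + step]]))[0])  # never let counts shrink
    last = round(last * delta)
    preds_cases.append(last)
    preds_deaths.append(round(death_rate * last))

print(preds_cases)
print(preds_deaths)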
for a1, a2, b, c in zip(max_y1, max_y2, rate, details): if(a1 < 100 and a2 < 4 and b < avg): print(c) pass else: pass<create_dataframe>
df_sub.Fatalities=final_df.Fatalities df_sub.ConfirmedCases=final_df.ConfirmedCases df_sub.to_csv("submission.csv", index=None )
COVID19 Global Forecasting (Week 3)
8,733,262
df = pd.DataFrame(columns = ['ForecastId','ConfirmedCases','Fatalities']) df_hr = pd.DataFrame(columns = ['ForecastId', 'Country_Region', 'Province_State', 'Days', 'ConfirmedCases','Fatalities','Date']) <define_variables>
data = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/train.csv")
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/test.csv")
COVID19 Global Forecasting (Week 3)
8,733,262
public_start_date = '2020-03-19' public_end_date = '2020-04-01' count = 0 for index, start_date in zip(min_date_sorted.index, min_date_sorted): print(list(min_date_sorted.index ).index(index), index, start_date) record = df_train[(df_train['Country_Region'] == index[0])&(df_train['Province_State'] == index[1])] if len(record[record['ConfirmedCases'] > 0])== 0: pass else: record = record[record['ConfirmedCases'] > 0] base_date_object = datetime.strptime(start_date, "%Y-%m-%d" ).date() public_start_date_object = datetime.strptime(public_start_date, "%Y-%m-%d" ).date() public_end_date_object = datetime.strptime(public_end_date, "%Y-%m-%d" ).date() record['days'] = [(datetime.strptime(date, "%Y-%m-%d" ).date() - base_date_object ).days + 1 for date in record['Date']] public_start_day =(public_start_date_object - base_date_object ).days + 1 public_end_day =(public_end_date_object - base_date_object ).days + 1 if len(record[record['days'] < public_start_day])> 0: record = record[record['days'] < public_start_day] record2 = record if len(record[record['Fatalities'] > 0])!= 0: record2 = record[record['Fatalities'] > 0] x = record['days'].values x2 = record2['days'].values y1 = record['ConfirmedCases'].values y2 = record2['Fatalities'].values b = -1 bad_index = 0 mod_count = 0 for a in y1: if a < b: y1[bad_index] = b mod_count = mod_count + 1 else: b = a bad_index = bad_index + 1 if mod_count > 0: print("*****************") print(list(min_date_sorted.index ).index(index), index) print(mod_count) print(y1) print("*****************") b = -1 bad_index = 0 mod_count = 0 for a in y2: if a < b: y2[bad_index] = b mod_count = mod_count + 1 else: b = a bad_index = bad_index + 1 if mod_count > 0: print("*****************") print(list(min_date_sorted.index ).index(index), index) print(mod_count) print(y2) print("*****************") if len(y1)> 0: y_max_ = y1[-1] y1_prime = np.diff(y1) y1_prime2 = np.diff(y1_prime) if len(y1)>0 and len(y1_prime)> 0 and len(y1_prime2)> 0: max_slope_index = len(y1_prime)- 1 - list(y1_prime)[::-1].index(max(y1_prime)) max_slope_range =(max_slope_index+1)/len(y1_prime) y_max_ = y1[-1] if max_slope_range < 0.75: if y1_prime[max_slope_index] > 0 and max_slope_range < 0.75 and(((y1_prime[max_slope_index] - max(y1_prime[-2:])) /y1_prime[max_slope_index])< 0.5): y_max_ = y1[-1] pass else: y_max_ = y1[max_slope_index + 1] pass else: y_max_ = y1[-1] else: y_max_ = 0 stage0 = False stage1 = False stage2 = False stage3 = False count1 = 0 count2 = 0 for start in range(len(y1_prime)-3): if sum(y1_prime[start:start+3])<=12: count1 = count1 + 1 count2 = 0 else: count2 = count2 + 1 count1 = 0 if not stage0 and count2 == 0 and count1 > 2: stage0 = True count1 = 0 if not stage1 and count1 == 0 and count2 > 5: stage0 = True stage1 = True count2 = 0 if stage1 and count2 == 0 and count1 > 3: stage2 = True count1 = 0 if stage2 and count1 == 0 and count2 > 2: stage3 = True count2 = 0 if stage3: param, param_cov = curve_fit(custom, np.array(x), np.array(y1), maxfev = 100000, bounds=([1, 0, 1, 1, 30, 1, 1], [2, 60, 8, 200, 90, 8, 1400])) y1_pred = custom(x, param[0], param[1], param[2], param[3], param[4], param[5], param[6]) elif index[0] == 'Korea, South': param, param_cov = curve_fit(test_linear, np.array(x), np.array(y1), [y_max_, 0.5,(x[-1] - x[0])/2 + x[0], 50, 45, 0], maxfev = 100000, bounds=([y_max_/2, 0.1, 0, 1, 30, -100],[y_max_*5 + 1500, 1, 150, 100, 100, 1000])) y1_pred = test_linear(x, param[0], param[1], param[2], param[3], param[4], param[5]) elif index[0] in ['US', 'Spain', 'Germany', 
'France', 'Iran', 'United Kingdom']: param, param_cov = curve_fit(test, np.array(x), np.array(y1), [y_max_*7, 0.5,(x[-1] - x[0])/2 + x[0]], maxfev = 100000, bounds=([y_max_*5, 0.1, 0],[y_max_*10 + 1500, 1, 150])) y1_pred = test(x, param[0], param[1], param[2]) elif index[0] == 'China': param, param_cov = curve_fit(test, np.array(x), np.array(y1), [y_max_, 0.5,(x[-1] - x[0])/2 + x[0]], maxfev = 100000, bounds=([y_max_/2, 0.1, 0],[y_max_*5 + 1500, 1, 150])) y1_pred = test(x, param[0], param[1], param[2]) else: param, param_cov = curve_fit(test, np.array(x), np.array(y1), [y_max_*5, 0.5,(x[-1] - x[0])/2 + x[0]], maxfev = 100000, bounds=([y_max_*4, 0.1, 0],[y_max_*8 + 1500, 1, 150])) y1_pred = test(x, param[0], param[1], param[2]) param2, param_cov2 = curve_fit(test, np.array(x2), np.array(y2), [y2[-1]/2, 0.5,(x2[-1] - x2[0])/2 + x2[0] - 3], maxfev = 100000, bounds=([y2[-1]/2, 0.1, 0],[y2[-1]*5 + 1, 0.8, 150])) y2_pred = test(x2,param2[0], param2[1], param2[2]) group = test_groups.get_group(index) group['days'] = [(datetime.strptime(date, "%Y-%m-%d" ).date() - base_date_object ).days + 1 for date in group['Date'].values] group = group[group['days'] <= public_end_day] ids = group['ForecastId'].values days = [(datetime.strptime(date, "%Y-%m-%d" ).date() - base_date_object ).days + 1 for date in group['Date'].values] prev_days = range(public_start_day - 6, public_start_day - 1, 1) if stage3: test_y1_pred_raw = custom(days, param[0], param[1], param[2], param[3], param[4], param[5], param[6]) prev_y1_pred_raw = custom(prev_days, param[0], param[1], param[2], param[3], param[4], param[5], param[6]) elif index[0] == 'Korea, South': test_y1_pred_raw = test_linear(days, param[0], param[1], param[2], param[3], param[4], param[5]) prev_y1_pred_raw = test_linear(prev_days, param[0], param[1], param[2], param[3], param[4], param[5]) elif index[0] in ['US', 'Spain', 'Germany', 'France', 'Iran', 'United Kingdom']: test_y1_pred_raw = test(days, param[0], param[1], param[2]) prev_y1_pred_raw = test(prev_days, param[0], param[1], param[2]) else: test_y1_pred_raw = test(days, param[0], param[1], param[2]) prev_y1_pred_raw = test(prev_days, param[0], param[1], param[2]) test_y2_pred_raw = test(days, param2[0], param2[1], param2[2]) prev_y2_pred_raw = test(prev_days, param2[0], param2[1], param2[2]) day_index_pred = 0 diff1_list = [] diff2_list = [] for day in prev_days: if day in x: day_index = np.where(x == day) diff1 = y1[day_index] - prev_y1_pred_raw[day_index_pred] diff1_list.append(diff1) if day in x2: day_index = np.where(x2 == day) diff2 = y2[day_index] - prev_y2_pred_raw[day_index_pred] diff2_list.append(diff2) day_index_pred = day_index_pred + 1 if len(diff1_list)> 0: diff1_mean = np.max(diff1_list) else: diff1_mean = 0 if len(diff2_list)> 0: diff2_mean = np.max(diff2_list) else: diff2_mean = 0 if np.isnan(diff1_mean): pass else: test_y1_pred_raw = list(np.array(test_y1_pred_raw)+ diff1_mean) if np.isnan(diff2_mean): pass else: test_y2_pred_raw = list(np.array(test_y2_pred_raw)+ diff2_mean) test_y1_pred = test_y1_pred_raw test_y2_pred = test_y2_pred_raw ratio = 0 if y2[-1] > 0: ratio = y1[-1]/y2[-1] else: ratio = y1[-1] train_day_index = days.index(public_start_day)- 1 if(y1[-1] < 100 and y2[-1] < 4 and ratio < avg): for pred_index in range(len(test_y2_pred)) : if pred_index > train_day_index: if test_y2_pred[pred_index] < test_y1_pred[pred_index]/avg: test_y2_pred[pred_index] = test_y1_pred[pred_index]/avg else: for pred_index in range(len(test_y2_pred)) : if pred_index > train_day_index: if 
test_y2_pred[pred_index] < test_y1_pred[pred_index]/ratio: test_y2_pred[pred_index] = test_y1_pred[pred_index]/ratio test_y1_pred = [int(n)for n in test_y1_pred] test_y2_pred = [int(m)for m in test_y2_pred] local_df_hr = pd.DataFrame(ids, columns=['ForecastId']) print() local_df_hr.insert(1, 'Country_Region', [index[0]]*len(days)) local_df_hr.insert(2, 'Province_State', [index[1]]*len(days)) local_df_hr.insert(3, 'Days', days) local_df_hr.insert(4, 'ConfirmedCases', test_y1_pred) local_df_hr.insert(5, 'Fatalities', test_y2_pred) local_df_hr.insert(6, 'Date', group['Date'].values) local_df = pd.DataFrame(ids, columns=['ForecastId']) local_df.insert(1, 'ConfirmedCases', test_y1_pred) local_df.insert(2, 'Fatalities', test_y2_pred) df = df.append(local_df) df_hr = df_hr.append(local_df_hr) count = count + 1 <define_variables>
data['Province_State']=data['Province_State'].fillna('') test['Province_State']=test['Province_State'].fillna('' )
COVID19 Global Forecasting (Week 3)
8,733,262
private_start_date = '2020-04-02' private_end_date = '2020-04-30' count = 0 for index, start_date in zip(min_date_sorted.index, min_date_sorted): print(list(min_date_sorted.index ).index(index), index, start_date) record = df_train[(df_train['Country_Region'] == index[0])&(df_train['Province_State'] == index[1])] if len(record[record['ConfirmedCases'] > 0])== 0: pass else: record = record[record['ConfirmedCases'] > 0] base_date_object = datetime.strptime(start_date, "%Y-%m-%d" ).date() private_start_date_object = datetime.strptime(private_start_date, "%Y-%m-%d" ).date() private_end_date_object = datetime.strptime(private_end_date, "%Y-%m-%d" ).date() record['days'] = [(datetime.strptime(date, "%Y-%m-%d" ).date() - base_date_object ).days + 1 for date in record['Date']] private_start_day =(private_start_date_object - base_date_object ).days + 1 private_end_day =(private_end_date_object - base_date_object ).days + 1 if len(record[record['days'] < private_start_day])> 0: record = record[record['days'] < private_start_day] record2 = record if len(record[record['Fatalities'] > 0])!= 0: record2 = record[record['Fatalities'] > 0] x = record['days'].values x2 = record2['days'].values y1 = record['ConfirmedCases'].values y2 = record2['Fatalities'].values b = -1 bad_index = 0 mod_count = 0 for a in y1: if a < b: y1[bad_index] = b mod_count = mod_count + 1 else: b = a bad_index = bad_index + 1 if mod_count > 0: print("*****************") print(list(min_date_sorted.index ).index(index), index) print(mod_count) print(y1) print("*****************") b = -1 bad_index = 0 mod_count = 0 for a in y2: if a < b: y2[bad_index] = b mod_count = mod_count + 1 else: b = a bad_index = bad_index + 1 if mod_count > 0: print("*****************") print(list(min_date_sorted.index ).index(index), index) print(mod_count) print(y2) print("*****************") y_max_ = y1[-1] y1_prime = np.diff(y1) y1_prime2 = np.diff(y1_prime) if len(y1)>0 and len(y1_prime)> 0 and len(y1_prime2)> 0: max_slope_index = len(y1_prime)- 1 - list(y1_prime)[::-1].index(max(y1_prime)) max_slope_range =(max_slope_index+1)/len(y1_prime) y_max_ = y1[-1] if max_slope_range < 0.75: if y1_prime[max_slope_index] > 0 and max_slope_range < 0.75 and(((y1_prime[max_slope_index] - max(y1_prime[-2:])) /y1_prime[max_slope_index])< 0.5): y_max_ = y1[-1] pass else: y_max_ = y1[max_slope_index + 1] pass else: y_max_ = y1[-1] stage0 = False stage1 = False stage2 = False stage3 = False count1 = 0 count2 = 0 for start in range(len(y1_prime)-3): if sum(y1_prime[start:start+3])<=12: count1 = count1 + 1 count2 = 0 else: count2 = count2 + 1 count1 = 0 if not stage0 and count2 == 0 and count1 > 2: stage0 = True count1 = 0 if not stage1 and count1 == 0 and count2 > 5: stage0 = True stage1 = True count2 = 0 if stage1 and count2 == 0 and count1 > 3: stage2 = True count1 = 0 if stage2 and count1 == 0 and count2 > 2: stage3 = True count2 = 0 if stage3: param, param_cov = curve_fit(custom, np.array(x), np.array(y1), maxfev = 100000, bounds=([1, 0, 1, 1, 30, 1, 1], [2, 60, 8, 200, 90, 8, 1400])) y1_pred = custom(x, param[0], param[1], param[2], param[3], param[4], param[5], param[6]) elif index[0] == 'Korea, South': param, param_cov = curve_fit(test_linear, np.array(x), np.array(y1), [y_max_, 0.5,(x[-1] - x[0])/2 + x[0], 50, 45, 0], maxfev = 100000, bounds=([y_max_/2, 0.1, 0, 1, 30, -100],[y_max_*5 + 1500, 1, 150, 100, 100, 1000])) y1_pred = test_linear(x, param[0], param[1], param[2], param[3], param[4], param[5]) elif index[0] in ['US', 'Spain', 'Germany', 'France', 'Iran', 
'United Kingdom']: param, param_cov = curve_fit(test, np.array(x), np.array(y1), [y_max_*6, 0.5,(x[-1] - x[0])/2 + x[0]], maxfev = 100000, bounds=([y_max_*5, 0.1, 0],[y_max_*10 + 1500, 1, 150])) y1_pred = test(x, param[0], param[1], param[2]) elif index[0] == 'China': param, param_cov = curve_fit(test, np.array(x), np.array(y1), [y_max_, 0.5,(x[-1] - x[0])/2 + x[0]], maxfev = 100000, bounds=([y_max_/2, 0.1, 0],[y_max_*5 + 1500, 1, 150])) y1_pred = test(x, param[0], param[1], param[2]) elif index[0] in ['Italy', 'Switzerland']: param, param_cov = curve_fit(test, np.array(x), np.array(y1), [y_max_*3, 0.5,(x[-1] - x[0])/2 + x[0]], maxfev = 100000, bounds=([y_max_*2, 0.1, 0],[y_max_*5 + 1500, 1, 150])) y1_pred = test(x, param[0], param[1], param[2]) else: param, param_cov = curve_fit(test, np.array(x), np.array(y1), [y_max_*4, 0.5,(x[-1] - x[0])/2 + x[0]], maxfev = 100000, bounds=([y_max_*3, 0.1, 0],[y_max_*8 + 1500, 1, 150])) y1_pred = test(x, param[0], param[1], param[2]) param2, param_cov2 = curve_fit(test, np.array(x2), np.array(y2), [y2[-1]/2, 0.5,(x2[-1] - x2[0])/2 + x2[0] - 3], maxfev = 100000, bounds=([y2[-1]/2, 0.1, 0],[y2[-1]*5 + 1, 0.8, 150])) y2_pred = test(x2,param2[0], param2[1], param2[2]) group = test_groups.get_group(index) group['days'] = [(datetime.strptime(date, "%Y-%m-%d" ).date() - base_date_object ).days + 1 for date in group['Date'].values] group = group[group['days'] >= private_start_day] ids = group['ForecastId'].values days = [(datetime.strptime(date, "%Y-%m-%d" ).date() - base_date_object ).days + 1 for date in group['Date'].values] prev_days = range(private_start_day - 6, private_start_day - 1, 1) if stage3: test_y1_pred_raw = custom(days, param[0], param[1], param[2], param[3], param[4], param[5], param[6]) prev_y1_pred_raw = custom(prev_days, param[0], param[1], param[2], param[3], param[4], param[5], param[6]) elif index[0] == 'Korea, South': test_y1_pred_raw = test_linear(days, param[0], param[1], param[2], param[3], param[4], param[5]) prev_y1_pred_raw = test_linear(prev_days, param[0], param[1], param[2], param[3], param[4], param[5]) elif index[0] in ['US', 'Spain', 'Germany', 'France', 'Iran', 'United Kingdom']: test_y1_pred_raw = test(days, param[0], param[1], param[2]) prev_y1_pred_raw = test(prev_days, param[0], param[1], param[2]) else: test_y1_pred_raw = test(days, param[0], param[1], param[2]) prev_y1_pred_raw = test(prev_days, param[0], param[1], param[2]) test_y2_pred_raw = test(days, param2[0], param2[1], param2[2]) prev_y2_pred_raw = test(prev_days, param2[0], param2[1], param2[2]) day_index_pred = 0 diff1_list = [] diff2_list = [] for day in prev_days: if day in x: day_index = np.where(x == day) diff1 = y1[day_index] - prev_y1_pred_raw[day_index_pred] diff1_list.append(diff1) if day in x2: day_index = np.where(x2 == day) diff2 = y2[day_index] - prev_y2_pred_raw[day_index_pred] diff2_list.append(diff2) day_index_pred = day_index_pred + 1 if len(diff1_list)> 0: diff1_mean = np.max(diff1_list) else: diff1_mean = 0 if len(diff2_list)> 0: diff2_mean = np.max(diff2_list) else: diff2_mean = 0 if np.isnan(diff1_mean): pass else: test_y1_pred_raw = list(np.array(test_y1_pred_raw)+ diff1_mean) if np.isnan(diff2_mean): pass else: test_y2_pred_raw = list(np.array(test_y2_pred_raw)+ diff2_mean) test_y1_pred = test_y1_pred_raw test_y2_pred = test_y2_pred_raw ratio = 0 if y2[-1] > 0: ratio = y1[-1]/y2[-1] else: ratio = y1[-1] train_day_index = days.index(private_start_day)- 1 if(y1[-1] < 100 and y2[-1] < 4 and ratio < avg): for pred_index in 
range(len(test_y2_pred)) : if pred_index > train_day_index: if test_y2_pred[pred_index] < test_y1_pred[pred_index]/avg: test_y2_pred[pred_index] = test_y1_pred[pred_index]/avg else: for pred_index in range(len(test_y2_pred)) : if pred_index > train_day_index: if test_y2_pred[pred_index] < test_y1_pred[pred_index]/ratio: test_y2_pred[pred_index] = test_y1_pred[pred_index]/ratio test_y1_pred = [int(n)for n in test_y1_pred] test_y2_pred = [int(m)for m in test_y2_pred] local_df_hr = pd.DataFrame(ids, columns=['ForecastId']) local_df_hr.insert(1, 'Country_Region', [index[0]]*len(days)) local_df_hr.insert(2, 'Province_State', [index[1]]*len(days)) local_df_hr.insert(3, 'Days', days) local_df_hr.insert(4, 'ConfirmedCases', test_y1_pred) local_df_hr.insert(5, 'Fatalities', test_y2_pred) local_df_hr.insert(6, 'Date', group['Date'].values) local_df = pd.DataFrame(ids, columns=['ForecastId']) local_df.insert(1, 'ConfirmedCases', test_y1_pred) local_df.insert(2, 'Fatalities', test_y2_pred) df = df.append(local_df) df_hr = df_hr.append(local_df_hr) count = count + 1 <sort_values>
import matplotlib.pyplot as plt
COVID19 Global Forecasting (Week 3)
8,733,262
df = df.sort_values(by=['ForecastId'], ascending=True) df_hr = df_hr.sort_values(by=['ForecastId'], ascending=True )<save_to_csv>
datetime_str = '01/22/20 00:00:00'
datetime_object = datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S')
data['days'] = pd.to_datetime(data['Date']).sub(datetime_object) / np.timedelta64(1, 'D')
test['days'] = pd.to_datetime(test['Date']).sub(datetime_object) / np.timedelta64(1, 'D')
COVID19 Global Forecasting (Week 3)
8,733,262
df.to_csv('submission.csv', index=False) df_hr.to_csv('hr_submission.csv', index=False )<import_modules>
data.loc[(data['Province_State']=='')&(data['Country_Region']=='India'),:].sort_values(by="Date" )
COVID19 Global Forecasting (Week 3)
8,733,262
import numpy as np import pandas as pd import matplotlib.pyplot as plt<define_variables>
data.to_csv("train_1.csv") test.to_csv("test_1.csv")
COVID19 Global Forecasting (Week 3)
8,733,262
EMPTY_VAL = "EMPTY_VAL"
NAN_VAL = "NaN"


def get_state(state, country):
    if state == EMPTY_VAL:
        return country
    if state == NAN_VAL:
        return country
    return state<compute_test_metric>
from statsmodels.tsa.arima_model import ARIMA
COVID19 Global Forecasting (Week 3)
8,733,262
def calc_score(y_true, y_pred):
    y_true[y_true < 0] = 0
    score = metrics.mean_squared_error(
        np.log(y_true.clip(0, 1e10) + 1), np.log(y_pred[:] + 1)
    ) ** 0.5
    return score<load_from_csv>
data['Date']=pd.to_datetime(data['Date']) test['Date']=pd.to_datetime(test['Date'] )
COVID19 Global Forecasting (Week 3)
8,733,262
PATH_WEEK2 = '/kaggle/input/covid19-global-forecasting-week-2'
df_train = pd.read_csv(f'{PATH_WEEK2}/train.csv')
df_test = pd.read_csv(f'{PATH_WEEK2}/test.csv')
df_train.rename(columns={'Country_Region': 'Country'}, inplace=True)
df_test.rename(columns={'Country_Region': 'Country'}, inplace=True)
df_train.rename(columns={'Province_State': 'State'}, inplace=True)
df_test.rename(columns={'Province_State': 'State'}, inplace=True)
df_train['Date'] = pd.to_datetime(df_train['Date'], infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(df_test['Date'], infer_datetime_format=True)
y1_Train = df_train.iloc[:, -2]
y2_Train = df_train.iloc[:, -1]<data_type_conversions>
pd.DataFrame(data.loc[data['Country_Region']=='Afghanistan',['ConfirmedCases']] ).reset_index(drop=True )
COVID19 Global Forecasting (Week 3)
8,733,262
X_Train = df_train.copy() X_Train['State'].fillna(EMPTY_VAL, inplace=True) X_Train['State'] = X_Train.loc[:, ['State', 'Country']].apply(lambda x : get_state(x['State'], x['Country']), axis=1) X_Train.loc[:, 'Date'] = X_Train.Date.dt.strftime("%m%d") X_Train["Date"] = X_Train["Date"].astype(int) X_Train.head()<data_type_conversions>
data.isna().sum(axis=0 )
COVID19 Global Forecasting (Week 3)
8,733,262
X_Pred = df_test.copy() X_Pred['State'].fillna(EMPTY_VAL, inplace=True) X_Pred['State'] = X_Pred.loc[:, ['State', 'Country']].apply(lambda x : get_state(x['State'], x['Country']), axis=1) X_Pred.loc[:, 'Date'] = X_Pred.Date.dt.strftime("%m%d") X_Pred["Date"] = X_Pred["Date"].astype(int) X_Pred.head()<categorify>
data['ConfirmedCases'][data['Country_Region']==''][51:]
COVID19 Global Forecasting (Week 3)
8,733,262
le = preprocessing.LabelEncoder() X_Train.Country = le.fit_transform(X_Train.Country) X_Train['State'] = le.fit_transform(X_Train['State']) X_Train.head()<categorify>
data['ConfirmedCases'][data['Country_Region']=='India'].value_counts()
COVID19 Global Forecasting (Week 3)
8,733,262
X_Pred.Country = le.fit_transform(X_Pred.Country) X_Pred['State'] = le.fit_transform(X_Pred['State']) X_Pred.head()<create_dataframe>
pd.DataFrame(data.loc[data['Country_Region']=='India',['ConfirmedCases']] )
COVID19 Global Forecasting (Week 3)
8,733,262
filterwarnings('ignore') le = preprocessing.LabelEncoder() n_estimators = 5000 n_poly_degree = 5 countries = X_Train.Country.unique() df_out_xgb = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []}) df_out_lgb = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []}) df_out_ply = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []}) for country in countries: states = X_Train.loc[X_Train.Country == country, :].State.unique() for state in states: X_Train_CS = X_Train.loc[(X_Train.Country == country)&(X_Train.State == state), ['State', 'Country', 'Date', 'ConfirmedCases', 'Fatalities']] y_Train_CS_Cases = X_Train_CS.loc[:, 'ConfirmedCases'] y_Train_CS_Fatal = X_Train_CS.loc[:, 'Fatalities'] X_Train_CS = X_Train_CS.loc[:, ['State', 'Country', 'Date']] X_Train_CS.Country = le.fit_transform(X_Train_CS.Country) X_Train_CS['State'] = le.fit_transform(X_Train_CS['State']) X_Pred_CS = X_Pred.loc[(X_Pred.Country == country)&(X_Pred.State == state), ['State', 'Country', 'Date', 'ForecastId']] X_Pred_CS_Id = X_Pred_CS.loc[:, 'ForecastId'] X_Pred_CS = X_Pred_CS.loc[:, ['State', 'Country', 'Date']] X_Pred_CS.Country = le.fit_transform(X_Pred_CS.Country) X_Pred_CS['State'] = le.fit_transform(X_Pred_CS['State']) model_xgb_cases = XGBRegressor(n_estimators = n_estimators) model_xgb_cases.fit(X_Train_CS, y_Train_CS_Cases) y_xgb_cases_pred = model_xgb_cases.predict(X_Pred_CS) model_xgb_fatal = XGBRegressor(n_estimators = n_estimators) model_xgb_fatal.fit(X_Train_CS, y_Train_CS_Fatal) y_xgb_fatal_pred = model_xgb_fatal.predict(X_Pred_CS) model_lgb_cases = XGBRegressor(n_estimators = n_estimators) model_lgb_cases.fit(X_Train_CS, y_Train_CS_Cases) y_lgb_cases_pred = model_lgb_cases.predict(X_Pred_CS) model_lgb_fatal = XGBRegressor(n_estimators = n_estimators) model_lgb_fatal.fit(X_Train_CS, y_Train_CS_Fatal) y_lgb_fatal_pred = model_lgb_fatal.predict(X_Pred_CS) pl_cases_pred = np.poly1d(np.polyfit(X_Train_CS.Date, y_Train_CS_Cases, n_poly_degree)) y_ply_cases_pred = pl_cases_pred(X_Pred_CS.Date) pl_fatal_pred = np.poly1d(np.polyfit(X_Train_CS.Date, y_Train_CS_Fatal, n_poly_degree)) y_ply_fatal_pred = pl_fatal_pred(X_Pred_CS.Date) df_xgb = pd.DataFrame({'ForecastId': X_Pred_CS_Id, 'ConfirmedCases': y_xgb_cases_pred, 'Fatalities': y_xgb_fatal_pred}) df_lgb = pd.DataFrame({'ForecastId': X_Pred_CS_Id, 'ConfirmedCases': y_lgb_cases_pred, 'Fatalities': y_lgb_fatal_pred}) df_ply = pd.DataFrame({'ForecastId': X_Pred_CS_Id, 'ConfirmedCases': y_ply_cases_pred, 'Fatalities': y_ply_fatal_pred}) df_out_xgb = pd.concat([df_out_xgb, df_xgb], axis=0) df_out_lgb = pd.concat([df_out_lgb, df_lgb], axis=0) df_out_ply = pd.concat([df_out_ply, df_ply], axis=0) <data_type_conversions>
datetime_str = '03/22/20 00:00:00' datetime_object = datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S' )
COVID19 Global Forecasting (Week 3)
8,733,262
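The per-region loop above fits, besides two gradient-boosted models, a degree-5 polynomial on the integer-encoded date (np.polyfit / np.poly1d) and later blends the three predictions. A small sketch of just that polynomial piece on toy data, which also shows why extrapolating a high-degree polynomial past the training window needs the clipping and rounding applied later in the notebook; day indices and counts are made up.

import numpy as np

day = np.arange(60, 90, dtype=float)                 # integer day index, training window
cases = np.round(np.exp(0.12 * (day - 60)) * 10)     # toy exponential-ish cumulative counts

poly = np.poly1d(np.polyfit(day, cases, 5))          # same degree as n_poly_degree = 5

future = np.arange(90, 105, dtype=float)             # 15 days past the window
# High-degree polynomials can swing sharply (even below zero) outside the
# fitted range, hence the later rounding/clipping of the blended output.
print(np.round(poly(future)).astype(int))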
df_out_xgb.ForecastId = df_out_xgb.ForecastId.astype('int') df_out_lgb.ForecastId = df_out_lgb.ForecastId.astype('int') df_out_ply.ForecastId = df_out_ply.ForecastId.astype('int' )<create_dataframe>
from datetime import timedelta
COVID19 Global Forecasting (Week 3)
8,733,262
df_out = df_out_ply.copy()<feature_engineering>
import math
COVID19 Global Forecasting (Week 3)
8,733,262
df_out['ConfirmedCases'] =(1/4)*(df_out_xgb['ConfirmedCases'] + df_out_lgb['ConfirmedCases'])+(1/2)* df_out_ply['ConfirmedCases'] df_out['Fatalities'] =(1/4)*(df_out_xgb['Fatalities'] + df_out_lgb['Fatalities'])+(1/2)* df_out_ply['Fatalities']<data_type_conversions>
def rmsle(y, y_pred):
    assert len(y) == len(y_pred)
    terms_to_sum = [
        (math.log(y_pred[i] + 1) - math.log(y[i] + 1)) ** 2.0
        for i, pred in enumerate(y_pred)
    ]
    return (sum(terms_to_sum) * (1.0 / len(y))) ** 0.5
COVID19 Global Forecasting (Week 3)
8,733,262
df_out['ConfirmedCases'] = df_out['ConfirmedCases'].round().astype(int) df_out['Fatalities'] = df_out['Fatalities'].round().astype(int )<save_to_csv>
def evaluate_arima_model(X, forecast_days, arima_order):
    X = [x for x in X]
    train_size = int(len(X) * 0.8)
    train, test1 = X[0:train_size], X[train_size:]
    history = train
    model = ARIMA(history, order=arima_order)
    model_fit = model.fit(disp=0)
    predictions = list()
    predictions = model_fit.forecast(steps=len(test1))[0]
    model = ARIMA(X, order=arima_order)
    model_fit = model.fit(disp=0)
    if np.isnan(model_fit.forecast(steps=forecast_days)[0]).sum() > 0:
        return float('inf')
    error = rmsle(test1, predictions)
    return error
COVID19 Global Forecasting (Week 3)
8,733,262
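evaluate_arima_model above uses the old statsmodels.tsa.arima_model.ARIMA interface (fit(disp=0), forecast returning a tuple), which was removed in recent statsmodels releases. A minimal fit-and-forecast sketch against the current statsmodels.tsa.arima.model.ARIMA API, offered as an assumption about the environment rather than a drop-in fix for the notebook; the series is a toy.

import numpy as np
from statsmodels.tsa.arima.model import ARIMA  # current API (statsmodels 0.12+)

history = np.array([2, 3, 5, 8, 12, 18, 26, 37, 52, 70, 95, 130], dtype=float)

# order=(p, d, q); the grid search in evaluate_models would try many of these.
res = ARIMA(history, order=(1, 1, 1)).fit()
forecast = res.forecast(steps=5)        # next 5 predicted values as an array
print(np.round(forecast, 1))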
df_out.to_csv('submission.csv', index=False )<import_modules>
def evaluate_models(dataset, forcast_days, p_values, d_values, q_values):
    best_score, best_cfg = float("inf"), (0, 0, 0)
    for p in p_values:
        for d in d_values:
            for q in q_values:
                order = (p, d, q)
                try:
                    mse = evaluate_arima_model(dataset, forcast_days, order)
                    if mse < best_score:
                        best_score, best_cfg = mse, order
                except:
                    continue
    print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score))
    return best_cfg, best_score
COVID19 Global Forecasting (Week 3)
8,733,262
import numpy as np import pandas as pd import lightgbm as lgb import matplotlib.pyplot as plt from scipy.optimize import curve_fit<load_from_csv>
warnings.filterwarnings('ignore' )
COVID19 Global Forecasting (Week 3)
8,733,262
train = pd.read_csv('../input/covid19-global-forecasting-week-2/train.csv')
test = pd.read_csv('../input/covid19-global-forecasting-week-2/test.csv')
world_population = pd.read_csv('../input/population-by-country-2020/population_by_country_2020.csv')<merge>
test['ConfirmedCases']=0 test['Fatalities']=0
COVID19 Global Forecasting (Week 3)
8,733,262
world_population = world_population[['Country(or dependency)', 'Population(2020)']]
world_population.columns = ['Country(or dependency)', 'Population']
world_population.loc[world_population['Country(or dependency)'] == 'United States', 'Country(or dependency)'] = 'US'
train = train.merge(world_population, left_on='Country_Region', right_on='Country(or dependency)', how='left')
test = test.merge(world_population, left_on='Country_Region', right_on='Country(or dependency)', how='left')<feature_engineering>
sliced_data=data.loc[(data['Province_State']=='')&(data['Country_Region']=='India'),:]
COVID19 Global Forecasting (Week 3)
8,733,262
# s == s is False only for NaN, so regions without a province keep the bare country name.
train['State_Country'] = [s + '_' + c if s == s else c
                          for s, c in train[['Province_State', 'Country_Region']].values]
test['State_Country'] = [s + '_' + c if s == s else c
                         for s, c in test[['Province_State', 'Country_Region']].values]<feature_engineering>
from pandas import read_csv from pandas import datetime from matplotlib import pyplot from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error from math import sqrt from time import time from sklearn.metrics import mean_squared_error
COVID19 Global Forecasting (Week 3)
8,733,262
train.loc[(train['Date']=='2020-03-24')&(train['State_Country']=='France'),'ConfirmedCases'] = 22654 train.loc[(train['Date']=='2020-03-24')&(train['State_Country']=='France'),'Fatalities'] = 1000<groupby>
country='India' state='' sliced_data=data.loc[(data['Province_State']==state)&(data['Country_Region']==country),:] test_sliced=test.loc[(test['Province_State']==state)&(test['Country_Region']==country),:] print(sliced_data) sliced_data=sliced_data.drop_duplicates() sliced_data=sliced_data.reset_index(drop=True) sliced_data=sliced_data.sort_values(by='Date') if sliced_data.loc[sliced_data['ConfirmedCases']>0,:].shape[0]>0: sliced_data=sliced_data.loc[sliced_data['ConfirmedCases']>0,:] sliced_data=sliced_data.reset_index(drop=True) max_date_train=sliced_data['Date'].max() max_date_test=test_sliced['Date'].max() forcast_days=int(( max_date_test-max_date_train)/np.timedelta64(1, 'D')) history=sliced_data['ConfirmedCases'].to_list() print('history') print(history) if len(history)==1: history.append(history[0]) best_cfg,best_score=evaluate_models(history,forcast_days,range(10),range(7),range(7)) preds=[] model = ARIMA(history, order=best_cfg) model_fit = model.fit(disp=0) preds=model_fit.forecast(steps=forcast_days)[0] preds=[round(p)if p>0 else 0 for p in preds] dates=[max_date_train+timedelta(days=day+1)for day in range(forcast_days)] predictions=pd.DataFrame() predictions['Date']=dates predictions['ConfirmedCases']=preds test_sliced=test_sliced.merge(sliced_data[['Date','ConfirmedCases']], on='Date',how='left') test_sliced['ConfirmedCases']=test_sliced['ConfirmedCases_y'] del test_sliced['ConfirmedCases_y'] del test_sliced['ConfirmedCases_x'] test_sliced=test_sliced.merge(predictions, on='Date',how='left') test_sliced['ConfirmedCases_x'][test_sliced['ConfirmedCases_x'].isna() ]=test_sliced['ConfirmedCases_y'][test_sliced['ConfirmedCases_x'].isna() ] test_sliced['ConfirmedCases']=test_sliced['ConfirmedCases_x'] del test_sliced['ConfirmedCases_y'] del test_sliced['ConfirmedCases_x'] sliced_data_bck=sliced_data.copy() if sliced_data.loc[sliced_data['Fatalities']>0,:].shape[0]>0: sliced_data=sliced_data.loc[sliced_data['Fatalities']>0,:] sliced_data=sliced_data.reset_index(drop=True) max_date_train=sliced_data['Date'].max() max_date_test=test_sliced['Date'].max() forcast_days=int(( max_date_test-max_date_train)/np.timedelta64(1, 'D')) history=sliced_data['Fatalities'].to_list() if len(history)==1: history.append(history[0]) best_cfg,best_score=evaluate_models(history,forcast_days,range(5),range(5),range(5)) preds=[] model=None model = ARIMA(history, order=best_cfg) model_fit = model.fit(disp=0) preds=model_fit.forecast(steps=forcast_days)[0] preds=[round(p)if p>0 else 0 for p in preds] dates=[max_date_train+timedelta(days=day+1)for day in range(forcast_days)] predictions_f=pd.DataFrame() predictions_f['Date']=dates predictions_f['Fatalities']=preds test_sliced=test_sliced.merge(sliced_data_bck[['Date','Fatalities']], on='Date',how='left') test_sliced['Fatalities']=test_sliced['Fatalities_y'] del test_sliced['Fatalities_y'] del test_sliced['Fatalities_x'] test_sliced=test_sliced.merge(predictions_f, on='Date',how='left') test_sliced['Fatalities_x'][test_sliced['Fatalities_x'].isna() ]=test_sliced['Fatalities_y'][test_sliced['Fatalities_x'].isna() ] test_sliced['Fatalities']=test_sliced['Fatalities_x'] del test_sliced['Fatalities_y'] del test_sliced['Fatalities_x'] test=test.merge(test_sliced,on='ForecastId',how='left') test['ConfirmedCases_x'][test['ConfirmedCases_y'].notna() ]=test['ConfirmedCases_y'][test['ConfirmedCases_y'].notna() ] test['Fatalities_x'][test['Fatalities_y'].notna() ]=test['Fatalities_y'][test['Fatalities_y'].notna() ] new_cols=[] for col in test.columns: if col[-2:]=='_y': del 
test[col] elif col[-2:]=='_x': new_cols.append(col[:-2]) else: new_cols.append(col) test.columns=new_cols test.loc[(test['Province_State']==state)&(test['Country_Region']==country),:].head() plt.plot('Date', 'ConfirmedCases', data=sliced_data, color='blue', linewidth=2) plt.plot('Date','ConfirmedCases',data=test_sliced,color='orange',linewidth=2) plt.plot('Date', 'Fatalities', data=sliced_data, color='purple', linewidth=2) plt.plot('Date','Fatalities',data=test_sliced,color='red',linewidth=2) plt.show()
COVID19 Global Forecasting (Week 3)
8,733,262
for metric in ['ConfirmedCases', 'Fatalities']: dict_values = train.groupby('State_Country')[metric].apply(np.array ).to_dict() for country in dict_values: if sum(np.diff(dict_values[country])< 0): print(country, metric) new_val = [dict_values[country][-1]] for val_1, val_2 in zip(dict_values[country][1:][::-1], dict_values[country][:-1][::-1]): if val_2 <= new_val[-1]: new_val += [val_2] else: new_val += [new_val[-1]] new_val = np.array(new_val[::-1]) train.loc[train.State_Country == country, metric] = new_val<feature_engineering>
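The backward pass above makes each cumulative series non-decreasing by clamping every value down to the minimum of all later values; assuming that is the intended behaviour, a vectorised equivalent (reverse running minimum) would be:

# Sketch: reverse cumulative minimum reproduces the clamping done by the loop above.
vals = dict_values[country]
new_val = np.minimum.accumulate(vals[::-1])[::-1]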
test.loc[(test['Province_State']==state)&(test['Country_Region']==country),['Country_Region','Date','ConfirmedCases','Fatalities']]
COVID19 Global Forecasting (Week 3)
8,733,262
train['max_case'] = train['State_Country'].map(train.groupby('State_Country' ).ConfirmedCases.max()) train['pct_c'] = train.ConfirmedCases.pct_change()<predict_on_test>
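Note that pct_change() here runs over the concatenated frame, so the first row of each State_Country inherits the change from the previous region; if a per-region change is wanted instead, a grouped variant would be:

# Sketch (assumption about intent): percentage change computed within each region.
train['pct_c'] = train.groupby('State_Country').ConfirmedCases.pct_change()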
test['ConfirmedCases']=0 test['Fatalities']=0
COVID19 Global Forecasting (Week 3)
8,733,262
def predict_cc(data, country, len_predict, metrics, len_intersection): country_data = data[data['State_Country']==country] if country_data[metrics].values.max() > 1000: start_people = 2 else: start_people = 0 country_data = country_data.iloc[dict_case_date[country][start_people]:, :] x_data = range(len(country_data.index)) y_data = country_data[metrics].values if len(x_data)<= 1: y_min = 0 y_max = 100 y_val = np.arange(len(x_data), len(x_data)+ len_predict) return [-1, -1, -1], log_curve(y_val, 0.3, 30, 100) else: add_max =(1 + country_data.pct_c.values[-3:].mean())**(1 / 2) if y_data.max() > 10000: add_min =(1 + country_data.pct_c.values[-3:].mean())**(1 / 5) elif y_data.max() > 1000: add_min =(1 + country_data.pct_c.values[-3:].mean())**(1 / 3) else: add_min =(1 + country_data.pct_c.values[-3:].mean())**(1 / 2) add_max =(1 + country_data.pct_c.values[-3:].mean())**(1) day_left = max(10, 50 - len(x_data)) y_min = y_data[-1] *(add_min ** day_left) if y_min > country_data['Population'].values[0] * 0.05: y_min = country_data['Population'].values[0] * 0.05 y_max = y_data[-1] *(add_max ** day_left + 30) if y_max > country_data['Population'].values[0] * 0.15: y_max = country_data['Population'].values[0] * 0.15 if add_min == add_max or add_max != add_max: y_min = y_data[-1] y_max = max(100, y_data[-1] *(1.3 ** 30)) if y_max > 100000 and y_data[-1] < 10000: y_max = y_data[-1] * 10 y_min = y_data[-1] popt, pcov = curve_fit(log_curve, x_data, y_data, bounds=([0.1, 10, y_min ],[0.35, 50, y_max]), p0=[0.2,30,(y_min + y_max)/ 2], maxfev=10000) y_val = np.arange(len(x_data)- len_intersection, len(x_data)+ len_predict - len_intersection) return popt, log_curve(y_val, popt[0], popt[1], popt[2]) def log_curve(x, k, x_0, ymax): return ymax /(1 + np.exp(-k*(x-x_0)) )<groupby>
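log_curve above is the standard three-parameter logistic used by the curve_fit call in predict_cc; a small self-contained check of that fitting pattern on synthetic data (not the competition frame) might be:

# Sketch: fit the logistic defined above to a known synthetic curve and check the recovered parameters.
import numpy as np
from scipy.optimize import curve_fit

x = np.arange(60)
y = 5000 / (1 + np.exp(-0.2 * (x - 30)))   # true parameters: k=0.2, x_0=30, ymax=5000
popt, _ = curve_fit(log_curve, x, y,
                    bounds=([0.1, 10, 1000], [0.35, 50, 10000]),
                    p0=[0.2, 30, 5500], maxfev=10000)
print(popt)   # should be close to [0.2, 30, 5000]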
for country in countries: for state in countries[country]: print('Country : '+country,'State : '+state) sliced_data=data.loc[(data['Province_State']==state)&(data['Country_Region']==country),:] test_sliced=test.loc[(test['Province_State']==state)&(test['Country_Region']==country),:] sliced_data=sliced_data.drop_duplicates() sliced_data=sliced_data.reset_index(drop=True) sliced_data=sliced_data.sort_values(by='Date') if sliced_data.loc[sliced_data['ConfirmedCases']>0,:].shape[0]>0: sliced_data=sliced_data.loc[sliced_data['ConfirmedCases']>0,:] sliced_data=sliced_data.reset_index(drop=True) max_date_train=sliced_data['Date'].max() max_date_test=test_sliced['Date'].max() forcast_days=int(( max_date_test-max_date_train)/np.timedelta64(1, 'D')) history=sliced_data['ConfirmedCases'].to_list() if len(history)==1: history.append(history[0]) best_cfg,best_score=evaluate_models(history,forcast_days,range(5),range(5),range(5)) preds=[] model = ARIMA(history, order=best_cfg) model_fit = model.fit(disp=0) preds=model_fit.forecast(steps=forcast_days)[0] preds=[round(p)if p>0 else 0 for p in preds] dates=[max_date_train+timedelta(days=day+1)for day in range(forcast_days)] predictions=pd.DataFrame() predictions['Date']=dates predictions['ConfirmedCases']=preds test_sliced=test_sliced.merge(sliced_data[['Date','ConfirmedCases']], on='Date',how='left') test_sliced['ConfirmedCases']=test_sliced['ConfirmedCases_y'] del test_sliced['ConfirmedCases_y'] del test_sliced['ConfirmedCases_x'] test_sliced=test_sliced.merge(predictions, on='Date',how='left') test_sliced['ConfirmedCases_x'][test_sliced['ConfirmedCases_x'].isna() ]=test_sliced['ConfirmedCases_y'][test_sliced['ConfirmedCases_x'].isna() ] test_sliced['ConfirmedCases']=test_sliced['ConfirmedCases_x'] del test_sliced['ConfirmedCases_y'] del test_sliced['ConfirmedCases_x'] sliced_data_bck=sliced_data.copy() if sliced_data.loc[sliced_data['Fatalities']>0,:].shape[0]>0: sliced_data=sliced_data.loc[sliced_data['Fatalities']>0,:] sliced_data=sliced_data.reset_index(drop=True) max_date_train=sliced_data['Date'].max() max_date_test=test_sliced['Date'].max() forcast_days=int(( max_date_test-max_date_train)/np.timedelta64(1, 'D')) history=sliced_data['Fatalities'].to_list() if len(history)==1: history.append(history[0]) best_cfg,best_score=evaluate_models(history,forcast_days,range(5),range(5),range(5)) preds=[] model=None model = ARIMA(history, order=best_cfg) model_fit = model.fit(disp=0) preds=model_fit.forecast(steps=forcast_days)[0] preds=[round(p)if p>0 else 0 for p in preds] dates=[max_date_train+timedelta(days=day+1)for day in range(forcast_days)] predictions_f=pd.DataFrame() predictions_f['Date']=dates predictions_f['Fatalities']=preds test_sliced=test_sliced.merge(sliced_data_bck[['Date','Fatalities']], on='Date',how='left') test_sliced['Fatalities']=test_sliced['Fatalities_y'] del test_sliced['Fatalities_y'] del test_sliced['Fatalities_x'] test_sliced=test_sliced.merge(predictions_f, on='Date',how='left') test_sliced['Fatalities_x'][test_sliced['Fatalities_x'].isna() ]=test_sliced['Fatalities_y'][test_sliced['Fatalities_x'].isna() ] test_sliced['Fatalities']=test_sliced['Fatalities_x'] del test_sliced['Fatalities_y'] del test_sliced['Fatalities_x'] test=test.merge(test_sliced,on='ForecastId',how='left') test['ConfirmedCases_x'][test['ConfirmedCases_y'].notna() ]=test['ConfirmedCases_y'][test['ConfirmedCases_y'].notna() ] test['Fatalities_x'][test['Fatalities_y'].notna() ]=test['Fatalities_y'][test['Fatalities_y'].notna() ] new_cols=[] for col in 
test.columns: if col[-2:]=='_y': del test[col] elif col[-2:]=='_x': new_cols.append(col[:-2]) else: new_cols.append(col) test.columns=new_cols print(test.loc[(test['Province_State']==state)&(test['Country_Region']==country),:].head()) plt.plot('Date', 'ConfirmedCases', data=sliced_data, color='blue', linewidth=2) plt.plot('Date','ConfirmedCases',data=test_sliced,color='orange',linewidth=2) plt.plot('Date', 'Fatalities', data=sliced_data, color='purple', linewidth=2) plt.plot('Date','Fatalities',data=test_sliced,color='red',linewidth=2) plt.show()
COVID19 Global Forecasting (Week 3)
8,733,262
metrics = 'ConfirmedCases' dict_values = train.groupby('State_Country')[metrics].apply(np.array ).to_dict() dict_case_date = {} for country in dict_values: dict_case_date[country] = [] for case in [1, 10, 100, 250, 500, 1000, 2500, 5000]: try: dict_case_date[country] += [np.where(dict_values[country] >= case)[0][0]] except: dict_case_date[country] += [-1] dict_case_date[country] = np.array(dict_case_date[country]) dict_predict = {} data_train = train.copy() data_val = test.copy() len_predict = data_val[data_val.State_Country == country].shape[0] len_intersection = len(set(data_train.Date.unique())& set(data_val.Date.unique())) for country in train.State_Country.unique() : popt, pred_values = predict_cc(data_train, country, len_predict, metrics, len_intersection) dict_predict[country] = pred_values<feature_engineering>
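len_predict above is computed with whichever region the earlier loop left bound to country, which only works because every State_Country has the same number of forecast dates; an explicit version, under that same assumption, would be:

# Sketch: derive the forecast horizon from the test frame rather than the leftover loop variable.
len_predict = data_val.groupby('State_Country').size().iloc[0]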
test.to_csv("test_2.csv" )
COVID19 Global Forecasting (Week 3)
8,733,262
<find_best_params><EOS>
sumb=pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/submission.csv') output=pd.DataFrame() output['ForecastId']=test['ForecastId'].astype(int) output['ConfirmedCases']=test['ConfirmedCases'].astype(int) output['Fatalities']=test['Fatalities'].astype(int) output.to_csv('submission.csv',index=False)
COVID19 Global Forecasting (Week 3)
3,775,898
<SOS> metric: MacroFScore Kaggle data source: iwildcam-2019-fgvc6<prepare_output>
import os from glob import glob import numpy as np import pandas as pd import matplotlib.pyplot as plt import cv2
iWildCam 2019 - FGVC6
3,775,898
predicted_class_indices=np.argmax(predict,axis=1 )<load_pretrained>
train_df = pd.read_csv('.. /input/train.csv') test_df = pd.read_csv('.. /input/test.csv') sub = pd.read_csv('.. /input/sample_submission.csv') train_dir = '.. /input/train_images' test_dir = '.. /input/test_images'
iWildCam 2019 - FGVC6
3,775,898
with open('.. /input/iwildcam2020-classes-dict/cid_invert_dict.pkl', mode='rb')as fin: cid_invert_dict=pickle.load(fin )<categorify>
print('Total images for train {0}'.format(len(os.listdir(train_dir)))) print('Total images for test {0}'.format(len(os.listdir(test_dir))))
iWildCam 2019 - FGVC6
3,775,898
def transform(x): return cid_invert_dict[str(x)]<save_to_csv>
classes_wild = {0: 'empty', 1: 'deer', 2: 'moose', 3: 'squirrel', 4: 'rodent', 5: 'small_mammal', \ 6: 'elk', 7: 'pronghorn_antelope', 8: 'rabbit', 9: 'bighorn_sheep', 10: 'fox', 11: 'coyote', \ 12: 'black_bear', 13: 'raccoon', 14: 'skunk', 15: 'wolf', 16: 'bobcat', 17: 'cat',\ 18: 'dog', 19: 'opossum', 20: 'bison', 21: 'mountain_goat', 22: 'mountain_lion'} train_df['classes_wild'] = train_df['category_id'].apply(lambda cw: classes_wild[cw] )
iWildCam 2019 - FGVC6
3,775,898
sam_sub_df["Category"] = predicted_class_indices sam_sub_df["Category"]=sam_sub_df["Category"].apply(transform) sam_sub_df = sam_sub_df.loc[:,["Id", "Category"]] sam_sub_df.to_csv("submission.csv",index=False) sam_sub_df.head()<set_options>
train_df['classes_wild'].value_counts()
iWildCam 2019 - FGVC6
3,775,898
ImageFile.LOAD_TRUNCATED_IMAGES = True %matplotlib inline<define_search_model>
import torch import torch.nn as nn import torchvision from torchvision.transforms import transforms from torch.utils.data import Dataset, DataLoader from torchvision import models from sklearn.model_selection import train_test_split
iWildCam 2019 - FGVC6
3,775,898
def kaggle_commit_logger(str_to_log, need_print = True): if need_print: print(str_to_log) os.system('echo ' + str_to_log )<load_from_disk>
train_df = train_df[['file_name','category_id']] train_df.head()
iWildCam 2019 - FGVC6
3,775,898
with open(r'/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_train_annotations.json')as json_file: train_data = json.load(json_file )<prepare_output>
category = train_df['category_id'].unique() encoder = dict([(v, k)for v, k in zip(category, range(len(category)))]) decoder = dict([(v, k)for k, v in encoder.items() ]) print(pd.DataFrame({ 'Before encoding': list(encoder.keys()), 'After encoding': list(encoder.values())} ).to_string(index=False)) def encoding(labels): return encoder[int(labels)]
iWildCam 2019 - FGVC6
3,775,898
df_train = pd.DataFrame({'id': [item['id'] for item in train_data['annotations']], 'category_id': [item['category_id'] for item in train_data['annotations']], 'image_id': [item['image_id'] for item in train_data['annotations']], 'file_name': [item['file_name'] for item in train_data['images']]}) df_train.head()<create_dataframe>
train_df['category_id'] = train_df['category_id'].apply(encoding) train_df['category_id'].value_counts()
iWildCam 2019 - FGVC6
3,775,898
df_image = pd.DataFrame.from_records(train_data['images']) indices = [] for _id in df_image[df_image['location'] == 537]['id'].values: indices.append(df_train[ df_train['image_id'] == _id ].index) for the_index in indices: df_train = df_train.drop(df_train.index[the_index] )<load_pretrained>
class WildDataset(Dataset): def __init__(self, df, img_dir, transforms=None): self.df = df self.img_dir = img_dir self.transforms = transforms def __len__(self): return len(self.df) def __getitem__(self, idx): img_name = os.path.join(self.img_dir, self.df.iloc[idx, 0]) image = cv2.imread(img_name) label = self.df.iloc[idx, 1] if self.transforms is not None: image = self.transforms(image) return image, label
iWildCam 2019 - FGVC6
3,775,898
%%time indices = [] for i in df_train['file_name']: try: Image.open('/kaggle/input/iwildcam-2020-fgvc7/train/' + i) except: print(i) df_train.drop(df_train.loc[df_train['file_name']==i].index, inplace=True )<load_from_disk>
train, val = train_test_split(train_df, stratify=train_df.category_id, test_size=0.1) len(train), len(val )
iWildCam 2019 - FGVC6
3,775,898
with open(r'/kaggle/input/iwildcam-2020-fgvc7/iwildcam2020_test_information.json')as f: test_data = json.load(f )<prepare_output>
aug = transforms.Compose([transforms.ToPILImage() , transforms.Resize(( 32, 32)) , transforms.ToTensor() , transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ])
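With the transform pipeline defined, a quick item-level check of WildDataset (assuming the image files under train_dir are readable) might be:

# Sketch: pull one sample to confirm the pipeline yields a normalised (3, 32, 32) tensor and an encoded label.
ds = WildDataset(df=train, img_dir=train_dir, transforms=aug)
img, label = ds[0]
print(img.shape, label)   # expected: torch.Size([3, 32, 32]) and an integer class id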
iWildCam 2019 - FGVC6
3,775,898
df_test = pd.DataFrame.from_records(test_data['images']) df_test.head()<define_variables>
dataset_train = WildDataset(df=train, img_dir=train_dir, transforms=aug) dataset_valid = WildDataset(df=val, img_dir=train_dir, transforms=aug) train_loader = DataLoader(dataset=dataset_train, batch_size=24, shuffle=True) val_loader = DataLoader(dataset_valid, batch_size=24, shuffle=False, num_workers=0 )
iWildCam 2019 - FGVC6
3,775,898
batch_size = 64 IMG_SIZE = 64 N_EPOCHS = 1 ID_COLNAME = 'file_name' ANSWER_COLNAME = 'category_id' TRAIN_IMGS_DIR = r'.. /input/iwildcam-2020-fgvc7/train/' TEST_IMGS_DIR = r'.. /input/iwildcam-2020-fgvc7/test/'<split>
num_epochs = 2 num_classes = 14 learning_rate = 0.02 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
iWildCam 2019 - FGVC6
3,775,898
train_df, test_df = train_test_split(df_train[[ID_COLNAME, ANSWER_COLNAME]], test_size = 0.15, shuffle = True )<define_variables>
def conv3x3(in_channels, out_channels, stride=1): return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False) class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, stride=1, downsample=None): super(ResidualBlock, self ).__init__() self.conv1 = conv3x3(in_channels, out_channels, stride) self.bn1 = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(out_channels, out_channels) self.bn2 = nn.BatchNorm2d(out_channels) self.downsample = downsample def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, num_classes=14): super(ResNet, self ).__init__() self.in_channels = 16 self.conv = conv3x3(3, 16) self.bn = nn.BatchNorm2d(16) self.relu = nn.LeakyReLU(inplace=True) self.layer1 = self.make_layer(block, 16, layers[0]) self.layer2 = self.make_layer(block, 32, layers[1], 2) self.layer3 = self.make_layer(block, 64, layers[2], 2) self.layer4 = self.make_layer(block, 128, layers[3], 2) self.avg_pool = nn.AdaptiveAvgPool2d(4) self.fc = nn.Linear(128, num_classes) def make_layer(self, block, out_channels, blocks, stride=1): downsample = None if(stride != 1)or(self.in_channels != out_channels): downsample = nn.Sequential( conv3x3(self.in_channels, out_channels, stride=stride), nn.BatchNorm2d(out_channels)) layers = [] layers.append(block(self.in_channels, out_channels, stride, downsample)) self.in_channels = out_channels for i in range(1, blocks): layers.append(block(out_channels, out_channels)) return nn.Sequential(*layers) def forward(self, x): out = self.conv(x) out = self.bn(out) out = self.relu(out) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = self.layer4(out) out = self.avg_pool(out) out = out.view(out.size(0), -1) out = self.fc(out) return out def create_resnet_model(output_dim: int = 1)-> nn.Module: model = ResNet(ResidualBlock, [2, 2, 2, 2]) in_features = model.fc.in_features model.avg_pool = nn.AdaptiveAvgPool2d(1) model.fc = nn.Linear(in_features, output_dim) model = model.to(device) return model model = create_resnet_model(output_dim=num_classes) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adamax(model.parameters() , lr=learning_rate )
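A quick shape check of the model returned by create_resnet_model (random batch, no training) could be:

# Sketch: a 32x32 RGB batch should flow through the modified head (AdaptiveAvgPool2d(1) + 128-to-14 linear).
dummy = torch.randn(2, 3, 32, 32).to(device)
with torch.no_grad():
    out = model(dummy)
print(out.shape)   # expected: torch.Size([2, 14])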
iWildCam 2019 - FGVC6
3,775,898
NUM_CLASSES = len(CLASSES_TO_USE) NUM_CLASSES<define_variables>
total_step = len(train_loader) for epoch in range(num_epochs): for i,(images, labels)in enumerate(train_loader): images = images.to(device) labels = labels.to(device) outputs = model(images) loss = criterion(outputs, labels) optimizer.zero_grad() loss.backward() optimizer.step() if(i+1)% 100 == 0: print("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}" .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
iWildCam 2019 - FGVC6
3,775,898
CLASSMAP = dict( [(i, j)for i, j in zip(CLASSES_TO_USE, range(NUM_CLASSES)) ] ) CLASSMAP<define_variables>
model.eval() with torch.no_grad() : correct = 0 total = 0 for images, labels in val_loader: images = images.to(device) labels = labels.to(device) outputs = model(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct +=(predicted == labels ).sum().item() print('Accuracy of the model on the 19630 test images: {} %'.format(100 * correct / total))
iWildCam 2019 - FGVC6
3,775,898
REVERSE_CLASSMAP = dict([(v, k)for k, v in CLASSMAP.items() ]) REVERSE_CLASSMAP<choose_model_class>
sub = pd.read_csv('.. /input/sample_submission.csv') sub['Id'] = sub['Id'] + '.jpg' sub.head()
iWildCam 2019 - FGVC6
3,775,898
model = models.densenet121(pretrained='imagenet' )<choose_model_class>
dataset_valid = WildDataset(df=sub, img_dir=test_dir, transforms=aug) test_loader = DataLoader(dataset_valid, batch_size=24, shuffle=False )
iWildCam 2019 - FGVC6
3,775,898
new_head = torch.nn.Linear(model.classifier.in_features, NUM_CLASSES) model.classifier = new_head<load_from_disk>
model.eval() preds = [] for images, labels in test_loader: images = images.to(device) labels = labels.to(device) outputs = model(images) _, predicted = torch.max(outputs.data, 1) for i in predicted.detach().cpu().numpy() : preds.append(i)
iWildCam 2019 - FGVC6
3,775,898
model.load_state_dict(torch.load('.. /input/iwild2020-torch/model'))<categorify>
sub['Predicted'] = preds sub['Id'] = sub['Id'].str[:-4] sub.head()
iWildCam 2019 - FGVC6
3,775,898
normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_augmentation = transforms.Compose([ transforms.Resize(( IMG_SIZE,IMG_SIZE)) , transforms.ToTensor() , normalizer, ]) val_augmentation = transforms.Compose([ transforms.Resize(( IMG_SIZE,IMG_SIZE)) , transforms.ToTensor() , normalizer, ] )<create_dataframe>
def decoding(labels): return decoder[int(labels)]
iWildCam 2019 - FGVC6
3,775,898
class IMetDataset(Dataset): def __init__(self, df, images_dir, n_classes = NUM_CLASSES, id_colname = ID_COLNAME, answer_colname = ANSWER_COLNAME, label_dict = CLASSMAP, transforms = None ): self.df = df self.images_dir = images_dir self.n_classes = n_classes self.id_colname = id_colname self.answer_colname = answer_colname self.label_dict = label_dict self.transforms = transforms def __len__(self): return self.df.shape[0] def __getitem__(self, idx): cur_idx_row = self.df.iloc[idx] img_id = cur_idx_row[self.id_colname] img_name = img_id img_path = os.path.join(self.images_dir, img_name) img = Image.open(img_path) if self.transforms is not None: img = self.transforms(img) if self.answer_colname is not None: label = torch.zeros(( self.n_classes,), dtype=torch.float32) label[self.label_dict[cur_idx_row[self.answer_colname]]] = 1.0 return img, label else: return img, img_id<create_dataframe>
sub['Predicted'] = sub['Predicted'].apply(decoding) sub.head() sub.to_csv('submission.csv', index=False )
iWildCam 2019 - FGVC6
3,775,898
train_dataset = IMetDataset(train_df, TRAIN_IMGS_DIR, transforms = train_augmentation) test_dataset = IMetDataset(test_df, TRAIN_IMGS_DIR, transforms = val_augmentation )<load_pretrained>
sub['Predicted'].value_counts()
iWildCam 2019 - FGVC6
3,508,271
BS = 24 train_loader = DataLoader(train_dataset, batch_size=BS, shuffle=True, num_workers=2, pin_memory=True) test_loader = DataLoader(test_dataset, batch_size=BS, shuffle=False, num_workers=2, pin_memory=True )<compute_test_metric>
x_train = np.load('.. /input/reducing-image-sizes-to-32x32/X_train.npy') x_test = np.load('.. /input/reducing-image-sizes-to-32x32/X_test.npy') y_train = np.load('.. /input/reducing-image-sizes-to-32x32/y_train.npy') print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255. x_test /= 255.
iWildCam 2019 - FGVC6
3,508,271
def f1_score(y_true, y_pred, threshold=0.5): return fbeta_score(y_true, y_pred, 1, threshold) def fbeta_score(y_true, y_pred, beta, threshold, eps=1e-9): beta2 = beta**2 y_pred = torch.ge(y_pred.float() , threshold ).float() y_true = y_true.float() true_positive =(y_pred * y_true ).sum(dim=1) precision = true_positive.div(y_pred.sum(dim=1 ).add(eps)) recall = true_positive.div(y_true.sum(dim=1 ).add(eps)) return torch.mean( (precision*recall ). div(precision.mul(beta2)+ recall + eps ). mul(1 + beta2))<train_model>
datagen_train = ImageDataGenerator( width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True) datagen_train.fit(x_train )
iWildCam 2019 - FGVC6
3,508,271
def train_one_epoch(model, train_loader, criterion, optimizer, steps_upd_logging = 250): model.train() ; total_loss = 0.0 train_tqdm = tqdm_notebook(train_loader) for step,(features, targets)in enumerate(train_tqdm): try: features, targets = cuda(features), cuda(targets) optimizer.zero_grad() logits = model(features) loss = criterion(logits, targets) loss.backward() optimizer.step() total_loss += loss.item() if(step + 1)% steps_upd_logging == 0: logstr = f'Train loss on step {step + 1} was {round(total_loss /(step + 1), 5)}' train_tqdm.set_description(logstr) kaggle_commit_logger(logstr, need_print=False) except: pass return total_loss /(step + 1 )<choose_model_class>
from keras.applications import DenseNet121 from keras.layers import * from keras.models import Sequential
iWildCam 2019 - FGVC6
3,508,271
criterion = torch.nn.BCEWithLogitsLoss() optimizer = torch.optim.Adam(model.parameters() , lr=0.0005) sheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=3 )<train_model>
conv_base = DenseNet121(weights='imagenet',include_top=False,input_shape=(32,32,3))
iWildCam 2019 - FGVC6
3,508,271
%%time TRAIN_LOGGING_EACH = 500 train_losses = [] valid_losses = [] valid_f1s = [] best_model_f1 = 0.0 best_model = None best_model_ep = 0 for epoch in range(1, N_EPOCHS + 1): ep_logstr = f"Starting {epoch} epoch..." kaggle_commit_logger(ep_logstr) tr_loss = train_one_epoch(model, train_loader, criterion, optimizer, TRAIN_LOGGING_EACH) train_losses.append(tr_loss) tr_loss_logstr = f'Mean train loss: {round(tr_loss,5)}' kaggle_commit_logger(tr_loss_logstr) valid_loss, valid_f1 = validate(model, test_loader, criterion) valid_losses.append(valid_loss) valid_f1s.append(valid_f1) val_loss_logstr = f'Mean valid loss: {round(valid_loss,5)}' kaggle_commit_logger(val_loss_logstr) sheduler.step(valid_loss) if valid_f1 >= best_model_f1: best_model = model best_model_f1 = valid_f1 best_model_ep = epoch<save_model>
model = Sequential() model.add(conv_base) model.add(GlobalAveragePooling2D()) model.add(Dropout(0.5)) model.add(Dense(14, activation='softmax')) model.summary()
iWildCam 2019 - FGVC6
3,508,271
torch.save(best_model.state_dict() , 'model' )<find_best_params>
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] )
iWildCam 2019 - FGVC6
3,508,271
bestmodel_logstr = f'Best f1 is {round(best_model_f1, 5)} on epoch {best_model_ep}' kaggle_commit_logger(bestmodel_logstr )<load_from_csv>
str_ = 'Training Started' os.system('echo '+str_ )
iWildCam 2019 - FGVC6
3,508,271
SAMPLE_SUBMISSION_DF = pd.read_csv(r'.. /input/iwildcam-2020-fgvc7/sample_submission.csv') SAMPLE_SUBMISSION_DF.head()<feature_engineering>
batch_size = 128 epochs = 25 checkpoint = ModelCheckpoint( 'model.h5', monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto' ) history = model.fit( x=x_train, y=y_train, batch_size=64, epochs=10, callbacks=[checkpoint], validation_split=0.1 )
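datagen_train from the earlier cell is fitted but the call above trains directly on x_train, so the augmentations are never applied in what is shown; if they are meant to be used, a generator-based call (Keras' fit_generator of that era, assuming ModelCheckpoint was imported from keras.callbacks and a separate hold-out split, since validation_split does not work with generators) would look like:

# Sketch only -- x_val / y_val are hypothetical names for a hold-out split carved from x_train / y_train.
history = model.fit_generator(
    datagen_train.flow(x_train, y_train, batch_size=64),
    steps_per_epoch=len(x_train) // 64,
    epochs=10,
    callbacks=[checkpoint],
    validation_data=(x_val, y_val))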
iWildCam 2019 - FGVC6
3,508,271
SAMPLE_SUBMISSION_DF.rename(columns={'Id':'file_name','Category':'category_id'}, inplace=True) SAMPLE_SUBMISSION_DF['file_name'] = SAMPLE_SUBMISSION_DF['file_name'] + '.jpg' SAMPLE_SUBMISSION_DF.head()<create_dataframe>
model.load_weights('model.h5' )
iWildCam 2019 - FGVC6
3,508,271
subm_dataset = IMetDataset(SAMPLE_SUBMISSION_DF, TEST_IMGS_DIR, transforms = val_augmentation, answer_colname=None )<load_pretrained>
pred = model.predict_classes(x_test,verbose=1 )
iWildCam 2019 - FGVC6
3,508,271
SUMB_BS = 48 subm_dataloader = DataLoader(subm_dataset, batch_size=SUMB_BS, shuffle=False, pin_memory=True )<find_best_params>
sam_sub = pd.read_csv('.. /input/iwildcam-2019-fgvc6/sample_submission.csv') sam_sub.head()
iWildCam 2019 - FGVC6
3,508,271
<find_best_params><EOS>
output = np.array(np.concatenate(( _id, pred), 1)) output = pd.DataFrame(output,columns = ["Id","Predicted"]) output.to_csv('submission.csv',index = False)
iWildCam 2019 - FGVC6