Dataset schema (column, dtype, min - max):
  kernel_id   int64           24.2k - 23.3M
  prompt      string lengths  8 - 1.85M
  completion  string lengths  1 - 182k
  comp_name   string lengths  5 - 57
7,993,081
SVC_model = SVC(probability=True)
SVC_model.fit(X_train, y_train)<predict_on_test>
train_win = train.copy()
train_los = train.copy()
train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'CoachName_W', 'CoachName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
# Mirror the winner/loser columns for the losing perspective; counts and
# variances are swapped too, so Count_1/Var_1 always describe team 1.
train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W', 'CoachName_L', 'CoachName_W', 'y_score', 'x_score', 'y_count', 'x_count', 'y_var', 'x_var']]
train_win.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'CoachName_1', 'CoachName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
train_los.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'CoachName_1', 'CoachName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'CoachName_W', 'CoachName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
test.columns = ['ID', 'Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'CoachName_1', 'CoachName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
SVC_predict = SVC_model.predict(X_test)
SVC_score = SVC_model.score(X_test, y_test)
print("Support Vector Classifier Score :", SVC_score)<compute_test_metric>
def feature_engineering(df):
    df['Seed_diff'] = df['Seed_1'] - df['Seed_2']
    df['Score_diff'] = df['Score_1'] - df['Score_2']
    df['Count_diff'] = df['Count_1'] - df['Count_2']
    df['Var_diff'] = df['Var_1'] - df['Var_2']
    df['Mean_score1'] = df['Score_1'] / df['Count_1']
    df['Mean_score2'] = df['Score_2'] / df['Count_2']
    df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2']
    df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1']
    df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2']
    return df

train_win = feature_engineering(train_win)
train_los = feature_engineering(train_los)
test = feature_engineering(test)
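For reference, the Fano factor computed above is the dispersion index of a team's scoring, the variance of points scored divided by the mean:

$$F = \frac{\sigma^2}{\mu}$$

Values above 1 indicate scoring that is more erratic than a Poisson process with the same mean, so the two FanoFactor features measure how streaky each team is.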
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
print("Accuracy Score of Support Vector Classifier SVC Model:") print(metrics.accuracy_score(y_test,SVC_predict)) print(" ","Classification Report:") print(metrics.classification_report(y_test,SVC_predict),' ' )<train_model>
data = pd.concat((train_win, train_los)).reset_index(drop=True)
print(data.shape)
data.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
decisionTreeModel = DecisionTreeClassifier(max_leaf_nodes=17, random_state=0)
decisionTreeModel.fit(X_train, y_train)<predict_on_test>
categoricals = ["CoachName_1", "CoachName_2", "TeamName_1", "TeamName_2"]
for c in categoricals:
    le = LabelEncoder()
    data[c] = data[c].fillna("NaN")
    data[c] = le.fit_transform(data[c])
    # Note: transform() raises on labels that never appeared in `data`.
    test[c] = le.transform(test[c])
data.head()
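Since `le.transform(test[c])` fails on categories that never occur in `data`, a common workaround is to fit each encoder on the union of both frames first. A minimal sketch (an alternative to the loop above, not part of the original notebook):

```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Sketch: fit on the combined train+test values so unseen test labels cannot raise.
for c in categoricals:
    le = LabelEncoder()
    combined = pd.concat([data[c], test[c]]).fillna("NaN").astype(str)
    le.fit(combined)
    data[c] = le.transform(data[c].fillna("NaN").astype(str))
    test[c] = le.transform(test[c].fillna("NaN").astype(str))
```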
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
decisionTree_predict = decisionTreeModel.predict(X_test)
decisionTree_score = decisionTreeModel.score(X_test, y_test)
print("Decision Tree Classifier Score :", decisionTree_score)<compute_test_metric>
target = 'result'
features = data.columns.values.tolist()
features.remove(target)
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
print("Accuracy Score of Decision Tree Classifier Model:") print(metrics.accuracy_score(y_test,decisionTree_predict)) print(" ","Classification Report:") print(metrics.classification_report(y_test,decisionTree_predict),' ' )<train_model>
nn = NeuralNetworkModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler="MinMax", verbose=True)
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
Random_forest = RandomForestClassifier(n_estimators=17)
Random_forest.fit(X_train, y_train)<predict_on_test>
lgbm = LgbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True)
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
randomForest_predict = Random_forest.predict(X_test)
randomForest_score = Random_forest.score(X_test, y_test)
print("Random Forest Score :", randomForest_score)<compute_test_metric>
catb = CatbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True)
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
print("Accuracy Score of Random Forest Classifier Model:") print(metrics.accuracy_score(y_test,randomForest_predict)) print(" ","Classification Report:") print(metrics.classification_report(y_test,randomForest_predict),' ' )<train_model>
submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv')
submission_df['Pred'] = test_preds
submission_df.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,993,081
<predict_on_test><EOS>
submission_df.to_csv('submission.csv', index=False)
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
<SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-mens-tournament<compute_test_metric>
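The row above records the competition metric as log loss; a minimal local sanity check of predicted probabilities against known outcomes (hypothetical arrays, using scikit-learn's implementation) looks like:

```python
import numpy as np
from sklearn.metrics import log_loss

y_true = np.array([1, 0, 1, 1])           # hypothetical game outcomes
y_prob = np.array([0.9, 0.2, 0.6, 0.55])  # hypothetical predicted win probabilities
print(log_loss(y_true, y_prob))           # lower is better; ~0.693 is always predicting 0.5
```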
py.init_notebook_mode(connected=True)
pd.set_option('max_columns', 50)
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
print("Accuracy Score of KNN Model:") print(metrics.accuracy_score(y_test,KNN_predict)) print(" ","Classification Report:") print(metrics.classification_report(y_test,KNN_predict),' ' )<train_model>
datadir = Path('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament')
stage1dir = datadir/'MDataFiles_Stage1'
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
gbk = GradientBoostingClassifier(random_state=101, n_estimators=150, min_samples_split=100, max_depth=6)
gbk.fit(X_train, y_train)<predict_on_test>
teams_df = pd.read_csv(stage1dir/'MTeams.csv')
print('teams_df', teams_df.shape)
teams_df.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
gbk_predict = gbk.predict(X_test)
gbk_score = gbk.score(X_test, y_test)
print("Gradient Boosting Score :", gbk_score)<compute_test_metric>
seasons_df = pd.read_csv(stage1dir/'MSeasons.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
print("Accuracy Score of Gradient Boosting Model:") print(metrics.accuracy_score(y_test,gbk_predict))<import_modules>
tourney_seeds_df = pd.read_csv(stage1dir/'MNCAATourneySeeds.csv')
tourney_seeds_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
from sklearn import ensemble
from sklearn.model_selection import GridSearchCV<choose_model_class>
regular_season_results_df = pd.read_csv(stage1dir/'MRegularSeasonCompactResults.csv')
tournament_results_df = pd.read_csv(stage1dir/'MNCAATourneyCompactResults.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
GridList = [
    {'n_estimators': [10, 15, 20, 25, 30, 35, 40], 'max_depth': [5, 10, 15, 20]},
]
randomForest_ensemble = ensemble.RandomForestClassifier(random_state=31, max_features=3)
gridSearchCV = GridSearchCV(randomForest_ensemble, GridList, cv=5)<train_on_grid>
sample_submission = pd.read_csv(datadir/'MSampleSubmissionStage1_2020.csv')
sample_submission
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
gridSearchCV.fit(X_train, y_train)<compute_train_metric>
regular_season_detailed_results_df = pd.read_csv(stage1dir/'MRegularSeasonDetailedResults.csv')
tournament_detailed_results_df = pd.read_csv(stage1dir/'MNCAATourneyDetailedResults.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
gridSearchCV_predict = gridSearchCV.predict(X_test)
gridSearchCV_score = gridSearchCV.score(X_test, y_test)
print("Grid SearchCV Score :", gridSearchCV_score)<import_modules>
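After fitting, the winning grid point can be inspected through scikit-learn's standard attributes; a short sketch:

```python
# Inspect the search results: best hyperparameters, their mean CV score,
# and the refit estimator ready for prediction.
print(gridSearchCV.best_params_)
print(gridSearchCV.best_score_)
best_rf = gridSearchCV.best_estimator_
```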
cities_df = pd.read_csv(stage1dir/'Cities.csv')
mgame_cities_df = pd.read_csv(stage1dir/'MGameCities.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
from tabulate import tabulate<predict_on_test>
massey_df = pd.read_csv(stage1dir/'MMasseyOrdinals.csv')
massey_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
ids = test_data['PassengerId']
print(len(ids))
predictions = gridSearchCV.predict(test_data)<save_to_csv>
event2015_df = pd.read_csv(datadir/'MEvents2015.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
output = pd.DataFrame({'PassengerId': ids, 'Survived': predictions})<save_to_csv>
players_df = pd.read_csv(datadir/'MPlayers.csv')
players_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
output.to_csv('submission.csv', index=False)<import_modules>
team_coaches_df = pd.read_csv(stage1dir/'MTeamCoaches.csv')
print('team_coaches_df', team_coaches_df.shape)
team_coaches_df.iloc[80:85]
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
import numpy as np
import pandas as pd<load_from_csv>
conferences_df = pd.read_csv(stage1dir/'Conferences.csv')
team_conferences_df = pd.read_csv(stage1dir/'MTeamConferences.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
test = pd.read_csv("/kaggle/input/mldl-competition-1/test.csv") train = pd.read_csv("/kaggle/input/mldl-competition-1/train.csv") sampleSabmission = pd.read_csv("/kaggle/input/mldl-competition-1/sampleSubmission.csv" )<import_modules>
team_conferences_df[team_conferences_df['TeamID'] == 1102]
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
import tensorflow as tf

print(tf.__version__)<prepare_x_and_y>
conference_tourney_games_df = pd.read_csv(stage1dir/'MConferenceTourneyGames.csv')
conference_tourney_games_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
column_names = ['Id', 'X', 'Y', 'Z', 'Time']  # fixed: a missing comma made 'IdX'
X_train_orig = train[["X", "Y", "Z"]]
X_test_orig = test[["X", "Y", "Z"]]
Y_train_orig = train["Time"]
print(X_train_orig.shape)
print(X_test_orig.shape)
print(Y_train_orig.shape)<train_model>
secondary_tourney_teams_df = pd.read_csv(stage1dir/'MSecondaryTourneyTeams.csv')
secondary_tourney_teams_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
scaler = MinMaxScaler(feature_range=(-1, 1), copy=True)
scaler.fit(X_train_orig)
print("Maximum values of X_train(X, Y, Z): " + str(scaler.data_max_))
print("Minimum values of X_train(X, Y, Z): " + str(scaler.data_min_))
X_train_norm = scaler.transform(X_train_orig)
X_test_norm = scaler.transform(X_test_orig)<split>
secondary_tourney_results_df = pd.read_csv(stage1dir/'MSecondaryTourneyCompactResults.csv')
secondary_tourney_results_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
X_train, X_val, y_train, y_val = train_test_split(X_train_norm, Y_train_orig, test_size=0.2)
print(X_train.shape)
print(y_train.shape)
print(X_val.shape)
print(y_val.shape)<choose_model_class>
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
model = keras.Sequential([
    keras.layers.Dense(128, input_dim=3, activation='relu'),
    keras.layers.Dense(6, activation='relu'),
    keras.layers.Dense(1, activation="linear")
])
print(model.summary())<choose_model_class>
tourney_slots_df = pd.read_csv(stage1dir/'MNCAATourneySlots.csv')
tourney_seed_round_slots_df = pd.read_csv(stage1dir/'MNCAATourneySeedRoundSlots.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
# Note: accuracy is not a meaningful metric for a regression target; MAE/MSE
# are what to watch here.
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])<train_model>
tourney_slots_df[(tourney_slots_df['Season'] == 1985)&(tourney_slots_df['Slot'].str.startswith('R1W'))]
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
history = model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=5)<compute_test_metric>
tournament_results2015_df = tournament_results_df.query("Season >= 2015")
tournament_results2015_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
test_loss, test_acc = model.evaluate(X_val, y_val, verbose=0)
print('\nTest loss:', test_loss)
print('\nTest accuracy:', test_acc)<predict_on_test>
for key, row in tournament_results2015_df.iterrows():
    if row['WTeamID'] < row['LTeamID']:
        id_name = str(row['Season']) + '_' + str(row['WTeamID']) + '_' + str(row['LTeamID'])
        sample_submission.loc[sample_submission['ID'] == id_name, 'Pred'] = 1.0
    else:
        id_name = str(row['Season']) + '_' + str(row['LTeamID']) + '_' + str(row['WTeamID'])
        sample_submission.loc[sample_submission['ID'] == id_name, 'Pred'] = 0.0
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,132,680
<save_to_csv><EOS>
sample_submission.to_csv('submission.csv', index=False)
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
<SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-mens-tournament<load_from_csv>
YEAR = 2021 STAGE = 2 NCAAM = True NCAAW = False
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
train = pd.read_csv("/kaggle/input/titanic/train.csv") train.head()<load_from_csv>
if NCAAM:
    DATA_DIR = f'../input/ncaam-march-mania-2021/MDataFiles_Stage{STAGE}/'
if NCAAW:
    DATA_DIR = f'../input/ncaaw-march-mania-2021/WDataFiles_Stage{STAGE}/'
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
test = pd.read_csv("/kaggle/input/titanic/test.csv") test.head()<train_model>
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
import lightgbm as lgb
import xgboost as xgb
from xgboost import XGBClassifier
import gc
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
print('Train columns with null values:', train.isnull().sum(), sep='\n')
print("-"*42)
print('Test/Validation columns with null values:', test.isnull().sum(), sep='\n')
print("-"*42)<feature_engineering>
if YEAR == 2020:
    tourney_result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyCompactResults.csv')
    tourney_seed = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv')
elif YEAR == 2021:
    tourney_result = pd.read_csv(DATA_DIR + 'MNCAATourneyCompactResults.csv')
    tourney_seed = pd.read_csv(DATA_DIR + 'MNCAATourneySeeds.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
test[test['Fare'].isnull()]
test['Fare'] = test['Fare'].fillna(test['Fare'].mean())<count_missing_values>
tourney_result = tourney_result.drop(['DayNum', 'WScore', 'LScore', 'WLoc', 'NumOT'], axis=1)
tourney_result
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
print('Number of missing values in Fare column for test set:', test.Fare.isnull().sum())<feature_engineering>
tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Seed': 'WSeed'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1)
tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Seed': 'LSeed'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1)
tourney_result
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
train[train['Embarked'].isnull()]
train['Embarked'] = train['Embarked'].fillna('S')<count_missing_values>
def get_seed(x):
    return int(x[1:3])

tourney_result['WSeed'] = tourney_result['WSeed'].map(lambda x: get_seed(x))
tourney_result['LSeed'] = tourney_result['LSeed'].map(lambda x: get_seed(x))
tourney_result
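Seeds arrive as strings such as 'W01' or 'Z16a' (play-in teams carry a trailing letter), so `int(x[1:3])` keeps only the two numeric digits; for example:

```python
# Hypothetical seed strings illustrating the parser above
print(get_seed('W01'))   # -> 1
print(get_seed('Z16a'))  # -> 16, the play-in suffix is ignored
```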
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
print('Number of missing values in Embarked column for train set:', train.Embarked.isnull().sum())<categorify>
if YEAR == 2020:
    season_result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonCompactResults.csv')
elif YEAR == 2021:
    season_result = pd.read_csv(DATA_DIR + 'MRegularSeasonCompactResults.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
y = train["Survived"] features = ["Sex", "Embarked", "Pclass", "Fare"] X = pd.get_dummies(train[features]) X_test = pd.get_dummies(test[features]) model = RandomForestClassifier(n_estimators=125, max_depth=5, random_state=1) model.fit(X, y) predictions = model.predict(X_test) output = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': predictions}) output.to_csv('contract_submission.csv', index=False) print("Your submission was successfully saved!" )<load_from_csv>
season_win_result = season_result[['Season', 'WTeamID', 'WScore']]
season_lose_result = season_result[['Season', 'LTeamID', 'LScore']]
season_win_result.rename(columns={'WTeamID': 'TeamID', 'WScore': 'Score'}, inplace=True)
season_lose_result.rename(columns={'LTeamID': 'TeamID', 'LScore': 'Score'}, inplace=True)
season_result = pd.concat((season_win_result, season_lose_result)).reset_index(drop=True)
season_result
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
path = '../input/covid19-global-forecasting-week-2/'
train = pd.read_csv(path + 'train.csv')
column_name_map = {
    'Country_Region': 'country',
    'Province_State': 'state',
    'Date': 'date',
    'ConfirmedCases': 'cases',
    'Fatalities': 'deaths',
}
train = train.rename(columns=column_name_map)
from_date = train['date'].min()
to_date = train['date'].max()
train['state'].fillna('-', inplace=True)
print(train.dtypes)<load_from_csv>
season_score = season_result.groupby(['Season', 'TeamID'])['Score'].sum().reset_index()
season_score
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
path = '../input/covid19-global-forecasting-week-2/'
test = pd.read_csv(path + 'test.csv')
column_name_map = {
    'Country_Region': 'country',
    'Province_State': 'state',
    'Date': 'date',
    'ForecastId': 'id',
}
test = test.rename(columns=column_name_map)
test['state'].fillna('-', inplace=True)
regions = test[['state', 'country']].drop_duplicates()
regions.head()<feature_engineering>
tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Score': 'WScoreT'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1)
tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Score': 'LScoreT'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1)
tourney_result
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
def logistic(xs, l, L, k, x_0):
    result = []
    for x in xs:
        xp = k*(x - x_0)
        if xp >= 0:
            result.append(l + (L - l)/(1. + np.exp(-xp)))
        else:
            result.append(l + (L - l)*np.exp(xp)/(1. + np.exp(xp)))
    return result

def date_day_diff(d1, d2):
    delta = dt.datetime.strptime(d1, "%Y-%m-%d") - dt.datetime.strptime(d2, "%Y-%m-%d")
    return delta.days<filter>
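Both branches of `logistic` evaluate the same generalized logistic curve; the split merely avoids overflow in `exp` for large negative arguments:

$$f(x) = l + \frac{L - l}{1 + e^{-k(x - x_0)}}$$

with lower asymptote $l$, upper asymptote $L$, growth rate $k$, and midpoint $x_0$.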
tourney_win_result = tourney_result.drop(['Season', 'WTeamID', 'LTeamID'], axis=1)
tourney_win_result.rename(columns={'WSeed': 'Seed1', 'LSeed': 'Seed2', 'WScoreT': 'ScoreT1', 'LScoreT': 'ScoreT2'}, inplace=True)
tourney_win_result
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
predictions_cases = []
plot = True
for index, region in regions.iterrows():
    predicted = False
    st = region['state']
    co = region['country']
    rdata = train[(train['state']==st)&(train['country']==co)]
    rtest = test[(test['state']==st)&(test['country']==co)]
    window = rdata[rdata['cases']>=100]['date']
    if window.count() < 10:
        window = rdata[rdata['cases']>=10]['date']
    if window.count() >= 10:
        start_date = window.min()
        rdata = rdata[rdata['date']>=start_date]
        t = rdata['date'].values
        t = [float(date_day_diff(d, start_date)) for d in t]
        y = rdata['cases'].values
        try:
            bounds = ([-1e6, -1e6, 0.001, 0.0], [1e6, 1e6, 0.999, t[-1]+10])
            popt, pcov = curve_fit(logistic, t, y, bounds=bounds)
            residuals = y - logistic(t, *popt)
            ss_res = np.sum(residuals**2)
            ss_tot = np.sum((y - np.mean(y))**2)
            rs = 1 - (ss_res / ss_tot)
            if plot:
                print(st, co)
                print(popt)
                print('R squared: ', rs)
                T = np.arange(0, 60, 1).tolist()
                yfit = logistic(T, *popt)
                pylab.plot(t, y, 'o')
                pylab.plot(T, yfit)
                pylab.show()
            if rs >= 0.95:
                for index, rt in rtest.iterrows():
                    tdate = rt['date']
                    prev_max = 0
                    if tdate <= to_date:
                        ca = list(train[(train['date']==tdate)&(train['state']==st)&(train['country']==co)]['cases'].values)[0]
                        prev_max = max(prev_max, ca)
                    else:
                        ca = logistic([date_day_diff(tdate, start_date)], *popt)
                        ca = max(prev_max, ca[0])
                        prev_max = ca
                    predictions_cases.append((rt['id'], int(ca)))
                predicted = True
        except:
            pass
    if not predicted:
        t = rdata['date'].values
        start_date = t[0]
        t = np.array([float(date_day_diff(d, start_date)) for d in t])
        y = rdata['cases'].values
        linreg = LinearRegression()
        linreg.fit(t[-10:].reshape(-1, 1), y[-10:])
        m = linreg.coef_[0]
        b = linreg.intercept_
        if plot:
            print(st, co)
            print(linreg.intercept_, linreg.coef_)
            T = np.arange(0, 90, 1).tolist()
            y_pred = [m*x + b for x in T]
            pylab.plot(t, y, 'o')
            pylab.plot(T, y_pred)
            pylab.show()
        for index, rt in rtest.iterrows():
            tdate = rt['date']
            prev_max = 0
            if tdate <= to_date:
                ca = list(train[(train['date']==tdate)&(train['state']==st)&(train['country']==co)]['cases'].values)[0]
                prev_max = max(prev_max, ca)
            else:
                ca = m*date_day_diff(tdate, start_date) + b
                ca = max(ca, prev_max)
                prev_max = ca
            predictions_cases.append((rt['id'], int(ca)))<prepare_output>
tourney_lose_result = tourney_win_result.copy()
tourney_lose_result['Seed1'] = tourney_win_result['Seed2']
tourney_lose_result['Seed2'] = tourney_win_result['Seed1']
tourney_lose_result['ScoreT1'] = tourney_win_result['ScoreT2']
tourney_lose_result['ScoreT2'] = tourney_win_result['ScoreT1']
tourney_lose_result
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
data = {
    'ForecastId': [pred[0] for pred in predictions_cases],
    'ConfirmedCases': [pred[1] for pred in predictions_cases],
}
df_cases = pd.DataFrame(data, columns=data.keys())
df_cases.head()<filter>
tourney_win_result['Seed_diff'] = tourney_win_result['Seed1'] - tourney_win_result['Seed2']
tourney_win_result['ScoreT_diff'] = tourney_win_result['ScoreT1'] - tourney_win_result['ScoreT2']
tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2']
tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
predictions_deaths = []
plot = True
for index, region in regions.iterrows():
    predicted = False
    st = region['state']
    co = region['country']
    rdata = train[(train['state']==st)&(train['country']==co)]
    rtest = test[(test['state']==st)&(test['country']==co)]
    window = rdata[rdata['deaths']>=100]['date']
    if window.count() < 10:
        window = rdata[rdata['deaths']>=10]['date']
    if window.count() >= 10:
        start_date = window.min()
        rdata = rdata[rdata['date']>=start_date]
        t = rdata['date'].values
        t = [float(date_day_diff(d, start_date)) for d in t]
        y = rdata['deaths'].values
        try:
            bounds = ([-1e6, -1e6, 0.001, 0.0], [1e6, 1e6, 0.999, t[-1]+16])
            popt, pcov = curve_fit(logistic, t, y, bounds=bounds)
            residuals = y - logistic(t, *popt)
            ss_res = np.sum(residuals**2)
            ss_tot = np.sum((y - np.mean(y))**2)
            rs = 1 - (ss_res / ss_tot)
            if plot:
                print(st, co)
                print(popt)
                print('R squared: ', rs)
                T = np.arange(0, 60, 1).tolist()
                yfit = logistic(T, *popt)
                pylab.plot(t, y, 'o')
                pylab.plot(T, yfit)
                pylab.show()
            if rs >= 0.95:
                for index, rt in rtest.iterrows():
                    tdate = rt['date']
                    prev_max = 0
                    if tdate <= to_date:
                        ca = list(train[(train['date']==tdate)&(train['state']==st)&(train['country']==co)]['deaths'].values)[0]
                        prev_max = max(prev_max, ca)
                    else:
                        ca = logistic([date_day_diff(tdate, start_date)], *popt)
                        ca = max(prev_max, ca[0])
                        prev_max = ca
                    predictions_deaths.append((rt['id'], int(ca)))
                predicted = True
        except:
            pass
    if not predicted:
        t = rdata['date'].values
        start_date = t[0]
        t = np.array([float(date_day_diff(d, start_date)) for d in t])
        y = rdata['deaths'].values
        linreg = LinearRegression()
        linreg.fit(t[-10:].reshape(-1, 1), y[-10:])
        m = linreg.coef_[0]
        b = linreg.intercept_
        if plot:
            print(st, co)
            print(linreg.intercept_, linreg.coef_)
            T = np.arange(0, 90, 1).tolist()
            y_pred = [m*x + b for x in T]
            pylab.plot(t, y, 'o')
            pylab.plot(T, y_pred)
            pylab.show()
        for index, rt in rtest.iterrows():
            tdate = rt['date']
            prev_max = 0
            if tdate <= to_date:
                ca = list(train[(train['date']==tdate)&(train['state']==st)&(train['country']==co)]['deaths'].values)[0]
                prev_max = max(prev_max, ca)
            else:
                ca = m*date_day_diff(tdate, start_date) + b
                ca = max(ca, prev_max)
                prev_max = ca
            predictions_deaths.append((rt['id'], int(ca)))<prepare_output>
tourney_win_result['result'] = 1
tourney_lose_result['result'] = 0
tourney_result = pd.concat((tourney_win_result, tourney_lose_result)).reset_index(drop=True)
tourney_result
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
data = {
    'ForecastId': [pred[0] for pred in predictions_deaths],
    'Fatalities': [pred[1] for pred in predictions_deaths],
}
df_deaths = pd.DataFrame(data, columns=data.keys())
df_deaths.head()<save_to_csv>
if YEAR == 2020:
    test_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv')
elif YEAR == 2021:
    test_df = pd.read_csv(DATA_DIR + f'MSampleSubmissionStage{STAGE}.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
df_submission = df_cases.join(df_deaths.set_index('ForecastId'), on='ForecastId')
df_submission.to_csv('submission.csv', index=False)
df_submission.head()<load_from_csv>
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4]))
test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9]))
test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14]))
test_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
warnings.filterwarnings('ignore')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
PassengerId = test['PassengerId']
all_data = pd.concat([train, test], ignore_index=True)<count_values>
test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Seed': 'Seed1'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Seed': 'Seed2'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Score': 'ScoreT1'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Score': 'ScoreT2'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
train['Survived'].value_counts()<predict_on_test>
test_df['Seed1'] = test_df['Seed1'].map(lambda x: get_seed(x))
test_df['Seed2'] = test_df['Seed2'].map(lambda x: get_seed(x))
test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2']
test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2']
test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1)
test_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
age_df = all_data[['Age', 'Pclass', 'Sex', 'Title']]
age_df = pd.get_dummies(age_df)
known_age = age_df[age_df.Age.notnull()].iloc[:, :].values
unknown_age = age_df[age_df.Age.isnull()].iloc[:, :].values
y = known_age[:, 0]
X = known_age[:, 1:]
rfr = RandomForestRegressor(random_state=0, n_estimators=100, n_jobs=-1)
rfr.fit(X, y)
predictedAges = rfr.predict(unknown_age[:, 1:])
all_data.loc[(all_data.Age.isnull()), 'Age'] = predictedAges<filter>
X = tourney_result.drop('result', axis=1)
y = tourney_result.result
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
all_data[all_data['Embarked'].isnull()]<feature_engineering>
params_lgb = {
    'num_leaves': 127,
    'min_data_in_leaf': 10,
    'objective': 'binary',
    'max_depth': -1,
    'learning_rate': 0.01,
    'boosting_type': 'gbdt',
    'bagging_seed': 11,
    'metric': 'logloss',
    'verbosity': 0,
}
params_xgb = {
    'colsample_bytree': 0.8,
    'learning_rate': 0.0003,
    'max_depth': 31,
    'subsample': 1,
    'objective': 'binary:logistic',
    'eval_metric': 'logloss',
    'min_child_weight': 3,
    'gamma': 0.25,
    'n_estimators': 5000,
    'verbosity': 0,
}
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
all_data['Embarked'] = all_data['Embarked'].fillna('C')<filter>
NFOLDS = 5
folds = KFold(n_splits=NFOLDS)
columns = X.columns
splits = folds.split(X, y)
y_preds_lgb = np.zeros(test_df.shape[0])
y_oof_lgb = np.zeros(X.shape[0])
for fold_n, (train_index, valid_index) in enumerate(splits):
    X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]
    y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
    dtrain = lgb.Dataset(X_train, label=y_train)
    dvalid = lgb.Dataset(X_valid, label=y_valid)
    clf = lgb.train(params_lgb, dtrain, 10000, valid_sets=[dtrain, dvalid], verbose_eval=200)
    y_pred_valid = clf.predict(X_valid)
    y_oof_lgb[valid_index] = y_pred_valid
    y_preds_lgb += clf.predict(test_df) / NFOLDS
    del X_train, X_valid, y_train, y_valid
    gc.collect()
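With the out-of-fold predictions accumulated in `y_oof_lgb`, the cross-validated log loss can be checked before blending; a one-line sketch using scikit-learn:

```python
from sklearn.metrics import log_loss

# Out-of-fold log loss over all training games (lower is better)
print('LGB OOF log loss:', log_loss(y, y_oof_lgb))
```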
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
all_data[all_data['Fare'].isnull()]<feature_engineering>
NFOLDS = 10
folds = KFold(n_splits=NFOLDS)
columns = X.columns
splits = folds.split(X, y)
y_preds_xgb = np.zeros(test_df.shape[0])
y_oof_xgb = np.zeros(X.shape[0])  # note: never filled below, so XGB OOF predictions are not stored
for fold_n, (train_index, valid_index) in enumerate(splits):
    X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]
    y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
    train_set = xgb.DMatrix(X_train, y_train)
    val_set = xgb.DMatrix(X_valid, y_valid)
    test_set = xgb.DMatrix(test_df)
    clf = xgb.train(params_xgb, train_set, num_boost_round=5000, evals=[(train_set, 'train'), (val_set, 'val')], early_stopping_rounds=100, verbose_eval=100)
    y_preds_xgb += clf.predict(test_set) / NFOLDS
    del X_train, X_valid, y_train, y_valid
    gc.collect()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
fare = all_data[(all_data['Embarked'] == "S")&(all_data['Pclass'] == 3)].Fare.median()
all_data['Fare'] = all_data['Fare'].fillna(fare)<feature_engineering>
if YEAR == 2020:
    submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv')
elif YEAR == 2021:
    submission_df = pd.read_csv(DATA_DIR + f'MSampleSubmissionStage{STAGE}.csv')
submission_df['Pred'] = 0.9*y_preds_lgb + 0.1*y_preds_xgb
submission_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,990,667
<create_dataframe><EOS>
submission_df.to_csv('submission.csv', index=False)
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,826,653
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<categorify>
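MCRMSLE, as recorded above, is the mean column-wise root mean squared logarithmic error: RMSLE is computed separately for the ConfirmedCases and Fatalities columns and then averaged,

$$\text{MCRMSLE} = \frac{1}{2} \sum_{j=1}^{2} \sqrt{\frac{1}{n} \sum_{i=1}^{n} \bigl(\log(1 + \hat{y}_{ij}) - \log(1 + y_{ij})\bigr)^2}$$

where $j$ indexes the two target columns and $i$ the forecast rows.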
%matplotlib inline
COVID19 Global Forecasting (Week 3)
8,826,653
aug_x_train = X_train_dropna.copy()
aug_y_train = y_train_dropna.copy()
use_brightness_transf = True
if use_brightness_transf:
    inc_brightness_images = brightness(aug_x_train)
    print("Shape of brightened images {}".format(inc_brightness_images.shape))<create_dataframe>
def dS_dt(S, I, R_t, T_inf):
    return -(R_t / T_inf) * I * S

def dE_dt(S, E, I, R_t, T_inf, T_inc):
    return (R_t / T_inf) * I * S - (T_inc**-1) * E

def dI_dt(I, E, T_inc, T_inf):
    return (T_inc**-1) * E - (T_inf**-1) * I

def dR_dt(I, T_inf):
    return (T_inf**-1) * I

def SEIR_model(t, y, R_t, T_inf, T_inc):
    if callable(R_t):
        reproduction = R_t(t)
    else:
        reproduction = R_t
    S, E, I, R = y
    S_out = dS_dt(S, I, reproduction, T_inf)
    E_out = dE_dt(S, E, I, reproduction, T_inf, T_inc)
    I_out = dI_dt(I, E, T_inc, T_inf)
    R_out = dR_dt(I, T_inf)
    return [S_out, E_out, I_out, R_out]
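The four helpers encode the standard SEIR compartment equations with a (possibly time-varying) reproduction number $R_t$, incubation time $T_{inc}$ and infectious time $T_{inf}$:

$$\frac{dS}{dt} = -\frac{R_t}{T_{inf}} I S, \qquad \frac{dE}{dt} = \frac{R_t}{T_{inf}} I S - \frac{E}{T_{inc}}, \qquad \frac{dI}{dt} = \frac{E}{T_{inc}} - \frac{I}{T_{inf}}, \qquad \frac{dR}{dt} = \frac{I}{T_{inf}}$$

Here $R_t / T_{inf}$ plays the role of the transmission rate $\beta$, and $S, E, I, R$ are fractions of the population.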
COVID19 Global Forecasting (Week 3)
8,826,653
aug_x_train = X_train_dropna.copy()
aug_y_train = y_train_dropna.copy()<concatenate>
train = pd.read_csv('../input/covid19-global-forecasting-week-3/train.csv')
test = pd.read_csv('../input/covid19-global-forecasting-week-3/test.csv')
train['Date_datetime'] = train['Date'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d'))
COVID19 Global Forecasting (Week 3)
8,826,653
shifted_img, shifted_keypoints = shift_image(aug_x_train, aug_y_train, prop=0.1)
shifted_img = shifted_img[:, :, :, np.newaxis]
print("Shape of shifted images {}".format(shifted_img.shape))<normalization>
pop_info = pd.read_csv('/kaggle/input/covid19-population-data/population_data.csv')
country_pop = pop_info.query('Type == "Country/Region"')
province_pop = pop_info.query('Type == "Province/State"')
country_lookup = dict(zip(country_pop['Name'], country_pop['Population']))
province_lookup = dict(zip(province_pop['Name'], province_pop['Population']))
COVID19 Global Forecasting (Week 3)
8,826,653
aug_x_train = X_train_dropna.copy()
noisy_img = add_noise(aug_x_train)
print("Shape of noisy images {}".format(noisy_img.shape))<prepare_x_and_y>
from scipy.optimize import minimize
from sklearn.metrics import mean_squared_log_error, mean_squared_error
COVID19 Global Forecasting (Week 3)
8,826,653
aug_x_train_ffill = X_train_fill_nan.copy().reshape((-1, 96, 96, 1))
aug_y_train_ffill = y_train_fill_nan.copy()
aug_x_train = X_train_dropna.copy().reshape((-1, 96, 96, 1))
aug_y_train = y_train_dropna.copy()
aug_x_train = np.concatenate((aug_x_train, flipped_img, rotated_img_r, rotated_img_l, inc_brightness_images, shifted_img, noisy_img))
aug_y_train = np.concatenate((aug_y_train, flipped_kepoints, rotated_keypoints_r, rotated_keypoints_l, aug_y_train, shifted_keypoints, aug_y_train))
print("Number of images in the new train dataset using data augmentation: {} {}".format(aug_x_train.shape, aug_y_train.shape))<split>
T_inc = 5.2
T_inf = 2.9
R_0, cfr, k, L = [3.95469597, 0.04593316, 3., 15.32328881]

def time_varying_reproduction(t):
    return R_0 / (1 + (t/L)**k)
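`time_varying_reproduction` is a Hill-type decay of the basic reproduction number:

$$R(t) = \frac{R_0}{1 + (t/L)^k}$$

so $R$ starts at $R_0$ and has dropped to half that value at $t = L$ days, with $k$ controlling how sharply the decline sets in.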
COVID19 Global Forecasting (Week 3)
8,826,653
x_train_dna, x_validation_dna, y_train_dna, y_validation_dna = split_train_validation(X_train_dropna, y_train_dropna)
x_train_ffill, x_validation_ffill, y_train_ffill, y_validation_ffill = split_train_validation(X_train_fill_nan, y_train_fill_nan)
x_train_da, x_validation_da, y_train_da, y_validation_da = split_train_validation(aug_x_train, aug_y_train, 0.1)<train_model>
def eval_model_const(params, data, population, return_solution=False, forecast_days=0):
    R_0, cfr = params
    N = population
    n_infected = data['ConfirmedCases'].iloc[0]
    max_days = len(data) + forecast_days
    s, e, i, r = (N - n_infected) / N, 0, n_infected / N, 0

    def time_varying_reproduction(t):
        # Constant R_0, halved after day 80 (interventions assumed to bite).
        if t > 80:
            return R_0 * 0.5
        else:
            return R_0

    sol = solve_ivp(SEIR_model, [0, max_days], [s, e, i, r],
                    args=(time_varying_reproduction, T_inf, T_inc),
                    t_eval=np.arange(0, max_days))
    sus, exp, inf, rec = sol.y
    y_pred_cases = np.clip((inf + rec) * N, 0, np.inf)
    y_true_cases = data['ConfirmedCases'].values
    y_pred_fat = np.clip(rec * N * cfr, 0, np.inf)
    y_true_fat = data['Fatalities'].values
    optim_days = min(20, len(data))
    weights = 1 / np.arange(1, optim_days + 1)[::-1]  # weight recent days more
    msle_cases = mean_squared_log_error(y_true_cases[-optim_days:], y_pred_cases[-optim_days:], weights)
    msle_fat = mean_squared_log_error(y_true_fat[-optim_days:], y_pred_fat[-optim_days:], weights)
    msle_final = np.mean([msle_cases, msle_fat])
    if return_solution:
        return msle_final, sol
    else:
        return msle_final
COVID19 Global Forecasting (Week 3)
8,826,653
<choose_model_class>
def eval_model_decay(params, data, population, return_solution=False, forecast_days=0):
    R_0, cfr, k, L = params
    N = population
    n_infected = data['ConfirmedCases'].iloc[0]
    max_days = len(data) + forecast_days
    s, e, i, r = (N - n_infected) / N, 0, n_infected / N, 0

    def time_varying_reproduction(t):
        # Hill decay of R_0 (see the formula above).
        return R_0 / (1 + (t/L)**k)

    sol = solve_ivp(SEIR_model, [0, max_days], [s, e, i, r],
                    args=(time_varying_reproduction, T_inf, T_inc),
                    t_eval=np.arange(0, max_days))
    sus, exp, inf, rec = sol.y
    y_pred_cases = np.clip((inf + rec) * N, 0, np.inf)
    y_true_cases = data['ConfirmedCases'].values
    y_pred_fat = np.clip(rec * N * cfr, 0, np.inf)
    y_true_fat = data['Fatalities'].values
    optim_days = min(20, len(data))
    weights = 1 / np.arange(1, optim_days + 1)[::-1]  # weight recent days more
    msle_cases = mean_squared_log_error(y_true_cases[-optim_days:], y_pred_cases[-optim_days:], weights)
    msle_fat = mean_squared_log_error(y_true_fat[-optim_days:], y_pred_fat[-optim_days:], weights)
    msle_final = np.mean([msle_cases, msle_fat])
    if return_solution:
        return msle_final, sol
    else:
        return msle_final
COVID19 Global Forecasting (Week 3)
8,826,653
model_06_01 = Sequential()
model_06_01.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same', input_shape=(96, 96, 1)))
model_06_01.add(Activation('relu'))
model_06_01.add(Dropout(0.1))
model_06_01.add(SeparableConv2D(filters=256, kernel_size=(3, 3), padding='same', use_bias=False))
model_06_01.add(Activation('relu'))
model_06_01.add(MaxPooling2D(pool_size=(2, 2)))
model_06_01.add(Dropout(0.1))
model_06_01.add(SeparableConv2D(filters=256, kernel_size=(3, 3), padding='same', use_bias=False))
model_06_01.add(Activation('relu'))
model_06_01.add(BatchNormalization())
model_06_01.add(MaxPooling2D(pool_size=(2, 2)))
model_06_01.add(Dropout(0.25))
model_06_01.add(SeparableConv2D(filters=256, kernel_size=(3, 3), padding='same', use_bias=False))
model_06_01.add(Activation('relu'))
model_06_01.add(MaxPooling2D(pool_size=(2, 2)))
model_06_01.add(Dropout(0.25))
model_06_01.add(Flatten())
model_06_01.add(Dense(512, activation='relu'))
model_06_01.add(Dropout(0.5))
model_06_01.add(Dense(30))
model_06_01.summary()
model_06_01.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae', 'acc'])<train_model>
def fit_model_new(data, area_name, initial_guess=[2.2, 0.02, 2, 50], bounds=(( 1, 20),(0, 0.15),(1, 3),(1, 100)) , make_plot=True, decay_mode = None): if area_name in ['France']: train = data.query('ConfirmedCases > 0' ).copy() [:-1] else: train = data.query('ConfirmedCases > 0' ).copy() valid_data = train[-7:].copy() train_data = train[:-7].copy() if len(train_data)== 0: result_zero = np.zeros(( 43)) return pd.DataFrame({'ConfirmedCases':result_zero,'Fatalities':result_zero}), 0 try: population = pop_info[pop_info['Name']==area_name]['Population'].tolist() [0] except IndexError: print('country not in population set, '+str(area_name)) population = 1000000 if area_name == 'US': population = 327200000 if area_name == 'Global': population = 7744240900 cases_per_million = train_data['ConfirmedCases'].max() * 10**6 / population n_infected = train_data['ConfirmedCases'].iloc[0] if cases_per_million < 1: population = population/100 res_const = minimize(eval_model_const, [2.2, 0.02], bounds=(( 1, 20),(0, 0.15)) , args=(train_data, population, False), method='L-BFGS-B') res_decay = minimize(eval_model_decay, initial_guess, bounds=bounds, args=(train_data, population, False), method='L-BFGS-B') test_end = datetime.datetime.strptime('2020-05-07','%Y-%m-%d') test_start = datetime.datetime.strptime('2020-03-26','%Y-%m-%d') test_period =(test_end - test_start ).days train_max = train_data.Date_datetime.max() train_all_max = train.Date_datetime.max() train_min = train_data.Date_datetime.min() add_date = 0 delta_days =(test_end - train_max ).days train_add_time=[] if train_min > test_start: add_date =(train_min-test_start ).days last = train_min-timedelta(add_date) train_add_time = np.arange(last, train_min, dtype='datetime64[D]' ).tolist() train_add_time = pd.to_datetime(train_add_time) dates_all = train_add_time.append(pd.to_datetime(np.arange(train_min, test_end+timedelta(1), dtype='datetime64[D]'))) else: dates_all = pd.to_datetime(np.arange(train_min, test_end+timedelta(1), dtype='datetime64[D]')) if decay_mode is None: if res_const.fun < res_decay.fun : msle, sol = eval_model_const(res_const.x, train_data, population, True, delta_days+add_date) res = res_const else: msle, sol = eval_model_decay(res_decay.x, train_data, population, True, delta_days+add_date) res = res_decay R_0, cfr, k, L = res.x else: if decay_mode =='day_decay': msle, sol = eval_model_const(res_const.x, train_data, population, True, delta_days+add_date) res = res_const else: msle, sol = eval_model_decay(res_decay.x, train_data, population, True, delta_days+add_date) res = res_decay R_0, cfr, k, L = res.x sus, exp, inf, rec = sol.y y_pred = pd.DataFrame({ 'ConfirmedCases': cumsum_signal(np.diff(( inf + rec)* population, prepend=n_infected ).cumsum()), 'Fatalities': cumsum_signal(( np.clip(rec * population * res.x[1], 0, np.inf)).tolist()) }) y_pred_valid = y_pred.iloc[len(train_data):len(train_data)+len(valid_data)] y_pred_test = y_pred.iloc[-(test_period+1):] y_true_valid = valid_data[['ConfirmedCases', 'Fatalities']] valid_msle_cases = mean_squared_log_error(y_true_valid['ConfirmedCases'], y_pred_valid['ConfirmedCases']) valid_msle_fat = mean_squared_log_error(y_true_valid['Fatalities'], y_pred_valid['Fatalities']) valid_msle = np.mean([valid_msle_cases, valid_msle_fat]) if make_plot: if len(res.x)<=2: print(f'Validation MSLE: {valid_msle:0.5f}, using intervention days decay, Reproduction number(R0): {res.x[0]:0.5f}, Fatal rate : {res.x[1]:0.5f}') else: print(f'Validation MSLE: {valid_msle:0.5f}, using Hill decay, Reproduction 
number(R0): {res.x[0]:0.5f}, Fatal rate : {res.x[1]:0.5f}, K : {res.x[2]:0.5f}, L: {res.x[3]:0.5f}') f = plt.figure(figsize=(16,5)) ax = f.add_subplot(1,2,1) ax.plot(exp, 'y', label='Exposed'); ax.plot(inf, 'r', label='Infected'); ax.plot(rec, 'c', label='Recovered/deceased'); plt.title('SEIR Model Trends') plt.xlabel("Days", fontsize=10); plt.ylabel("Fraction of population", fontsize=10); plt.legend(loc='best'); ax2 = f.add_subplot(1,2,2) xaxis = train_data['Date_datetime'].tolist() xaxis = dates.date2num(xaxis) hfmt = dates.DateFormatter('%m %d') ax2.xaxis.set_major_formatter(hfmt) ax2.plot(np.array(train_data['Date_datetime'], dtype='datetime64[D]'),train_data['ConfirmedCases'],label='Confirmed Cases(train)', c='g') ax2.plot(np.array(train_data['Date_datetime'], dtype='datetime64[D]'), y_pred['ConfirmedCases'][:len(train_data)],label='Cumulative modeled infections', c='r') ax2.plot(np.array(valid_data['Date_datetime'], dtype='datetime64[D]'), y_true_valid['ConfirmedCases'],label='Confirmed Cases(valid)', c='b') ax2.plot(np.array(valid_data['Date_datetime'], dtype='datetime64[D]'),y_pred_valid['ConfirmedCases'],label='Cumulative modeled infections(valid)', c='y') plt.title('Real ConfirmedCase and Predict ConfirmedCase') plt.legend(loc='best'); plt.show() if len(res.x)>2: msle, sol = eval_model_decay(res.x, train_data, population, True, 300) else: msle, sol = eval_model_const(res.x, train_data, population, True, 300) sus, exp, inf, rec = sol.y y_pred = pd.DataFrame({ 'ConfirmedCases': cumsum_signal(np.diff(( inf + rec)* population, prepend=n_infected ).cumsum()), 'Fatalities': cumsum_signal(np.clip(rec, 0, np.inf)* population * res.x[1]) }) start = train_min end = start + timedelta(len(y_pred)) time_array = np.arange(start, end, dtype='datetime64[D]') max_day = numpy.where(inf == numpy.amax(inf)) [0][0] where_time = time_array[max_day] pred_max_day = y_pred['ConfirmedCases'][max_day] xy_show_max_estimation =(where_time, max_day) con = y_pred['ConfirmedCases'] max_day_con = numpy.where(con == numpy.amax(con)) [0][0] max_con = numpy.amax(con) where_time_con = time_array[len(time_array)-50] xy_show_max_estimation_confirmed =(where_time_con, max_con) fig = go.Figure() fig.add_trace(go.Scatter(x=time_array, y=y_pred['ConfirmedCases'].astype(int), mode='lines', line = dict(color='red'), name='Estimation Confirmed Case Start from '+ str(start.date())+ ' to ' +str(end.date()))) fig.add_trace(go.Scatter(x=time_array[:len(train)], y=train['ConfirmedCases'], mode='lines', name='Confirmed case until '+ str(train_all_max.date()),line = dict(color='green', width=4))) fig.add_annotation( x=where_time_con, y=max_con-(max_con/30), showarrow=False, text="Estimate Max Case around:" +str(int(max_con)) , font=dict( color="Blue", size=15 )) fig.add_annotation( x=time_array[len(train)-1], y=train['ConfirmedCases'].tolist() [-1], showarrow=True, text=f"Real Max ConfirmedCase: " +str(int(train['ConfirmedCases'].tolist() [-1]))) fig.add_annotation( x=where_time, y=pred_max_day, text='Infect start decrease from: ' + str(where_time)) fig.update_layout(title='Estimate Confirmed Case ,'+area_name+' Total population ='+ str(int(population)) , legend_orientation="h") fig.show() return y_pred_test, valid_msle
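`cumsum_signal` is not defined anywhere in this excerpt; judging from how `fit_model_new` applies it to the cumulative case and fatality curves, a plausible stand-in (hypothetical, not the author's code) simply enforces monotonicity:

```python
import numpy as np

def cumsum_signal(values):
    # Hypothetical helper: clamp a series to be non-decreasing, since
    # cumulative cases/fatalities can never go down.
    return np.maximum.accumulate(np.asarray(values, dtype=float))
```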
COVID19 Global Forecasting (Week 3)
8,826,653
callbacks = [
    EarlyStopping(monitor='val_loss', patience=15, mode='min', restore_best_weights=True, verbose=1),
    ModelCheckpoint(filepath='best_model_06_01.hdf5', monitor='val_mae', verbose=1, save_best_only=True, mode='min'),
]
hist_06_01 = model_06_01.fit(x_train_da, y_train_da, epochs=80, batch_size=128, validation_data=(x_validation_da, y_validation_da), callbacks=callbacks, verbose=1)<train_model>
validation_scores = [] validation_county = [] validation_country = [] for country in tqdm(train['Country_Region'].unique()): country_pd_train = train[train['Country_Region']==country] if len(country_pd_train['Province_State'].unique())<2: predict_test, score = fit_model_new(country_pd_train,country,make_plot=False) if score ==0: print(f'{country} no case') validation_scores.append(score) validation_county.append(country) validation_country.append(country) test.loc[test['Country_Region']==country,'ConfirmedCases'] = predict_test['ConfirmedCases'].tolist() test.loc[test['Country_Region']==country,'Fatalities'] = predict_test['Fatalities'].tolist() else: for state in country_pd_train['Province_State'].unique() : if state != state: state_pd = country_pd_train[country_pd_train['Province_State'].isna() ] predict_test, score = fit_model_new(state_pd,state,make_plot=False) if score ==0: print(f'{country} / {state} no case') validation_scores.append(score) validation_county.append(state) validation_country.append(country) test.loc[(test['Country_Region']==country)&(test['Province_State'].isna()),'ConfirmedCases'] = predict_test['ConfirmedCases'].tolist() test.loc[(test['Country_Region']==country)&(test['Province_State'].isna()),'Fatalities'] = predict_test['Fatalities'].tolist() else: state_pd = country_pd_train[country_pd_train['Province_State']==state] predict_test, score = fit_model_new(state_pd,state,make_plot=False) if score ==0: print(f'{country} / {state} no case') validation_scores.append(score) validation_county.append(state) validation_country.append(country) test.loc[(test['Country_Region']==country)&(test['Province_State']==state),'ConfirmedCases'] = predict_test['ConfirmedCases'].tolist() test.loc[(test['Country_Region']==country)&(test['Province_State']==state),'Fatalities'] = predict_test['Fatalities'].tolist() print(f'Mean validation score: {np.average(validation_scores):0.5f}' )
COVID19 Global Forecasting (Week 3)
8,826,653
<train_model>
validation_scores = pd.DataFrame({'country/state': validation_country, 'country': validation_county, 'MSLE': validation_scores})
validation_scores.sort_values(by=['MSLE'], ascending=False).head(20)
COVID19 Global Forecasting (Week 3)
8,826,653
<normalization>
large_msle = validation_scores[validation_scores['MSLE']>1]
COVID19 Global Forecasting (Week 3)
8,826,653
test_, _, _ = scale_data(test_data_copy, True)
test_img = reshape_data(test_)<predict_on_test>
for country in large_msle['country'].unique() : if(country!= country)==False: country_pd_train = train[train['Country_Region']==country] country_pd_test = test[test['Country_Region']==country] if len(country_pd_train)==0: country_pd_train = train[train['Province_State']==country] country_pd_test = test[test['Province_State']==country] x = np.array(range(len(country_pd_train)) ).reshape(( -1,1)) [:-7] valid_x = np.array(range(len(country_pd_train)) ).reshape(( -1,1)) [-7:] y = country_pd_train['ConfirmedCases'][:-7] valid_y = country_pd_train['ConfirmedCases'][-7:] y_fat = country_pd_train['Fatalities'][:-7] valid_y_fat = country_pd_train['Fatalities'][-7:] model = Pipeline([('poly', PolynomialFeatures(degree=2)) , ('linear', LinearRegression(fit_intercept=False)) ]) model = model.fit(x, y) model_fat = Pipeline([('poly', PolynomialFeatures(degree=2)) , ('linear', LinearRegression(fit_intercept=False)) ]) model_fat = model_fat.fit(x, y_fat) predict_y = model.predict(valid_x) predict_yfat = model_fat.predict(valid_x) score = mean_squared_log_error(np.clip(valid_y,0,np.inf), np.clip(predict_y,0,np.inf)) score_fat = mean_squared_log_error(np.clip(valid_y_fat,0,np.inf), np.clip(predict_yfat,0,np.inf)) score =(score+score_fat)/2 print(f'{country} {score:0.5f}') if score < large_msle[large_msle['country']==country]['MSLE'].tolist() [0]: validation_scores.loc[validation_scores['country']==country,'MSLE'] = score predict_x =(np.array(range(len(country_pd_test)))+50 ).reshape(( -1,1)) test.loc[test['Province_State']==country,'ConfirmedCases'] = model.predict(predict_x) test.loc[test['Province_State']==country,'Fatalities'] = model_fat.predict(predict_x) else: x = np.array(range(len(country_pd_train)) ).reshape(( -1,1)) [:-7] valid_x = np.array(range(len(country_pd_train)) ).reshape(( -1,1)) [-7:] y = country_pd_train['ConfirmedCases'][:-7] valid_y = country_pd_train['ConfirmedCases'][-7:] y_fat = country_pd_train['Fatalities'][:-7] valid_y_fat = country_pd_train['Fatalities'][-7:] model = Pipeline([('poly', PolynomialFeatures(degree=2)) , ('linear', LinearRegression(fit_intercept=False)) ]) model = model.fit(x, y) model_fat = Pipeline([('poly', PolynomialFeatures(degree=2)) , ('linear', LinearRegression(fit_intercept=False)) ]) model_fat = model_fat.fit(x, y_fat) predict_y = model.predict(valid_x) predict_yfat = model_fat.predict(valid_x) score = mean_squared_log_error(np.clip(valid_y,0,np.inf), np.clip(predict_y,0,np.inf)) score_fat = mean_squared_log_error(np.clip(valid_y_fat,0,np.inf), np.clip(predict_yfat,0,np.inf)) score =(score+score_fat)/2 print(f'{country} {score:0.5f}') if score < large_msle[large_msle['country']==country]['MSLE'].tolist() [0]: validation_scores.loc[validation_scores['country']==country,'MSLE'] = score predict_x =(np.array(range(len(country_pd_test)))+50 ).reshape(( -1,1)) test.loc[test['Country_Region']==country,'ConfirmedCases'] = model.predict(predict_x) test.loc[test['Country_Region']==country,'Fatalities'] = model_fat.predict(predict_x )
COVID19 Global Forecasting (Week 3)
8,826,653
best_model = load_model('best_model_06_01.hdf5')
pred = best_model.predict(test_img)<save_to_csv>
val_scores = validation_scores['MSLE'].tolist()
print(f'Mean validation score: {np.average(val_scores):0.5f}')
COVID19 Global Forecasting (Week 3)
8,826,653
feature_name = list(idlookup['FeatureName'])
image_id = list(idlookup['ImageId'] - 1)
row_id = list(idlookup['RowId'])
feature_list = []
for feature in feature_name:
    feature_list.append(feature_name.index(feature))
predictions = []
for x, y in zip(image_id, feature_list):
    predictions.append(pred[x][y])
row_id = pd.Series(row_id, name='RowId')
locations = pd.Series(predictions, name='Location')
locations = locations*mean_cols + mean_cols
submission_result = pd.concat([row_id, locations], axis=1)
submission_result.to_csv('best_perf_15_1600.csv', index=False)<load_from_csv>
submit = pd.read_csv('../input/covid19-global-forecasting-week-3/submission.csv')
submit['Fatalities'] = test['Fatalities'].astype('float')
submit['ConfirmedCases'] = test['ConfirmedCases'].astype('float')
submit.to_csv('submission.csv', index=False)
COVID19 Global Forecasting (Week 3)
8,826,653
train = pd.read_csv('../input/training/training.csv')<count_missing_values>
%matplotlib inline %config InlineBackend.figure_format = 'retina'
COVID19 Global Forecasting (Week 3)
8,826,653
print('size of training data {}'.format(len(train)))
print('Missing value columns:')
print(train.isnull().any().value_counts())
train.isnull().sum().sort_values(ascending=False)<count_missing_values>
def get_cpmp_sub(save_oof=False, save_public_test=False): train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv') train['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) train['day'] = train.Date.dt.dayofyear train['geo'] = ['_'.join(x)for x in zip(train['Country_Region'], train['Province_State'])] train test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv') test['Province_State'].fillna('', inplace=True) test['Date'] = pd.to_datetime(test['Date']) test['day'] = test.Date.dt.dayofyear test['geo'] = ['_'.join(x)for x in zip(test['Country_Region'], test['Province_State'])] test day_min = train['day'].min() train['day'] -= day_min test['day'] -= day_min min_test_val_day = test.day.min() max_test_val_day = train.day.max() max_test_day = test.day.max() num_days = max_test_day + 1 min_test_val_day, max_test_val_day, num_days train['ForecastId'] = -1 test['Id'] = -1 test['ConfirmedCases'] = 0 test['Fatalities'] = 0 debug = False data = pd.concat([train, test[test.day > max_test_val_day][train.columns] ] ).reset_index(drop=True) if debug: data = data[data['geo'] >= 'France_'].reset_index(drop=True) gc.collect() dates = data[data['geo'] == 'France_'].Date.values if 0: gr = data.groupby('geo') data['ConfirmedCases'] = gr.ConfirmedCases.transform('cummax') data['Fatalities'] = gr.Fatalities.transform('cummax') geo_data = data.pivot(index='geo', columns='day', values='ForecastId') num_geo = geo_data.shape[0] geo_data geo_id = {} for i,g in enumerate(geo_data.index): geo_id[g] = i ConfirmedCases = data.pivot(index='geo', columns='day', values='ConfirmedCases') Fatalities = data.pivot(index='geo', columns='day', values='Fatalities') if debug: cases = ConfirmedCases.values deaths = Fatalities.values else: cases = np.log1p(ConfirmedCases.values) deaths = np.log1p(Fatalities.values) def get_dataset(start_pred, num_train, lag_period): days = np.arange(start_pred - num_train + 1, start_pred + 1) lag_cases = np.vstack([cases[:, d - lag_period : d] for d in days]) lag_deaths = np.vstack([deaths[:, d - lag_period : d] for d in days]) target_cases = np.vstack([cases[:, d : d + 1] for d in days]) target_deaths = np.vstack([deaths[:, d : d + 1] for d in days]) geo_ids = np.vstack([geo_ids_base for d in days]) country_ids = np.vstack([country_ids_base for d in days]) return lag_cases, lag_deaths, target_cases, target_deaths, geo_ids, country_ids, days def update_valid_dataset(data, pred_death, pred_case): lag_cases, lag_deaths, target_cases, target_deaths, geo_ids, country_ids, days = data day = days[-1] + 1 new_lag_cases = np.hstack([lag_cases[:, 1:], pred_case]) new_lag_deaths = np.hstack([lag_deaths[:, 1:], pred_death]) new_target_cases = cases[:, day:day+1] new_target_deaths = deaths[:, day:day+1] new_geo_ids = geo_ids new_country_ids = country_ids new_days = 1 + days return new_lag_cases, new_lag_deaths, new_target_cases, new_target_deaths, new_geo_ids, new_country_ids, new_days def fit_eval(lr_death, lr_case, data, start_lag_death, end_lag_death, num_lag_case, fit, score): lag_cases, lag_deaths, target_cases, target_deaths, geo_ids, country_ids, days = data X_death = np.hstack([lag_cases[:, -start_lag_death:-end_lag_death], country_ids]) X_death = np.hstack([lag_deaths[:, -num_lag_case:], country_ids]) X_death = np.hstack([lag_cases[:, -start_lag_death:-end_lag_death], lag_deaths[:, -num_lag_case:], country_ids]) y_death = target_deaths y_death_prev = lag_deaths[:, -1:] if fit: if 0: keep =(y_death > 0 ).ravel() X_death = 
X_death[keep] y_death = y_death[keep] y_death_prev = y_death_prev[keep] lr_death.fit(X_death, y_death) y_pred_death = lr_death.predict(X_death) y_pred_death = np.maximum(y_pred_death, y_death_prev) X_case = np.hstack([lag_cases[:, -num_lag_case:], geo_ids]) X_case = lag_cases[:, -num_lag_case:] y_case = target_cases y_case_prev = lag_cases[:, -1:] if fit: lr_case.fit(X_case, y_case) y_pred_case = lr_case.predict(X_case) y_pred_case = np.maximum(y_pred_case, y_case_prev) if score: death_score = val_score(y_death, y_pred_death) case_score = val_score(y_case, y_pred_case) else: death_score = 0 case_score = 0 return death_score, case_score, y_pred_death, y_pred_case def train_model(train, valid, start_lag_death, end_lag_death, num_lag_case, num_val, score=True): alpha = 3 lr_death = Ridge(alpha=alpha, fit_intercept=False) lr_case = Ridge(alpha=alpha, fit_intercept=True) (train_death_score, train_case_score, train_pred_death, train_pred_case, )= fit_eval(lr_death, lr_case, train, start_lag_death, end_lag_death, num_lag_case, fit=True, score=score) death_scores = [] case_scores = [] death_pred = [] case_pred = [] for i in range(num_val): (valid_death_score, valid_case_score, valid_pred_death, valid_pred_case, )= fit_eval(lr_death, lr_case, valid, start_lag_death, end_lag_death, num_lag_case, fit=False, score=score) death_scores.append(valid_death_score) case_scores.append(valid_case_score) death_pred.append(valid_pred_death) case_pred.append(valid_pred_case) if 0: print('val death: %0.3f' % valid_death_score, 'val case: %0.3f' % valid_case_score, 'val : %0.3f' % np.mean([valid_death_score, valid_case_score]), flush=True) valid = update_valid_dataset(valid, valid_pred_death, valid_pred_case) if score: death_scores = np.sqrt(np.mean([s**2 for s in death_scores])) case_scores = np.sqrt(np.mean([s**2 for s in case_scores])) if 0: print('train death: %0.3f' % train_death_score, 'train case: %0.3f' % train_case_score, 'val death: %0.3f' % death_scores, 'val case: %0.3f' % case_scores, 'val : %0.3f' %(( death_scores + case_scores)/ 2), flush=True) else: print('%0.4f' % case_scores, ', %0.4f' % death_scores, '= %0.4f' %(( death_scores + case_scores)/ 2), flush=True) death_pred = np.hstack(death_pred) case_pred = np.hstack(case_pred) return death_scores, case_scores, death_pred, case_pred countries = [g.split('_')[0] for g in geo_data.index] countries = pd.factorize(countries)[0] country_ids_base = countries.reshape(( -1, 1)) ohe = OneHotEncoder(sparse=False) country_ids_base = 0.2 * ohe.fit_transform(country_ids_base) country_ids_base.shape geo_ids_base = np.arange(num_geo ).reshape(( -1, 1)) ohe = OneHotEncoder(sparse=False) geo_ids_base = 0.1 * ohe.fit_transform(geo_ids_base) geo_ids_base.shape def val_score(true, pred): pred = np.log1p(np.round(np.expm1(pred)- 0.2)) return np.sqrt(mean_squared_error(true.ravel() , pred.ravel())) def val_score(true, pred): return np.sqrt(mean_squared_error(true.ravel() , pred.ravel())) start_lag_death, end_lag_death = 14, 6, num_train = 5 num_lag_case = 14 lag_period = max(start_lag_death, num_lag_case) def get_oof(start_val_delta=0): start_val = min_test_val_day + start_val_delta last_train = start_val - 1 num_val = max_test_val_day - start_val + 1 print(dates[start_val], start_val, num_val) train_data = get_dataset(last_train, num_train, lag_period) valid_data = get_dataset(start_val, 1, lag_period) _, _, val_death_preds, val_case_preds = train_model(train_data, valid_data, start_lag_death, end_lag_death, num_lag_case, num_val) pred_deaths = Fatalities.iloc[:, 
start_val:start_val+num_val].copy() pred_deaths.iloc[:, :] = np.expm1(val_death_preds) pred_deaths = pred_deaths.stack().reset_index() pred_deaths.columns = ['geo', 'day', 'Fatalities'] pred_deaths pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy() pred_cases.iloc[:, :] = np.expm1(val_case_preds) pred_cases = pred_cases.stack().reset_index() pred_cases.columns = ['geo', 'day', 'ConfirmedCases'] pred_cases sub = train[['Date', 'Id', 'geo', 'day']] sub = sub.merge(pred_cases, how='left', on=['geo', 'day']) sub = sub.merge(pred_deaths, how='left', on=['geo', 'day']) sub = sub[sub.day >= start_val] sub = sub[['Id', 'ConfirmedCases', 'Fatalities']].copy() return sub if save_oof: for start_val_delta, date in zip(range(3, -8, -3), ['2020-03-22', '2020-03-19', '2020-03-16', '2020-03-13']): print(date, end=' ') oof = get_oof(start_val_delta) oof.to_csv('.. /submissions/cpmp-%s.csv' % date, index=None) def get_sub(start_val_delta=0): start_val = min_test_val_day + start_val_delta last_train = start_val - 1 num_val = max_test_val_day - start_val + 1 print(dates[last_train], start_val, num_val) num_lag_case = 14 train_data = get_dataset(last_train, num_train, lag_period) valid_data = get_dataset(start_val, 1, lag_period) _, _, val_death_preds, val_case_preds = train_model(train_data, valid_data, start_lag_death, end_lag_death, num_lag_case, num_val) pred_deaths = Fatalities.iloc[:, start_val:start_val+num_val].copy() pred_deaths.iloc[:, :] = np.expm1(val_death_preds) pred_deaths = pred_deaths.stack().reset_index() pred_deaths.columns = ['geo', 'day', 'Fatalities'] pred_deaths pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy() pred_cases.iloc[:, :] = np.expm1(val_case_preds) pred_cases = pred_cases.stack().reset_index() pred_cases.columns = ['geo', 'day', 'ConfirmedCases'] pred_cases sub = test[['Date', 'ForecastId', 'geo', 'day']] sub = sub.merge(pred_cases, how='left', on=['geo', 'day']) sub = sub.merge(pred_deaths, how='left', on=['geo', 'day']) sub = sub.fillna(0) sub = sub[['ForecastId', 'ConfirmedCases', 'Fatalities']] return sub return sub known_test = train[['geo', 'day', 'ConfirmedCases', 'Fatalities'] ].merge(test[['geo', 'day', 'ForecastId']], how='left', on=['geo', 'day']) known_test = known_test[['ForecastId', 'ConfirmedCases', 'Fatalities']][known_test.ForecastId.notnull() ].copy() known_test unknow_test = test[test.day > max_test_val_day] unknow_test def get_final_sub() : start_val = max_test_val_day + 1 last_train = start_val - 1 num_val = max_test_day - start_val + 1 print(dates[last_train], start_val, num_val) num_lag_case = num_val + 3 train_data = get_dataset(last_train, num_train, lag_period) valid_data = get_dataset(start_val, 1, lag_period) (_, _, val_death_preds, val_case_preds )= train_model(train_data, valid_data, start_lag_death, end_lag_death, num_lag_case, num_val, score=False) pred_deaths = Fatalities.iloc[:, start_val:start_val+num_val].copy() pred_deaths.iloc[:, :] = np.expm1(val_death_preds) pred_deaths = pred_deaths.stack().reset_index() pred_deaths.columns = ['geo', 'day', 'Fatalities'] pred_deaths pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy() pred_cases.iloc[:, :] = np.expm1(val_case_preds) pred_cases = pred_cases.stack().reset_index() pred_cases.columns = ['geo', 'day', 'ConfirmedCases'] pred_cases print(unknow_test.shape, pred_deaths.shape, pred_cases.shape) sub = unknow_test[['Date', 'ForecastId', 'geo', 'day']] sub = sub.merge(pred_cases, how='left', on=['geo', 'day']) sub = 
sub.merge(pred_deaths, how='left', on=['geo', 'day']) sub = sub[['ForecastId', 'ConfirmedCases', 'Fatalities']] sub = pd.concat([known_test, sub]) return sub if save_public_test: sub = get_sub() else: sub = get_final_sub() return sub
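# Sketch (not from the source kernel): get_cpmp_sub above is, at its core, an
# autoregressive Ridge model on log1p-transformed cumulative counts -- each predicted
# day is appended to the lag window and fed back in to predict the next day, with
# predictions clipped so the cumulative series never decreases. A minimal single-series
# version of that rollout pattern; toy data and names are illustrative assumptions.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
series = np.log1p(np.cumsum(rng.poisson(5, size=60)))  # toy cumulative counts, log1p scale

LAGS = 14
X = np.array([series[t - LAGS:t] for t in range(LAGS, len(series))])  # lag windows
y = series[LAGS:]                                                     # next-day target
model = Ridge(alpha=3, fit_intercept=True).fit(X, y)                  # alpha mirrors the source

window = series[-LAGS:].copy()
preds = []
for _ in range(30):                        # recursive multi-step forecast
    nxt = model.predict(window.reshape(1, -1))[0]
    nxt = max(nxt, window[-1])             # cumulative counts are non-decreasing
    preds.append(nxt)
    window = np.append(window[1:], nxt)    # feed the prediction back as the newest lag
print(np.expm1(preds[:5]))                 # back to the original count scale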
COVID19 Global Forecasting (Week 3)
8,826,653
train.fillna(method='ffill',inplace=True) train.isnull().any().value_counts()<string_transform>
def get_nn_sub() : df = pd.read_csv(".. /input/covid19-global-forecasting-week-3/train.csv") sub_df = pd.read_csv(".. /input/covid19-global-forecasting-week-3/test.csv") coo_df = pd.read_csv(".. /input/covid19week1/train.csv" ).rename(columns={"Country/Region": "Country_Region"}) coo_df = coo_df.groupby("Country_Region")[["Lat", "Long"]].mean().reset_index() coo_df = coo_df[coo_df["Country_Region"].notnull() ] loc_group = ["Province_State", "Country_Region"] def preprocess(df): df["Date"] = df["Date"].astype("datetime64[ms]") df["days"] =(df["Date"] - pd.to_datetime("2020-01-01")).dt.days df["weekend"] = df["Date"].dt.dayofweek//5 df = df.merge(coo_df, how="left", on="Country_Region") df["Lat"] =(df["Lat"] // 30 ).astype(np.float32 ).fillna(0) df["Long"] =(df["Long"] // 60 ).astype(np.float32 ).fillna(0) for col in loc_group: df[col].fillna("none", inplace=True) return df df = preprocess(df) sub_df = preprocess(sub_df) print(df.shape) TARGETS = ["ConfirmedCases", "Fatalities"] for col in TARGETS: df[col] = np.log1p(df[col]) NUM_SHIFT = 5 features = ["Lat", "Long"] for s in range(1, NUM_SHIFT+1): for col in TARGETS: df["prev_{}_{}".format(col, s)] = df.groupby(loc_group)[col].shift(s) features.append("prev_{}_{}".format(col, s)) df = df[df["Date"] >= df["Date"].min() + timedelta(days=NUM_SHIFT)].copy() TEST_FIRST = sub_df["Date"].min() TEST_DAYS =(df["Date"].max() - TEST_FIRST ).days + 1 dev_df, test_df = df[df["Date"] < TEST_FIRST].copy() , df[df["Date"] >= TEST_FIRST].copy() def nn_block(input_layer, size, dropout_rate, activation): out_layer = KL.Dense(size, activation=None )(input_layer) out_layer = KL.Activation(activation )(out_layer) out_layer = KL.Dropout(dropout_rate )(out_layer) return out_layer def get_model() : inp = KL.Input(shape=(len(features),)) hidden_layer = nn_block(inp, 64, 0.0, "relu") gate_layer = nn_block(hidden_layer, 32, 0.0, "sigmoid") hidden_layer = nn_block(hidden_layer, 32, 0.0, "relu") hidden_layer = KL.multiply([hidden_layer, gate_layer]) out = KL.Dense(len(TARGETS), activation="linear" )(hidden_layer) model = tf.keras.models.Model(inputs=[inp], outputs=out) return model get_model().summary() def get_input(df): return [df[features]] NUM_MODELS = 10 def train_models(df, save=False): models = [] for i in range(NUM_MODELS): model = get_model() model.compile(loss="mean_squared_error", optimizer=Nadam(lr=1e-4)) hist = model.fit(get_input(df), df[TARGETS], batch_size=2048, epochs=500, verbose=0, shuffle=True) if save: model.save_weights("model{}.h5".format(i)) models.append(model) return models models = train_models(dev_df) prev_targets = ['prev_ConfirmedCases_1', 'prev_Fatalities_1'] def predict_one(df, models): pred = np.zeros(( df.shape[0], 2)) for model in models: pred += model.predict(get_input(df)) /len(models) pred = np.maximum(pred, df[prev_targets].values) pred[:, 0] = np.log1p(np.expm1(pred[:, 0])+ 0.1) pred[:, 1] = np.log1p(np.expm1(pred[:, 1])+ 0.01) return np.clip(pred, None, 15) print([mean_squared_error(dev_df[TARGETS[i]], predict_one(dev_df, models)[:, i])for i in range(len(TARGETS)) ]) def rmse(y_true, y_pred): return np.sqrt(mean_squared_error(y_true, y_pred)) def evaluate(df): error = 0 for col in TARGETS: error += rmse(df[col].values, df["pred_{}".format(col)].values) return np.round(error/len(TARGETS), 5) def predict(test_df, first_day, num_days, models, val=False): temp_df = test_df.loc[test_df["Date"] == first_day].copy() y_pred = predict_one(temp_df, models) for i, col in enumerate(TARGETS): test_df["pred_{}".format(col)] = 0 
test_df.loc[test_df["Date"] == first_day, "pred_{}".format(col)] = y_pred[:, i] print(first_day, np.isnan(y_pred ).sum() , y_pred.min() , y_pred.max()) if val: print(evaluate(test_df[test_df["Date"] == first_day])) y_prevs = [None]*NUM_SHIFT for i in range(1, NUM_SHIFT): y_prevs[i] = temp_df[['prev_ConfirmedCases_{}'.format(i), 'prev_Fatalities_{}'.format(i)]].values for d in range(1, num_days): date = first_day + timedelta(days=d) print(date, np.isnan(y_pred ).sum() , y_pred.min() , y_pred.max()) temp_df = test_df.loc[test_df["Date"] == date].copy() temp_df[prev_targets] = y_pred for i in range(2, NUM_SHIFT+1): temp_df[['prev_ConfirmedCases_{}'.format(i), 'prev_Fatalities_{}'.format(i)]] = y_prevs[i-1] y_pred, y_prevs = predict_one(temp_df, models), [None, y_pred] + y_prevs[1:-1] for i, col in enumerate(TARGETS): test_df.loc[test_df["Date"] == date, "pred_{}".format(col)] = y_pred[:, i] if val: print(evaluate(test_df[test_df["Date"] == date])) return test_df test_df = predict(test_df, TEST_FIRST, TEST_DAYS, models, val=True) print(evaluate(test_df)) for col in TARGETS: test_df[col] = np.expm1(test_df[col]) test_df["pred_{}".format(col)] = np.expm1(test_df["pred_{}".format(col)]) models = train_models(df, save=True) sub_df_public = sub_df[sub_df["Date"] <= df["Date"].max() ].copy() sub_df_private = sub_df[sub_df["Date"] > df["Date"].max() ].copy() pred_cols = ["pred_{}".format(col)for col in TARGETS] sub_df_public = sub_df_public.merge(test_df[["Date"] + loc_group + TARGETS], how="left", on=["Date"] + loc_group) SUB_FIRST = sub_df_private["Date"].min() SUB_DAYS =(sub_df_private["Date"].max() - sub_df_private["Date"].min() ).days + 1 sub_df_private = df.append(sub_df_private, sort=False) for s in range(1, NUM_SHIFT+1): for col in TARGETS: sub_df_private["prev_{}_{}".format(col, s)] = sub_df_private.groupby(loc_group)[col].shift(s) sub_df_private = sub_df_private[sub_df_private["Date"] >= SUB_FIRST].copy() sub_df_private = predict(sub_df_private, SUB_FIRST, SUB_DAYS, models) for col in TARGETS: sub_df_private[col] = np.expm1(sub_df_private["pred_{}".format(col)]) sub_df = sub_df_public.append(sub_df_private, sort=False) sub_df["ForecastId"] = sub_df["ForecastId"].astype(np.int16) return sub_df[["ForecastId"] + TARGETS]
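# Sketch (not from the source kernel): the distinctive piece of get_nn_sub's network is
# the multiplicative gate -- a sigmoid branch produces per-unit gates in [0, 1] that
# scale a relu branch element-wise before the output layer. A stripped-down,
# self-contained version of just that architecture; layer sizes follow the source, the
# standalone imports are assumptions (the notebook gets KL/tf from its own import cell).
import tensorflow as tf
import tensorflow.keras.layers as KL

def gated_model(n_features, n_targets=2):
    inp = KL.Input(shape=(n_features,))
    hidden = KL.Dense(64, activation="relu")(inp)
    gate = KL.Dense(32, activation="sigmoid")(hidden)   # per-unit gates in [0, 1]
    hidden = KL.Dense(32, activation="relu")(hidden)
    hidden = KL.multiply([hidden, gate])                # element-wise gating
    out = KL.Dense(n_targets, activation="linear")(hidden)
    return tf.keras.models.Model(inputs=[inp], outputs=out)

gated_model(12).summary()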
COVID19 Global Forecasting (Week 3)
8,826,653
image_list = []
for i in train['Image']:
    image_list.append(i.split(' '))
len(image_list) <prepare_x_and_y>
sub1 = get_cpmp_sub() sub1['ForecastId'] = sub1['ForecastId'].astype('int' )
COVID19 Global Forecasting (Week 3)
8,826,653
image_list = np.array(object=image_list,dtype=float )<prepare_x_and_y>
sub1.sort_values("ForecastId", inplace=True) sub2.sort_values("ForecastId", inplace=True) submit.sort_values("ForecastId", inplace=True )
COVID19 Global Forecasting (Week 3)
8,826,653
y_train=train.drop(labels='Image',axis=1) y_train.shape<feature_engineering>
TARGETS = ["ConfirmedCases", "Fatalities"] [np.sqrt(mean_squared_error(np.log1p(sub1[t].values), np.log1p(sub2[t].values)))for t in TARGETS]
COVID19 Global Forecasting (Week 3)
8,826,653
X_train=X_train/255 X_train[1]<choose_model_class>
sub_df = sub1.copy()
for t in TARGETS:
    sub_df[t] = np.expm1(np.log1p(submit[t].values)*0.2 + np.log1p(sub1[t].values)*0.3 + np.log1p(sub2[t].values)*0.5)
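# Sketch (not from the source kernel): because the blend above averages in log1p space,
# it is a weighted geometric mean of (1 + prediction), which damps any single model's
# large outliers more than an arithmetic mean would. The same operation as a reusable
# helper; frames and weights are the ones used above:
def blend_log1p(frames, weights, targets=("ConfirmedCases", "Fatalities")):
    assert abs(sum(weights) - 1.0) < 1e-9, "weights should sum to 1"
    out = frames[0].copy()
    for t in targets:
        log_mix = sum(w * np.log1p(f[t].values) for f, w in zip(frames, weights))
        out[t] = np.expm1(log_mix)
    return out
# e.g. sub_df = blend_log1p([submit, sub1, sub2], [0.2, 0.3, 0.5])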
COVID19 Global Forecasting (Week 3)
8,826,653
<choose_model_class><EOS>
sub_df.to_csv("submission.csv", index=False )
COVID19 Global Forecasting (Week 3)
8,785,208
<SOS> metric: MCRMSLE Kaggle data source: covid19-global-forecasting-week-3<train_model>
import plotly.graph_objects as go import numpy as np import pandas as pd import matplotlib.pyplot as plt from tqdm import tqdm import time from datetime import datetime from pathlib import Path from sklearn import preprocessing import keras.backend as K from keras.models import Sequential from keras.layers import Dense, LSTM, RNN, Dropout from keras.callbacks import EarlyStopping, ModelCheckpoint from keras import optimizers from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder from sklearn.model_selection import train_test_split
COVID19 Global Forecasting (Week 3)
8,785,208
hist=model.fit(x=X_train,y=y_train,batch_size=128,epochs=200,verbose=2,validation_split=0.2) hist<import_modules>
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/train.csv', parse_dates=['Date']) test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/test.csv',parse_dates=['Date']) submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/submission.csv') train.tail()
COVID19 Global Forecasting (Week 3)
8,785,208
from sklearn.metrics import r2_score <compute_test_metric>
last_date = train['Date'].max()  # most recent date in the training window
world_cum_confirmed = train[train['Date'] == last_date].ConfirmedCases.sum()
world_cum_fatal = train[train['Date'] == last_date].Fatalities.sum()
COVID19 Global Forecasting (Week 3)
8,785,208
y_pred =model.predict(X_train) score = r2_score(y_train,y_pred) score<load_from_csv>
print('Number of countries: ', len(train['Country_Region'].unique()))
print('Training dataset ends at: ', last_date)
print('Cumulative confirmed cases worldwide: ', world_cum_confirmed)
print('Cumulative fatal cases worldwide: ', world_cum_fatal)
COVID19 Global Forecasting (Week 3)
8,785,208
test = pd.read_csv('../input/test/test.csv')<prepare_x_and_y>
cum_per_country = train[train['Date'] == last_date].groupby(['Date', 'Country_Region']).sum().sort_values(['ConfirmedCases'], ascending=False)
cum_per_country[:10]
COVID19 Global Forecasting (Week 3)
8,785,208
y=np.arange(1,501 )<string_transform>
date = train['Date'].unique() cc_us = train[train['Country_Region'] == 'US'].groupby(['Date'] ).sum().ConfirmedCases ft_us = train[train['Country_Region'] == 'US'].groupby(['Date'] ).sum().Fatalities cc_ity = train[train['Country_Region'] == 'Italy'].groupby(['Date'] ).sum().ConfirmedCases ft_ity = train[train['Country_Region'] == 'Italy'].groupby(['Date'] ).sum().Fatalities cc_spn = train[train['Country_Region'] == 'Spain'].groupby(['Date'] ).sum().ConfirmedCases ft_spn = train[train['Country_Region'] == 'Spain'].groupby(['Date'] ).sum().Fatalities cc_gmn = train[train['Country_Region'] == 'Germany'].groupby(['Date'] ).sum().ConfirmedCases ft_gmn = train[train['Country_Region'] == 'Germany'].groupby(['Date'] ).sum().Fatalities cc_frc = train[train['Country_Region'] == 'France'].groupby(['Date'] ).sum().ConfirmedCases ft_frc = train[train['Country_Region'] == 'France'].groupby(['Date'] ).sum().Fatalities fig = go.Figure() fig.add_trace(go.Scatter(x=date, y=cc_us, name='US')) fig.add_trace(go.Scatter(x=date, y=cc_ity, name='Italy')) fig.add_trace(go.Scatter(x=date, y=cc_spn, name='Spain')) fig.add_trace(go.Scatter(x=date, y=cc_gmn, name='Germany')) fig.add_trace(go.Scatter(x=date, y=cc_frc, name='France')) fig.update_layout(title="Plot of Cumulative Cases for Top 5 countires(except China)", xaxis_title="Date", yaxis_title="Cases") fig.update_xaxes(nticks=30) fig.show()
COVID19 Global Forecasting (Week 3)
8,785,208
image_list = []
for i in test['Image']:
    image_list.append(i.split(' '))
len(image_list)<prepare_x_and_y>
train.columns = train.columns.str.lower() test.columns = test.columns.str.lower()
COVID19 Global Forecasting (Week 3)
8,785,208
image_list = np.array(image_list, dtype=float)
images = image_list.reshape(-1, 96, 96, 1)
X_test = images / 255.0 <predict_on_test>
train.fillna(' ', inplace=True)
test.fillna(' ', inplace=True)
train_id = train.pop('id')
test_id = test.pop('forecastid')
train['cp'] = train['country_region'] + train['province_state']
test['cp'] = test['country_region'] + test['province_state']
train.drop(['province_state', 'country_region'], axis=1, inplace=True)
test.drop(['province_state', 'country_region'], axis=1, inplace=True)
COVID19 Global Forecasting (Week 3)
8,785,208
predicted_value =model.predict(X_test )<create_dataframe>
def create_time_feat(data):
    # build calendar features from the date column; use a local frame so the
    # train and test calls do not share (and misalign) state
    df = pd.DataFrame()
    df['date'] = data['date']
    df['hour'] = df['date'].dt.hour
    df['weekofyear'] = df['date'].dt.isocalendar().week.astype(int)  # .dt.weekofyear is deprecated in newer pandas
    df['quarter'] = df['date'].dt.quarter
    df['month'] = df['date'].dt.month
    df['dayofyear'] = df['date'].dt.dayofyear
    return df[['hour', 'weekofyear', 'quarter', 'month', 'dayofyear']]

cr_tr = create_time_feat(train)
cr_te = create_time_feat(test)
COVID19 Global Forecasting (Week 3)
8,785,208
pv =pd.DataFrame(data=predicted_value) img_show(image_list,pv )<save_to_csv>
train_df = pd.concat([train, cr_tr], axis=1)
test_df = pd.concat([test, cr_te], axis=1)
test_df.dropna(inplace=True)  # defensive; with per-frame calendar features this should drop nothing
COVID19 Global Forecasting (Week 3)
8,785,208
pred = model.predict(X_test)
lookid_data = pd.read_csv('../input/IdLookupTable.csv')
lookid_list = list(lookid_data['FeatureName'])
imageID = list(lookid_data['ImageId'] - 1)
pre_list = list(pred)
rowid = list(lookid_data['RowId'])
feature = []
for f in list(lookid_data['FeatureName']):
    # first-occurrence index doubles as the prediction column index; this relies on
    # the lookup table's first image listing all 30 keypoints in training-column order
    feature.append(lookid_list.index(f))
preded = []
for x, y in zip(imageID, feature):
    preded.append(pre_list[x][y])
rowid = pd.Series(rowid, name='RowId')
loc = pd.Series(preded, name='Location')
submission = pd.concat([rowid, loc], axis=1)
submission.to_csv('submission.csv', index=False)<load_from_csv>
le = LabelEncoder()
train_df['cp_le'] = le.fit_transform(train_df['cp'])
test_df['cp_le'] = le.transform(test_df['cp'])
train_df.drop(['cp'], axis=1, inplace=True)
test_df.drop(['cp'], axis=1, inplace=True)
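# Sketch (not from the source kernel): fitting the LabelEncoder on train and
# transforming test works here only because every test 'cp' also appears in train
# (same locations, later dates). If that guarantee ever broke, transform would raise on
# unseen labels; a hedged defensive variant (helper name hypothetical) maps unknowns
# to -1 instead:
def safe_label_encode(train_col, test_col):
    le = LabelEncoder().fit(train_col)
    mapping = {c: i for i, c in enumerate(le.classes_)}
    return train_col.map(mapping), test_col.map(mapping).fillna(-1).astype(int)
# e.g. train_df['cp_le'], test_df['cp_le'] = safe_label_encode(train_df['cp'], test_df['cp'])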
COVID19 Global Forecasting (Week 3)
8,785,208
df = pd.read_csv('submission.csv')<set_options>
def create_date_feat(data, cf, ft):
    for d in data['date'].drop_duplicates():
        for i in data['cp_le'].drop_duplicates():
            org_mask = (data['date'] == d) & (data['cp_le'] == i)
            for lag in range(1, 15):
                mask_loc = (data['date'] == (d - pd.Timedelta(days=lag))) & (data['cp_le'] == i)
                try:
                    data.loc[org_mask, 'cf_' + str(lag)] = data.loc[mask_loc, cf].values
                    data.loc[org_mask, 'ft_' + str(lag)] = data.loc[mask_loc, ft].values
                except:  # no row that many days back -> lag defaults to 0
                    data.loc[org_mask, 'cf_' + str(lag)] = 0.0
                    data.loc[org_mask, 'ft_' + str(lag)] = 0.0

create_date_feat(train_df, 'confirmedcases', 'fatalities')
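# Sketch (not from the source kernel): create_date_feat loops over every
# (date, location, lag) triple, which is very slow at this size. Since the frame holds
# one row per (location, date), groupby().shift() builds the same lag columns
# vectorized -- assuming rows are consecutive daily observations within each location:
def create_date_feat_fast(data, cf='confirmedcases', ft='fatalities', n_lags=14):
    data = data.sort_values(['cp_le', 'date']).copy()
    g = data.groupby('cp_le')
    for lag in range(1, n_lags + 1):
        data['cf_' + str(lag)] = g[cf].shift(lag).fillna(0.0)
        data['ft_' + str(lag)] = g[ft].shift(lag).fillna(0.0)
    return data
# e.g. train_df = create_date_feat_fast(train_df)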
COVID19 Global Forecasting (Week 3)
8,785,208
pd.set_option('max_columns', None)
plt.style.use('fivethirtyeight')
%matplotlib inline
py.init_notebook_mode(connected=True)
warnings.filterwarnings('ignore')
print("Libraries imported!")<train_model>
def rmsle(pred, true):
    assert pred.shape[0] == true.shape[0]
    return K.sqrt(K.mean(K.square(K.log(pred + 1) - K.log(true + 1))))

es = EarlyStopping(monitor='val_loss', min_delta=0, verbose=0, patience=10, mode='auto')
mc_cf = ModelCheckpoint('model_cf.h5', monitor='val_loss', verbose=0, save_best_only=True)
mc_ft = ModelCheckpoint('model_ft.h5', monitor='val_loss', verbose=0, save_best_only=True)

def lstm_model(hidden_nodes, second_dim, third_dim):
    model = Sequential([LSTM(hidden_nodes, input_shape=(second_dim, third_dim), activation='relu'),
                        Dense(64, activation='relu'),
                        Dense(32, activation='relu'),
                        Dense(1, activation='relu')])
    model.compile(loss=rmsle, optimizer='adam')
    return model

model_cf = lstm_model(10, tr_x_cf.shape[1], tr_x_cf.shape[2])
model_ft = lstm_model(10, tr_x_ft.shape[1], tr_x_ft.shape[2])
history_cf = model_cf.fit(tr_x_cf, tr_y_cf, epochs=200, batch_size=512,
                          validation_data=(val_x_cf, val_y_cf), callbacks=[es, mc_cf])
history_ft = model_ft.fit(tr_x_ft, tr_y_ft, epochs=200, batch_size=512,
                          validation_data=(val_x_ft, val_y_ft), callbacks=[es, mc_ft])
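# Sketch (not from the source kernel): lstm_model expects 3-D input of shape
# (samples, timesteps, features); with a single timestep, as used here, the LSTM acts
# like a gated dense layer over the lag features. The tr_x_*/val_x_* arrays are built
# upstream (not shown); this is the reshape they are assumed to have gone through,
# mirroring the test-time reshape inside create_add_trend_pred below (helper name
# hypothetical):
def to_lstm_input(x2d):
    arr = np.asarray(x2d, dtype='float32')
    return arr.reshape(arr.shape[0], 1, arr.shape[1])  # (n, features) -> (n, 1, features)
# e.g. tr_x_cf = to_lstm_input(train_df[c_feat])  # c_feat is defined in the next cell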
COVID19 Global Forecasting (Week 3)
8,785,208
class BaseModel(object): def __init__(self, train_df, test_df, target, features, categoricals=[], n_splits=3, cv_method="KFold", group=None, task="regression", parameter_tuning=False, scaler=None, verbose=True): self.train_df = train_df self.test_df = test_df self.target = target self.features = features self.n_splits = n_splits self.categoricals = categoricals self.cv_method = cv_method self.group = group self.task = task self.parameter_tuning = parameter_tuning self.scaler = scaler self.cv = self.get_cv() self.verbose = verbose self.params = self.get_params() self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit() def train_model(self, train_set, val_set): raise NotImplementedError def get_params(self): raise NotImplementedError def convert_dataset(self, x_train, y_train, x_val, y_val): raise NotImplementedError def convert_x(self, x): return x def calc_metric(self, y_true, y_pred): if self.task == "classification": return log_loss(y_true, y_pred) elif self.task == "regression": return np.sqrt(mean_squared_error(y_true, y_pred)) def get_cv(self): if self.cv_method == "KFold": cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df) elif self.cv_method == "StratifiedKFold": cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target]) elif self.cv_method == "TimeSeriesSplit": cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits) return cv.split(self.train_df) elif self.cv_method == "GroupKFold": cv = GroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) elif self.cv_method == "StratifiedGroupKFold": cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42) return cv.split(self.train_df, self.train_df[self.target], self.group) def fit(self): oof_pred = np.zeros(( self.train_df.shape[0],)) y_vals = np.zeros(( self.train_df.shape[0],)) y_pred = np.zeros(( self.test_df.shape[0],)) if self.group is not None: if self.group in self.features: self.features.remove(self.group) if self.group in self.categoricals: self.categoricals.remove(self.group) fi = np.zeros(( self.n_splits, len(self.features))) if self.scaler is not None: numerical_features = [f for f in self.features if f not in self.categoricals] self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median()) self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median()) self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0]) self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0]) if self.scaler == "MinMax": scaler = MinMaxScaler() elif self.scaler == "Standard": scaler = StandardScaler() df = pd.concat([self.train_df[numerical_features], self.test_df[numerical_features]], ignore_index=True) scaler.fit(df[numerical_features]) x_test = self.test_df.copy() x_test[numerical_features] = scaler.transform(x_test[numerical_features]) x_test = [np.absolute(x_test[i])for i in self.categoricals] + [x_test[numerical_features]] else: x_test = self.test_df[self.features] for fold,(train_idx, val_idx)in enumerate(self.cv): x_train, x_val = self.train_df.loc[train_idx, self.features], self.train_df.loc[val_idx, self.features] y_train, y_val = 
self.train_df.loc[train_idx, self.target], self.train_df.loc[val_idx, self.target] if self.scaler is not None: x_train[numerical_features] = scaler.transform(x_train[numerical_features]) x_val[numerical_features] = scaler.transform(x_val[numerical_features]) x_train = [np.absolute(x_train[i])for i in self.categoricals] + [x_train[numerical_features]] x_val = [np.absolute(x_val[i])for i in self.categoricals] + [x_val[numerical_features]] train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val) model, importance = self.train_model(train_set, val_set) fi[fold, :] = importance conv_x_val = self.convert_x(x_val) y_vals[val_idx] = y_val oof_pred[val_idx] = model.predict(conv_x_val ).reshape(oof_pred[val_idx].shape) x_test = self.convert_x(x_test) y_pred += model.predict(x_test ).reshape(y_pred.shape)/ self.n_splits print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_val, oof_pred[val_idx]))) fi_df = pd.DataFrame() for n in np.arange(self.n_splits): tmp = pd.DataFrame() tmp["features"] = self.features tmp["importance"] = fi[n, :] tmp["fold"] = n fi_df = pd.concat([fi_df, tmp], ignore_index=True) gfi = fi_df[["features", "importance"]].groupby(["features"] ).mean().reset_index() fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=('', '_mean')) loss_score = self.calc_metric(self.train_df[self.target], oof_pred) if self.verbose: print('Our oof loss score is: ', loss_score) return y_pred, loss_score, model, oof_pred, y_vals, fi_df def plot_feature_importance(self, rank_range=[1, 50]): fig, ax = plt.subplots(1, 1, figsize=(10, 20)) sorted_df = self.fi_df.sort_values(by = "importance_mean", ascending=False ).reset_index().iloc[self.n_splits *(rank_range[0]-1): self.n_splits * rank_range[1]] sns.barplot(data=sorted_df, x ="importance", y ="features", orient='h') ax.set_xlabel("feature importance") ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) return sorted_df<train_model>
feat = ['confirmedcases','fatalities','cf_1', 'ft_1', 'cf_2', 'ft_2', 'cf_3', 'ft_3', 'cf_4', 'ft_4', 'cf_5', 'ft_5', 'cf_6', 'ft_6', 'cf_7', 'ft_7', 'cf_8', 'ft_8', 'cf_9', 'ft_9', 'cf_10', 'ft_10', 'cf_11', 'ft_11', 'cf_12', 'ft_12', 'cf_13', 'ft_13', 'cf_14', 'ft_14'] c_feat = ['cp_le', 'weekofyear','quarter','month','dayofyear','cf_1', 'cf_2', 'cf_3', 'cf_4', 'cf_5', 'cf_6', 'cf_7', 'cf_8', 'cf_9','cf_10', 'cf_11', 'cf_12', 'cf_13', 'cf_14'] f_feat = ['cp_le', 'weekofyear','quarter','month','dayofyear','ft_1', 'ft_2', 'ft_3', 'ft_4', 'ft_5', 'ft_6', 'ft_7', 'ft_8', 'ft_9','ft_10', 'ft_11', 'ft_12', 'ft_13', 'ft_14'] tot_feat = ['cp_le', 'weekofyear','quarter','month','dayofyear','cf_1', 'ft_1', 'cf_2', 'ft_2', 'cf_3', 'ft_3', 'cf_4', 'ft_4', 'cf_5', 'ft_5', 'cf_6', 'ft_6', 'cf_7', 'ft_7', 'cf_8', 'ft_8', 'cf_9', 'ft_9', 'cf_10', 'ft_10', 'cf_11', 'ft_11', 'cf_12', 'ft_12', 'cf_13', 'ft_13', 'cf_14', 'ft_14'] test_new = test_df.copy().join(pd.DataFrame(columns=feat)) test_mask =(test_df['date'] <= train_df['date'].max()) train_mask =(train_df['date'] >= test_df['date'].min()) test_new.loc[test_mask,feat] = train_df.loc[train_mask, feat].values future_df = pd.date_range(start = train_df['date'].max() +pd.Timedelta(days=1),end=test_df['date'].max() , freq='1D') def create_add_trend_pred(data, cf, ft): for d in future_df: for i in data['cp_le'].drop_duplicates() : org_mask =(data['date']==d)&(data['cp_le']==i) for lag in range(1,15): mask_loc =(data['date']==(d-pd.Timedelta(days=lag)))&(data['cp_le']==i) try: data.loc[org_mask, 'cf_' + str(lag)]=data.loc[mask_loc,cf].values data.loc[org_mask, 'ft_' + str(lag)]=data.loc[mask_loc,ft].values except: data.loc[org_mask, 'cf_' + str(lag)]=0.0 data.loc[org_mask, 'ft_' + str(lag)]=0.0 test_x = data.loc[org_mask,tot_feat] test_x_cf = test_x[c_feat] test_x_cf = test_x_cf.to_numpy().reshape(1,-1) test_x_cf_reshape = test_x_cf.reshape(test_x_cf.shape[0],1,test_x_cf.shape[1]) test_x_ft = test_x[f_feat] test_x_ft = test_x_ft.to_numpy().reshape(1,-1) test_x_ft_reshape = test_x_ft.reshape(test_x_ft.shape[0],1,test_x_ft.shape[1]) data.loc[org_mask, cf] = model_cf.predict(test_x_cf_reshape) data.loc[org_mask, ft] = model_ft.predict(test_x_ft_reshape) create_add_trend_pred(test_new, 'confirmedcases', 'fatalities' )
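# Sketch (not from the source kernel): the cell above first copies known targets and
# lags from the tail of train into the overlapping test rows with a positional
# assignment (.values sidesteps index alignment), then rolls the two LSTMs forward one
# day at a time, writing each day's predictions into the lag columns of later days.
# The positional copy silently assumes both masks select the same (location, date)
# rows in the same order, so a cheap consistency check is worth having:
assert test_mask.sum() == train_mask.sum(), "train/test overlap row counts differ"
assert (test_new.loc[test_mask, 'cp_le'].values ==
        train_df.loc[train_mask, 'cp_le'].values).all(), "location order differs"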
COVID19 Global Forecasting (Week 3)
8,785,208
class LgbModel(BaseModel): def train_model(self, train_set, val_set): verbosity = 100 if self.verbose else 0 model = lgb.train(self.params, train_set, num_boost_round = 5000, valid_sets=[train_set, val_set], verbose_eval=verbosity) fi = model.feature_importance(importance_type="gain") return model, fi def convert_dataset(self, x_train, y_train, x_val, y_val): train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals) val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals) return train_set, val_set def get_params(self): params = { 'num_leaves': 127, 'min_data_in_leaf': 50, 'max_depth': -1, 'learning_rate': 0.005, "boosting_type": "gbdt", "bagging_seed": 11, "verbosity": -1, 'random_state': 42, } if self.task == "regression": params["objective"] = "regression" params["metric"] = "rmse" elif self.task == "classification": params["objective"] = "binary" params["metric"] = "binary_logloss" if self.parameter_tuning == True: def objective(trial): train_x, test_x, train_y, test_y = train_test_split(self.train_df[self.features], self.train_df[self.target], test_size=0.3, random_state=42) dtrain = lgb.Dataset(train_x, train_y, categorical_feature=self.categoricals) dtest = lgb.Dataset(test_x, test_y, categorical_feature=self.categoricals) hyperparams = {'num_leaves': trial.suggest_int('num_leaves', 24, 1024), 'boosting_type': 'gbdt', 'objective': params["objective"], 'metric': params["metric"], 'max_depth': trial.suggest_int('max_depth', 4, 16), 'min_child_weight': trial.suggest_int('min_child_weight', 1, 20), 'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 100), 'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0), 'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0), 'early_stopping_rounds': 100 } model = lgb.train(hyperparams, dtrain, valid_sets=dtest, verbose_eval=500) pred = model.predict(test_x) if self.task == "classification": return log_loss(test_y, pred) elif self.task == "regression": return np.sqrt(mean_squared_error(test_y, pred)) study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=50) print('Number of finished trials: {}'.format(len(study.trials))) print('Best trial:') trial = study.best_trial print(' Value: {}'.format(trial.value)) print(' Params: ') for key, value in trial.params.items() : print(' {}: {}'.format(key, value)) params = trial.params params["learning_rate"] = 0.001 plot_optimization_history(study) return params<load_from_csv>
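# Sketch (not from the source kernel): LgbModel only wires LightGBM into the BaseModel
# cross-validation loop defined above; nothing in this section instantiates it. Usage
# would follow the base signature -- arguments below are illustrative, and it assumes
# lightgbm is imported as lgb with train/test frames, target and feature lists prepared
# as for the other wrappers:
lgbm = LgbModel(train_df, test_df, target, features, categoricals=categoricals,
                n_splits=5, cv_method="KFold", task="regression", verbose=True)
print(lgbm.score)                                 # oof loss computed in BaseModel.fit
test_preds = lgbm.y_pred                          # fold-averaged test predictions
lgbm.plot_feature_importance(rank_range=[1, 30])  # gain-based importances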
sub_pred = pd.DataFrame({'ForecastId': test_id, 'ConfirmedCases':test_new['confirmedcases'],'Fatalities':test_new['fatalities']}) sub_pred.to_csv('submission.csv', index=False )
COVID19 Global Forecasting (Week 3)
8,769,803
data_dict = {} for i in glob.glob('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/*'): name = i.split('/')[-1].split('.')[0] if name != 'WTeamSpellings': data_dict[name] = pd.read_csv(i) else: data_dict[name] = pd.read_csv(i, encoding='cp1252' )<feature_engineering>
df=pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/train.csv") test=pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/test.csv" )
COVID19 Global Forecasting (Week 3)
8,769,803
data_dict['WNCAATourneySeeds']['Seed'] = data_dict['WNCAATourneySeeds']['Seed'].apply(lambda x: int(x[1:3])) data_dict[fname].head()<load_from_csv>
df["Province_State"].fillna("state", inplace = True) df["Country_Region"] = [country_name.replace("'","")for country_name in df["Country_Region"]] print(df.shape," ",df.head() )
COVID19 Global Forecasting (Week 3)
8,769,803
test = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv') print(test.shape) test.head()<feature_engineering>
X_y=shuffle(new_data) y_cases=X_y['target_infection'] y_fatal=X_y['target_fatal'] X=X_y.drop(['target_infection','target_fatal'],axis=1) X_train_cases, X_test_cases, y_train_cases, y_test_cases = train_test_split(X, y_cases, test_size=0.33) X_train_fatal, X_test_fatal, y_train_fatal, y_test_fatal = train_test_split(X, y_fatal, test_size=0.33) print("Shape of infection train dataset:",(X_train_cases.shape,y_train_cases.shape)) print("Shape of infection test dataset:",(X_test_cases.shape,y_test_cases.shape)) print("Shape of fatal train dataset:",(X_train_fatal.shape,y_train_fatal.shape)) print("Shape of fatal test dataset:",(X_test_fatal.shape,y_test_fatal.shape))
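# Sketch (not from the source kernel): shuffle + train_test_split gives a random split;
# for a forecasting task that leaks future rows into training and flatters the held-out
# score. A hedged chronological alternative, assuming new_data keeps a 'date' column
# (not shown in this cell):
dates = np.sort(new_data['date'].unique())
cutoff = dates[int(len(dates) * 0.67)]             # train on the first ~2/3 of dates
train_part = new_data[new_data['date'] <= cutoff]
test_part = new_data[new_data['date'] > cutoff]
X_train_cases = train_part.drop(['target_infection', 'target_fatal'], axis=1)
y_train_cases = train_part['target_infection']
X_test_cases = test_part.drop(['target_infection', 'target_fatal'], axis=1)
y_test_cases = test_part['target_infection']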
COVID19 Global Forecasting (Week 3)