path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses: 1 value) |
---|---|---|---|
106201316/cell_17 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sns.set(rc={'figure.figsize': (30, 18)})
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submission_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n Detect the missing values by dtype and return a DataFrame with one row\n per feature that contains NaNs (feature name : NaN count).\n ----------\n parameters\n ----------\n data : the DataFrame to check\n \n dtype : "object" for categorical features, anything else for numerical\n '
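# Hedged usage sketch (not in the original notebook): list the numeric
# columns that still contain NaNs, e.g. Age in the raw training data.
check_missing(train, dtype='number')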
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n Simple function that renames every column of the DataFrame to lowercase\n in place; list(data) yields every column title in the DataFrame.\n '
lower_case_features(train)
lower_case_features(test)
# 'agecat' is created here before plotting (the binning step from the other
# cells of this notebook); without it the catplot below would raise a KeyError
for dataset in [train, test]:
    dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
fig = sns.catplot(data=train, x='agecat', y='survived', kind='bar', palette='deep')
fig.set_axis_labels('Ages', 'Survival rate', size=15)
fig.fig.suptitle('Survival rate per age group', verticalalignment='center', size=15)
plt.show()
fig = sns.catplot(data=train, x='sex', y='survived', kind='bar', palette='deep')
fig.set_axis_labels('Sex', 'Survival rate', size=15)
fig.fig.suptitle('Survival rate per gender', verticalalignment='center', size=15)
plt.show() | code |
106201316/cell_43 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submission_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n Detect the missing values by dtype and return a DataFrame with one row\n per feature that contains NaNs (feature name : NaN count).\n ----------\n parameters\n ----------\n data : the DataFrame to check\n \n dtype : "object" for categorical features, anything else for numerical\n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n Simple function that renames every column of the DataFrame to lowercase\n in place; list(data) yields every column title in the DataFrame.\n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
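# pd.cut above splits the observed age range into five equal-width bins;
# include_lowest=True keeps the youngest passengers in the first interval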
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
    d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)
def get_title(df):
    sliced_name = df['name'].str.split(expand=True)
    # regex=False so the literal '.' is stripped; with regex=True the pattern
    # '.' matches every character and blanks the whole title
    sliced_name[1] = sliced_name[1].str.replace('.', '', regex=False)
    df['title'] = sliced_name[1]
    df.drop(['name'], axis=1, inplace=True)
'\n This function splits the name feature, takes the title token that follows\n the surname (e.g. Mr, Mrs) and stores it in a new feature.\n ----------\n parameters\n ----------\n df : just your DataFrame\n -------\n returns\n -------\n your DataFrame with a new feature called title and without the name feature\n '
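# Hedged usage sketch (kept commented out so the cell state is unchanged):
# get_title(train) would add a 'title' column such as 'Mr'/'Mrs' and drop 'name'.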
n_train = train.shape[0]
y = train.survived
df = pd.concat((train, test)).reset_index(drop=True)
df.drop(['survived'], axis=1, inplace=True)
print('shape of all the data is ==>', df.shape)
df.head(2) | code |
106201316/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submission_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n Detect the missing values by dtype and return a DataFrame with one row\n per feature that contains NaNs (feature name : NaN count).\n ----------\n parameters\n ----------\n data : the DataFrame to check\n \n dtype : "object" for categorical features, anything else for numerical\n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n Simple function that renames every column of the DataFrame to lowercase\n in place; list(data) yields every column title in the DataFrame.\n '
lower_case_features(train)
lower_case_features(test)
test.head(2) | code |
106201316/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submission_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n Detect the missing values by dtype and return a DataFrame with one row\n per feature that contains NaNs (feature name : NaN count).\n ----------\n parameters\n ----------\n data : the DataFrame to check\n \n dtype : "object" for categorical features, anything else for numerical\n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n Simple function that renames every column of the DataFrame to lowercase\n in place; list(data) yields every column title in the DataFrame.\n '
lower_case_features(train)
lower_case_features(test)
print(f'shape of the training data is {train.shape}\nshape of the test data is {test.shape}') | code |
106201316/cell_27 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submission_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n Detect the missing values by dtype and return a DataFrame with one row\n per feature that contains NaNs (feature name : NaN count).\n ----------\n parameters\n ----------\n data : the DataFrame to check\n \n dtype : "object" for categorical features, anything else for numerical\n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n Simple function that renames every column of the DataFrame to lowercase\n in place; list(data) yields every column title in the DataFrame.\n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
    d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)
print('now the data looks like:')
train.head() | code |
106201316/cell_36 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submission_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n Detect the missing values by dtype and return a DataFrame with one row\n per feature that contains NaNs (feature name : NaN count).\n ----------\n parameters\n ----------\n data : the DataFrame to check\n \n dtype : "object" for categorical features, anything else for numerical\n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n Simple function that renames every column of the DataFrame to lowercase\n in place; list(data) yields every column title in the DataFrame.\n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
    d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)
def get_title(df):
    sliced_name = df['name'].str.split(expand=True)
    # regex=False so the literal '.' is stripped; with regex=True the pattern
    # '.' matches every character and blanks the whole title
    sliced_name[1] = sliced_name[1].str.replace('.', '', regex=False)
    df['title'] = sliced_name[1]
    df.drop(['name'], axis=1, inplace=True)
'\n This function splits the name feature, takes the title token that follows\n the surname (e.g. Mr, Mrs) and stores it in a new feature.\n ----------\n parameters\n ----------\n df : just your DataFrame\n -------\n returns\n -------\n your DataFrame with a new feature called title and without the name feature\n '
def get_others(df):
keep = ['Mr', 'Miss', 'Mrs', 'Master']
titles = list(df['title'].values)
others = [i for i in titles if i not in keep]
df['title'] = df['title'].replace(others, 'other')
return df
"\n \n this function takes any value except ('Mr', 'Miss', 'Mrs', 'Master')\n and append it to a list and replace the values in the data frame with other\n \n ----------\n parameters\n ----------\n just the data set\n \n -------\n returns\n -------\n the data frame with title feature with \n "
for DF in [train, test]:
get_others(DF)
print('now the title counts are:')
test['title'].value_counts() | code |
32068083/cell_9 | [
"text_html_output_4.png",
"text_html_output_6.png",
"text_html_output_2.png",
"text_html_output_5.png",
"text_html_output_1.png",
"text_html_output_8.png",
"text_html_output_3.png",
"text_html_output_7.png"
] | import datetime
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df = train_df.drop(['Id'], axis=1)
train_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
renameCountryNames = {'Congo (Brazzaville)': 'Congo1', 'Congo (Kinshasa)': 'Congo2', "Cote d'Ivoire": "Côte d'Ivoire", 'Czechia': 'Czech Republic (Czechia)', 'Korea, South': 'South Korea', 'Saint Kitts and Nevis': 'Saint Kitts & Nevis', 'Saint Vincent and the Grenadines': 'St. Vincent & Grenadines', 'Taiwan*': 'Taiwan', 'US': 'United States'}
train_df.replace({'Country': renameCountryNames}, inplace=True)
test_df.replace({'Country': renameCountryNames}, inplace=True)
specific_countries = ['United States', 'United Kingdom', 'Netherlands']
days_df = train_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
train_df['Days'] = days_df
days_df = test_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
test_df['Days'] = days_df
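# A hedged, more idiomatic sketch of the same day count (assumes the ISO
# dates above): (pd.to_datetime(train_df['Date']) - pd.Timestamp('2020-01-21')).dt.days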
all_countries = train_df['Country'].unique()
ww_df = train_df.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
ww_df['new_case'] = ww_df['ConfirmedCases'] - ww_df['ConfirmedCases'].shift(1)
ww_df['new_deaths'] = ww_df['Fatalities'] - ww_df['Fatalities'].shift(1)
country_df = train_df.groupby(['Date', 'Country'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
target_date = country_df['Date'].max()
train_end_day = train_df['Days'].max()
test_start_day = test_df['Days'].min()
test_end_day = test_df['Days'].max()
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
test_df['ConfirmedCases'] = np.nan
test_df['Fatalities'] = np.nan
test_df = test_df.set_index('Date')
display(test_df)
prediction = {} | code |
32068083/cell_4 | [
"text_html_output_1.png"
] | import datetime
import os
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from plotly import tools, subplots
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
import plotly.io as pio
pio.templates.default = 'plotly_dark'
train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df = train_df.drop(['Id'], axis=1)
train_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
renameCountryNames = {'Congo (Brazzaville)': 'Congo1', 'Congo (Kinshasa)': 'Congo2', "Cote d'Ivoire": "Côte d'Ivoire", 'Czechia': 'Czech Republic (Czechia)', 'Korea, South': 'South Korea', 'Saint Kitts and Nevis': 'Saint Kitts & Nevis', 'Saint Vincent and the Grenadines': 'St. Vincent & Grenadines', 'Taiwan*': 'Taiwan', 'US': 'United States'}
train_df.replace({'Country': renameCountryNames}, inplace=True)
test_df.replace({'Country': renameCountryNames}, inplace=True)
specific_countries = ['United States', 'United Kingdom', 'Netherlands']
days_df = train_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
train_df['Days'] = days_df
days_df = test_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
test_df['Days'] = days_df
all_countries = train_df['Country'].unique()
ww_df = train_df.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
ww_df['new_case'] = ww_df['ConfirmedCases'] - ww_df['ConfirmedCases'].shift(1)
ww_df['new_deaths'] = ww_df['Fatalities'] - ww_df['Fatalities'].shift(1)
country_df = train_df.groupby(['Date', 'Country'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
target_date = country_df['Date'].max()
train_end_day = train_df['Days'].max()
test_start_day = test_df['Days'].min()
test_end_day = test_df['Days'].max()
py.init_notebook_mode()
top_country_df = country_df.query('(Date == @target_date) & (ConfirmedCases > 2000)').sort_values('ConfirmedCases', ascending=False)
print(len(top_country_df))
top_country_melt_df = pd.melt(top_country_df, id_vars='Country', value_vars=['ConfirmedCases', 'Fatalities'])
display(top_country_df.head())
display(top_country_melt_df.head())
fig = px.bar(top_country_melt_df.iloc[::-1],
x='value', y='Country', color='variable', barmode='group',
title=f'Confirmed Cases/Deaths on {target_date}', text='value', height=1500, orientation='h')
fig.show()
country_province_df = train_df[train_df['Country'] == 'United States'].groupby(['Date', 'Province_State'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
top_province_df = country_province_df.query('(Date == @target_date)').sort_values('ConfirmedCases', ascending=False)
top30_provinces = top_province_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Province_State'].unique()
country_province_df['prev_cases'] = country_province_df.groupby('Province_State')['ConfirmedCases'].shift(1)
country_province_df['New Case'] = country_province_df['ConfirmedCases'] - country_province_df['prev_cases']
country_province_df['New Case'].fillna(0, inplace=True)
country_province_df['prev_deaths'] = country_province_df.groupby('Province_State')['Fatalities'].shift(1)
country_province_df['New Death'] = country_province_df['Fatalities'] - country_province_df['prev_deaths']
country_province_df['New Death'].fillna(0, inplace=True)
display(country_province_df[-30:])
for province in top30_provinces:
    present_country_df = country_province_df[country_province_df['Province_State'] == province]
    px.bar(present_country_df, x='Date', y='New Case', color='Province_State', title=f'United States : DAILY NEW Confirmed cases in {province}').show() | code |
32068083/cell_2 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import datetime
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df = train_df.drop(['Id'], axis=1)
train_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
renameCountryNames = {'Congo (Brazzaville)': 'Congo1', 'Congo (Kinshasa)': 'Congo2', "Cote d'Ivoire": "Côte d'Ivoire", 'Czechia': 'Czech Republic (Czechia)', 'Korea, South': 'South Korea', 'Saint Kitts and Nevis': 'Saint Kitts & Nevis', 'Saint Vincent and the Grenadines': 'St. Vincent & Grenadines', 'Taiwan*': 'Taiwan', 'US': 'United States'}
train_df.replace({'Country': renameCountryNames}, inplace=True)
test_df.replace({'Country': renameCountryNames}, inplace=True)
specific_countries = ['United States', 'United Kingdom', 'Netherlands']
days_df = train_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
train_df['Days'] = days_df
days_df = test_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
test_df['Days'] = days_df
all_countries = train_df['Country'].unique()
display(train_df.tail())
display(test_df.head())
ww_df = train_df.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
ww_df['new_case'] = ww_df['ConfirmedCases'] - ww_df['ConfirmedCases'].shift(1)
ww_df['new_deaths'] = ww_df['Fatalities'] - ww_df['Fatalities'].shift(1)
country_df = train_df.groupby(['Date', 'Country'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
target_date = country_df['Date'].max()
train_end_day = train_df['Days'].max()
test_start_day = test_df['Days'].min()
test_end_day = test_df['Days'].max()
display(country_df[country_df['Country'] == 'France'][80:]) | code |
32068083/cell_11 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_html_output_3.png"
] | import datetime
import os
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from plotly import tools, subplots
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
import plotly.io as pio
pio.templates.default = 'plotly_dark'
train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df = train_df.drop(['Id'], axis=1)
train_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
renameCountryNames = {'Congo (Brazzaville)': 'Congo1', 'Congo (Kinshasa)': 'Congo2', "Cote d'Ivoire": "Côte d'Ivoire", 'Czechia': 'Czech Republic (Czechia)', 'Korea, South': 'South Korea', 'Saint Kitts and Nevis': 'Saint Kitts & Nevis', 'Saint Vincent and the Grenadines': 'St. Vincent & Grenadines', 'Taiwan*': 'Taiwan', 'US': 'United States'}
train_df.replace({'Country': renameCountryNames}, inplace=True)
test_df.replace({'Country': renameCountryNames}, inplace=True)
specific_countries = ['United States', 'United Kingdom', 'Netherlands']
days_df = train_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
train_df['Days'] = days_df
days_df = test_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
test_df['Days'] = days_df
all_countries = train_df['Country'].unique()
ww_df = train_df.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
ww_df['new_case'] = ww_df['ConfirmedCases'] - ww_df['ConfirmedCases'].shift(1)
ww_df['new_deaths'] = ww_df['Fatalities'] - ww_df['Fatalities'].shift(1)
country_df = train_df.groupby(['Date', 'Country'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
target_date = country_df['Date'].max()
train_end_day = train_df['Days'].max()
test_start_day = test_df['Days'].min()
test_end_day = test_df['Days'].max()
py.init_notebook_mode()
top_country_df = country_df.query('(Date == @target_date) & (ConfirmedCases > 2000)').sort_values('ConfirmedCases', ascending=False)
print(len(top_country_df))
top_country_melt_df = pd.melt(top_country_df, id_vars='Country', value_vars=['ConfirmedCases', 'Fatalities'])
display(top_country_df.head())
display(top_country_melt_df.head())
fig = px.bar(top_country_melt_df.iloc[::-1],
x='value', y='Country', color='variable', barmode='group',
title=f'Confirmed Cases/Deaths on {target_date}', text='value', height=1500, orientation='h')
fig.show()
country_province_df = train_df[train_df['Country'] == 'United States'].groupby(['Date', 'Province_State'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
top_province_df = country_province_df.query('(Date == @target_date)').sort_values('ConfirmedCases', ascending=False)
top30_provinces = top_province_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Province_State'].unique()
country_province_df['prev_cases'] = country_province_df.groupby('Province_State')['ConfirmedCases'].shift(1)
country_province_df['New Case'] = country_province_df['ConfirmedCases'] - country_province_df['prev_cases']
country_province_df['New Case'].fillna(0, inplace=True)
country_province_df['prev_deaths'] = country_province_df.groupby('Province_State')['Fatalities'].shift(1)
country_province_df['New Death'] = country_province_df['Fatalities'] - country_province_df['prev_deaths']
country_province_df['New Death'].fillna(0, inplace=True)
for province in top30_provinces:
present_country_df = country_province_df[country_province_df['Province_State'] == province]
top30_countries = top_country_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Country'].unique()
country_df['prev_cases'] = country_df.groupby('Country')['ConfirmedCases'].shift(1)
country_df['New Case'] = country_df['ConfirmedCases'] - country_df['prev_cases']
country_df['New Case'].fillna(0, inplace=True)
country_df['prev_deaths'] = country_df.groupby('Country')['Fatalities'].shift(1)
country_df['New Death'] = country_df['Fatalities'] - country_df['prev_deaths']
country_df['New Death'].fillna(0, inplace=True)
top30_country_df = country_df[country_df['Country'].isin(top30_countries)]
for country in top30_countries:
present_country_df = top30_country_df[top30_country_df['Country'] == country]
def get_time_series(df, country_name, insert=False):
    # countries with several provinces: either aggregate them per date
    # (insert=True) or return the per-province rows
    if df[df['Country'] == country_name]['Province_State'].nunique() > 1:
        country_table = df[df['Country'] == country_name]
        if insert:
            country_df = pd.DataFrame(pd.pivot_table(country_table, values=['ConfirmedCases', 'Fatalities', 'Days'], index='Date', aggfunc=sum).to_records())
            return country_df.set_index('Date')[['ConfirmedCases', 'Fatalities']]
        return country_table.set_index('Date')[['Province_State', 'ConfirmedCases', 'Fatalities', 'Days']]
    # single-province country: one row per date
    df = df[df['Country'] == country_name]
    return df.set_index('Date')[['ConfirmedCases', 'Fatalities', 'Days']]
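# Hedged usage sketch: get_time_series(train_df, 'France') returns the
# per-province rows for France, while insert=True would aggregate the
# provinces into a single ConfirmedCases/Fatalities series per date.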
def get_time_series_province(province):
    # NOTE: full_table is not defined in this notebook; this helper is unused here
    df = full_table[full_table['Province_State'] == province]
    return df.set_index('Date')[['ConfirmedCases', 'Fatalities']]
province_country_dfs = {}
no_province_country_dfs = {}
absent_country_in_age_data_dfs = {}
province_country_dfs_list = []
no_province_country_dfs_list = []
absent_country_in_age_data_dfs_list = []
province_countries = train_df[train_df['Province_State'] != 'None*']['Country'].unique()
no_province_countries = train_df[train_df['Province_State'] == 'None*']['Country'].unique()
no_province_countries = [x for x in no_province_countries if x not in province_countries]
for country in province_countries:
province_country_dfs[country] = get_time_series(train_df, country)
for country in no_province_countries:
no_province_country_dfs[country] = get_time_series(train_df, country)
assert len([x for x in all_countries if x not in list(no_province_countries) + list(province_countries)]) == 0
display(train_df[train_df['Country'] == 'Afghanistan'].tail())
print(province_countries)
max_train_date = train_df['Date'].max() | code |
32068083/cell_1 | [
"text_plain_output_1.png"
] | import os
import datetime
import numpy as np
import pandas as pd
from plotly import tools, subplots
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
import plotly.io as pio
pio.templates.default = 'plotly_dark'
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32068083/cell_7 | [
"text_plain_output_1.png"
] | import datetime
import os
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from plotly import tools, subplots
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
import plotly.io as pio
pio.templates.default = 'plotly_dark'
train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df = train_df.drop(['Id'], axis=1)
train_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
renameCountryNames = {'Congo (Brazzaville)': 'Congo1', 'Congo (Kinshasa)': 'Congo2', "Cote d'Ivoire": "Côte d'Ivoire", 'Czechia': 'Czech Republic (Czechia)', 'Korea, South': 'South Korea', 'Saint Kitts and Nevis': 'Saint Kitts & Nevis', 'Saint Vincent and the Grenadines': 'St. Vincent & Grenadines', 'Taiwan*': 'Taiwan', 'US': 'United States'}
train_df.replace({'Country': renameCountryNames}, inplace=True)
test_df.replace({'Country': renameCountryNames}, inplace=True)
specific_countries = ['United States', 'United Kingdom', 'Netherlands']
days_df = train_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
train_df['Days'] = days_df
days_df = test_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
test_df['Days'] = days_df
all_countries = train_df['Country'].unique()
ww_df = train_df.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
ww_df['new_case'] = ww_df['ConfirmedCases'] - ww_df['ConfirmedCases'].shift(1)
ww_df['new_deaths'] = ww_df['Fatalities'] - ww_df['Fatalities'].shift(1)
country_df = train_df.groupby(['Date', 'Country'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
target_date = country_df['Date'].max()
train_end_day = train_df['Days'].max()
test_start_day = test_df['Days'].min()
test_end_day = test_df['Days'].max()
py.init_notebook_mode()
top_country_df = country_df.query('(Date == @target_date) & (ConfirmedCases > 2000)').sort_values('ConfirmedCases', ascending=False)
print(len(top_country_df))
top_country_melt_df = pd.melt(top_country_df, id_vars='Country', value_vars=['ConfirmedCases', 'Fatalities'])
display(top_country_df.head())
display(top_country_melt_df.head())
fig = px.bar(top_country_melt_df.iloc[::-1],
x='value', y='Country', color='variable', barmode='group',
title=f'Confirmed Cases/Deaths on {target_date}', text='value', height=1500, orientation='h')
fig.show()
country_province_df = train_df[train_df['Country'] == 'United States'].groupby(['Date', 'Province_State'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
top_province_df = country_province_df.query('(Date == @target_date)').sort_values('ConfirmedCases', ascending=False)
top30_provinces = top_province_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Province_State'].unique()
country_province_df['prev_cases'] = country_province_df.groupby('Province_State')['ConfirmedCases'].shift(1)
country_province_df['New Case'] = country_province_df['ConfirmedCases'] - country_province_df['prev_cases']
country_province_df['New Case'].fillna(0, inplace=True)
country_province_df['prev_deaths'] = country_province_df.groupby('Province_State')['Fatalities'].shift(1)
country_province_df['New Death'] = country_province_df['Fatalities'] - country_province_df['prev_deaths']
country_province_df['New Death'].fillna(0, inplace=True)
for province in top30_provinces:
present_country_df = country_province_df[country_province_df['Province_State'] == province]
top30_countries = top_country_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Country'].unique()
country_df['prev_cases'] = country_df.groupby('Country')['ConfirmedCases'].shift(1)
country_df['New Case'] = country_df['ConfirmedCases'] - country_df['prev_cases']
country_df['New Case'].fillna(0, inplace=True)
country_df['prev_deaths'] = country_df.groupby('Country')['Fatalities'].shift(1)
country_df['New Death'] = country_df['Fatalities'] - country_df['prev_deaths']
country_df['New Death'].fillna(0, inplace=True)
top30_country_df = country_df[country_df['Country'].isin(top30_countries)]
for country in top30_countries:
present_country_df = top30_country_df[top30_country_df['Country'] == country]
def get_time_series(df, country_name, insert=False):
if df[df['Country'] == country_name]['Province_State'].nunique() > 1:
country_table = df[df['Country'] == country_name]
if insert:
country_df = pd.DataFrame(pd.pivot_table(country_table, values=['ConfirmedCases', 'Fatalities', 'Days'], index='Date', aggfunc=sum).to_records())
return country_df.set_index('Date')[['ConfirmedCases', 'Fatalities']]
return country_table.set_index('Date')[['Province_State', 'ConfirmedCases', 'Fatalities', 'Days']]
df = df[df['Country'] == country_name]
return df.set_index('Date')[['ConfirmedCases', 'Fatalities', 'Days']]
def get_time_series_province(province):
df = full_table[full_table['Province_State'] == province]
return df.set_index('Date')[['ConfirmedCases', 'Fatalities']]
province_country_dfs = {}
no_province_country_dfs = {}
absent_country_in_age_data_dfs = {}
province_country_dfs_list = []
no_province_country_dfs_list = []
absent_country_in_age_data_dfs_list = []
province_countries = train_df[train_df['Province_State'] != 'None*']['Country'].unique()
no_province_countries = train_df[train_df['Province_State'] == 'None*']['Country'].unique()
no_province_countries = [x for x in no_province_countries if x not in province_countries]
for country in province_countries:
province_country_dfs[country] = get_time_series(train_df, country)
for country in no_province_countries:
no_province_country_dfs[country] = get_time_series(train_df, country)
print([x for x in no_province_countries if x in top30_countries])
print([x for x in province_countries if x in top30_countries])
assert len([x for x in all_countries if x not in list(no_province_countries) + list(province_countries)]) == 0
display(province_country_dfs['United States']) | code |
32068083/cell_8 | [
"text_html_output_29.png",
"text_html_output_27.png",
"text_html_output_28.png",
"text_html_output_10.png",
"text_html_output_22.png",
"text_html_output_16.png",
"text_html_output_4.png",
"text_html_output_6.png",
"text_html_output_26.png",
"text_html_output_2.png",
"text_html_output_15.png",
"text_html_output_5.png",
"text_html_output_14.png",
"text_html_output_23.png",
"text_html_output_19.png",
"text_html_output_9.png",
"text_html_output_13.png",
"text_html_output_20.png",
"text_html_output_21.png",
"text_html_output_1.png",
"text_html_output_17.png",
"text_html_output_30.png",
"text_html_output_18.png",
"text_html_output_12.png",
"text_html_output_11.png",
"text_html_output_24.png",
"text_html_output_31.png",
"text_html_output_8.png",
"text_html_output_25.png",
"text_html_output_3.png",
"text_html_output_7.png"
] | import datetime
from numpy import array
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
print(tf.__version__) | code |
32068083/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import datetime
import os
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from plotly import tools, subplots
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
import plotly.io as pio
pio.templates.default = 'plotly_dark'
train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df = train_df.drop(['Id'], axis=1)
train_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
renameCountryNames = {'Congo (Brazzaville)': 'Congo1', 'Congo (Kinshasa)': 'Congo2', "Cote d'Ivoire": "Côte d'Ivoire", 'Czechia': 'Czech Republic (Czechia)', 'Korea, South': 'South Korea', 'Saint Kitts and Nevis': 'Saint Kitts & Nevis', 'Saint Vincent and the Grenadines': 'St. Vincent & Grenadines', 'Taiwan*': 'Taiwan', 'US': 'United States'}
train_df.replace({'Country': renameCountryNames}, inplace=True)
test_df.replace({'Country': renameCountryNames}, inplace=True)
specific_countries = ['United States', 'United Kingdom', 'Netherlands']
days_df = train_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
train_df['Days'] = days_df
days_df = test_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
test_df['Days'] = days_df
all_countries = train_df['Country'].unique()
ww_df = train_df.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
ww_df['new_case'] = ww_df['ConfirmedCases'] - ww_df['ConfirmedCases'].shift(1)
ww_df['new_deaths'] = ww_df['Fatalities'] - ww_df['Fatalities'].shift(1)
country_df = train_df.groupby(['Date', 'Country'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
target_date = country_df['Date'].max()
train_end_day = train_df['Days'].max()
test_start_day = test_df['Days'].min()
test_end_day = test_df['Days'].max()
py.init_notebook_mode()
top_country_df = country_df.query('(Date == @target_date) & (ConfirmedCases > 2000)').sort_values('ConfirmedCases', ascending=False)
print(len(top_country_df))
top_country_melt_df = pd.melt(top_country_df, id_vars='Country', value_vars=['ConfirmedCases', 'Fatalities'])
display(top_country_df.head())
display(top_country_melt_df.head())
fig = px.bar(top_country_melt_df.iloc[::-1], x='value', y='Country', color='variable', barmode='group', title=f'Confirmed Cases/Deaths on {target_date}', text='value', height=1500, orientation='h')
fig.show() | code |
32068083/cell_14 | [
"text_html_output_29.png",
"text_html_output_27.png",
"text_html_output_28.png",
"text_html_output_10.png",
"text_html_output_22.png",
"text_html_output_16.png",
"text_html_output_4.png",
"text_html_output_6.png",
"text_html_output_26.png",
"text_html_output_2.png",
"text_html_output_15.png",
"text_html_output_5.png",
"text_html_output_14.png",
"text_html_output_23.png",
"text_html_output_19.png",
"text_html_output_9.png",
"text_html_output_13.png",
"text_html_output_20.png",
"text_html_output_21.png",
"text_html_output_1.png",
"text_html_output_17.png",
"text_html_output_30.png",
"text_html_output_18.png",
"text_html_output_12.png",
"text_html_output_11.png",
"text_html_output_24.png",
"text_html_output_31.png",
"text_html_output_8.png",
"text_html_output_25.png",
"text_html_output_3.png",
"text_html_output_7.png"
] | from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from numpy import array
import datetime
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from plotly import tools, subplots
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
import plotly.io as pio
pio.templates.default = 'plotly_dark'
train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df = train_df.drop(['Id'], axis=1)
train_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
renameCountryNames = {'Congo (Brazzaville)': 'Congo1', 'Congo (Kinshasa)': 'Congo2', "Cote d'Ivoire": "Côte d'Ivoire", 'Czechia': 'Czech Republic (Czechia)', 'Korea, South': 'South Korea', 'Saint Kitts and Nevis': 'Saint Kitts & Nevis', 'Saint Vincent and the Grenadines': 'St. Vincent & Grenadines', 'Taiwan*': 'Taiwan', 'US': 'United States'}
train_df.replace({'Country': renameCountryNames}, inplace=True)
test_df.replace({'Country': renameCountryNames}, inplace=True)
specific_countries = ['United States', 'United Kingdom', 'Netherlands']
days_df = train_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
train_df['Days'] = days_df
days_df = test_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
test_df['Days'] = days_df
all_countries = train_df['Country'].unique()
ww_df = train_df.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
ww_df['new_case'] = ww_df['ConfirmedCases'] - ww_df['ConfirmedCases'].shift(1)
ww_df['new_deaths'] = ww_df['Fatalities'] - ww_df['Fatalities'].shift(1)
country_df = train_df.groupby(['Date', 'Country'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
target_date = country_df['Date'].max()
train_end_day = train_df['Days'].max()
test_start_day = test_df['Days'].min()
test_end_day = test_df['Days'].max()
py.init_notebook_mode()
top_country_df = country_df.query('(Date == @target_date) & (ConfirmedCases > 2000)').sort_values('ConfirmedCases', ascending=False)
print(len(top_country_df))
top_country_melt_df = pd.melt(top_country_df, id_vars='Country', value_vars=['ConfirmedCases', 'Fatalities'])
display(top_country_df.head())
display(top_country_melt_df.head())
fig = px.bar(top_country_melt_df.iloc[::-1],
x='value', y='Country', color='variable', barmode='group',
title=f'Confirmed Cases/Deaths on {target_date}', text='value', height=1500, orientation='h')
fig.show()
country_province_df = train_df[train_df['Country'] == 'United States'].groupby(['Date', 'Province_State'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
top_province_df = country_province_df.query('(Date == @target_date)').sort_values('ConfirmedCases', ascending=False)
top30_provinces = top_province_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Province_State'].unique()
country_province_df['prev_cases'] = country_province_df.groupby('Province_State')['ConfirmedCases'].shift(1)
country_province_df['New Case'] = country_province_df['ConfirmedCases'] - country_province_df['prev_cases']
country_province_df['New Case'].fillna(0, inplace=True)
country_province_df['prev_deaths'] = country_province_df.groupby('Province_State')['Fatalities'].shift(1)
country_province_df['New Death'] = country_province_df['Fatalities'] - country_province_df['prev_deaths']
country_province_df['New Death'].fillna(0, inplace=True)
for province in top30_provinces:
present_country_df = country_province_df[country_province_df['Province_State'] == province]
top30_countries = top_country_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Country'].unique()
country_df['prev_cases'] = country_df.groupby('Country')['ConfirmedCases'].shift(1)
country_df['New Case'] = country_df['ConfirmedCases'] - country_df['prev_cases']
country_df['New Case'].fillna(0, inplace=True)
country_df['prev_deaths'] = country_df.groupby('Country')['Fatalities'].shift(1)
country_df['New Death'] = country_df['Fatalities'] - country_df['prev_deaths']
country_df['New Death'].fillna(0, inplace=True)
top30_country_df = country_df[country_df['Country'].isin(top30_countries)]
for country in top30_countries:
present_country_df = top30_country_df[top30_country_df['Country'] == country]
def get_time_series(df, country_name, insert=False):
if df[df['Country'] == country_name]['Province_State'].nunique() > 1:
country_table = df[df['Country'] == country_name]
if insert:
country_df = pd.DataFrame(pd.pivot_table(country_table, values=['ConfirmedCases', 'Fatalities', 'Days'], index='Date', aggfunc=sum).to_records())
return country_df.set_index('Date')[['ConfirmedCases', 'Fatalities']]
return country_table.set_index('Date')[['Province_State', 'ConfirmedCases', 'Fatalities', 'Days']]
df = df[df['Country'] == country_name]
return df.set_index('Date')[['ConfirmedCases', 'Fatalities', 'Days']]
def get_time_series_province(province):
df = full_table[full_table['Province_State'] == province]
return df.set_index('Date')[['ConfirmedCases', 'Fatalities']]
province_country_dfs = {}
no_province_country_dfs = {}
absent_country_in_age_data_dfs = {}
province_country_dfs_list = []
no_province_country_dfs_list = []
absent_country_in_age_data_dfs_list = []
province_countries = train_df[train_df['Province_State'] != 'None*']['Country'].unique()
no_province_countries = train_df[train_df['Province_State'] == 'None*']['Country'].unique()
no_province_countries = [x for x in no_province_countries if x not in province_countries]
for country in province_countries:
province_country_dfs[country] = get_time_series(train_df, country)
for country in no_province_countries:
no_province_country_dfs[country] = get_time_series(train_df, country)
assert len([x for x in all_countries if x not in list(no_province_countries) + list(province_countries)]) == 0
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
test_df['ConfirmedCases'] = np.nan
test_df['Fatalities'] = np.nan
test_df = test_df.set_index('Date')
prediction = {}
def build_model(n_steps):
    # two stacked LSTMs over a univariate window of length n_steps,
    # regressing the next value in the series
    model = Sequential()
    model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, 1)))
    model.add(LSTM(50, activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer='RMSprop', loss='mse')
    return model
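# Hedged smoke test (hypothetical shapes): inputs are windows shaped
# (samples, n_steps, 1), so build_model(3).predict(np.zeros((1, 3, 1)))
# would return a single next-step estimate.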
def split_sequence(sequence, n_steps):
    # turn a 1-D series into supervised (window, next-value) pairs
    X, y = (list(), list())
    for i in range(len(sequence)):
        end_ix = i + n_steps
        if end_ix > len(sequence) - 1:
            break
        seq_x, seq_y = (sequence[i:end_ix], sequence[end_ix])
        X.append(seq_x)
        y.append(seq_y)
    return (array(X), array(y))
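# Hedged worked example (illustrative values only):
# split_sequence(array([1, 2, 3, 4, 5]), 3) yields
# X = [[1, 2, 3], [2, 3, 4]] and y = [4, 5]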
for country in province_countries:
current_country_provinces = province_country_dfs[country]['Province_State'].unique()
for province in current_country_provinces:
current_considered_country_df = province_country_dfs[country][province_country_dfs[country]['Province_State'] == province][['ConfirmedCases', 'Fatalities', 'Days']].reset_index()
current_considered_country_df_copy = current_considered_country_df
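# the first test days overlap the training period, so the known
# ground-truth values are copied into test_df for those days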
for i in range(train_end_day - test_start_day + 1):
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + test_start_day), 'ConfirmedCases'] = current_considered_country_df.loc[current_considered_country_df['Days'] == i + test_start_day, 'ConfirmedCases'].values[0]
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + test_start_day), 'Fatalities'] = current_considered_country_df.loc[current_considered_country_df['Days'] == i + test_start_day, 'Fatalities'].values[0]
indexNames = current_considered_country_df[current_considered_country_df['ConfirmedCases'] == 0].index
current_considered_country_df.drop(indexNames, inplace=True)
cases_train = np.diff(current_considered_country_df['ConfirmedCases'].to_numpy())
fatalities_train = np.diff(current_considered_country_df['Fatalities'].to_numpy())
cases_train[cases_train < 0] = 0
fatalities_train[fatalities_train < 0] = 0
fatal_rate = 0.0
if current_considered_country_df['Fatalities'].to_numpy()[-1] > 0:
fatal_rate = current_considered_country_df['Fatalities'].to_numpy()[-1] / current_considered_country_df['ConfirmedCases'].to_numpy()[-1]
cases_increase_avg = 0
days = 0
for i in range(len(cases_train) - 1):
cases_increase_avg += cases_train[i + 1] - cases_train[i]
days += 1
if days > 0:
cases_increase_avg = int(cases_increase_avg / days)
days = 0
fatal_increase_avg = 0
for i in range(len(fatalities_train) - 1):
fatal_increase_avg += fatalities_train[i + 1] - fatalities_train[i]
days += 1
if days > 0:
fatal_increase_avg = int(fatal_increase_avg / days)
del current_considered_country_df
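# LSTM window length: 10% of the available series, but at least 3 steps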
n_steps = max(int(len(cases_train) * 0.1), 3)
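# auxiliary model: forecast a 4-day moving average of daily cases; used
# further down as a fallback for a few provinces with unstable forecasts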
avg_weekly_per_day_case = []
avg_window = 4
avg_step = 2
if int(len(cases_train) / avg_window) > avg_step:
for i in range(int(len(cases_train) / avg_window)):
temp_list = cases_train[i * avg_window:i * avg_window + avg_window]
avg_weekly_per_day_case.append(np.sum(temp_list) / len(temp_list))
avg_weekly_per_day_case = np.array(avg_weekly_per_day_case)
X_weekly_avg_val, y_weekly_avg_val = split_sequence(avg_weekly_per_day_case, avg_step)
X_weekly_avg_val = np.reshape(X_weekly_avg_val, (X_weekly_avg_val.shape[0], X_weekly_avg_val.shape[1], 1))
model_weekly_avg = build_model(avg_step)
model_weekly_avg.fit(X_weekly_avg_val, y_weekly_avg_val, epochs=50, verbose=0)
new_entry_avg = X_weekly_avg_val[len(X_weekly_avg_val) - 1]
for i in range(int(30 / avg_window) + 1):
weekly_avg_predict_next = model_weekly_avg.predict(np.reshape(new_entry_avg, (1, avg_step, 1)), verbose=0).astype(int)
avg_weekly_per_day_case = np.append(avg_weekly_per_day_case, weekly_avg_predict_next[0])
last_series = np.reshape(new_entry_avg, (1, avg_step, 1))
new_entry_avg = np.delete(last_series, [0])
new_entry_avg = np.insert(new_entry_avg, avg_step - 1, weekly_avg_predict_next[0])
X_cases_val, y_cases_val = split_sequence(cases_train, n_steps)
X_cases_val = np.reshape(X_cases_val, (X_cases_val.shape[0], X_cases_val.shape[1], 1))
X_fatal_val, y_fatal_val = split_sequence(fatalities_train, n_steps)
X_fatal_val = np.reshape(X_fatal_val, (X_fatal_val.shape[0], X_fatal_val.shape[1], 1))
assert len(X_fatal_val) == len(X_cases_val)
assert len(y_fatal_val) == len(y_cases_val)
model_cases = build_model(n_steps)
model_cases.fit(X_cases_val, y_cases_val, epochs=50, verbose=0)
cases_predict_next = model_cases.predict(np.reshape(X_cases_val[len(X_cases_val) - 1], (1, n_steps, 1)), verbose=0).astype(int)
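# clamp the prediction to be non-negative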
cases_predict_next[0] = np.array([max(0, cases_predict_next[0])])
model_fatalities = build_model(n_steps)
model_fatalities.fit(X_fatal_val, y_fatal_val, epochs=50, verbose=0)
fatality_predict_next = model_fatalities.predict(np.reshape(X_fatal_val[len(X_fatal_val) - 1], (1, n_steps, 1)), verbose=0).astype(int)
fatality_predict_next[0] = np.array([max(0, fatality_predict_next[0])])
fatality_predict_next[0] = np.array([max(fatality_predict_next[0], cases_predict_next[0] * fatal_rate)])
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == train_end_day + 1), 'ConfirmedCases'] = test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == train_end_day), 'ConfirmedCases'].values[0] + cases_predict_next[0]
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == train_end_day + 1), 'Fatalities'] = test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == train_end_day), 'Fatalities'].values[0] + fatality_predict_next[0]
new_entry_cases = X_cases_val[len(X_cases_val) - 1]
new_entry_fatal = X_fatal_val[len(X_fatal_val) - 1]
for i in range(test_end_day - train_end_day - 1):
last_series = np.reshape(new_entry_cases, (1, n_steps, 1))
new_entry_cases = np.delete(last_series, [0])
new_entry_cases = np.insert(new_entry_cases, n_steps - 1, cases_predict_next[0])
cases_predict_next = model_cases.predict(np.reshape(new_entry_cases, (1, n_steps, 1)), verbose=0).astype(int)
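# cap day-over-day growth at the historical average increase so the
# recursive forecast cannot explode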
if cases_predict_next[0] - new_entry_cases[n_steps - 1] > cases_increase_avg:
cases_predict_next = np.array([max(0, new_entry_cases[n_steps - 1] + cases_increase_avg)])
if province in ['Kentucky', 'New Mexico', 'Sint Maarten', 'Cayman Islands', 'Isle of Man']:
cases_predict_next[0] = avg_weekly_per_day_case[-int(30 / avg_window) - 1 + int(i / avg_window)]
cases_predict_next[0] = np.array([max(0, cases_predict_next[0])])
last_series = np.reshape(new_entry_fatal, (1, n_steps, 1))
new_entry_fatal = np.delete(last_series, [0])
new_entry_fatal = np.insert(new_entry_fatal, n_steps - 1, fatality_predict_next[0])
fatality_predict_next = model_fatalities.predict(np.reshape(new_entry_fatal, (1, n_steps, 1)), verbose=0).astype(int)
if fatality_predict_next[0] - new_entry_fatal[n_steps - 1] > fatal_increase_avg:
fatality_predict_next[0] = max(0, new_entry_fatal[n_steps - 1] + fatal_increase_avg)
fatality_predict_next[0] = np.array([max(0, fatality_predict_next[0])])
fatality_predict_next[0] = np.array([max(fatality_predict_next[0], int(cases_predict_next[0] * fatal_rate))])
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + train_end_day + 2), 'ConfirmedCases'] = test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + train_end_day + 1), 'ConfirmedCases'].values[0] + cases_predict_next[0]
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + train_end_day + 2), 'Fatalities'] = test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + train_end_day + 1), 'Fatalities'].values[0] + fatality_predict_next[0]
del model_fatalities
del model_cases
country_province_df = test_df[test_df['Country'] == 'United States'].groupby(['Date', 'Province_State'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
country_province_df['prev_cases'] = country_province_df.groupby('Province_State')['ConfirmedCases'].shift(1)
country_province_df['New Case'] = country_province_df['ConfirmedCases'] - country_province_df['prev_cases']
country_province_df['New Case'].fillna(0, inplace=True)
country_province_df['prev_deaths'] = country_province_df.groupby('Province_State')['Fatalities'].shift(1)
country_province_df['New Death'] = country_province_df['Fatalities'] - country_province_df['prev_deaths']
country_province_df['New Death'].fillna(0, inplace=True)
display(country_province_df.head())
for province in top30_provinces:
    present_country_df = country_province_df[country_province_df['Province_State'] == province]
    px.bar(present_country_df, x='Date', y='New Case', color='Province_State', title=f'United States : DAILY NEW Confirmed cases in {province}').show() | code |
32068083/cell_12 | [
"text_html_output_4.png",
"text_html_output_2.png",
"text_plain_output_1.png",
"text_html_output_3.png"
] | from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from numpy import array
import datetime
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from plotly import tools, subplots
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
import plotly.io as pio
pio.templates.default = 'plotly_dark'
train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df = train_df.drop(['Id'], axis=1)
train_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
renameCountryNames = {'Congo (Brazzaville)': 'Congo1', 'Congo (Kinshasa)': 'Congo2', "Cote d'Ivoire": "Côte d'Ivoire", 'Czechia': 'Czech Republic (Czechia)', 'Korea, South': 'South Korea', 'Saint Kitts and Nevis': 'Saint Kitts & Nevis', 'Saint Vincent and the Grenadines': 'St. Vincent & Grenadines', 'Taiwan*': 'Taiwan', 'US': 'United States'}
train_df.replace({'Country': renameCountryNames}, inplace=True)
test_df.replace({'Country': renameCountryNames}, inplace=True)
specific_countries = ['United States', 'United Kingdom', 'Netherlands']
days_df = train_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
train_df['Days'] = days_df
days_df = test_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
test_df['Days'] = days_df
all_countries = train_df['Country'].unique()
ww_df = train_df.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
ww_df['new_case'] = ww_df['ConfirmedCases'] - ww_df['ConfirmedCases'].shift(1)
ww_df['new_deaths'] = ww_df['Fatalities'] - ww_df['Fatalities'].shift(1)
country_df = train_df.groupby(['Date', 'Country'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
target_date = country_df['Date'].max()
train_end_day = train_df['Days'].max()
test_start_day = test_df['Days'].min()
test_end_day = test_df['Days'].max()
py.init_notebook_mode()
top_country_df = country_df.query('(Date == @target_date) & (ConfirmedCases > 2000)').sort_values('ConfirmedCases', ascending=False)
print(len(top_country_df))
top_country_melt_df = pd.melt(top_country_df, id_vars='Country', value_vars=['ConfirmedCases', 'Fatalities'])
display(top_country_df.head())
display(top_country_melt_df.head())
fig = px.bar(top_country_melt_df.iloc[::-1],
x='value', y='Country', color='variable', barmode='group',
title=f'Confirmed Cases/Deaths on {target_date}', text='value', height=1500, orientation='h')
fig.show()
country_province_df = train_df[train_df['Country'] == 'United States'].groupby(['Date', 'Province_State'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
top_province_df = country_province_df.query('(Date == @target_date)').sort_values('ConfirmedCases', ascending=False)
top30_provinces = top_province_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Province_State'].unique()
country_province_df['prev_cases'] = country_province_df.groupby('Province_State')['ConfirmedCases'].shift(1)
country_province_df['New Case'] = country_province_df['ConfirmedCases'] - country_province_df['prev_cases']
country_province_df['New Case'].fillna(0, inplace=True)
country_province_df['prev_deaths'] = country_province_df.groupby('Province_State')['Fatalities'].shift(1)
country_province_df['New Death'] = country_province_df['Fatalities'] - country_province_df['prev_deaths']
country_province_df['New Death'].fillna(0, inplace=True)
for province in top30_provinces:
present_country_df = country_province_df[country_province_df['Province_State'] == province]
top30_countries = top_country_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Country'].unique()
country_df['prev_cases'] = country_df.groupby('Country')['ConfirmedCases'].shift(1)
country_df['New Case'] = country_df['ConfirmedCases'] - country_df['prev_cases']
country_df['New Case'].fillna(0, inplace=True)
country_df['prev_deaths'] = country_df.groupby('Country')['Fatalities'].shift(1)
country_df['New Death'] = country_df['Fatalities'] - country_df['prev_deaths']
country_df['New Death'].fillna(0, inplace=True)
top30_country_df = country_df[country_df['Country'].isin(top30_countries)]
for country in top30_countries:
present_country_df = top30_country_df[top30_country_df['Country'] == country]
def get_time_series(df, country_name, insert=False):
if df[df['Country'] == country_name]['Province_State'].nunique() > 1:
country_table = df[df['Country'] == country_name]
if insert:
country_df = pd.DataFrame(pd.pivot_table(country_table, values=['ConfirmedCases', 'Fatalities', 'Days'], index='Date', aggfunc=sum).to_records())
return country_df.set_index('Date')[['ConfirmedCases', 'Fatalities']]
return country_table.set_index('Date')[['Province_State', 'ConfirmedCases', 'Fatalities', 'Days']]
df = df[df['Country'] == country_name]
return df.set_index('Date')[['ConfirmedCases', 'Fatalities', 'Days']]
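# note: `full_table` in the helper below is assumed to be defined in an earlier cell of the source notebook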
def get_time_series_province(province):
df = full_table[full_table['Province_State'] == province]
return df.set_index('Date')[['ConfirmedCases', 'Fatalities']]
province_country_dfs = {}
no_province_country_dfs = {}
absent_country_in_age_data_dfs = {}
province_country_dfs_list = []
no_province_country_dfs_list = []
absent_country_in_age_data_dfs_list = []
province_countries = train_df[train_df['Province_State'] != 'None*']['Country'].unique()
no_province_countries = train_df[train_df['Province_State'] == 'None*']['Country'].unique()
no_province_countries = [x for x in no_province_countries if x not in province_countries]
for country in province_countries:
province_country_dfs[country] = get_time_series(train_df, country)
for country in no_province_countries:
no_province_country_dfs[country] = get_time_series(train_df, country)
assert len([x for x in all_countries if x not in list(no_province_countries) + list(province_countries)]) == 0
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
test_df['ConfirmedCases'] = np.nan
test_df['Fatalities'] = np.nan
test_df = test_df.set_index('Date')
prediction = {}
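# two-layer stacked LSTM regressor; n_steps is the length of the look-back window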
def build_model(n_steps):
model = Sequential()
model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, 1)))
model.add(LSTM(50, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='RMSprop', loss='mse')
return model
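# slice a 1-D series into supervised (X, y) pairs using a sliding window of n_steps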
def split_sequence(sequence, n_steps):
X, y = (list(), list())
for i in range(len(sequence)):
end_ix = i + n_steps
if end_ix > len(sequence) - 1:
break
seq_x, seq_y = (sequence[i:end_ix], sequence[end_ix])
X.append(seq_x)
y.append(seq_y)
return (array(X), array(y))
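# per-province loop: fit LSTMs on the daily increments of cases and fatalities,
# then roll the forecasts forward over the test horizon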
for country in province_countries:
current_country_provinces = province_country_dfs[country]['Province_State'].unique()
for province in current_country_provinces:
current_considered_country_df = province_country_dfs[country][province_country_dfs[country]['Province_State'] == province][['ConfirmedCases', 'Fatalities', 'Days']].reset_index()
print(country + ' ' + province)
current_considered_country_df_copy = current_considered_country_df
for i in range(train_end_day - test_start_day + 1):
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + test_start_day), 'ConfirmedCases'] = current_considered_country_df.loc[current_considered_country_df['Days'] == i + test_start_day, 'ConfirmedCases'].values[0]
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + test_start_day), 'Fatalities'] = current_considered_country_df.loc[current_considered_country_df['Days'] == i + test_start_day, 'Fatalities'].values[0]
indexNames = current_considered_country_df[current_considered_country_df['ConfirmedCases'] == 0].index
current_considered_country_df.drop(indexNames, inplace=True)
cases_train = np.diff(current_considered_country_df['ConfirmedCases'].to_numpy())
fatalities_train = np.diff(current_considered_country_df['Fatalities'].to_numpy())
cases_train[cases_train < 0] = 0
fatalities_train[fatalities_train < 0] = 0
fatal_rate = 0.0
if current_considered_country_df['Fatalities'].to_numpy()[-1] > 0:
fatal_rate = current_considered_country_df['Fatalities'].to_numpy()[-1] / current_considered_country_df['ConfirmedCases'].to_numpy()[-1]
print('fatal rate is: ' + str(fatal_rate))
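# average day-over-day change in new cases/deaths, used later to cap runaway predictions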
cases_increase_avg = 0
days = 0
for i in range(len(cases_train) - 1):
cases_increase_avg += cases_train[i + 1] - cases_train[i]
days += 1
if days > 0:
cases_increase_avg = int(cases_increase_avg / days)
days = 0
fatal_increase_avg = 0
for i in range(len(fatalities_train) - 1):
fatal_increase_avg += fatalities_train[i + 1] - fatalities_train[i]
days += 1
if days > 0:
fatal_increase_avg = int(fatal_increase_avg / days)
del current_considered_country_df
n_steps = max(int(len(cases_train) * 0.1), 3)
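# coarse 4-day averages of new cases; a second LSTM over this series serves as a
# fallback forecast for a handful of provinces further below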
avg_weekly_per_day_case = []
avg_window = 4
avg_step = 2
if int(len(cases_train) / avg_window) > avg_step:
for i in range(int(len(cases_train) / avg_window)):
temp_list = cases_train[i * avg_window:i * avg_window + avg_window]
avg_weekly_per_day_case.append(np.sum(temp_list) / len(temp_list))
avg_weekly_per_day_case = np.array(avg_weekly_per_day_case)
X_weekly_avg_val, y_weekly_avg_val = split_sequence(avg_weekly_per_day_case, avg_step)
X_weekly_avg_val = np.reshape(X_weekly_avg_val, (X_weekly_avg_val.shape[0], X_weekly_avg_val.shape[1], 1))
model_weekly_avg = build_model(avg_step)
model_weekly_avg.fit(X_weekly_avg_val, y_weekly_avg_val, epochs=50, verbose=0)
new_entry_avg = X_weekly_avg_val[len(X_weekly_avg_val) - 1]
for i in range(int(30 / avg_window) + 1):
weekly_avg_predict_next = model_weekly_avg.predict(np.reshape(new_entry_avg, (1, avg_step, 1)), verbose=0).astype(int)
avg_weekly_per_day_case = np.append(avg_weekly_per_day_case, weekly_avg_predict_next[0])
last_series = np.reshape(new_entry_avg, (1, avg_step, 1))
new_entry_avg = np.delete(last_series, [0])
new_entry_avg = np.insert(new_entry_avg, avg_step - 1, weekly_avg_predict_next[0])
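# main models: one LSTM for daily new cases, one for daily new fatalities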
X_cases_val, y_cases_val = split_sequence(cases_train, n_steps)
X_cases_val = np.reshape(X_cases_val, (X_cases_val.shape[0], X_cases_val.shape[1], 1))
X_fatal_val, y_fatal_val = split_sequence(fatalities_train, n_steps)
X_fatal_val = np.reshape(X_fatal_val, (X_fatal_val.shape[0], X_fatal_val.shape[1], 1))
assert len(X_fatal_val) == len(X_cases_val)
assert len(y_fatal_val) == len(y_cases_val)
model_cases = build_model(n_steps)
model_cases.fit(X_cases_val, y_cases_val, epochs=50, verbose=0)
cases_predict_next = model_cases.predict(np.reshape(X_cases_val[len(X_cases_val) - 1], (1, n_steps, 1)), verbose=0).astype(int)
cases_predict_next[0] = np.array([max(0, cases_predict_next[0])])
model_fatalities = build_model(n_steps)
model_fatalities.fit(X_fatal_val, y_fatal_val, epochs=50, verbose=0)
fatality_predict_next = model_fatalities.predict(np.reshape(X_fatal_val[len(X_fatal_val) - 1], (1, n_steps, 1)), verbose=0).astype(int)
fatality_predict_next[0] = np.array([max(0, fatality_predict_next[0])])
fatality_predict_next[0] = np.array([max(fatality_predict_next[0], cases_predict_next[0] * fatal_rate)])
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == train_end_day + 1), 'ConfirmedCases'] = test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == train_end_day), 'ConfirmedCases'].values[0] + cases_predict_next[0]
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == train_end_day + 1), 'Fatalities'] = test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == train_end_day), 'Fatalities'].values[0] + fatality_predict_next[0]
new_entry_cases = X_cases_val[len(X_cases_val) - 1]
new_entry_fatal = X_fatal_val[len(X_fatal_val) - 1]
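# roll forward one day at a time, feeding each prediction back into the input window
# and capping implausible jumps with the historical average increase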
for i in range(test_end_day - train_end_day - 1):
last_series = np.reshape(new_entry_cases, (1, n_steps, 1))
new_entry_cases = np.delete(last_series, [0])
new_entry_cases = np.insert(new_entry_cases, n_steps - 1, cases_predict_next[0])
cases_predict_next = model_cases.predict(np.reshape(new_entry_cases, (1, n_steps, 1)), verbose=0).astype(int)
if cases_predict_next[0] - new_entry_cases[n_steps - 1] > cases_increase_avg:
cases_predict_next = np.array([max(0, new_entry_cases[n_steps - 1] + cases_increase_avg)])
if province in ['Kentucky', 'New Mexico', 'Sint Maarten', 'Cayman Islands', 'Isle of Man']:
cases_predict_next[0] = avg_weekly_per_day_case[-int(30 / avg_window) - 1 + int(i / avg_window)]
cases_predict_next[0] = np.array([max(0, cases_predict_next[0])])
last_series = np.reshape(new_entry_fatal, (1, n_steps, 1))
new_entry_fatal = np.delete(last_series, [0])
new_entry_fatal = np.insert(new_entry_fatal, n_steps - 1, fatality_predict_next[0])
fatality_predict_next = model_fatalities.predict(np.reshape(new_entry_fatal, (1, n_steps, 1)), verbose=0).astype(int)
if fatality_predict_next[0] - new_entry_fatal[n_steps - 1] > fatal_increase_avg:
fatality_predict_next[0] = max(0, new_entry_fatal[n_steps - 1] + fatal_increase_avg)
fatality_predict_next[0] = np.array([max(0, fatality_predict_next[0])])
fatality_predict_next[0] = np.array([max(fatality_predict_next[0], int(cases_predict_next[0] * fatal_rate))])
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + train_end_day + 2), 'ConfirmedCases'] = test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + train_end_day + 1), 'ConfirmedCases'].values[0] + cases_predict_next[0]
test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + train_end_day + 2), 'Fatalities'] = test_df.loc[(test_df['Country'] == country) & (test_df['Province_State'] == province) & (test_df['Days'] == i + train_end_day + 1), 'Fatalities'].values[0] + fatality_predict_next[0]
del model_fatalities
del model_cases | code |
32068083/cell_5 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import datetime
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.io as pio
import plotly.offline as py
import numpy as np
import pandas as pd
from plotly import tools, subplots
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
import plotly.io as pio
pio.templates.default = 'plotly_dark'
import os
import datetime
train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df = train_df.drop(['Id'], axis=1)
train_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_df.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_df['Province_State'].fillna('None*', inplace=True)
test_df['Province_State'].fillna('None*', inplace=True)
renameCountryNames = {'Congo (Brazzaville)': 'Congo1', 'Congo (Kinshasa)': 'Congo2', "Cote d'Ivoire": "Côte d'Ivoire", 'Czechia': 'Czech Republic (Czechia)', 'Korea, South': 'South Korea', 'Saint Kitts and Nevis': 'Saint Kitts & Nevis', 'Saint Vincent and the Grenadines': 'St. Vincent & Grenadines', 'Taiwan*': 'Taiwan', 'US': 'United States'}
train_df.replace({'Country': renameCountryNames}, inplace=True)
test_df.replace({'Country': renameCountryNames}, inplace=True)
specific_countries = ['United States', 'United Kingdom', 'Netherlands']
days_df = train_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
train_df['Days'] = days_df
days_df = test_df['Date'].apply(lambda dt: datetime.datetime.strptime(dt, '%Y-%m-%d') - datetime.datetime.strptime('2020-01-21', '%Y-%m-%d')).apply(lambda x: str(x).split()[0]).astype(int)
test_df['Days'] = days_df
all_countries = train_df['Country'].unique()
ww_df = train_df.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
ww_df['new_case'] = ww_df['ConfirmedCases'] - ww_df['ConfirmedCases'].shift(1)
ww_df['new_deaths'] = ww_df['Fatalities'] - ww_df['Fatalities'].shift(1)
country_df = train_df.groupby(['Date', 'Country'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
target_date = country_df['Date'].max()
train_end_day = train_df['Days'].max()
test_start_day = test_df['Days'].min()
test_end_day = test_df['Days'].max()
py.init_notebook_mode()
top_country_df = country_df.query('(Date == @target_date) & (ConfirmedCases > 2000)').sort_values('ConfirmedCases', ascending=False)
print(len(top_country_df))
top_country_melt_df = pd.melt(top_country_df, id_vars='Country', value_vars=['ConfirmedCases', 'Fatalities'])
display(top_country_df.head())
display(top_country_melt_df.head())
fig = px.bar(top_country_melt_df.iloc[::-1],
x='value', y='Country', color='variable', barmode='group',
title=f'Confirmed Cases/Deaths on {target_date}', text='value', height=1500, orientation='h')
fig.show()
country_province_df = train_df[train_df['Country'] == 'United States'].groupby(['Date', 'Province_State'])[['ConfirmedCases', 'Fatalities']].sum().reset_index()
top_province_df = country_province_df.query('(Date == @target_date)').sort_values('ConfirmedCases', ascending=False)
top30_provinces = top_province_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Province_State'].unique()
country_province_df['prev_cases'] = country_province_df.groupby('Province_State')['ConfirmedCases'].shift(1)
country_province_df['New Case'] = country_province_df['ConfirmedCases'] - country_province_df['prev_cases']
country_province_df['New Case'].fillna(0, inplace=True)
country_province_df['prev_deaths'] = country_province_df.groupby('Province_State')['Fatalities'].shift(1)
country_province_df['New Death'] = country_province_df['Fatalities'] - country_province_df['prev_deaths']
country_province_df['New Death'].fillna(0, inplace=True)
for province in top30_provinces:
present_country_df = country_province_df[country_province_df['Province_State'] == province]
top30_countries = top_country_df.sort_values('ConfirmedCases', ascending=False).iloc[:30]['Country'].unique()
display(country_df[:20])
country_df['prev_cases'] = country_df.groupby('Country')['ConfirmedCases'].shift(1)
country_df['New Case'] = country_df['ConfirmedCases'] - country_df['prev_cases']
country_df['New Case'].fillna(0, inplace=True)
country_df['prev_deaths'] = country_df.groupby('Country')['Fatalities'].shift(1)
country_df['New Death'] = country_df['Fatalities'] - country_df['prev_deaths']
country_df['New Death'].fillna(0, inplace=True)
top30_country_df = country_df[country_df['Country'].isin(top30_countries)]
display(country_df[:10])
for country in top30_countries:
present_country_df = top30_country_df[top30_country_df['Country'] == country]
px.bar(present_country_df, x='Date', y='New Case', color='Country', title=f'DAILY NEW Confirmed cases in {country}').show() | code
104117830/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated() | code |
104117830/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
print('=======================')
print('=========== Details for Credit Record Rows x Columns =============')
print(credit_record.shape)
print('=======================')
print(credit_record.head(5))
print('=======================')
print(credit_record.describe())
print('=======================')
credit_record.info()
print('=======================')
print(credit_record.dtypes)
print('=======================') | code |
104117830/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape
credit_record.nunique()
sns.countplot(x='STATUS', data=credit_record) | code |
104117830/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape
application_record.duplicated()
application_record.nunique()
credit_record.nunique()
application_record[application_record.duplicated()].shape
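# left join keeps every application row and attaches its credit-status records where available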
Merged_Data = pd.merge(application_record, credit_record, how='left', on='ID')
Merged_Data.shape
Merged_Data.duplicated()
Merged_Data.nunique()
Merged_Data.isnull().sum()
Merged_Data[Merged_Data.duplicated()].shape | code |
104117830/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape
application_record.duplicated()
application_record.nunique()
credit_record.nunique()
application_record[application_record.duplicated()].shape
Merged_Data = pd.merge(application_record, credit_record, how='left', on='ID')
Merged_Data.shape
Merged_Data.duplicated()
Merged_Data.nunique()
Merged_Data.isnull().sum() | code |
104117830/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape
application_record.duplicated()
application_record.nunique()
credit_record.nunique()
application_record[application_record.duplicated()].shape
Merged_Data = pd.merge(application_record, credit_record, how='left', on='ID')
Merged_Data.shape | code |
104117830/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
print('======================')
print('Unique Values of STATUS')
print(credit_record['STATUS'].unique())
print(credit_record['STATUS'].nunique())
print('======================') | code |
104117830/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape
credit_record.nunique()
credit_record['STATUS'].value_counts() | code |
104117830/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104117830/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
print('=======================')
print('=========== Details for Application Record Rows x Columns =============')
print(application_record.shape)
print('=======================')
print(application_record.head(5))
print('=======================')
print(application_record.describe())
print('=======================')
application_record.info()
print('=======================')
print(application_record.dtypes)
print('=======================') | code |
104117830/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record.duplicated()
application_record.nunique()
application_record[application_record.duplicated()].shape | code |
104117830/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape
application_record.duplicated()
application_record.nunique()
credit_record.nunique()
application_record[application_record.duplicated()].shape
Merged_Data = pd.merge(application_record, credit_record, how='left', on='ID')
Merged_Data.shape
Merged_Data.duplicated()
Merged_Data.nunique() | code |
104117830/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
print('======================')
print('Unique Values of CODE_GENDER')
print(application_record['CODE_GENDER'].unique())
print(application_record['CODE_GENDER'].nunique())
print('======================')
print('Unique Values of FLAG_OWN_CAR')
print(application_record['FLAG_OWN_CAR'].unique())
print(application_record['FLAG_OWN_CAR'].nunique())
print('======================')
print('Unique Values of FLAG_OWN_REALTY')
print(application_record['FLAG_OWN_REALTY'].unique())
print(application_record['FLAG_OWN_REALTY'].nunique())
print('======================')
print('Unique Values of NAME_INCOME_TYPE')
print(application_record['NAME_INCOME_TYPE'].unique())
print(application_record['NAME_INCOME_TYPE'].nunique())
print('======================')
print('Unique Values of NAME_EDUCATION_TYPE')
print(application_record['NAME_EDUCATION_TYPE'].unique())
print(application_record['NAME_EDUCATION_TYPE'].nunique())
print('======================')
print('Unique Values of NAME_FAMILY_STATUS')
print(application_record['NAME_FAMILY_STATUS'].unique())
print(application_record['NAME_FAMILY_STATUS'].nunique())
print('======================')
print('Unique Values of NAME_HOUSING_TYPE')
print(application_record['NAME_HOUSING_TYPE'].unique())
print(application_record['NAME_HOUSING_TYPE'].nunique())
print('======================')
print(application_record['OCCUPATION_TYPE'].unique())
print(application_record['OCCUPATION_TYPE'].nunique())
print('======================') | code |
104117830/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record.duplicated() | code |
104117830/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record.duplicated()
application_record.nunique() | code |
104117830/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape
credit_record.nunique() | code |
104117830/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape
application_record.duplicated()
application_record.nunique()
credit_record.nunique()
application_record[application_record.duplicated()].shape
Merged_Data = pd.merge(application_record, credit_record, how='left', on='ID')
Merged_Data.shape
Merged_Data.duplicated()
Merged_Data.nunique()
Merged_Data.isnull().sum()
Merged_Data[Merged_Data.duplicated()].shape
print('=======================')
print('=========== Details for Merged_Data :: Rows x Columns =============')
print(Merged_Data.shape)
print('=======================')
print(Merged_Data.head(5))
print('=======================')
print(Merged_Data.describe)
print('=======================')
print(Merged_Data.info)
print('=======================')
print(Merged_Data.dtypes)
print('=======================') | code |
104117830/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape | code |
104117830/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape
credit_record.nunique()
credit_record['STATUS'].value_counts() | code |
104117830/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
print('=====================================')
print('=========== Null for Application Record ============')
print(application_record.isnull().sum())
print('=========== Null for Credit Record ============')
print(credit_record.isnull().sum())
print('=====================================') | code |
104117830/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
application_record = pd.read_csv('../input/creditcard/application_record.csv')
credit_record = pd.read_csv('../input/creditcard/credit_record.csv')
credit_record.duplicated()
credit_record[credit_record.duplicated()].shape
application_record.duplicated()
application_record.nunique()
credit_record.nunique()
application_record[application_record.duplicated()].shape
Merged_Data = pd.merge(application_record, credit_record, how='left', on='ID')
Merged_Data.shape
Merged_Data.duplicated() | code |
1005954/cell_4 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/database.csv', low_memory=False)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(13, 15))
crims_by_relationship = data[['Relationship', 'Record ID']].groupby('Relationship').count()
crimes_per_perpetrator_race = data[['Perpetrator Race', 'Record ID']].groupby('Perpetrator Race').count()
crimes_per_victime_race = data[['Victim Race', 'Record ID']].groupby('Victim Race').count()
crimes_per_type = data[['Crime Type', 'Record ID']].groupby('Crime Type').count()
crimes_per_perpetrator_race.plot(kind='bar', ax=ax1, title='crimes per perpetrator race')
crimes_per_victime_race.plot(kind='bar', ax=ax2, title='crimes per victime race')
crims_by_relationship.plot(kind='bar', ax=ax3, title='crimes by relationship')
crimes_per_type.plot(kind='bar', ax=ax4, title='crimes types')
data1 = data[['Relationship', 'Year', 'Record ID']].groupby(['Relationship', 'Year']).count().reset_index()
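# compare the yearly number of cases where the victim was a wife vs an acquaintance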
plt.plot(data1[data1.Relationship == 'Wife']['Year'].tolist(), data1[data1.Relationship == 'Wife']['Record ID'].tolist(), data1[data1.Relationship == 'Acquaintance']['Year'].tolist(), data1[data1.Relationship == 'Acquaintance']['Record ID'].tolist())
plt.show() | code |
1005954/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/database.csv', low_memory=False)
data.head() | code |
1005954/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
1005954/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/database.csv', low_memory=False)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(13, 15))
crims_by_relationship = data[['Relationship', 'Record ID']].groupby('Relationship').count()
crimes_per_perpetrator_race = data[['Perpetrator Race', 'Record ID']].groupby('Perpetrator Race').count()
crimes_per_victime_race = data[['Victim Race', 'Record ID']].groupby('Victim Race').count()
crimes_per_type = data[['Crime Type', 'Record ID']].groupby('Crime Type').count()
crimes_per_perpetrator_race.plot(kind='bar', ax=ax1, title='crimes per perpetrator race')
crimes_per_victime_race.plot(kind='bar', ax=ax2, title='crimes per victime race')
crims_by_relationship.plot(kind='bar', ax=ax3, title='crimes by relationship')
crimes_per_type.plot(kind='bar', ax=ax4, title='crimes types') | code |
74043801/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import numpy as np
import pandas as pd
import os
df = pd.DataFrame()
import random
random.seed(0)
# note: `dirname` and `filenames` were undefined in this cell as extracted (hence the
# stderr output); as a minimal assumption they are taken from the Kaggle input directory
dirname = '/kaggle/input'
filenames = [f for f in os.listdir(dirname) if f.endswith('.csv')]
for file in random.sample(filenames, 20):
if df.empty:
df = pd.read_csv(os.path.join(dirname, file))
else:
d = pd.read_csv(os.path.join(dirname, file))
df = pd.concat([df, d])
df | code |
311500/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df[df.data_field == 'confirmed_male'].value.plot()
df[df.data_field == 'confirmed_female'].value.plot().legend(('Male', 'Female'), loc='best')
plt.title('Confirmed Male vs Female cases') | code |
311500/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.data_field.unique() | code |
311500/cell_2 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.head(3) | code |
311500/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sbn
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
311500/cell_7 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.data_field.unique()
age_groups = ('confirmed_age_under_1', 'confirmed_age_1-4', 'confirmed_age_5-9', 'confirmed_age_10-14', 'confirmed_age_15-19', 'confirmed_age_20-24', 'confirmed_age_25-34', 'confirmed_age_35-49', 'confirmed_age_50-59', 'confirmed_age_60-64', 'confirmed_age_60_plus')
for i, age_group in enumerate(age_groups):
print(age_group)
print(df[df.data_field == age_group].value)
print('') | code |
311500/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.location.value_counts()[:30].plot(kind='bar', figsize=(12, 7))
plt.title('Number of locations reported - Top 30') | code |
17133658/cell_9 | [
"text_html_output_1.png"
] | train_data = train_data.sample(n=5000)
train_data.shape
train_data.head() | code |
17133658/cell_34 | [
"text_plain_output_1.png"
] | from sklearn import ensemble
import matplotlib.pyplot as plt
import pandas as pd
import xgboost as xgb
xgb = xgb.XGBRFRegressor()
tree = ensemble.RandomForestRegressor()
ada = ensemble.AdaBoostRegressor()
grad = ensemble.GradientBoostingRegressor()
clf = ensemble.AdaBoostRegressor(n_estimators=500, learning_rate=0.5)
selector = clf.fit(X_train, y_train)
feat_imp = pd.Series(clf.feature_importances_, index=X_train.columns.values).sort_values(ascending=False)
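# keep only the 40 most important features according to the fitted AdaBoost model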
features = feat_imp[:40].index
print(features) | code |
17133658/cell_29 | [
"text_html_output_1.png"
] | from sklearn import ensemble
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.pipeline import Pipeline
import scipy as sp
import warnings
import xgboost as xgb
xgb = xgb.XGBRFRegressor()
tree = ensemble.RandomForestRegressor()
ada = ensemble.AdaBoostRegressor()
grad = ensemble.GradientBoostingRegressor()
import scipy as sp
def get_scores_and_params(pipeline, params):
search = RandomizedSearchCV(pipeline, params, cv=3, n_iter=5, scoring='roc_auc', n_jobs=-1, verbose=2)
search.fit(X_train, y_train)
return (search.best_score_, search.best_params_)
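# candidate models paired with their hyper-parameter search spaces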
pipelines = [Pipeline([('xgb', xgb)]), Pipeline([('tree', tree)]), Pipeline([('ada', ada)]), Pipeline([('grad', grad)])]
getd = [{'xgb__max_depth': sp.stats.randint(1, 11), 'xgb__n_estimators': [100, 200, 500, 1000], 'xgb__colsample_bytree': [0.5, 0.6, 0.7, 0.8]}, {'tree__n_estimators': [100, 200, 500, 1000], 'tree__min_samples_split': [2, 4, 8, 10], 'tree__min_samples_leaf': [1, 2, 3, 4]}, {'ada__learning_rate': [0.3, 0.4, 0.5, 0.7, 1], 'ada__n_estimators': [10, 50, 100, 500]}, {'grad__learning_rate': [0.1, 0.2, 0.5, 1], 'grad__max_depth': [3, 5, 7], 'grad__n_estimators': [1, 2, 3, 4]}]
warnings.filterwarnings('ignore')
for i in range(len(pipelines)):
print(get_scores_and_params(pipelines[i], getd[i])) | code |
17133658/cell_39 | [
"image_output_1.png"
] | from sklearn import ensemble
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
import pandas as pd
import xgboost as xgb
xgb = xgb.XGBRFRegressor()
tree = ensemble.RandomForestRegressor()
ada = ensemble.AdaBoostRegressor()
grad = ensemble.GradientBoostingRegressor()
clf = ensemble.AdaBoostRegressor(n_estimators=500, learning_rate=0.5)
selector = clf.fit(X_train, y_train)
feat_imp = pd.Series(clf.feature_importances_, index=X_train.columns.values).sort_values(ascending=False)
features = feat_imp[:40].index
X_train = X_train[features]
X_test = X_test[features]
X_valid = X_valid[features]
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
val_pred = clf.predict(X_valid)
(roc_auc_score(y_test, preds), roc_auc_score(y_valid, val_pred)) | code |
17133658/cell_41 | [
"text_plain_output_1.png"
] | from sklearn import ensemble
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
import pandas as pd
import xgboost as xgb
xgb = xgb.XGBRFRegressor()
tree = ensemble.RandomForestRegressor()
ada = ensemble.AdaBoostRegressor()
grad = ensemble.GradientBoostingRegressor()
clf = ensemble.AdaBoostRegressor(n_estimators=500, learning_rate=0.5)
selector = clf.fit(X_train, y_train)
feat_imp = pd.Series(clf.feature_importances_, index=X_train.columns.values).sort_values(ascending=False)
features = feat_imp[:40].index
X_train = X_train[features]
X_test = X_test[features]
X_valid = X_valid[features]
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
val_pred = clf.predict(X_valid)
(roc_auc_score(y_test, preds), roc_auc_score(y_valid, val_pred))
pros = clf.predict(data_for_sub[features])
sub = pd.DataFrame()
sub['ID'] = test_data['ID']
sub['target'] = pros
sub.to_csv('submission.csv', index=False)
test = pd.read_csv('submission.csv')
test.head() | code |
17133658/cell_19 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | train_data = train_data.sample(n=5000)
train_data.shape
train_data.corr()
train_data.TARGET.value_counts() | code |
17133658/cell_7 | [
"text_plain_output_1.png"
] | train_data = train_data.sample(n=5000)
train_data.shape | code |
17133658/cell_32 | [
"text_plain_output_1.png"
] | from sklearn import ensemble
import matplotlib.pyplot as plt
import pandas as pd
import xgboost as xgb
xgb = xgb.XGBRFRegressor()
tree = ensemble.RandomForestRegressor()
ada = ensemble.AdaBoostRegressor()
grad = ensemble.GradientBoostingRegressor()
clf = ensemble.AdaBoostRegressor(n_estimators=500, learning_rate=0.5)
selector = clf.fit(X_train, y_train)
feat_imp = pd.Series(clf.feature_importances_, index=X_train.columns.values).sort_values(ascending=False)
feat_imp[:40].plot(kind='bar', title='Feature Importances according to AdaBoostRegressor', figsize=(12, 8))
plt.ylabel('Feature Importance Score')
plt.subplots_adjust(bottom=0.3)
plt.show() | code |
17133658/cell_16 | [
"text_plain_output_1.png"
import numpy as np  # needed for np.array_equal below (missing in the extracted cell)
columns_to_drop = []
columns = train_data.columns
for i in range(len(columns) - 1):
column_to_check = train_data[columns[i]]
for c in range(i + 1, len(columns)):
if np.array_equal(column_to_check, train_data[columns[c]].values):
columns_to_drop.append(columns[c])
train_data.drop(columns_to_drop, axis=1, inplace=True)
test_data.drop(columns_to_drop, axis=1, inplace=True)
print('Data after cleaning')
print('Train data shape: ', train_data.shape, 'Test data shape: ', test_data.shape) | code |
17133658/cell_3 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import ensemble
from sklearn import neighbors
from sklearn import linear_model
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import SelectFromModel
from sklearn.pipeline import Pipeline
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings('ignore') | code |
17133658/cell_17 | [
"text_html_output_1.png"
] | train_data = train_data.sample(n=5000)
train_data.shape
train_data.corr() | code |
17133658/cell_24 | [
"text_plain_output_1.png"
] | X_train = df_train.drop(['ID', 'TARGET'], axis=1)
y_train = df_train.TARGET
X_test = df_test.drop(['ID', 'TARGET'], axis=1)
y_test = df_test.TARGET
X_valid = df_valid.drop(['ID', 'TARGET'], axis=1)
y_valid = df_valid.TARGET
data_for_sub = test_data.drop(['ID'], axis=1) | code |
17133658/cell_14 | [
"text_plain_output_1.png"
] | dropable_cols = []
for i in train_data.columns:
if (train_data[i] == 0).all():
dropable_cols.append(i)
train_data.drop(dropable_cols, axis=1, inplace=True)
test_data.drop(dropable_cols, axis=1, inplace=True)
print('Data shape after droping rows: ')
print('Train data shape: ', train_data.shape, 'Test data shape: ', test_data.shape) | code |
17133658/cell_22 | [
"text_plain_output_1.png"
] | df_train = train_data[:3000]
df_test = train_data[3000:4000]
df_valid = train_data[4000:]
print(df_train.shape, df_test.shape, df_valid.shape) | code |
17133658/cell_37 | [
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_9.png",
"application_vnd.jupyter.stderr_output_4.png",
"application_vnd.jupyter.stderr_output_6.png",
"application_vnd.jupyter.stderr_output_8.png",
"text_plain_output_3.png",
"text_plain_output_7.png",
"text_plain_output_1.png"
] | from sklearn import ensemble
import matplotlib.pyplot as plt
import pandas as pd
import xgboost as xgb
train_data = train_data.sample(n=5000)
train_data.shape
train_data.corr()
train_data.TARGET.value_counts()
xgb = xgb.XGBRFRegressor()
tree = ensemble.RandomForestRegressor()
ada = ensemble.AdaBoostRegressor()
grad = ensemble.GradientBoostingRegressor()
clf = ensemble.AdaBoostRegressor(n_estimators=500, learning_rate=0.5)
selector = clf.fit(X_train, y_train)
feat_imp = pd.Series(clf.feature_importances_, index=X_train.columns.values).sort_values(ascending=False)
features = feat_imp[:40].index
for i in features[0:5]:
x = train_data[i].value_counts().head().index
y = train_data[i].value_counts().head()
plt.figure()
plt.scatter(x, y)  # feature values on x, their counts on y (the source cell had the axes swapped relative to the label)
plt.xlabel(i)
17133658/cell_12 | [
"text_plain_output_1.png"
] | train_data.isnull().sum().any() > 0 | code |
17133658/cell_5 | [
"image_output_5.png",
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
import pandas as pd  # missing in the extracted cell
test_data = pd.read_csv('../input/test.csv')
train_data = pd.read_csv('../input/train.csv') | code |
33096285/cell_34 | [
"image_output_5.png",
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | from sklearn import preprocessing
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
plt.style.use('ggplot') # Using ggplot2 style visuals
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title("Overview Data Set")
ax = sns.boxplot(data = df_cred.drop(columns=['Amount', 'Class', 'Time']),
orient = 'h',
palette = 'Set2')
min_max_scaler = preprocessing.MinMaxScaler()
df_cred = df_cred.drop('Time', axis=1)
df_cred_scaled = min_max_scaler.fit_transform(df_cred.iloc[:, :-1])
# restore column names and re-attach 'Class': a DataFrame built from a bare array has
# integer column labels, so the label-based 'Class' filters below would raise a KeyError
df_cred_normalized = pd.DataFrame(df_cred_scaled, columns=df_cred.columns[:-1])
df_cred_normalized['Class'] = df_cred['Class'].values
df_cred_normalized_train = df_cred_normalized[df_cred_normalized['Class'] == 0]
df_cred_normalized_test = df_cred_normalized[df_cred_normalized['Class'] == 1]
df_cred_normalized_test_part_1 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_1.index)
df_cred_normalized_test_part_2 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_2.index)
df_cred_normalized_test_class_1 = df_cred_normalized_test.sample(frac=0.5)
df_cred_normalized_validation_class_1 = df_cred_normalized_test.drop(df_cred_normalized_test_class_1.index)
df_cred_normalized_test_class_1.shape
df_cred_normalized_test_set = df_cred_normalized_test_part_1.append(df_cred_normalized_test_class_1)
df_cred_normalized_validation_set = df_cred_normalized_test_part_2.append(df_cred_normalized_validation_class_1)
df_cred_normalized_validation_set['Class'].value_counts() | code |
33096285/cell_33 | [
"text_html_output_1.png"
] | from sklearn import preprocessing
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
plt.style.use('ggplot') # Using ggplot2 style visuals
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title("Overview Data Set")
ax = sns.boxplot(data = df_cred.drop(columns=['Amount', 'Class', 'Time']),
orient = 'h',
palette = 'Set2')
min_max_scaler = preprocessing.MinMaxScaler()
df_cred = df_cred.drop('Time', axis=1)
df_cred_scaled = min_max_scaler.fit_transform(df_cred.iloc[:, :-1])
# restore column names and re-attach 'Class' so the label-based filters below work
df_cred_normalized = pd.DataFrame(df_cred_scaled, columns=df_cred.columns[:-1])
df_cred_normalized['Class'] = df_cred['Class'].values
df_cred_normalized_train = df_cred_normalized[df_cred_normalized['Class'] == 0]
df_cred_normalized_test = df_cred_normalized[df_cred_normalized['Class'] == 1]
df_cred_normalized_test_part_1 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_1.index)
df_cred_normalized_test_part_2 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_2.index)
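# split the fraud rows 50/50 between test and validation; each side is then
# combined with one of the 5% normal-row holdouts taken above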
df_cred_normalized_test_class_1 = df_cred_normalized_test.sample(frac=0.5)
df_cred_normalized_validation_class_1 = df_cred_normalized_test.drop(df_cred_normalized_test_class_1.index)
df_cred_normalized_test_class_1.shape
df_cred_normalized_test_set = df_cred_normalized_test_part_1.append(df_cred_normalized_test_class_1)
df_cred_normalized_validation_set = df_cred_normalized_test_part_2.append(df_cred_normalized_validation_class_1)
print('train set dimensions :', df_cred_normalized_train.shape)
print('test set dimensions :', df_cred_normalized_test_set.shape)
print('validate set dimensions :', df_cred_normalized_validation_set.shape) | code |
33096285/cell_44 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping ,ReduceLROnPlateau
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.layers import Input, Dense
from keras.models import Model, load_model
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
plt.style.use('ggplot') # Using ggplot2 style visuals
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title("Overview Data Set")
ax = sns.boxplot(data = df_cred.drop(columns=['Amount', 'Class', 'Time']),
orient = 'h',
palette = 'Set2')
def plot_distribution(data_select):
figsize = (15, 8)
sns.set_style('ticks')
s = sns.FacetGrid(df_cred, hue='Class', aspect=2.5, palette={0: 'lime', 1: 'black'})
s.map(sns.kdeplot, data_select, shade=True, alpha=0.6)
s.set(xlim=(df_cred[data_select].min(), df_cred[data_select].max()))
s.add_legend()
s.set_axis_labels(data_select, 'proportion')
min_max_scaler = preprocessing.MinMaxScaler()
df_cred = df_cred.drop('Time', axis=1)
df_cred_scaled = min_max_scaler.fit_transform(df_cred.iloc[:, :-1])
# restore column names and re-attach 'Class' so the label-based filters below work
df_cred_normalized = pd.DataFrame(df_cred_scaled, columns=df_cred.columns[:-1])
df_cred_normalized['Class'] = df_cred['Class'].values
df_cred_normalized_train = df_cred_normalized[df_cred_normalized['Class'] == 0]
df_cred_normalized_test = df_cred_normalized[df_cred_normalized['Class'] == 1]
df_cred_normalized_test_part_1 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_1.index)
df_cred_normalized_test_part_2 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_2.index)
df_cred_normalized_test_class_1 = df_cred_normalized_test.sample(frac=0.5)
df_cred_normalized_validation_class_1 = df_cred_normalized_test.drop(df_cred_normalized_test_class_1.index)
df_cred_normalized_test_class_1.shape
df_cred_normalized_test_set = df_cred_normalized_test_part_1.append(df_cred_normalized_test_class_1)
df_cred_normalized_validation_set = df_cred_normalized_test_part_2.append(df_cred_normalized_validation_class_1)
X_train, X_test = train_test_split(df_cred_normalized_train, test_size=0.2, random_state=2020)
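# the autoencoder is trained on non-fraud rows only; fraudulent transactions should
# later stand out through a high reconstruction error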
X_train = X_train[X_train.Class == 0]
X_train = X_train.drop(['Class'], axis=1)
y_test = X_test['Class']
X_test = X_test.drop(['Class'], axis=1)
X_train = X_train.values
X_test = X_test.values
X_train.shape
input_dim = X_train.shape[1]
encoding_dim = 20
input_layer = Input(shape=(input_dim,))
# chain the layers explicitly: in the source cell the first encoder and decoder Dense
# layers were immediately overwritten and never used, so the network silently skipped them
encoder = Dense(encoding_dim * 2, activation='sigmoid')(input_layer)
encoder = Dense(encoding_dim, activation='sigmoid')(encoder)
encoder = Dense(8, activation='sigmoid')(encoder)
decoder = Dense(20, activation='sigmoid')(encoder)
decoder = Dense(40, activation='sigmoid')(decoder)
decoder = Dense(input_dim, activation='sigmoid')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)
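# reconstruction objective: mean squared error between input and output features,
# with early stopping and best-model checkpointing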
nb_epoch = 50
batch_size = 32
autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15)
checkpointer = ModelCheckpoint(filepath='model.h5', verbose=0, save_best_only=True)
history = autoencoder.fit(X_train, X_train, epochs=nb_epoch, batch_size=batch_size, shuffle=True, validation_data=(X_test, X_test), callbacks=[es, checkpointer], verbose=1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model acc')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show() | code |
33096285/cell_6 | [
"text_html_output_1.png"
] | from contextlib import contextmanager
import plotly.offline as py
import warnings
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
import warnings
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import plotly.figure_factory as ff
warnings.filterwarnings('ignore')
from contextlib import contextmanager
import time
@contextmanager
def timer(title):
t0 = time.time()
yield
print('{} - done in {:.0f}s'.format(title, time.time() - t0)) | code |
33096285/cell_29 | [
"image_output_1.png"
] | from sklearn import preprocessing
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
plt.style.use('ggplot') # Using ggplot2 style visuals
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title("Overview Data Set")
ax = sns.boxplot(data = df_cred.drop(columns=['Amount', 'Class', 'Time']),
orient = 'h',
palette = 'Set2')
min_max_scaler = preprocessing.MinMaxScaler()
df_cred = df_cred.drop('Time', axis=1)
df_cred_scaled = min_max_scaler.fit_transform(df_cred.iloc[:, :-1])
df_cred_normalized = pd.DataFrame(df_cred_scaled, columns=df_cred.columns[:-1])
df_cred_normalized['Class'] = df_cred['Class'].values
df_cred_normalized_train = df_cred_normalized[df_cred_normalized['Class'] == 0]
df_cred_normalized_test = df_cred_normalized[df_cred_normalized['Class'] == 1]
df_cred_normalized_test_class_1 = df_cred_normalized_test.sample(frac=0.5)
df_cred_normalized_validation_class_1 = df_cred_normalized_test.drop(df_cred_normalized_test_class_1.index)
df_cred_normalized_test_class_1.shape | code |
33096285/cell_50 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping ,ReduceLROnPlateau
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.layers import Input, Dense
from keras.models import Model, load_model
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
plt.style.use('ggplot') # Using ggplot2 style visuals
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title("Overview Data Set")
ax = sns.boxplot(data = df_cred.drop(columns=['Amount', 'Class', 'Time']),
orient = 'h',
palette = 'Set2')
min_max_scaler = preprocessing.MinMaxScaler()
df_cred = df_cred.drop('Time', axis=1)
df_cred_scaled = min_max_scaler.fit_transform(df_cred.iloc[:, :-1])
df_cred_normalized = pd.DataFrame(df_cred_scaled, columns=df_cred.columns[:-1])
df_cred_normalized['Class'] = df_cred['Class'].values
df_cred_normalized_train = df_cred_normalized[df_cred_normalized['Class'] == 0]
df_cred_normalized_test = df_cred_normalized[df_cred_normalized['Class'] == 1]
df_cred_normalized_test_part_1 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_1.index)
df_cred_normalized_test_part_2 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_2.index)
df_cred_normalized_test_class_1 = df_cred_normalized_test.sample(frac=0.5)
df_cred_normalized_validation_class_1 = df_cred_normalized_test.drop(df_cred_normalized_test_class_1.index)
df_cred_normalized_test_class_1.shape
df_cred_normalized_test_set = pd.concat([df_cred_normalized_test_part_1, df_cred_normalized_test_class_1])
df_cred_normalized_validation_set = pd.concat([df_cred_normalized_test_part_2, df_cred_normalized_validation_class_1])
X_train, X_test = train_test_split(df_cred_normalized_train, test_size=0.2, random_state=2020)
X_train = X_train[X_train.Class == 0]
X_train = X_train.drop(['Class'], axis=1)
y_test = X_test['Class']
X_test = X_test.drop(['Class'], axis=1)
X_train = X_train.values
X_test = X_test.values
X_train.shape
input_dim = X_train.shape[1]
encoding_dim = 20
input_layer = Input(shape=(input_dim,))
encoder = Dense(encoding_dim * 2, activation='sigmoid')(input_layer)
encoder = Dense(encoding_dim, activation='sigmoid')(encoder)
encoder = Dense(8, activation='sigmoid')(encoder)
decoder = Dense(20, activation='sigmoid')(encoder)
decoder = Dense(40, activation='sigmoid')(decoder)
decoder = Dense(input_dim, activation='sigmoid')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)
nb_epoch = 50
batch_size = 32
autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15)
checkpointer = ModelCheckpoint(filepath='model.h5', verbose=0, save_best_only=True)
history = autoencoder.fit(X_train, X_train, epochs=nb_epoch, batch_size=batch_size, shuffle=True, validation_data=(X_test, X_test), callbacks=[es, checkpointer], verbose=1)
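# Score each sample by reconstruction error: fraud was never seen during
# training, so it should reconstruct worse (higher MSE) than normal transactions.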
predictions = autoencoder.predict(X_test)
mse = np.mean(np.power(X_test - predictions, 2), axis=1)
error_df = pd.DataFrame({'reconstruction_error': mse, 'true_class': y_test})
y_test = df_cred_normalized_test_set['Class']
df_cred_normalized_test_set = df_cred_normalized_test_set.drop('Class', axis=1)
predictions = autoencoder.predict(df_cred_normalized_test_set)
mse = np.mean(np.power(df_cred_normalized_test_set - predictions, 2), axis=1)
error_df_test = pd.DataFrame({'reconstruction_error': mse, 'true_class': y_test})
error_df_test.describe() | code |
33096285/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
33096285/cell_7 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
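# Boxplot the anonymized PCA components V1-V28 to eyeball their scale and outliers.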
plt.style.use('ggplot')
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title('Overview Data Set')
ax = sns.boxplot(data=df_cred.drop(columns=['Amount', 'Class', 'Time']), orient='h', palette='Set2') | code |
33096285/cell_8 | [
"image_output_1.png"
] | from contextlib import contextmanager
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objs as go
import plotly.offline as py
import seaborn as sns
import warnings
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
import warnings
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import plotly.figure_factory as ff
warnings.filterwarnings('ignore')
from contextlib import contextmanager
import time
@contextmanager
def timer(title):
t0 = time.time()
yield
plt.style.use('ggplot') # Using ggplot2 style visuals
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title("Overview Data Set")
ax = sns.boxplot(data = df_cred.drop(columns=['Amount', 'Class', 'Time']),
orient = 'h',
palette = 'Set2')
fraud = df_cred[df_cred['Class'] != 0]
normal = df_cred[df_cred['Class'] == 0]
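# The pie chart makes the heavy class imbalance obvious: fraud is a tiny
# sliver of all transactions.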
trace = go.Pie(labels=['Normal', 'Fraud'], values=df_cred['Class'].value_counts(), textfont=dict(size=15), opacity=0.8, marker=dict(colors=['lightskyblue', 'gold'], line=dict(color='#000000', width=1.5)))
layout = dict(title='Distribution of target variable')
fig = dict(data=[trace], layout=layout)
py.iplot(fig) | code |
33096285/cell_38 | [
"text_plain_output_1.png"
] | from keras.models import Model, load_model
from keras.layers import Input, Dense
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.optimizers import Adam | code |
33096285/cell_43 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping ,ReduceLROnPlateau
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.layers import Input, Dense
from keras.models import Model, load_model
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
plt.style.use('ggplot') # Using ggplot2 style visuals
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title("Overview Data Set")
ax = sns.boxplot(data = df_cred.drop(columns=['Amount', 'Class', 'Time']),
orient = 'h',
palette = 'Set2')
min_max_scaler = preprocessing.MinMaxScaler()
df_cred = df_cred.drop('Time', axis=1)
df_cred_scaled = min_max_scaler.fit_transform(df_cred.iloc[:, :-1])
df_cred_normalized = pd.DataFrame(df_cred_scaled, columns=df_cred.columns[:-1])
df_cred_normalized['Class'] = df_cred['Class'].values
df_cred_normalized_train = df_cred_normalized[df_cred_normalized['Class'] == 0]
df_cred_normalized_test = df_cred_normalized[df_cred_normalized['Class'] == 1]
df_cred_normalized_test_part_1 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_1.index)
df_cred_normalized_test_part_2 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_2.index)
df_cred_normalized_test_class_1 = df_cred_normalized_test.sample(frac=0.5)
df_cred_normalized_validation_class_1 = df_cred_normalized_test.drop(df_cred_normalized_test_class_1.index)
df_cred_normalized_test_class_1.shape
df_cred_normalized_test_set = pd.concat([df_cred_normalized_test_part_1, df_cred_normalized_test_class_1])
df_cred_normalized_validation_set = pd.concat([df_cred_normalized_test_part_2, df_cred_normalized_validation_class_1])
X_train, X_test = train_test_split(df_cred_normalized_train, test_size=0.2, random_state=2020)
X_train = X_train[X_train.Class == 0]
X_train = X_train.drop(['Class'], axis=1)
y_test = X_test['Class']
X_test = X_test.drop(['Class'], axis=1)
X_train = X_train.values
X_test = X_test.values
X_train.shape
input_dim = X_train.shape[1]
encoding_dim = 20
input_layer = Input(shape=(input_dim,))
encoder = Dense(encoding_dim * 2, activation='sigmoid')(input_layer)
encoder = Dense(encoding_dim, activation='sigmoid')(encoder)
encoder = Dense(8, activation='sigmoid')(encoder)
decoder = Dense(20, activation='sigmoid')(encoder)
decoder = Dense(40, activation='sigmoid')(decoder)
decoder = Dense(input_dim, activation='sigmoid')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)
nb_epoch = 50
batch_size = 32
autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15)
checkpointer = ModelCheckpoint(filepath='model.h5', verbose=0, save_best_only=True)
history = autoencoder.fit(X_train, X_train, epochs=nb_epoch, batch_size=batch_size, shuffle=True, validation_data=(X_test, X_test), callbacks=[es, checkpointer], verbose=1) | code |
33096285/cell_46 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping ,ReduceLROnPlateau
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.layers import Input, Dense
from keras.models import Model, load_model
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
plt.style.use('ggplot') # Using ggplot2 style visuals
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title("Overview Data Set")
ax = sns.boxplot(data = df_cred.drop(columns=['Amount', 'Class', 'Time']),
orient = 'h',
palette = 'Set2')
min_max_scaler = preprocessing.MinMaxScaler()
df_cred = df_cred.drop('Time', axis=1)
df_cred_scaled = min_max_scaler.fit_transform(df_cred.iloc[:, :-1])
df_cred_normalized = pd.DataFrame(df_cred_scaled, columns=df_cred.columns[:-1])
df_cred_normalized['Class'] = df_cred['Class'].values
df_cred_normalized_train = df_cred_normalized[df_cred_normalized['Class'] == 0]
df_cred_normalized_test = df_cred_normalized[df_cred_normalized['Class'] == 1]
df_cred_normalized_test_part_1 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_1.index)
df_cred_normalized_test_part_2 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_2.index)
df_cred_normalized_test_class_1 = df_cred_normalized_test.sample(frac=0.5)
df_cred_normalized_validation_class_1 = df_cred_normalized_test.drop(df_cred_normalized_test_class_1.index)
df_cred_normalized_test_class_1.shape
df_cred_normalized_test_set = pd.concat([df_cred_normalized_test_part_1, df_cred_normalized_test_class_1])
df_cred_normalized_validation_set = pd.concat([df_cred_normalized_test_part_2, df_cred_normalized_validation_class_1])
X_train, X_test = train_test_split(df_cred_normalized_train, test_size=0.2, random_state=2020)
X_train = X_train[X_train.Class == 0]
X_train = X_train.drop(['Class'], axis=1)
y_test = X_test['Class']
X_test = X_test.drop(['Class'], axis=1)
X_train = X_train.values
X_test = X_test.values
X_train.shape
input_dim = X_train.shape[1]
encoding_dim = 20
input_layer = Input(shape=(input_dim,))
encoder = Dense(encoding_dim * 2, activation='sigmoid')(input_layer)
encoder = Dense(encoding_dim, activation='sigmoid')(encoder)
encoder = Dense(8, activation='sigmoid')(encoder)
decoder = Dense(20, activation='sigmoid')(encoder)
decoder = Dense(40, activation='sigmoid')(decoder)
decoder = Dense(input_dim, activation='sigmoid')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)
nb_epoch = 50
batch_size = 32
autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15)
checkpointer = ModelCheckpoint(filepath='model.h5', verbose=0, save_best_only=True)
history = autoencoder.fit(X_train, X_train, epochs=nb_epoch, batch_size=batch_size, shuffle=True, validation_data=(X_test, X_test), callbacks=[es, checkpointer], verbose=1)
predictions = autoencoder.predict(X_test)
mse = np.mean(np.power(X_test - predictions, 2), axis=1)
error_df = pd.DataFrame({'reconstruction_error': mse, 'true_class': y_test})
error_df.describe() | code |
33096285/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
plt.style.use('ggplot') # Using ggplot2 style visuals
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title("Overview Data Set")
ax = sns.boxplot(data = df_cred.drop(columns=['Amount', 'Class', 'Time']),
orient = 'h',
palette = 'Set2')
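# Class-conditional KDE per feature: clearly separated densities (as for V4,
# V11 and V12 below) flag features that help distinguish fraud from normal.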
def plot_distribution(data_select):
figsize = (15, 8)
sns.set_style('ticks')
s = sns.FacetGrid(df_cred, hue='Class', aspect=2.5, palette={0: 'lime', 1: 'black'})
s.map(sns.kdeplot, data_select, shade=True, alpha=0.6)
s.set(xlim=(df_cred[data_select].min(), df_cred[data_select].max()))
s.add_legend()
s.set_axis_labels(data_select, 'proportion')
plot_distribution('V4')
plot_distribution('V9')
plot_distribution('V11')
plot_distribution('V12')
plot_distribution('V13') | code |
33096285/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape | code |
33096285/cell_36 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_cred = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df_cred.shape
plt.style.use('ggplot') # Using ggplot2 style visuals
f, ax = plt.subplots(figsize=(11, 15))
ax.set_facecolor('#fafafa')
ax.set(xlim=(-5, 5))
plt.ylabel('Variables')
plt.title("Overview Data Set")
ax = sns.boxplot(data = df_cred.drop(columns=['Amount', 'Class', 'Time']),
orient = 'h',
palette = 'Set2')
min_max_scaler = preprocessing.MinMaxScaler()
df_cred = df_cred.drop('Time', axis=1)
df_cred_scaled = min_max_scaler.fit_transform(df_cred.iloc[:, :-1])
df_cred_normalized = pd.DataFrame(df_cred_scaled, columns=df_cred.columns[:-1])
df_cred_normalized['Class'] = df_cred['Class'].values
df_cred_normalized_train = df_cred_normalized[df_cred_normalized['Class'] == 0]
df_cred_normalized_test = df_cred_normalized[df_cred_normalized['Class'] == 1]
df_cred_normalized_test_part_1 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_1.index)
df_cred_normalized_test_part_2 = df_cred_normalized_train.sample(frac=0.05)
df_cred_normalized_train = df_cred_normalized_train.drop(df_cred_normalized_test_part_2.index)
df_cred_normalized_test_class_1 = df_cred_normalized_test.sample(frac=0.5)
df_cred_normalized_validation_class_1 = df_cred_normalized_test.drop(df_cred_normalized_test_class_1.index)
df_cred_normalized_test_class_1.shape
df_cred_normalized_test_set = pd.concat([df_cred_normalized_test_part_1, df_cred_normalized_test_class_1])
df_cred_normalized_validation_set = pd.concat([df_cred_normalized_test_part_2, df_cred_normalized_validation_class_1])
X_train, X_test = train_test_split(df_cred_normalized_train, test_size=0.2, random_state=2020)
X_train = X_train[X_train.Class == 0]
X_train = X_train.drop(['Class'], axis=1)
y_test = X_test['Class']
X_test = X_test.drop(['Class'], axis=1)
X_train = X_train.values
X_test = X_test.values
X_train.shape | code |
18138191/cell_4 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from IPython.display import clear_output
from time import sleep
import os
os.listdir('../input')
train_data = pd.read_csv('../input/training/training.csv')
test_data = pd.read_csv('../input/test/test.csv')
lookid_data = pd.read_csv('../input/IdLookupTable.csv')
train_data.head().T
train_data.isnull().any().value_counts()
train_data.fillna(method='ffill', inplace=True)
train_data.isnull().any().value_counts() | code |
18138191/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from IPython.display import clear_output
from time import sleep
import os
os.listdir('../input')
train_data = pd.read_csv('../input/training/training.csv')
test_data = pd.read_csv('../input/test/test.csv')
lookid_data = pd.read_csv('../input/IdLookupTable.csv')
train_data.head().T
train_data.isnull().any().value_counts()
train_data.fillna(method='ffill', inplace=True)
train_data.isnull().any().value_counts()
def split_image_feature(data):
"""Return extracted image feature"""
imag = []
for i in range(0, data.shape[0]):
img = data['Image'][i].split(' ')
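        # Pixel strings may contain empty tokens (from double spaces); map them to '0'.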
img = ['0' if x == '' else x for x in img]
imag.append(img)
image_list = np.array(imag, dtype='float')
X_train = image_list.reshape(-1, 96, 96)
return X_train
X_train = split_image_feature(train_data)
training = train_data.drop('Image', axis=1)
y_train = []
for i in range(0, train_data.shape[0]):
y = training.iloc[i, :]
y_train.append(y)
y_train = np.array(y_train, dtype='float')
plt.imshow(X_train[0], cmap='gray')
plt.show() | code |
18138191/cell_2 | [
"text_html_output_1.png"
] | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from IPython.display import clear_output
from time import sleep
import os
os.listdir('../input')
train_data = pd.read_csv('../input/training/training.csv')
test_data = pd.read_csv('../input/test/test.csv')
lookid_data = pd.read_csv('../input/IdLookupTable.csv')
train_data.head().T | code |
18138191/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Conv2D,Dropout,Dense,Flatten
from keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from IPython.display import clear_output
from time import sleep
import os
os.listdir('../input')
train_data = pd.read_csv('../input/training/training.csv')
test_data = pd.read_csv('../input/test/test.csv')
lookid_data = pd.read_csv('../input/IdLookupTable.csv')
train_data.head().T
train_data.isnull().any().value_counts()
train_data.fillna(method='ffill', inplace=True)
train_data.isnull().any().value_counts()
def split_image_feature(data):
"""Return extracted image feature"""
imag = []
for i in range(0, data.shape[0]):
img = data['Image'][i].split(' ')
img = ['0' if x == '' else x for x in img]
imag.append(img)
image_list = np.array(imag, dtype='float')
X_train = image_list.reshape(-1, 96, 96)
return X_train
X_train = split_image_feature(train_data)
training = train_data.drop('Image', axis=1)
y_train = []
for i in range(0, train_data.shape[0]):
y = training.iloc[i, :]
y_train.append(y)
y_train = np.array(y_train, dtype='float')
from keras.layers import Conv2D, Dropout, Dense, Flatten
from keras.models import Sequential
model = Sequential([Flatten(input_shape=(96, 96)), Dense(128, activation='relu'), Dropout(0.1), Dense(64, activation='relu'), Dense(30)])
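# A simple MLP on flattened 96x96 images; the final Dense(30) regresses the
# 15 (x, y) facial keypoint coordinates as one flat vector.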
model.compile(optimizer='adam', loss='mse', metrics=['mae', 'accuracy'])
model.fit(X_train, y_train, epochs=500, batch_size=128, validation_split=0.2) | code |
2035149/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import tensorflow as tf
from vgg16 import vgg16
import numpy as np
import os
from datalab import DataLabTrain | code |
2035149/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from datalab import DataLabTrain
from vgg16 import vgg16
import tensorflow as tf
def train(n_iters):
model, params = vgg16(fine_tune_last=True, n_classes=2)
X = model['input']
Z = model['out']
Y = tf.placeholder(dtype=tf.float32, shape=[None, 2])
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z[:, 0, 0, :], labels=Y))
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)
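    # With fine_tune_last=True only the re-initialized final layer is meant to
    # be adapted; the small Adam learning rate keeps updates stable.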
saver = tf.train.Saver()
with tf.Session() as sess:
try:
sess.run(tf.global_variables_initializer())
for i in range(n_iters):
dl = DataLabTrain('./datasets/train_set/')
train_gen = dl.generator()
dev_gen = DataLabTrain('./datasets/dev_set/').generator()
for X_train, Y_train in train_gen:
sess.run(train_step, feed_dict={X: X_train, Y: Y_train})
l = 0
count = 0
for X_test, Y_test in dev_gen:
count += 1
l += sess.run(loss, feed_dict={X: X_test, Y: Y_test})
saver.save(sess, './model/vgg16-dog-vs-cat.ckpt')
finally:
sess.close()
train(n_iters=1) | code |
2035149/cell_10 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
from datalab import DataLabTrain, DataLabTest
from make_file import make_sub
from vgg16 import vgg16
import numpy as np
import tensorflow as tf
def train(n_iters):
model, params = vgg16(fine_tune_last=True, n_classes=2)
X = model['input']
Z = model['out']
Y = tf.placeholder(dtype=tf.float32, shape=[None, 2])
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z[:, 0, 0, :], labels=Y))
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)
saver = tf.train.Saver()
with tf.Session() as sess:
try:
sess.run(tf.global_variables_initializer())
for i in range(n_iters):
dl = DataLabTrain('./datasets/train_set/')
train_gen = dl.generator()
dev_gen = DataLabTrain('./datasets/dev_set/').generator()
for X_train, Y_train in train_gen:
sess.run(train_step, feed_dict={X: X_train, Y: Y_train})
l = 0
count = 0
for X_test, Y_test in dev_gen:
count += 1
l += sess.run(loss, feed_dict={X: X_test, Y: Y_test})
saver.save(sess, './model/vgg16-dog-vs-cat.ckpt')
finally:
sess.close()
from make_file import make_sub
def predict(model_path, batch_size):
model, params = vgg16(fine_tune_last=True, n_classes=2)
X = model['input']
Y_hat = tf.nn.softmax(model['out'])
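    # Softmax over the two logits; index 1 below is taken as the positive
    # ('dog') class probability for the submission (this assumes that label order).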
saver = tf.train.Saver()
dl_test = DataLabTest('./datasets/test_set/')
test_gen = dl_test.generator()
Y = []
with tf.Session() as sess:
saver.restore(sess, model_path)
for i in range(12500 // batch_size + 1):
y = sess.run(Y_hat, feed_dict={X: next(test_gen)})
Y.append(y[:, 0, 0, 1])
print('Complete: {}%'.format(round(len(Y) / dl_test.max_len * 100, 2)), end='\r')
Y = np.concatenate(Y)
print()
    print('Total Predictions: {}'.format(Y.shape))
return Y
Y = predict('./model/vgg16-dog-vs-cat.ckpt', 16)
np.save('out.npy', Y)
make_sub('sub_1.csv') | code |
88075491/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', delimiter='\\t', engine='python')
df_mod = df
df_mod['Dt_Customer'] = pd.to_datetime(df['Dt_Customer'], format='%d-%m-%Y')
df_mod['Age'] = max(df_mod.Dt_Customer.dt.year) - df['Year_Birth']
df_mod = df.rename(columns={'Response': 'AcceptedCmp6'})
df_mod = df_mod.drop(columns=['Z_Revenue', 'Z_CostContact']) | code |
90107080/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
test = test.drop('Unnamed: 0', axis=1)
test = test.fillna(test.mean())
for _ in test.columns:
print('The number of null values in:{} == {}'.format(_, test[_].isnull().sum())) | code |
90107080/cell_33 | [
"text_plain_output_1.png"
] | from joblib import dump, load
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
test = test.drop('Unnamed: 0', axis=1)
test = test.fillna(test.mean())
X_test = test.drop('satisfaction', axis=1)
clf = load('filename.joblib')
clf.predict(X_test) | code |
90107080/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
train = train.drop('Unnamed: 0', axis=1)
train = train.fillna(train.mean())
train.columns
train.info() | code |
90107080/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
from sklearn.preprocessing import LabelEncoder, PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from scipy import stats
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
from graphviz import Source
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, classification_report
from mlxtend.plotting import plot_confusion_matrix
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import warnings
warnings.filterwarnings('ignore')
sns.set(style='darkgrid')
plt.style.use('fivethirtyeight')
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
def dataset_overview(data, col):
pass
dataset_overview(train, 'satisfaction') | code |
90107080/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression , Ridge , LogisticRegression
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
train = train.drop('Unnamed: 0', axis=1)
test = test.drop('Unnamed: 0', axis=1)
train = train.fillna(train.mean())
test = test.fillna(test.mean())
train.columns
X = train.drop(['satisfaction'], axis=1)
y = train['satisfaction']
from sklearn.linear_model import LogisticRegression
clf_lr = LogisticRegression(solver='liblinear')
clf_lr.fit(X, y)
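# pickle round-trip: serialize the fitted model to a byte string and restore
# it, mimicking save/load without touching disk.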
s = pickle.dumps(clf_lr)
clf2 = pickle.loads(s)
X_test = test.drop('satisfaction', axis=1)
clf2.predict(X_test) | code |
90107080/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
from sklearn.preprocessing import LabelEncoder, PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from scipy import stats
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
from graphviz import Source
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, classification_report
from mlxtend.plotting import plot_confusion_matrix
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import warnings
warnings.filterwarnings('ignore')
sns.set(style='darkgrid')
plt.style.use('fivethirtyeight')
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
def dataset_overview(data, col):
pass
train = train.drop('Unnamed: 0', axis=1)
train = train.fillna(train.mean())
def correlation_matrix(data):
corr = data.corr().round(2)
# Mask for the upper triangle
    mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set figure size
f, ax = plt.subplots(figsize=(20, 20))
# Define custom colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap
d=sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot=True)
plt.tight_layout()
return d
train.columns
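# Encode each categorical column in place as integer codes so scikit-learn
# estimators can consume the frame directly.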
def label_encoding(data, col):
label_encoder = preprocessing.LabelEncoder()
data[col] = label_encoder.fit_transform(data[col])
return
label_encoding(train, 'Gender')
label_encoding(train, 'Customer Type')
label_encoding(train, 'Type of Travel')
label_encoding(train, 'satisfaction')
label_encoding(train, 'Class') | code |
90107080/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
90107080/cell_8 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
from sklearn.preprocessing import LabelEncoder, PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from scipy import stats
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
from graphviz import Source
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, classification_report
from mlxtend.plotting import plot_confusion_matrix
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import warnings
warnings.filterwarnings('ignore')
sns.set(style='darkgrid')
plt.style.use('fivethirtyeight')
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
def dataset_overview(data, col):
pass
dataset_overview(test, 'satisfaction') | code |
90107080/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
from sklearn.preprocessing import LabelEncoder, PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from scipy import stats
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
from graphviz import Source
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, classification_report
from mlxtend.plotting import plot_confusion_matrix
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import warnings
warnings.filterwarnings('ignore')
sns.set(style='darkgrid')
plt.style.use('fivethirtyeight')
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
def dataset_overview(data, col):
pass
train = train.drop('Unnamed: 0', axis=1)
train = train.fillna(train.mean())
def correlation_matrix(data):
corr = data.corr().round(2)
# Mask for the upper triangle
    mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set figure size
f, ax = plt.subplots(figsize=(20, 20))
# Define custom colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap
d=sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot=True)
plt.tight_layout()
return d
correlation_matrix(train) | code |
90107080/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
train = train.drop('Unnamed: 0', axis=1)
train = train.fillna(train.mean())
train.columns | code |
90107080/cell_35 | [
"text_plain_output_1.png"
] | from joblib import dump, load
from sklearn.linear_model import LinearRegression , Ridge , LogisticRegression
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle
import seaborn as sns
import warnings
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
from sklearn.preprocessing import LabelEncoder, PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from scipy import stats
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
from graphviz import Source
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, classification_report
from mlxtend.plotting import plot_confusion_matrix
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import warnings
warnings.filterwarnings('ignore')
sns.set(style='darkgrid')
plt.style.use('fivethirtyeight')
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
def dataset_overview(data, col):
pass
train = train.drop('Unnamed: 0', axis=1)
test = test.drop('Unnamed: 0', axis=1)
train = train.fillna(train.mean())
test = test.fillna(test.mean())
def correlation_matrix(data):
corr = data.corr().round(2)
# Mask for the upper triangle
    mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set figure size
f, ax = plt.subplots(figsize=(20, 20))
# Define custom colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap
d=sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot=True)
plt.tight_layout()
return d
train.columns
X = train.drop(['satisfaction'], axis=1)
y = train['satisfaction']
from sklearn.linear_model import LogisticRegression
clf_lr = LogisticRegression(solver='liblinear')
clf_lr.fit(X, y)
s = pickle.dumps(clf_lr)
X_test = test.drop('satisfaction', axis=1)
clf = load('filename.joblib')
clf.predict(X_test)
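# Same persistence idea with joblib: dump the fitted estimator, reload it,
# and attach its predictions to the test frame.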
def prediction_pickle(clf, input_data, data):
dump(clf, 'pipeline.joblib')
s = load('pipeline.joblib')
prediction = s.predict(input_data)
data['prediction'] = prediction
return
prediction_pickle(clf_lr, X_test, test) | code |