path (string, lengths 13–17) | screenshot_names (sequence, lengths 1–873) | code (string, lengths 0–40.4k) | cell_type (string, 1 class)
---|---|---|---|
128042012/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train_set = pd.read_csv('/kaggle/input/aviakompaniya/train_dataset.csv')
test_set = pd.read_csv('/kaggle/input/aviakompaniya/test_dataset.csv')
sample = pd.read_csv('/kaggle/input/aviakompaniya/sample_submission.csv')
df = train_set.dropna()
df.describe() | code |
128042012/cell_6 | [
"image_output_1.png"
] | import pandas as pd
train_set = pd.read_csv('/kaggle/input/aviakompaniya/train_dataset.csv')
test_set = pd.read_csv('/kaggle/input/aviakompaniya/test_dataset.csv')
sample = pd.read_csv('/kaggle/input/aviakompaniya/sample_submission.csv')
df = train_set.dropna()
df_100 = df[df['Flight Distance'] > 100]
df_100[df_100['Arrival Delay in Minutes'] > 500] | code |
128042012/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_set = pd.read_csv('/kaggle/input/aviakompaniya/train_dataset.csv')
test_set = pd.read_csv('/kaggle/input/aviakompaniya/test_dataset.csv')
sample = pd.read_csv('/kaggle/input/aviakompaniya/sample_submission.csv')
df = train_set.dropna()
df_100 = df[df['Flight Distance'] > 100]
df_100.select_dtypes('object').columns
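# (note) each column below is concatenated with itself; the duplication is
# harmless since only set() membership and unique counts are used downstream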
Gender = list(df_100.Gender) + list(df_100.Gender)
Customer_Type = list(df_100['Customer Type']) + list(df_100['Customer Type'])
Type_of_Travel = list(df_100['Type of Travel']) + list(df_100['Type of Travel'])
Classes = list(df_100['Class']) + list(df_100['Class'])
satisfaction_rate = df_100['satisfaction'].value_counts() / len(df_100) * 100
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
sns.countplot(x='Gender', hue='satisfaction', palette='viridis', data=df_100, ax=axes[0])
axes[0].set_title('Gender of retained vs. churned customers')
sns.countplot(x='Class', hue='satisfaction', palette='viridis', data=df_100, ax=axes[1])
axes[1].set_title('Travel class of retained vs. churned customers')
sns.countplot(x='Type of Travel', hue='satisfaction', palette='viridis', data=df_100, ax=axes[2])
axes[2].set_title('Type of travel of retained vs. churned customers')
plt.show() | code |
128042012/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.pipeline import Pipeline
from sklearn import metrics | code |
128042012/cell_7 | [
"image_output_1.png"
] | import pandas as pd
train_set = pd.read_csv('/kaggle/input/aviakompaniya/train_dataset.csv')
test_set = pd.read_csv('/kaggle/input/aviakompaniya/test_dataset.csv')
sample = pd.read_csv('/kaggle/input/aviakompaniya/sample_submission.csv')
df = train_set.dropna()
df_100 = df[df['Flight Distance'] > 100]
df_100.select_dtypes('object').columns | code |
128042012/cell_8 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
train_set = pd.read_csv('/kaggle/input/aviakompaniya/train_dataset.csv')
test_set = pd.read_csv('/kaggle/input/aviakompaniya/test_dataset.csv')
sample = pd.read_csv('/kaggle/input/aviakompaniya/sample_submission.csv')
df = train_set.dropna()
df_100 = df[df['Flight Distance'] > 100]
df_100.select_dtypes('object').columns
Gender = list(df_100.Gender) + list(df_100.Gender)
Customer_Type = list(df_100['Customer Type']) + list(df_100['Customer Type'])
Type_of_Travel = list(df_100['Type of Travel']) + list(df_100['Type of Travel'])
Classes = list(df_100['Class']) + list(df_100['Class'])
print('\nNumber of unique values in the Gender column : \n ', len(set(Gender)))
print('\nUnique values in the Gender column : \n ', set(Gender))
print('\nNumber of unique values in the Customer_Type column : \n ', len(set(Customer_Type)))
print('\nUnique values in the Customer_Type column : \n ', set(Customer_Type))
print('\nNumber of unique values in the Type_of_Travel column : \n ', len(set(Type_of_Travel)))
print('\nUnique values in the Type_of_Travel column : \n ', set(Type_of_Travel))
print('\nNumber of unique values in the Class column : \n ', len(set(Classes)))
print('\nUnique values in the Class column : \n ', set(Classes)) | code |
128042012/cell_16 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_set = pd.read_csv('/kaggle/input/aviakompaniya/train_dataset.csv')
test_set = pd.read_csv('/kaggle/input/aviakompaniya/test_dataset.csv')
sample = pd.read_csv('/kaggle/input/aviakompaniya/sample_submission.csv')
df = train_set.dropna()
df_100 = df[df['Flight Distance'] > 100]
df_100.select_dtypes('object').columns
Gender = list(df_100.Gender) + list(df_100.Gender)
Customer_Type = list(df_100['Customer Type']) + list(df_100['Customer Type'])
Type_of_Travel = list(df_100['Type of Travel']) + list(df_100['Type of Travel'])
Classes = list(df_100['Class']) + list(df_100['Class'])
satisfaction_rate = df_100['satisfaction'].value_counts() / len(df_100) * 100
fig, axes = plt.subplots(1,3, figsize=(15,5))
sns.countplot(x='Gender', hue='satisfaction', palette='viridis', data=df_100, ax=axes[0])
axes[0].set_title("Qolgan va qaytgan mijozrlarning jinsi")
sns.countplot(x='Class', hue='satisfaction', palette='viridis', data=df_100, ax=axes[1])
axes[1].set_title("Qolgan va qaytgan mijozrlarning qaysi classda uchganligi")
sns.countplot(x='Type of Travel', hue='satisfaction', palette='viridis', data=df_100, ax=axes[2])
axes[1].set_title("Qolgan va qaytgan mijozrlarning parvoz turi")
plt.show()
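# (assumed) the encoding and train/test split cells were not captured in
# this extract; a minimal reconstruction so the XGBoost step below can run:
from sklearn.model_selection import train_test_split
X = pd.get_dummies(df_100.drop(columns='satisfaction'))
y = df_100['satisfaction'].astype('category').cat.codes
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)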
xgb_model = XGBClassifier()
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_test)
print(classification_report(y_test, y_pred))
print('Model accuracy:', accuracy_score(y_test, y_pred))
conf_mat = confusion_matrix(y_test, y_pred)
sns.heatmap(conf_mat, annot=True, fmt='g')
plt.show()
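# note: the ROC curve below is computed from hard 0/1 predictions; using
# xgb_model.predict_proba(X_test)[:, 1] would trace a smoother curve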
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred)
roc_auc = metrics.auc(fpr, tpr)
display = metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name='ROC curve')
display.plot()
plt.show() | code |
128042012/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
train_set = pd.read_csv('/kaggle/input/aviakompaniya/train_dataset.csv')
test_set = pd.read_csv('/kaggle/input/aviakompaniya/test_dataset.csv')
sample = pd.read_csv('/kaggle/input/aviakompaniya/sample_submission.csv')
test_set.info() | code |
128042012/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train_set = pd.read_csv('/kaggle/input/aviakompaniya/train_dataset.csv')
test_set = pd.read_csv('/kaggle/input/aviakompaniya/test_dataset.csv')
sample = pd.read_csv('/kaggle/input/aviakompaniya/sample_submission.csv')
df = train_set.dropna()
df_100 = df[df['Flight Distance'] > 100]
df_100.select_dtypes('object').columns
Gender = list(df_100.Gender) + list(df_100.Gender)
Customer_Type = list(df_100['Customer Type']) + list(df_100['Customer Type'])
Type_of_Travel = list(df_100['Type of Travel']) + list(df_100['Type of Travel'])
Classes = list(df_100['Class']) + list(df_100['Class'])
satisfaction_rate = df_100['satisfaction'].value_counts() / len(df_100) * 100
plt.figure(figsize=(5, 5))
plt.pie(satisfaction_rate, labels=['Retained', 'Churned'])
plt.show() | code |
129033753/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow as tf
DDoS_PortScan = pd.read_csv('/kaggle/input/ddos-portscan/DDoS_PortScan.csv')
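# note: the column name ' Label' keeps its leading space from the raw CSV header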
Label = DDoS_PortScan.loc[:, ' Label']
DDoS_PortScan.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
Label.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
DDoS_PortScan_Data = np.array(DDoS_PortScan)
Label_Data = np.array(Label)
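# scale all features by one constant (347000000, presumably the dataset's
# global maximum, so every value lands in [0, 1]; a later cell checks the max)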
DDoS_PortScan_Data = DDoS_PortScan_Data / 347000000
DDoS_PortScan_Data = DDoS_PortScan_Data.reshape(DDoS_PortScan_Data.shape[0], 79, 1)
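# add a trailing channel axis: (samples, 79, 1) -> (samples, 79, 1, 1),
# presumably to match a Conv2D-style input layer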
DDoS_PortScan_Data = tf.expand_dims(DDoS_PortScan_Data, -1)
dataset = tf.data.Dataset.from_tensor_slices((DDoS_PortScan_Data, Label_Data))
print(dataset) | code |
129033753/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
DDoS_PortScan = pd.read_csv('/kaggle/input/ddos-portscan/DDoS_PortScan.csv')
Label = DDoS_PortScan.loc[:, ' Label']
DDoS_PortScan.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
Label.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
print(DDoS_PortScan.shape)
print(Label.shape) | code |
129033753/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
DDoS_PortScan = pd.read_csv('/kaggle/input/ddos-portscan/DDoS_PortScan.csv')
Label = DDoS_PortScan.loc[:, ' Label']
DDoS_PortScan.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
Label.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
DDoS_PortScan_Data = np.array(DDoS_PortScan)
Label_Data = np.array(Label)
print(DDoS_PortScan_Data.shape)
print(DDoS_PortScan_Data.dtype)
print(Label_Data.shape)
print(Label_Data.dtype) | code |
129033753/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
DDoS_PortScan = pd.read_csv('/kaggle/input/ddos-portscan/DDoS_PortScan.csv')
Label = DDoS_PortScan.loc[:, ' Label']
DDoS_PortScan.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
Label.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
DDoS_PortScan_Data = np.array(DDoS_PortScan)
Label_Data = np.array(Label)
DDoS_PortScan_Data = DDoS_PortScan_Data / 347000000
DDoS_PortScan_Data = DDoS_PortScan_Data.reshape(DDoS_PortScan_Data.shape[0], 79, 1)
print(DDoS_PortScan_Data.shape)
print(DDoS_PortScan_Data.dtype) | code |
129033753/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn import preprocessing
import tensorflow as tf
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
import numpy as np
import pandas as pd | code |
129033753/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
DDoS_PortScan = pd.read_csv('/kaggle/input/ddos-portscan/DDoS_PortScan.csv')
Label = DDoS_PortScan.loc[:, ' Label']
DDoS_PortScan.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
Label.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
DDoS_PortScan_Data = np.array(DDoS_PortScan)
Label_Data = np.array(Label)
DDoS_PortScan_Data | code |
129033753/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
DDoS_PortScan = pd.read_csv('/kaggle/input/ddos-portscan/DDoS_PortScan.csv')
Label = DDoS_PortScan.loc[:, ' Label']
DDoS_PortScan.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
Label.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
DDoS_PortScan_Data = np.array(DDoS_PortScan)
Label_Data = np.array(Label)
Label_Data | code |
129033753/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
DDoS_PortScan = pd.read_csv('/kaggle/input/ddos-portscan/DDoS_PortScan.csv')
Label = DDoS_PortScan.loc[:, ' Label']
DDoS_PortScan.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
Label.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
DDoS_PortScan | code |
129033753/cell_10 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
DDoS_PortScan = pd.read_csv('/kaggle/input/ddos-portscan/DDoS_PortScan.csv')
Label = DDoS_PortScan.loc[:, ' Label']
DDoS_PortScan.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
Label.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
DDoS_PortScan_Data = np.array(DDoS_PortScan)
Label_Data = np.array(Label)
DDoS_PortScan_Data = DDoS_PortScan_Data / 347000000
Max = np.max(DDoS_PortScan_Data)
Max | code |
129033753/cell_12 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow as tf
DDoS_PortScan = pd.read_csv('/kaggle/input/ddos-portscan/DDoS_PortScan.csv')
Label = DDoS_PortScan.loc[:, ' Label']
DDoS_PortScan.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
Label.replace({'DDoS': 0, 'PortScan': 1}, inplace=True)
DDoS_PortScan_Data = np.array(DDoS_PortScan)
Label_Data = np.array(Label)
DDoS_PortScan_Data = DDoS_PortScan_Data / 347000000
DDoS_PortScan_Data = DDoS_PortScan_Data.reshape(DDoS_PortScan_Data.shape[0], 79, 1)
DDoS_PortScan_Data = tf.expand_dims(DDoS_PortScan_Data, -1)
print(DDoS_PortScan_Data.shape) | code |
74067689/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
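# (note) X_train/X_test/y_train/y_test come from earlier notebook cells not
# captured in this row; see the reconstruction in cell_24 below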
classifier = RandomForestClassifier()
classifier.fit(X_train, y_train)
print('Training Accuracy: ', classifier.score(X_train, y_train))
print('Testing Accuracy: ', classifier.score(X_test, y_test)) | code |
74067689/cell_25 | [
"text_plain_output_1.png"
] | y_test[3] | code |
74067689/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.head() | code |
74067689/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier()
classifier.fit(X_train, y_train) | code |
74067689/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | print(f'X_train: {X_train.shape}')
print(f'X_test: {X_test.shape}') | code |
74067689/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.quality.hist() | code |
74067689/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
fig = plt.figure(figsize=(5,5))
sns.barplot(x='quality', y='volatile acidity', data=df)
figure = plt.figure(figsize=(10, 10))
correlation = df.corr()
sns.heatmap(correlation, annot=True)
df.quality.value_counts() | code |
74067689/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
fig = plt.figure(figsize=(5,5))
sns.barplot(x='quality', y='volatile acidity', data=df)
figure = plt.figure(figsize=(10, 10))
correlation = df.corr()
sns.heatmap(correlation, annot=True)
df.quality.value_counts()
X = df.drop(columns='quality')
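# binarize the target: wines with quality >= 7 are labeled good (1), the rest bad (0)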
y = df['quality'].apply(lambda x: 1 if x > 6 else 0).values
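# (assumed) the train/test split cell was not captured in this extract; a
# minimal reconstruction so the rest of the cell runs:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)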
classifier = RandomForestClassifier()
classifier.fit(X_train, y_train)
def predict(X, model):
X = np.array(X).reshape(1, -1)
pred = model.predict(X)
if pred == 1:
return 'Good Wine Quality'
else:
return 'Bad Wine Quality'
predict(X_test.values[3], classifier) | code |
74067689/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
fig = plt.figure(figsize=(5,5))
sns.barplot(x='quality', y='volatile acidity', data=df)
figure = plt.figure(figsize=(10, 10))
correlation = df.corr()
sns.heatmap(correlation, annot=True) | code |
74067689/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
fig = plt.figure(figsize=(5, 5))
sns.barplot(x='quality', y='volatile acidity', data=df) | code |
74067689/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
fig = plt.figure(figsize=(5,5))
sns.barplot(x='quality', y='volatile acidity', data=df)
sns.barplot(x='quality', y='citric acid', data=df) | code |
74067689/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.describe() | code |
32068663/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
import itertools
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
def impute(df):
df['Province_State'] = df['Province_State'].mask(df['Province_State'].isnull(), df['Country_Region'])
return df
train_imputed = impute(train)
train_cc = train_imputed.drop(['Id', 'Fatalities'], axis=1)
train_cc['Date'] = pd.to_datetime(train_cc['Date'])
train_cc.set_index(['Country_Region', 'Date'], inplace=True)
train_f = train_imputed.drop(['Id', 'ConfirmedCases'], axis=1)
train_f['Date'] = pd.to_datetime(train_f['Date'])
train_f.set_index(['Country_Region', 'Date'], inplace=True)
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 7) for x in list(itertools.product(p, d, q))]
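# grid-search SARIMAX orders (p, d, q) x seasonal (P, D, Q, 7), keeping the
# combination with the lowest AIC; period 7 assumes weekly seasonality in
# the daily series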
def param(y):
liste = dict()
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(y, order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
key = str(param) + ',' + str(param_seasonal)
liste[key] = results.aic
except:
continue
key_min = min(liste, key=liste.get)
k = key_min.replace('(', '').replace(')', '').split(',')
i = [int(x) for x in k]
par = tuple(i[:3])
par_seas = tuple(i[3:])
return (par, par_seas)
liste_pays = train_cc.index.get_level_values(0).unique()
rmsle_cc_pays = dict()
mle_cc_retval = dict()
list_cc_results = dict()
predictions_cc = dict()
list_cc_y = dict()
for elmt in liste_pays:
df = train_cc.loc[elmt]
if len(df['Province_State'].unique()) == 1:
y = df['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elmt] = y
par, par_seas = param(y)
mod = sm.tsa.statespace.SARIMAX(y, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elmt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elmt] = pred
y_forecasted = pred.predicted_mean
y_truth = y.copy()
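        # RMSLE = sqrt(mean((log(forecast + 1) - log(actual + 1))^2))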
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elmt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elmt] = results.mle_retvals
else:
for elt in df['Province_State'].unique():
d = df.loc[df['Province_State'] == elt]['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elt] = d
par, par_seas = param(d)
mod = sm.tsa.statespace.SARIMAX(d, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elt] = pred
y_forecasted = pred.predicted_mean
y_truth = d.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elt] = results.mle_retvals
# plot the one-step-ahead forecast for Côte d'Ivoire (RCI)
pred_ci = predictions_cc["Cote d'Ivoire Cote d'Ivoire"].conf_int()
ax = train_cc.loc[("Cote d'Ivoire")].plot(label='observed')
predictions_cc["Cote d'Ivoire Cote d'Ivoire"].predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(14, 7))
ax.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.2)
ax.set_xlabel('Date')
ax.set_ylabel('Confirmed Cases')
plt.legend()
plt.show()
print(list_cc_results["Cote d'Ivoire Cote d'Ivoire"].summary().tables[1])
list_cc_results["Cote d'Ivoire Cote d'Ivoire"].plot_diagnostics(figsize=(16, 8))
plt.show() | code |
32068663/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import itertools
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
def impute(df):
df['Province_State'] = df['Province_State'].mask(df['Province_State'].isnull(), df['Country_Region'])
return df
train_imputed = impute(train)
train_cc = train_imputed.drop(['Id', 'Fatalities'], axis=1)
train_cc['Date'] = pd.to_datetime(train_cc['Date'])
train_cc.set_index(['Country_Region', 'Date'], inplace=True)
train_f = train_imputed.drop(['Id', 'ConfirmedCases'], axis=1)
train_f['Date'] = pd.to_datetime(train_f['Date'])
train_f.set_index(['Country_Region', 'Date'], inplace=True)
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 7) for x in list(itertools.product(p, d, q))]
def param(y):
liste = dict()
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(y, order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
key = str(param) + ',' + str(param_seasonal)
liste[key] = results.aic
except:
continue
key_min = min(liste, key=liste.get)
k = key_min.replace('(', '').replace(')', '').split(',')
i = [int(x) for x in k]
par = tuple(i[:3])
par_seas = tuple(i[3:])
return (par, par_seas)
liste_pays = train_cc.index.get_level_values(0).unique()
rmsle_cc_pays = dict()
mle_cc_retval = dict()
list_cc_results = dict()
predictions_cc = dict()
list_cc_y = dict()
for elmt in liste_pays:
df = train_cc.loc[elmt]
if len(df['Province_State'].unique()) == 1:
y = df['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elmt] = y
par, par_seas = param(y)
mod = sm.tsa.statespace.SARIMAX(y, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elmt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elmt] = pred
y_forecasted = pred.predicted_mean
y_truth = y.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elmt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elmt] = results.mle_retvals
else:
for elt in df['Province_State'].unique():
d = df.loc[df['Province_State'] == elt]['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elt] = d
par, par_seas = param(d)
mod = sm.tsa.statespace.SARIMAX(d, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elt] = pred
y_forecasted = pred.predicted_mean
y_truth = d.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elt] = results.mle_retvals
rmsle_f_pays = dict()
mle_f_retval = dict()
list_f_results = dict()
predictions_f = dict()
list_f_y = dict()
for elmt in liste_pays:
df = train_f.loc[elmt]
if len(df['Province_State'].unique()) == 1:
y = df['Fatalities'].resample('D').mean()
        list_f_y[elmt + ' ' + elmt] = y
        par, par_seas = param(y)
        mod = sm.tsa.statespace.SARIMAX(y, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
        results = mod.fit()
        list_f_results[elmt + ' ' + elmt] = results
        pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
        predictions_f[elmt + ' ' + elmt] = pred
        y_forecasted = pred.predicted_mean
        y_truth = y.copy()
        rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
        rmsle_f_pays[elmt + ' ' + elmt] = round(rmsle, 2)
        mle_f_retval[elmt + ' ' + elmt] = results.mle_retvals
else:
for elt in df['Province_State'].unique():
d = df.loc[df['Province_State'] == elt]['Fatalities'].resample('D').mean()
list_f_y[elmt + ' ' + elt] = d
par, par_seas = param(d)
mod = sm.tsa.statespace.SARIMAX(d, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_f_results[elmt + ' ' + elt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_f[elmt + ' ' + elt] = pred
y_forecasted = pred.predicted_mean
y_truth = d.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_f_pays[elmt + ' ' + elt] = round(rmsle, 2)
mle_f_retval[elmt + ' ' + elt] = results.mle_retvals | code |
32068663/cell_4 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
def impute(df):
df['Province_State'] = df['Province_State'].mask(df['Province_State'].isnull(), df['Country_Region'])
return df
train_imputed = impute(train)
train_imputed.info() | code |
32068663/cell_11 | [
"text_plain_output_1.png"
] | import itertools
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
def impute(df):
df['Province_State'] = df['Province_State'].mask(df['Province_State'].isnull(), df['Country_Region'])
return df
train_imputed = impute(train)
train_cc = train_imputed.drop(['Id', 'Fatalities'], axis=1)
train_cc['Date'] = pd.to_datetime(train_cc['Date'])
train_cc.set_index(['Country_Region', 'Date'], inplace=True)
train_f = train_imputed.drop(['Id', 'ConfirmedCases'], axis=1)
train_f['Date'] = pd.to_datetime(train_f['Date'])
train_f.set_index(['Country_Region', 'Date'], inplace=True)
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 7) for x in list(itertools.product(p, d, q))]
def param(y):
liste = dict()
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(y, order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
key = str(param) + ',' + str(param_seasonal)
liste[key] = results.aic
except:
continue
key_min = min(liste, key=liste.get)
k = key_min.replace('(', '').replace(')', '').split(',')
i = [int(x) for x in k]
par = tuple(i[:3])
par_seas = tuple(i[3:])
return (par, par_seas)
liste_pays = train_cc.index.get_level_values(0).unique()
rmsle_cc_pays = dict()
mle_cc_retval = dict()
list_cc_results = dict()
predictions_cc = dict()
list_cc_y = dict()
for elmt in liste_pays:
df = train_cc.loc[elmt]
if len(df['Province_State'].unique()) == 1:
y = df['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elmt] = y
par, par_seas = param(y)
mod = sm.tsa.statespace.SARIMAX(y, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elmt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elmt] = pred
y_forecasted = pred.predicted_mean
y_truth = y.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elmt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elmt] = results.mle_retvals
else:
for elt in df['Province_State'].unique():
d = df.loc[df['Province_State'] == elt]['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elt] = d
par, par_seas = param(d)
mod = sm.tsa.statespace.SARIMAX(d, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elt] = pred
y_forecasted = pred.predicted_mean
y_truth = d.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elt] = results.mle_retvals
rmsle_f_pays = dict()
mle_f_retval = dict()
list_f_results = dict()
predictions_f = dict()
list_f_y = dict()
for elmt in liste_pays:
df = train_f.loc[elmt]
if len(df['Province_State'].unique()) == 1:
y = df['Fatalities'].resample('D').mean()
        list_f_y[elmt + ' ' + elmt] = y
        par, par_seas = param(y)
        mod = sm.tsa.statespace.SARIMAX(y, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
        results = mod.fit()
        list_f_results[elmt + ' ' + elmt] = results
        pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
        predictions_f[elmt + ' ' + elmt] = pred
        y_forecasted = pred.predicted_mean
        y_truth = y.copy()
        rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
        rmsle_f_pays[elmt + ' ' + elmt] = round(rmsle, 2)
        mle_f_retval[elmt + ' ' + elmt] = results.mle_retvals
else:
for elt in df['Province_State'].unique():
d = df.loc[df['Province_State'] == elt]['Fatalities'].resample('D').mean()
list_f_y[elmt + ' ' + elt] = d
par, par_seas = param(d)
mod = sm.tsa.statespace.SARIMAX(d, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_f_results[elmt + ' ' + elt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_f[elmt + ' ' + elt] = pred
y_forecasted = pred.predicted_mean
y_truth = d.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_f_pays[elmt + ' ' + elt] = round(rmsle, 2)
mle_f_retval[elmt + ' ' + elt] = results.mle_retvals
error = np.array(list(rmsle_cc_pays.values())).mean()
error
error = np.array(list(rmsle_f_pays.values())).mean()
error | code |
32068663/cell_1 | [
"text_plain_output_1.png"
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32068663/cell_8 | [
"text_html_output_1.png"
] | import itertools
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
def impute(df):
df['Province_State'] = df['Province_State'].mask(df['Province_State'].isnull(), df['Country_Region'])
return df
train_imputed = impute(train)
train_cc = train_imputed.drop(['Id', 'Fatalities'], axis=1)
train_cc['Date'] = pd.to_datetime(train_cc['Date'])
train_cc.set_index(['Country_Region', 'Date'], inplace=True)
train_f = train_imputed.drop(['Id', 'ConfirmedCases'], axis=1)
train_f['Date'] = pd.to_datetime(train_f['Date'])
train_f.set_index(['Country_Region', 'Date'], inplace=True)
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 7) for x in list(itertools.product(p, d, q))]
def param(y):
liste = dict()
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(y, order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
key = str(param) + ',' + str(param_seasonal)
liste[key] = results.aic
except:
continue
key_min = min(liste, key=liste.get)
k = key_min.replace('(', '').replace(')', '').split(',')
i = [int(x) for x in k]
par = tuple(i[:3])
par_seas = tuple(i[3:])
return (par, par_seas)
liste_pays = train_cc.index.get_level_values(0).unique()
rmsle_cc_pays = dict()
mle_cc_retval = dict()
list_cc_results = dict()
predictions_cc = dict()
list_cc_y = dict()
for elmt in liste_pays:
df = train_cc.loc[elmt]
if len(df['Province_State'].unique()) == 1:
y = df['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elmt] = y
par, par_seas = param(y)
mod = sm.tsa.statespace.SARIMAX(y, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elmt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elmt] = pred
y_forecasted = pred.predicted_mean
y_truth = y.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elmt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elmt] = results.mle_retvals
else:
for elt in df['Province_State'].unique():
d = df.loc[df['Province_State'] == elt]['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elt] = d
par, par_seas = param(d)
mod = sm.tsa.statespace.SARIMAX(d, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elt] = pred
y_forecasted = pred.predicted_mean
y_truth = d.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elt] = results.mle_retvals | code |
32068663/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train.info() | code |
32068663/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
def impute(df):
df['Province_State'] = df['Province_State'].mask(df['Province_State'].isnull(), df['Country_Region'])
return df
train_imputed = impute(train)
holdout = impute(test)
holdout.head() | code |
32068663/cell_10 | [
"text_plain_output_1.png"
] | import itertools
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
def impute(df):
df['Province_State'] = df['Province_State'].mask(df['Province_State'].isnull(), df['Country_Region'])
return df
train_imputed = impute(train)
train_cc = train_imputed.drop(['Id', 'Fatalities'], axis=1)
train_cc['Date'] = pd.to_datetime(train_cc['Date'])
train_cc.set_index(['Country_Region', 'Date'], inplace=True)
train_f = train_imputed.drop(['Id', 'ConfirmedCases'], axis=1)
train_f['Date'] = pd.to_datetime(train_f['Date'])
train_f.set_index(['Country_Region', 'Date'], inplace=True)
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 7) for x in list(itertools.product(p, d, q))]
def param(y):
liste = dict()
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(y, order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
key = str(param) + ',' + str(param_seasonal)
liste[key] = results.aic
except:
continue
key_min = min(liste, key=liste.get)
k = key_min.replace('(', '').replace(')', '').split(',')
i = [int(x) for x in k]
par = tuple(i[:3])
par_seas = tuple(i[3:])
return (par, par_seas)
liste_pays = train_cc.index.get_level_values(0).unique()
rmsle_cc_pays = dict()
mle_cc_retval = dict()
list_cc_results = dict()
predictions_cc = dict()
list_cc_y = dict()
for elmt in liste_pays:
df = train_cc.loc[elmt]
if len(df['Province_State'].unique()) == 1:
y = df['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elmt] = y
par, par_seas = param(y)
mod = sm.tsa.statespace.SARIMAX(y, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elmt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elmt] = pred
y_forecasted = pred.predicted_mean
y_truth = y.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elmt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elmt] = results.mle_retvals
else:
for elt in df['Province_State'].unique():
d = df.loc[df['Province_State'] == elt]['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elt] = d
par, par_seas = param(d)
mod = sm.tsa.statespace.SARIMAX(d, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elt] = pred
y_forecasted = pred.predicted_mean
y_truth = d.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elt] = results.mle_retvals
rmsle_f_pays = dict()
mle_f_retval = dict()
list_f_results = dict()
predictions_f = dict()
list_f_y = dict()
for elmt in liste_pays:
df = train_f.loc[elmt]
if len(df['Province_State'].unique()) == 1:
y = df['Fatalities'].resample('D').mean()
        list_f_y[elmt + ' ' + elmt] = y
        par, par_seas = param(y)
        mod = sm.tsa.statespace.SARIMAX(y, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
        results = mod.fit()
        list_f_results[elmt + ' ' + elmt] = results
        pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
        predictions_f[elmt + ' ' + elmt] = pred
        y_forecasted = pred.predicted_mean
        y_truth = y.copy()
        rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
        rmsle_f_pays[elmt + ' ' + elmt] = round(rmsle, 2)
        mle_f_retval[elmt + ' ' + elmt] = results.mle_retvals
else:
for elt in df['Province_State'].unique():
d = df.loc[df['Province_State'] == elt]['Fatalities'].resample('D').mean()
list_f_y[elmt + ' ' + elt] = d
par, par_seas = param(d)
mod = sm.tsa.statespace.SARIMAX(d, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_f_results[elmt + ' ' + elt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_f[elmt + ' ' + elt] = pred
y_forecasted = pred.predicted_mean
y_truth = d.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_f_pays[elmt + ' ' + elt] = round(rmsle, 2)
mle_f_retval[elmt + ' ' + elt] = results.mle_retvals
error = np.array(list(rmsle_cc_pays.values())).mean()
error | code |
32068663/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
import itertools
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
def impute(df):
df['Province_State'] = df['Province_State'].mask(df['Province_State'].isnull(), df['Country_Region'])
return df
train_imputed = impute(train)
train_cc = train_imputed.drop(['Id', 'Fatalities'], axis=1)
train_cc['Date'] = pd.to_datetime(train_cc['Date'])
train_cc.set_index(['Country_Region', 'Date'], inplace=True)
train_f = train_imputed.drop(['Id', 'ConfirmedCases'], axis=1)
train_f['Date'] = pd.to_datetime(train_f['Date'])
train_f.set_index(['Country_Region', 'Date'], inplace=True)
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 7) for x in list(itertools.product(p, d, q))]
def param(y):
liste = dict()
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(y, order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
key = str(param) + ',' + str(param_seasonal)
liste[key] = results.aic
except:
continue
key_min = min(liste, key=liste.get)
k = key_min.replace('(', '').replace(')', '').split(',')
i = [int(x) for x in k]
par = tuple(i[:3])
par_seas = tuple(i[3:])
return (par, par_seas)
liste_pays = train_cc.index.get_level_values(0).unique()
rmsle_cc_pays = dict()
mle_cc_retval = dict()
list_cc_results = dict()
predictions_cc = dict()
list_cc_y = dict()
for elmt in liste_pays:
df = train_cc.loc[elmt]
if len(df['Province_State'].unique()) == 1:
y = df['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elmt] = y
par, par_seas = param(y)
mod = sm.tsa.statespace.SARIMAX(y, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elmt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elmt] = pred
y_forecasted = pred.predicted_mean
y_truth = y.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elmt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elmt] = results.mle_retvals
else:
for elt in df['Province_State'].unique():
d = df.loc[df['Province_State'] == elt]['ConfirmedCases'].resample('D').mean()
list_cc_y[elmt + ' ' + elt] = d
par, par_seas = param(d)
mod = sm.tsa.statespace.SARIMAX(d, order=par, seasonal_order=par_seas, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
list_cc_results[elmt + ' ' + elt] = results
pred = results.get_prediction(start=pd.to_datetime('2020-01-22'), dynamic=False)
predictions_cc[elmt + ' ' + elt] = pred
y_forecasted = pred.predicted_mean
y_truth = d.copy()
rmsle = np.sqrt(np.square(np.log(y_forecasted + 1) - np.log(y_truth + 1)).mean())
rmsle_cc_pays[elmt + ' ' + elt] = round(rmsle, 2)
mle_cc_retval[elmt + ' ' + elt] = results.mle_retvals
pred_ci = predictions_cc["Cote d'Ivoire Cote d'Ivoire"].conf_int()
ax = train_cc.loc["Cote d'Ivoire"].plot(label='observed')
predictions_cc["Cote d'Ivoire Cote d'Ivoire"].predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=0.7, figsize=(14, 7))
ax.fill_between(pred_ci.index, pred_ci.iloc[:, 0], pred_ci.iloc[:, 1], color='k', alpha=0.2)
ax.set_xlabel('Date')
ax.set_ylabel('Confirmed Cases')
plt.legend()
plt.show() | code |
50218788/cell_1 | [
"text_plain_output_1.png"
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105194319/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/market-sadakatcsv/Market_Sadakat.csv')
x = data.copy()
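# first pass: k-means with k=2 on the raw (unscaled) features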
kmeans = KMeans(2)
kmeans.fit(x)
clusters = x.copy()
clusters['kume_tahmin'] = kmeans.fit_predict(x)
plt.scatter(clusters['tatmin'], clusters['sadakat'], c=clusters['kume_tahmin'], cmap='rainbow')
plt.xlabel('Satisfaction')
plt.ylabel('Loyalty')
105194319/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/market-sadakatcsv/Market_Sadakat.csv')
data.head() | code |
105194319/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import preprocessing
from sklearn.cluster import KMeans
import pandas as pd
data = pd.read_csv('../input/market-sadakatcsv/Market_Sadakat.csv')
x = data.copy()
kmeans = KMeans(2)
kmeans.fit(x)
clusters = x.copy()
clusters['kume_tahmin'] = kmeans.fit_predict(x)
from sklearn import preprocessing
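# standardize both features (zero mean, unit variance) so neither dominates
# the Euclidean distances used by k-means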
x_scaled = preprocessing.scale(x)
x_scaled
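# elbow method: fit k-means for k = 1..9 and record the inertia
# (within-cluster sum of squared distances) at each k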
a = []
for i in range(1, 10):
kmeans = KMeans(i)
kmeans.fit(x_scaled)
a.append(kmeans.inertia_)
a | code |
105194319/cell_7 | [
"text_html_output_1.png"
] | from sklearn.cluster import KMeans
import pandas as pd
data = pd.read_csv('../input/market-sadakatcsv/Market_Sadakat.csv')
x = data.copy()
kmeans = KMeans(2)
kmeans.fit(x) | code |
105194319/cell_15 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/market-sadakatcsv/Market_Sadakat.csv')
x = data.copy()
kmeans = KMeans(2)
kmeans.fit(x)
clusters = x.copy()
clusters['kume_tahmin'] = kmeans.fit_predict(x)
from sklearn import preprocessing
x_scaled = preprocessing.scale(x)
x_scaled
a = []
for i in range(1, 10):
kmeans = KMeans(i)
kmeans.fit(x_scaled)
a.append(kmeans.inertia_)
a
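# k = 4, presumably chosen from the elbow in the inertia curve above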
kmeans_new = KMeans(4)
kmeans_new.fit(x_scaled)
clusters_new = x.copy()
clusters_new['kume_tahmin'] = kmeans_new.fit_predict(x_scaled)
plt.scatter(clusters_new['tatmin'], clusters_new['sadakat'], c=clusters_new['kume_tahmin'], cmap='rainbow')
plt.xlabel('Satisfaction')
plt.ylabel('Loyalty')
105194319/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import preprocessing
from sklearn.cluster import KMeans
import pandas as pd
data = pd.read_csv('../input/market-sadakatcsv/Market_Sadakat.csv')
x = data.copy()
kmeans = KMeans(2)
kmeans.fit(x)
clusters = x.copy()
clusters['kume_tahmin'] = kmeans.fit_predict(x)
from sklearn import preprocessing
x_scaled = preprocessing.scale(x)
x_scaled
kmeans_new = KMeans(4)
kmeans_new.fit(x_scaled)
clusters_new = x.copy()
clusters_new['kume_tahmin'] = kmeans_new.fit_predict(x_scaled)
clusters_new | code |
105194319/cell_10 | [
"text_html_output_1.png"
] | from sklearn import preprocessing
from sklearn.cluster import KMeans
import pandas as pd
data = pd.read_csv('../input/market-sadakatcsv/Market_Sadakat.csv')
x = data.copy()
kmeans = KMeans(2)
kmeans.fit(x)
clusters = x.copy()
clusters['kume_tahmin'] = kmeans.fit_predict(x)
from sklearn import preprocessing
x_scaled = preprocessing.scale(x)
x_scaled | code |
105194319/cell_12 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/market-sadakatcsv/Market_Sadakat.csv')
x = data.copy()
kmeans = KMeans(2)
kmeans.fit(x)
clusters = x.copy()
clusters['kume_tahmin'] = kmeans.fit_predict(x)
from sklearn import preprocessing
x_scaled = preprocessing.scale(x)
x_scaled
a = []
for i in range(1, 10):
kmeans = KMeans(i)
kmeans.fit(x_scaled)
a.append(kmeans.inertia_)
a
plt.plot(range(1, 10), a)
plt.xlabel('Number of clusters')
plt.ylabel('Within-cluster sum of squares')
105194319/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/market-sadakatcsv/Market_Sadakat.csv')
plt.scatter(data['tatmin'], data['sadakat'])
plt.xlabel('Satisfaction')
plt.ylabel('Loyalty')
72092655/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
objectColumns = list(data.dtypes[data.dtypes == object].index)
numericColumns = list(data.dtypes[data.dtypes != object].index)
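# derive intensity features: energy use and GHG emissions per unit of gross floor area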
data['Energy/Surface'] = data['SiteEnergyUse(kBtu)'] / data['PropertyGFATotal']
data['GHG/Surface'] = data['TotalGHGEmissions'] / data['PropertyGFATotal']
columns_to_boxplot = ['SiteEnergyUse(kBtu)', 'Energy/Surface', 'TotalGHGEmissions', 'GHG/Surface']
for column in columns_to_boxplot:
n_bins = 12
subset = data[column]
subset.plot.box(vert=False)
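# count outliers per column with Tukey's rule: values outside
# [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are flagged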
for column in columns_to_boxplot:
i = 0
quartile_1 = data[column].quantile(0.25)
quartile_3 = data[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = data[(data[column] > max_value) | (data[column] < min_value)]
i += outliers.shape[0]
# analysis of the continuous quantitative variables, per PrimaryPropertyType group
groups = data['PrimaryPropertyType'].unique()
for column in columns_to_boxplot:
print('column: {}'.format(column))
n_bins = 12
values=[]
assessment = []
subset = data[column]
    # var = empirical variance, std = empirical standard deviation
    print('all data: \n\tmean: {}\n\tmed: {}\n\tmod: {}\n\tvar: {}\n\tstd: {}\n\tskew: {}\n\tkur: {}'
    .format(subset.mean(),subset.median(), subset.mode()[0],subset.var(ddof=0),subset.std(ddof=0),subset.skew(),subset.kurtosis()))
    plt.hist(subset,n_bins,label='all data', alpha=0.5)
plt.show()
subset.plot.box(vert=False)
plt.title(column)
plt.show()
fig = plt.figure(figsize=(36,18))
for group in groups:
        subset = data[data.PrimaryPropertyType == group][column] # build the subsample for this group
values.append(subset)
assessment.append([group,subset.mean(),subset.median(), subset.mode()[0],subset.var(ddof=0),subset.std(ddof=0),subset.skew(),subset.kurtosis()])
plt.hist(subset,n_bins,label=group, alpha=0.3)
plt.legend(loc='best')
plt.title(column)
    plt.show() # show the histogram
fig = plt.figure(figsize=(36,18))
plt.hist(values, n_bins, histtype='bar', label=groups)
plt.legend(loc='best')
plt.title(column)
    plt.show() # show the histogram
df_assessment = pd.DataFrame(assessment,columns=['group', 'moy', 'med', 'mod','var','std','skew','kur'])
display(df_assessment)
# https://towardsdatascience.com/create-and-customize-boxplots-with-pythons-matplotlib-to-get-lots-of-insights-from-your-data-d561c9883643
fig, ax = plt.subplots(figsize=(36,10))
ax.boxplot(values, labels=groups)
plt.show()
print('='*120)
data_refined = data.copy()
columns_outliers_to_delete = ['Energy/Surface', 'GHG/Surface']
for column in columns_outliers_to_delete:
i = 0
for group in groups:
j = 0
subset = data_refined[data_refined.PrimaryPropertyType == group]
quartile_1 = subset[column].quantile(0.25)
quartile_3 = subset[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = subset[(subset[column] > max_value) | (subset[column] < min_value)]
        j += outliers.shape[0]
        # drop this group's outliers (moved inside the group loop: originally
        # only the last group's outliers were dropped)
        data_refined.drop(outliers.index.values, inplace=True)
    i += j
data_refined.to_csv('data_without_outliers.csv', index=False)
data_refine = data.copy()
columns_outliers_to_delete = ['SiteEnergyUse(kBtu)', 'TotalGHGEmissions']
for column in columns_outliers_to_delete:
i = 0
quartile_1 = data_refine[column].quantile(0.25)
quartile_3 = data_refine[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = data_refine[(data_refine[column] > max_value) | (data_refine[column] < min_value)]
i += outliers.shape[0]
data_refine.drop(outliers.index.values, inplace=True)
print(data.shape)
print(data_refine.shape) | code |
72092655/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
objectColumns = list(data.dtypes[data.dtypes == object].index)
numericColumns = list(data.dtypes[data.dtypes != object].index)
data['Energy/Surface'] = data['SiteEnergyUse(kBtu)'] / data['PropertyGFATotal']
data['GHG/Surface'] = data['TotalGHGEmissions'] / data['PropertyGFATotal']
columns_to_boxplot = ['SiteEnergyUse(kBtu)', 'Energy/Surface', 'TotalGHGEmissions', 'GHG/Surface']
for column in columns_to_boxplot:
n_bins = 12
subset = data[column]
subset.plot.box(vert=False)
groups = data['PrimaryPropertyType'].unique()
for column in columns_to_boxplot:
print('column: {}'.format(column))
n_bins = 12
values = []
assessment = []
subset = data[column]
    print('all data: \n\tmean: {}\n\tmed: {}\n\tmod: {}\n\tvar: {}\n\tstd: {}\n\tskew: {}\n\tkur: {}'.format(subset.mean(), subset.median(), subset.mode()[0], subset.var(ddof=0), subset.std(ddof=0), subset.skew(), subset.kurtosis()))
    plt.hist(subset, n_bins, label='all data', alpha=0.5)
plt.show()
subset.plot.box(vert=False)
plt.title(column)
plt.show()
fig = plt.figure(figsize=(36, 18))
for group in groups:
subset = data[data.PrimaryPropertyType == group][column]
values.append(subset)
assessment.append([group, subset.mean(), subset.median(), subset.mode()[0], subset.var(ddof=0), subset.std(ddof=0), subset.skew(), subset.kurtosis()])
plt.hist(subset, n_bins, label=group, alpha=0.3)
plt.legend(loc='best')
plt.title(column)
plt.show()
fig = plt.figure(figsize=(36, 18))
plt.hist(values, n_bins, histtype='bar', label=groups)
plt.legend(loc='best')
plt.title(column)
plt.show()
df_assessment = pd.DataFrame(assessment, columns=['group', 'moy', 'med', 'mod', 'var', 'std', 'skew', 'kur'])
display(df_assessment)
fig, ax = plt.subplots(figsize=(36, 10))
ax.boxplot(values, labels=groups)
plt.show()
print('=' * 120) | code |
72092655/cell_4 | [
"image_output_11.png",
"text_plain_output_5.png",
"image_output_17.png",
"text_html_output_4.png",
"image_output_14.png",
"text_plain_output_4.png",
"text_html_output_2.png",
"image_output_13.png",
"image_output_5.png",
"image_output_18.png",
"image_output_7.png",
"image_output_20.png",
"text_plain_output_3.png",
"image_output_4.png",
"image_output_8.png",
"image_output_16.png",
"text_html_output_1.png",
"image_output_6.png",
"image_output_12.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_15.png",
"text_html_output_3.png",
"image_output_9.png",
"image_output_19.png"
] | import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
objectColumns = list(data.dtypes[data.dtypes == object].index)
numericColumns = list(data.dtypes[data.dtypes != object].index)
print(objectColumns)
print(numericColumns) | code |
72092655/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
objectColumns = list(data.dtypes[data.dtypes == object].index)
numericColumns = list(data.dtypes[data.dtypes != object].index)
data['Energy/Surface'] = data['SiteEnergyUse(kBtu)'] / data['PropertyGFATotal']
data['GHG/Surface'] = data['TotalGHGEmissions'] / data['PropertyGFATotal']
columns_to_boxplot = ['SiteEnergyUse(kBtu)', 'Energy/Surface', 'TotalGHGEmissions', 'GHG/Surface']
for column in columns_to_boxplot:
print('column: {}'.format(column))
n_bins = 12
subset = data[column]
    print('all data: \n\tmean: {}\n\tmed: {}\n\tmod: {}\n\tvar: {}\n\tstd: {}\n\tskew: {}\n\tkur: {}'.format(subset.mean(), subset.median(), subset.mode()[0], subset.var(ddof=0), subset.std(ddof=0), subset.skew(), subset.kurtosis()))
    plt.hist(subset, n_bins, label='all data', alpha=0.5)
plt.title(column)
plt.legend(loc='best')
plt.show()
subset.plot.box(vert=False)
plt.title(column)
plt.show()
print('=' * 120) | code |
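# Added sketch, not part of the recorded cells: the skew and kurtosis printed
# above show these distributions are strongly right-skewed, so a linear-scale
# histogram packs nearly everything into the first bins. A log1p transform is
# one common way to make the shape readable; `data` and `columns_to_boxplot`
# are assumed from the cell above.
import numpy as np
import matplotlib.pyplot as plt

for column in columns_to_boxplot:
    plt.hist(np.log1p(data[column].clip(lower=0)), bins=12, alpha=0.5)
    plt.title('log1p({})'.format(column))
    plt.show()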
72092655/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
objectColumns = list(data.dtypes[data.dtypes == np.object].index)
numericColumns = list(data.dtypes[data.dtypes != np.object].index)
objectColumns = list(data.dtypes[data.dtypes == np.object].index)
numericColumns = list(data.dtypes[data.dtypes != np.object].index)
data['Energy/Surface'] = data['SiteEnergyUse(kBtu)'] / data['PropertyGFATotal']
data['GHG/Surface'] = data['TotalGHGEmissions'] / data['PropertyGFATotal']
columns_to_boxplot = ['SiteEnergyUse(kBtu)', 'Energy/Surface', 'TotalGHGEmissions', 'GHG/Surface']
for column in columns_to_boxplot:
n_bins = 12
subset = data[column]
subset.plot.box(vert=False)
for column in columns_to_boxplot:
i = 0
quartile_1 = data[column].quantile(0.25)
quartile_3 = data[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = data[(data[column] > max_value) | (data[column] < min_value)]
i += outliers.shape[0]
# analysis of the continuous quantitative variables, per PrimaryPropertyType group
groups = data['PrimaryPropertyType'].unique()
for column in columns_to_boxplot:
print('column: {}'.format(column))
n_bins = 12
values=[]
assessment = []
subset = data[column]
    # var = empirical variance, std = empirical standard deviation
print('all datas: \n\tmean: {}\n\tmed: {}\n\tmod: {}\n\tvar: {}\n\tstd: {}\n\tskew: {}\n\tkur: {}'
.format(subset.mean(),subset.median(), subset.mode()[0],subset.var(ddof=0),subset.std(ddof=0),subset.skew(),subset.kurtosis()))
plt.hist(subset,n_bins,label='all datas', alpha=0.5)
plt.show()
subset.plot.box(vert=False)
plt.title(column)
plt.show()
fig = plt.figure(figsize=(36,18))
for group in groups:
        subset = data[data.PrimaryPropertyType == group][column]  # create the subsample for this group
values.append(subset)
assessment.append([group,subset.mean(),subset.median(), subset.mode()[0],subset.var(ddof=0),subset.std(ddof=0),subset.skew(),subset.kurtosis()])
plt.hist(subset,n_bins,label=group, alpha=0.3)
plt.legend(loc='best')
plt.title(column)
    plt.show()  # display the histogram
fig = plt.figure(figsize=(36,18))
plt.hist(values, n_bins, histtype='bar', label=groups)
plt.legend(loc='best')
plt.title(column)
    plt.show()  # display the histogram
df_assessment = pd.DataFrame(assessment,columns=['group', 'moy', 'med', 'mod','var','std','skew','kur'])
display(df_assessment)
# https://towardsdatascience.com/create-and-customize-boxplots-with-pythons-matplotlib-to-get-lots-of-insights-from-your-data-d561c9883643
fig, ax = plt.subplots(figsize=(36,10))
ax.boxplot(values, labels=groups)
plt.show()
print('='*120)
data_refined = data.copy()
columns_outliers_to_delete = ['Energy/Surface', 'GHG/Surface']
for column in columns_outliers_to_delete:
i = 0
for group in groups:
j = 0
subset = data_refined[data_refined.PrimaryPropertyType == group]
quartile_1 = subset[column].quantile(0.25)
quartile_3 = subset[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = subset[(subset[column] > max_value) | (subset[column] < min_value)]
j += outliers.shape[0]
i += j
data_refined.drop(outliers.index.values, inplace=True)
print(data.shape)
print(data_refined.shape)
data_refined.to_csv('data_without_outliers.csv', index=False) | code |
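# Added sketch, not part of the recorded cells: the removal loop above
# recomputes the quartiles group by group and drops rows as it goes, so the
# second column is filtered on an already-reduced frame. The same per-group
# 1.5*IQR rule can be applied in one pass with groupby().transform; the frame
# and column names are assumed from the cell above.
import pandas as pd

def drop_group_outliers(df, columns, by='PrimaryPropertyType'):
    keep = pd.Series(True, index=df.index)
    for column in columns:
        grouped = df.groupby(by)[column]
        q1 = grouped.transform(lambda s: s.quantile(0.25))
        q3 = grouped.transform(lambda s: s.quantile(0.75))
        iqr = q3 - q1
        keep &= df[column].between(q1 - 1.5 * iqr, q3 + 1.5 * iqr)
    return df[keep]

# drop_group_outliers(data, ['Energy/Surface', 'GHG/Surface']).shape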
72092655/cell_7 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
objectColumns = list(data.dtypes[data.dtypes == np.object].index)
numericColumns = list(data.dtypes[data.dtypes != np.object].index)
objectColumns = list(data.dtypes[data.dtypes == np.object].index)
numericColumns = list(data.dtypes[data.dtypes != np.object].index)
data['Energy/Surface'] = data['SiteEnergyUse(kBtu)'] / data['PropertyGFATotal']
data['GHG/Surface'] = data['TotalGHGEmissions'] / data['PropertyGFATotal']
columns_to_boxplot = ['SiteEnergyUse(kBtu)', 'Energy/Surface', 'TotalGHGEmissions', 'GHG/Surface']
for column in columns_to_boxplot:
n_bins = 12
subset = data[column]
subset.plot.box(vert=False)
for column in columns_to_boxplot:
i = 0
quartile_1 = data[column].quantile(0.25)
quartile_3 = data[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = data[(data[column] > max_value) | (data[column] < min_value)]
i += outliers.shape[0]
    print('columns {}, {} outliers detected'.format(column, i))
72092655/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
objectColumns = list(data.dtypes[data.dtypes == np.object].index)
numericColumns = list(data.dtypes[data.dtypes != np.object].index)
objectColumns = list(data.dtypes[data.dtypes == np.object].index)
numericColumns = list(data.dtypes[data.dtypes != np.object].index)
data['Energy/Surface'] = data['SiteEnergyUse(kBtu)'] / data['PropertyGFATotal']
data['GHG/Surface'] = data['TotalGHGEmissions'] / data['PropertyGFATotal']
columns_to_boxplot = ['SiteEnergyUse(kBtu)', 'Energy/Surface', 'TotalGHGEmissions', 'GHG/Surface']
for column in columns_to_boxplot:
n_bins = 12
subset = data[column]
subset.plot.box(vert=False)
for column in columns_to_boxplot:
i = 0
quartile_1 = data[column].quantile(0.25)
quartile_3 = data[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = data[(data[column] > max_value) | (data[column] < min_value)]
i += outliers.shape[0]
# analysis of the continuous quantitative variables, per PrimaryPropertyType group
groups = data['PrimaryPropertyType'].unique()
for column in columns_to_boxplot:
print('column: {}'.format(column))
n_bins = 12
values=[]
assessment = []
subset = data[column]
    # var = empirical variance, std = empirical standard deviation
print('all datas: \n\tmean: {}\n\tmed: {}\n\tmod: {}\n\tvar: {}\n\tstd: {}\n\tskew: {}\n\tkur: {}'
.format(subset.mean(),subset.median(), subset.mode()[0],subset.var(ddof=0),subset.std(ddof=0),subset.skew(),subset.kurtosis()))
plt.hist(subset,n_bins,label='all datas', alpha=0.5)
plt.show()
subset.plot.box(vert=False)
plt.title(column)
plt.show()
fig = plt.figure(figsize=(36,18))
for group in groups:
        subset = data[data.PrimaryPropertyType == group][column]  # create the subsample for this group
values.append(subset)
assessment.append([group,subset.mean(),subset.median(), subset.mode()[0],subset.var(ddof=0),subset.std(ddof=0),subset.skew(),subset.kurtosis()])
plt.hist(subset,n_bins,label=group, alpha=0.3)
plt.legend(loc='best')
plt.title(column)
    plt.show()  # display the histogram
fig = plt.figure(figsize=(36,18))
plt.hist(values, n_bins, histtype='bar', label=groups)
plt.legend(loc='best')
plt.title(column)
    plt.show()  # display the histogram
df_assessment = pd.DataFrame(assessment,columns=['group', 'moy', 'med', 'mod','var','std','skew','kur'])
display(df_assessment)
# https://towardsdatascience.com/create-and-customize-boxplots-with-pythons-matplotlib-to-get-lots-of-insights-from-your-data-d561c9883643
fig, ax = plt.subplots(figsize=(36,10))
ax.boxplot(values, labels=groups)
plt.show()
print('='*120)
data_refined = data.copy()
columns_outliers_to_delete = ['Energy/Surface', 'GHG/Surface']
for column in columns_outliers_to_delete:
    print('column {}'.format(column))
i = 0
for group in groups:
j = 0
subset = data_refined[data_refined.PrimaryPropertyType == group]
quartile_1 = subset[column].quantile(0.25)
quartile_3 = subset[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = subset[(subset[column] > max_value) | (subset[column] < min_value)]
j += outliers.shape[0]
i += j
        print('group {}, {} outliers detected'.format(group, j))
data_refined.drop(outliers.index.values, inplace=True)
    print('Total {}, {} outliers detected'.format(column, i))
print('=' * 120) | code |
72092655/cell_12 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"image_output_5.png",
"image_output_7.png",
"text_plain_output_3.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
objectColumns = list(data.dtypes[data.dtypes == np.object].index)
numericColumns = list(data.dtypes[data.dtypes != np.object].index)
objectColumns = list(data.dtypes[data.dtypes == np.object].index)
numericColumns = list(data.dtypes[data.dtypes != np.object].index)
data['Energy/Surface'] = data['SiteEnergyUse(kBtu)'] / data['PropertyGFATotal']
data['GHG/Surface'] = data['TotalGHGEmissions'] / data['PropertyGFATotal']
columns_to_boxplot = ['SiteEnergyUse(kBtu)', 'Energy/Surface', 'TotalGHGEmissions', 'GHG/Surface']
for column in columns_to_boxplot:
n_bins = 12
subset = data[column]
subset.plot.box(vert=False)
for column in columns_to_boxplot:
i = 0
quartile_1 = data[column].quantile(0.25)
quartile_3 = data[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = data[(data[column] > max_value) | (data[column] < min_value)]
i += outliers.shape[0]
# analysis of the continuous quantitative variables, per PrimaryPropertyType group
groups = data['PrimaryPropertyType'].unique()
for column in columns_to_boxplot:
print('column: {}'.format(column))
n_bins = 12
values=[]
assessment = []
subset = data[column]
    # var = empirical variance, std = empirical standard deviation
print('all datas: \n\tmean: {}\n\tmed: {}\n\tmod: {}\n\tvar: {}\n\tstd: {}\n\tskew: {}\n\tkur: {}'
.format(subset.mean(),subset.median(), subset.mode()[0],subset.var(ddof=0),subset.std(ddof=0),subset.skew(),subset.kurtosis()))
plt.hist(subset,n_bins,label='all datas', alpha=0.5)
plt.show()
subset.plot.box(vert=False)
plt.title(column)
plt.show()
fig = plt.figure(figsize=(36,18))
for group in groups:
        subset = data[data.PrimaryPropertyType == group][column]  # create the subsample for this group
values.append(subset)
assessment.append([group,subset.mean(),subset.median(), subset.mode()[0],subset.var(ddof=0),subset.std(ddof=0),subset.skew(),subset.kurtosis()])
plt.hist(subset,n_bins,label=group, alpha=0.3)
plt.legend(loc='best')
plt.title(column)
    plt.show()  # display the histogram
fig = plt.figure(figsize=(36,18))
plt.hist(values, n_bins, histtype='bar', label=groups)
plt.legend(loc='best')
plt.title(column)
    plt.show()  # display the histogram
df_assessment = pd.DataFrame(assessment,columns=['group', 'moy', 'med', 'mod','var','std','skew','kur'])
display(df_assessment)
# https://towardsdatascience.com/create-and-customize-boxplots-with-pythons-matplotlib-to-get-lots-of-insights-from-your-data-d561c9883643
fig, ax = plt.subplots(figsize=(36,10))
ax.boxplot(values, labels=groups)
plt.show()
print('='*120)
data_refined = data.copy()
columns_outliers_to_delete = ['Energy/Surface', 'GHG/Surface']
for column in columns_outliers_to_delete:
i = 0
for group in groups:
j = 0
subset = data_refined[data_refined.PrimaryPropertyType == group]
quartile_1 = subset[column].quantile(0.25)
quartile_3 = subset[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = subset[(subset[column] > max_value) | (subset[column] < min_value)]
j += outliers.shape[0]
i += j
data_refined.drop(outliers.index.values, inplace=True)
data_refined.to_csv('data_without_outliers.csv', index=False)
data_refine = data.copy()
columns_outliers_to_delete = ['SiteEnergyUse(kBtu)', 'TotalGHGEmissions']
for column in columns_outliers_to_delete:
i = 0
quartile_1 = data_refine[column].quantile(0.25)
quartile_3 = data_refine[column].quantile(0.75)
iqr = quartile_3 - quartile_1
min_value = quartile_1 - 1.5 * iqr
max_value = quartile_3 + 1.5 * iqr
outliers = data_refine[(data_refine[column] > max_value) | (data_refine[column] < min_value)]
i += outliers.shape[0]
    print('columns {}, {} outliers detected'.format(column, i))
data_refine.drop(outliers.index.values, inplace=True) | code |
72092655/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
objectColumns = list(data.dtypes[data.dtypes == np.object].index)
numericColumns = list(data.dtypes[data.dtypes != np.object].index)
objectColumns = list(data.dtypes[data.dtypes == np.object].index)
numericColumns = list(data.dtypes[data.dtypes != np.object].index)
print(data['BuildingType'].unique())
print(data['PrimaryPropertyType'].unique()) | code |
89138938/cell_13 | [
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
image_size = 150
labels = ['PNEUMONIA', 'NORMAL']
def get_data(path):
data = list()
for label in labels:
image_dir = os.path.join(path, label)
class_num = labels.index(label)
for img in os.listdir(image_dir):
try:
img_arr = cv2.imread(os.path.join(image_dir, img), cv2.IMREAD_GRAYSCALE)
resized_array = cv2.resize(img_arr, (image_size, image_size))
data.append([resized_array, class_num])
except Exception as e:
return np.array(data)
train = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/train')
test = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/test')
val = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/val')
train_pneumonia = '/kaggle/input/chest-xray-pneumonia/chest_xray/train/PNEUMONIA'
train_nornal = '/kaggle/input/chest-xray-pneumonia/chest_xray/train/NORMAL'
x = (len(os.listdir(train_pneumonia)), len(os.listdir(train_nornal)))
labels = ['PNEUMONIA', 'NORMAL']
color = ['yellow', 'green']
# visualize
figure = plt.figure(figsize=(12, 12))
img1 = figure.add_subplot(1, 2, 1)
img_plot = plt.imshow(train[3][0], cmap = 'gray')
img1.set_title(labels[train[3][1]])
plt.axis("off")
# Visualize
img2 = figure.add_subplot(1, 2, 2)
img2_plot = plt.imshow(train[4][0], cmap = 'gray')
img2.set_title(labels[train[4][1]])
plt.axis('off')
sample = train[2][0]
rgb = cv2.cvtColor(sample, cv2.COLOR_GRAY2RGB)  # sample is single-channel; BGR2RGB would reject it
gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
thresholds = [100, 120, 140, 180]
for threshold in thresholds:
val, thresh = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
plt.imshow(thresh, cmap='gray')
plt.title(f'Threshold = {threshold}')
plt.show() | code |
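# Added sketch, not part of the recorded cells: the loop above sweeps
# hand-picked global thresholds (100..180). OpenCV can also pick the global
# threshold automatically with Otsu's method, or threshold locally when
# illumination varies across the X-ray. A minimal sketch, assuming `gray` is
# the uint8 grayscale image from the cell above.
import cv2
import matplotlib.pyplot as plt

# Otsu ignores the passed threshold (0) and derives one from the histogram.
otsu_val, otsu_img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# Adaptive: a per-pixel threshold from the mean of an 11x11 neighborhood.
adaptive = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY, 11, 2)

for title, img in [('Otsu (t={:.0f})'.format(otsu_val), otsu_img), ('Adaptive mean', adaptive)]:
    plt.imshow(img, cmap='gray')
    plt.title(title)
    plt.show()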
89138938/cell_9 | [
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
image_size = 150
labels = ['PNEUMONIA', 'NORMAL']
def get_data(path):
data = list()
for label in labels:
image_dir = os.path.join(path, label)
class_num = labels.index(label)
for img in os.listdir(image_dir):
try:
img_arr = cv2.imread(os.path.join(image_dir, img), cv2.IMREAD_GRAYSCALE)
resized_array = cv2.resize(img_arr, (image_size, image_size))
data.append([resized_array, class_num])
except Exception as e:
return np.array(data)
train = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/train')
test = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/test')
val = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/val')
train_pneumonia = '/kaggle/input/chest-xray-pneumonia/chest_xray/train/PNEUMONIA'
train_nornal = '/kaggle/input/chest-xray-pneumonia/chest_xray/train/NORMAL'
x = (len(os.listdir(train_pneumonia)), len(os.listdir(train_nornal)))
labels = ['PNEUMONIA', 'NORMAL']
color = ['yellow', 'green']
figure = plt.figure(figsize=(12, 12))
img1 = figure.add_subplot(1, 2, 1)
img_plot = plt.imshow(train[3][0], cmap='gray')
img1.set_title(labels[train[3][1]])
plt.axis('off')
img2 = figure.add_subplot(1, 2, 2)
img2_plot = plt.imshow(train[4][0], cmap='gray')
img2.set_title(labels[train[4][1]])
plt.axis('off') | code |
89138938/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import cv2
import numpy as np
import os
image_size = 150
labels = ['PNEUMONIA', 'NORMAL']
def get_data(path):
data = list()
for label in labels:
image_dir = os.path.join(path, label)
class_num = labels.index(label)
for img in os.listdir(image_dir):
try:
img_arr = cv2.imread(os.path.join(image_dir, img), cv2.IMREAD_GRAYSCALE)
resized_array = cv2.resize(img_arr, (image_size, image_size))
data.append([resized_array, class_num])
except Exception as e:
return np.array(data)
train = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/train')
test = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/test')
val = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/val') | code |
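# Added sketch, not part of the recorded cells: as recorded above, get_data()
# ends with `return np.array(data)` directly under the bare
# `except Exception as e:` clause, so the function would return None on a
# clean run (or return early on the first unreadable file). The original
# notebook almost certainly logged the error and returned after the loops;
# a corrected sketch of that presumed structure:
import os
import cv2
import numpy as np

IMAGE_SIZE = 150
LABELS = ['PNEUMONIA', 'NORMAL']

def get_data_fixed(path):
    data = []
    for label in LABELS:
        image_dir = os.path.join(path, label)
        class_num = LABELS.index(label)
        for img in os.listdir(image_dir):
            try:
                img_arr = cv2.imread(os.path.join(image_dir, img), cv2.IMREAD_GRAYSCALE)
                resized = cv2.resize(img_arr, (IMAGE_SIZE, IMAGE_SIZE))
                data.append([resized, class_num])
            except Exception as e:
                print(e)                  # skip unreadable files, keep loading
    return np.array(data, dtype=object)   # ragged [image, label] pairs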
89138938/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import cv2
import numpy as np
import os
image_size = 150
labels = ['PNEUMONIA', 'NORMAL']
def get_data(path):
data = list()
for label in labels:
image_dir = os.path.join(path, label)
class_num = labels.index(label)
for img in os.listdir(image_dir):
try:
img_arr = cv2.imread(os.path.join(image_dir, img), cv2.IMREAD_GRAYSCALE)
resized_array = cv2.resize(img_arr, (image_size, image_size))
data.append([resized_array, class_num])
except Exception as e:
return np.array(data)
train = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/train')
test = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/test')
val = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/val')
print('Total number of Train image', len(train))
print()
print('Total number of Test image', len(test))
print()
print('Total number of Validation image', len(val)) | code |
89138938/cell_11 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
image_size = 150
labels = ['PNEUMONIA', 'NORMAL']
def get_data(path):
data = list()
for label in labels:
image_dir = os.path.join(path, label)
class_num = labels.index(label)
for img in os.listdir(image_dir):
try:
img_arr = cv2.imread(os.path.join(image_dir, img), cv2.IMREAD_GRAYSCALE)
resized_array = cv2.resize(img_arr, (image_size, image_size))
data.append([resized_array, class_num])
except Exception as e:
return np.array(data)
train = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/train')
test = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/test')
val = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/val')
train_pneumonia = '/kaggle/input/chest-xray-pneumonia/chest_xray/train/PNEUMONIA'
train_nornal = '/kaggle/input/chest-xray-pneumonia/chest_xray/train/NORMAL'
x = (len(os.listdir(train_pneumonia)), len(os.listdir(train_nornal)))
labels = ['PNEUMONIA', 'NORMAL']
color = ['yellow', 'green']
# visualize
figure = plt.figure(figsize=(12, 12))
img1 = figure.add_subplot(1, 2, 1)
img_plot = plt.imshow(train[3][0], cmap = 'gray')
img1.set_title(labels[train[3][1]])
plt.axis("off")
# Visualize
img2 = figure.add_subplot(1, 2, 2)
img2_plot = plt.imshow(train[4][0], cmap = 'gray')
img2.set_title(labels[train[4][1]])
plt.axis('off')
sample = train[2][0]
rgb = cv2.cvtColor(sample, cv2.COLOR_GRAY2RGB)  # sample is single-channel; BGR2RGB would reject it
plt.imshow(rgb) | code |
89138938/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
image_size = 150
labels = ['PNEUMONIA', 'NORMAL']
def get_data(path):
data = list()
for label in labels:
image_dir = os.path.join(path, label)
class_num = labels.index(label)
for img in os.listdir(image_dir):
try:
img_arr = cv2.imread(os.path.join(image_dir, img), cv2.IMREAD_GRAYSCALE)
resized_array = cv2.resize(img_arr, (image_size, image_size))
data.append([resized_array, class_num])
except Exception as e:
return np.array(data)
train_pneumonia = '/kaggle/input/chest-xray-pneumonia/chest_xray/train/PNEUMONIA'
train_nornal = '/kaggle/input/chest-xray-pneumonia/chest_xray/train/NORMAL'
x = (len(os.listdir(train_pneumonia)), len(os.listdir(train_nornal)))
labels = ['PNEUMONIA', 'NORMAL']
color = ['yellow', 'green']
plt.pie(x, labels=labels, colors=color, autopct='%.0f%%', radius=1.5, textprops={'fontsize': 16})
plt.show() | code |
89138938/cell_12 | [
"text_plain_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
image_size = 150
labels = ['PNEUMONIA', 'NORMAL']
def get_data(path):
data = list()
for label in labels:
image_dir = os.path.join(path, label)
class_num = labels.index(label)
for img in os.listdir(image_dir):
try:
img_arr = cv2.imread(os.path.join(image_dir, img), cv2.IMREAD_GRAYSCALE)
resized_array = cv2.resize(img_arr, (image_size, image_size))
data.append([resized_array, class_num])
except Exception as e:
return np.array(data)
train = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/train')
test = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/test')
val = get_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/val')
train_pneumonia = '/kaggle/input/chest-xray-pneumonia/chest_xray/train/PNEUMONIA'
train_nornal = '/kaggle/input/chest-xray-pneumonia/chest_xray/train/NORMAL'
x = (len(os.listdir(train_pneumonia)), len(os.listdir(train_nornal)))
labels = ['PNEUMONIA', 'NORMAL']
color = ['yellow', 'green']
# visualize
figure = plt.figure(figsize=(12, 12))
img1 = figure.add_subplot(1, 2, 1)
img_plot = plt.imshow(train[3][0], cmap = 'gray')
img1.set_title(labels[train[3][1]])
plt.axis("off")
# Visualize
img2 = figure.add_subplot(1, 2, 2)
img2_plot = plt.imshow(train[4][0], cmap = 'gray')
img2.set_title(labels[train[4][1]])
plt.axis('off')
sample = train[2][0]
rgb = cv2.cvtColor(sample, cv2.COLOR_GRAY2RGB)  # sample is single-channel; BGR2RGB would reject it
gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
plt.imshow(gray, cmap='gray') | code |
17102038/cell_21 | [
"text_plain_output_1.png"
] | from keras.callbacks import ReduceLROnPlateau
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
import keras
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import itertools
import seaborn as sns
X = pd.read_csv('../input/train.csv')
X_test_main = pd.read_csv('../input/test.csv')
y = X['label']
X = X.drop(['label'], axis=1)
X = X.values.reshape(-1, 28, 28, 1).astype('float32')
X_test_main = X_test_main.values.reshape(-1, 28, 28, 1).astype('float32')
y = y.values
X.shape
plt.colorbar()
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=120, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
sgd = keras.optimizers.SGD(lr=0.001, decay=1e-06, momentum=0.9, nesterov=True)
model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd, metrics=['sparse_categorical_accuracy'])
lrr = ReduceLROnPlateau(monitor='val_sparse_categorical_accuracy', patience=3, verbose=1, factor=0.5, min_lr=1e-05)  # matches the metric name Keras actually logs
result = model.fit(X_train, y_train, batch_size=70, epochs=20, verbose=2, validation_split=0.25, callbacks=[lrr], shuffle=True)
'\nresult = model.fit(X, \n y, \n batch_size=70, \n epochs=3, \n verbose=2, \n validation_split=0.25, \n callbacks=[lrr],\n shuffle=True)\n'
acc = result.history['sparse_categorical_accuracy']
val_acc = result.history['val_sparse_categorical_accuracy']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training')
plt.plot(epochs, val_acc, 'b', label='Validation')
plt.title('Training and validation set accuracy')
plt.legend(loc=0)
plt.figure()
plt.show() | code |
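# Added sketch, not part of the recorded cells: result.history also records
# the losses, and the usual companion to the accuracy curves above is the
# loss curve; `result`, `epochs` and plt are assumed from the cell above.
loss = result.history['loss']
val_loss = result.history['val_loss']
plt.plot(epochs, loss, 'r', label='Training')
plt.plot(epochs, val_loss, 'b', label='Validation')
plt.title('Training and validation loss')
plt.legend(loc=0)
plt.show()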
17102038/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import itertools
import seaborn as sns
X = pd.read_csv('../input/train.csv')
X_test_main = pd.read_csv('../input/test.csv')
y = X['label']
X = X.drop(['label'], axis=1)
X = X.values.reshape(-1, 28, 28, 1).astype('float32')
X_test_main = X_test_main.values.reshape(-1, 28, 28, 1).astype('float32')
y = y.values
X.shape
plt.figure()
plt.imshow(X[1][:, :, 0])
plt.colorbar()
plt.grid(False)
plt.show() | code |
17102038/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import itertools
import seaborn as sns
X = pd.read_csv('../input/train.csv')
X_test_main = pd.read_csv('../input/test.csv')
y = X['label']
X = X.drop(['label'], axis=1)
X = X.values.reshape(-1, 28, 28, 1).astype('float32')
X_test_main = X_test_main.values.reshape(-1, 28, 28, 1).astype('float32')
y = y.values | code |
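# Added sketch, not part of the recorded cells: later cells of this notebook
# fit on X_train/y_train and evaluate on X_test/y_test, but no recorded cell
# defines them. A minimal sketch of the presumably elided step
# (train_test_split is already imported above); the 0.2 test fraction and the
# /255 pixel scaling are assumptions, not recovered from the notebook.
from sklearn.model_selection import train_test_split

X = X / 255.0
X_test_main = X_test_main / 255.0
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)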
17102038/cell_19 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from keras.callbacks import ReduceLROnPlateau
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
import keras
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=120, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
sgd = keras.optimizers.SGD(lr=0.001, decay=1e-06, momentum=0.9, nesterov=True)
model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd, metrics=['sparse_categorical_accuracy'])
lrr = ReduceLROnPlateau(monitor='val_sparse_categorical_accuracy', patience=3, verbose=1, factor=0.5, min_lr=1e-05)  # matches the metric name Keras actually logs
result = model.fit(X_train, y_train, batch_size=70, epochs=20, verbose=2, validation_split=0.25, callbacks=[lrr], shuffle=True)
'\nresult = model.fit(X, \n y, \n batch_size=70, \n epochs=3, \n verbose=2, \n validation_split=0.25, \n callbacks=[lrr],\n shuffle=True)\n'
model.summary()
y_pred = model.predict(X_test, verbose=2)
y_pred[:, 0]
test_loss, test_acc = model.evaluate(X_test, y_test)
print('Test accuracy:', test_acc) | code |
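# Added sketch, not part of the recorded cells: the notebook imports seaborn
# and itertools but no recorded cell uses them; a common next step after
# model.evaluate is a confusion matrix over the test predictions. A sketch,
# assuming the y_pred and y_test arrays from the cell above.
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test, np.argmax(y_pred, axis=1))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('predicted digit')
plt.ylabel('true digit')
plt.show()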
17102038/cell_18 | [
"image_output_1.png"
] | from keras.callbacks import ReduceLROnPlateau
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
import keras
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=120, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
sgd = keras.optimizers.SGD(lr=0.001, decay=1e-06, momentum=0.9, nesterov=True)
model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd, metrics=['sparse_categorical_accuracy'])
lrr = ReduceLROnPlateau(monitor='val_sparse_categorical_accuracy', patience=3, verbose=1, factor=0.5, min_lr=1e-05)  # matches the metric name Keras actually logs
result = model.fit(X_train, y_train, batch_size=70, epochs=20, verbose=2, validation_split=0.25, callbacks=[lrr], shuffle=True)
'\nresult = model.fit(X, \n y, \n batch_size=70, \n epochs=3, \n verbose=2, \n validation_split=0.25, \n callbacks=[lrr],\n shuffle=True)\n'
model.summary()
y_pred = model.predict(X_test, verbose=2)
y_pred[:, 0] | code |
17102038/cell_15 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.callbacks import ReduceLROnPlateau
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
import keras
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=120, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
sgd = keras.optimizers.SGD(lr=0.001, decay=1e-06, momentum=0.9, nesterov=True)
model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd, metrics=['sparse_categorical_accuracy'])
lrr = ReduceLROnPlateau(monitor='val_sparse_categorical_accuracy', patience=3, verbose=1, factor=0.5, min_lr=1e-05)  # matches the metric name Keras actually logs
result = model.fit(X_train, y_train, batch_size=70, epochs=20, verbose=2, validation_split=0.25, callbacks=[lrr], shuffle=True)
'\nresult = model.fit(X, \n y, \n batch_size=70, \n epochs=3, \n verbose=2, \n validation_split=0.25, \n callbacks=[lrr],\n shuffle=True)\n' | code |
17102038/cell_16 | [
"text_plain_output_1.png"
] | from keras.callbacks import ReduceLROnPlateau
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
import keras
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=120, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
sgd = keras.optimizers.SGD(lr=0.001, decay=1e-06, momentum=0.9, nesterov=True)
model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd, metrics=['sparse_categorical_accuracy'])
lrr = ReduceLROnPlateau(monitor='val_sparse_categorical_accuracy', patience=3, verbose=1, factor=0.5, min_lr=1e-05)  # matches the metric name Keras actually logs
result = model.fit(X_train, y_train, batch_size=70, epochs=20, verbose=2, validation_split=0.25, callbacks=[lrr], shuffle=True)
'\nresult = model.fit(X, \n y, \n batch_size=70, \n epochs=3, \n verbose=2, \n validation_split=0.25, \n callbacks=[lrr],\n shuffle=True)\n'
model.summary() | code |
17102038/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import itertools
import seaborn as sns
X = pd.read_csv('../input/train.csv')
X_test_main = pd.read_csv('../input/test.csv')
y = X['label']
X = X.drop(['label'], axis=1)
X = X.values.reshape(-1, 28, 28, 1).astype('float32')
X_test_main = X_test_main.values.reshape(-1, 28, 28, 1).astype('float32')
y = y.values
X.shape | code |
74054733/cell_5 | [
"text_plain_output_1.png"
] | ver = read_kernel_versions()
ver.columns | code |
1008459/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_hdf('../input/train.h5')
data.loc[(data.id == 288) & (data.technical_16 != 0.0) & ~data.technical_16.isnull(), ['timestamp', 'technical_16']]
data.loc[(data.id == 1201) & (data.technical_16 != 0.0) & ~data.technical_16.isnull(), ['timestamp', 'technical_16']] | code |
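# Added sketch, not part of the recorded cells: the two cells above pull the
# nonzero technical_16 history for single ids by hand; a small helper makes
# the same query reusable, assuming the `data` frame loaded above.
def nonzero_history(df, asset_id, feature='technical_16'):
    mask = (df['id'] == asset_id) & (df[feature] != 0.0) & df[feature].notnull()
    return df.loc[mask, ['timestamp', feature]]

# nonzero_history(data, 288)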
1008459/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_hdf('../input/train.h5')
data.loc[(data.id == 288) & (data.technical_16 != 0.0) & ~data.technical_16.isnull(), ['timestamp', 'technical_16']] | code |
1008459/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_hdf('../input/train.h5') | code |
1008459/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_hdf('../input/train.h5')
data.technical_16.describe() | code |
32071115/cell_13 | [
"text_html_output_1.png"
] | from tqdm.notebook import tqdm
import pandas as pd
import requests
def get_restcountries(countries):
"""Retrieve all available fields from restcountries API
https://github.com/apilayer/restcountries#response-example"""
api = 'https://restcountries.eu/rest/v2'
rdfs = []
for country in tqdm(countries):
r = requests.get(f'{api}/name/{country}?fullText=true').json()
if len(r) != 1:
r = requests.get(f'{api}/name/{country}?fullText=false').json()
if len(r) != 1:
try:
                    alpha3 = {'Channel Islands': None, 'Congo (Brazzaville)': 'COG', 'Congo (Kinshasa)': 'COD', 'Czechia': 'CZE', 'Diamond Princess': None, 'Iran': 'IRN', 'Korea, South': 'KOR', 'North Macedonia': 'MKD', 'St Martin': 'MAF', 'Taiwan*': 'TWN', 'Virgin Islands': 'VIR'}
r = requests.get(f'{api}/alpha/{alpha3[country]}')
r = [r.json()] if r.status_code == 200 else []
except:
r = []
rdf = pd.DataFrame(r)
rdf['country'] = country
rdfs.append(rdf)
return pd.concat(rdfs, sort=False).set_index('country')
def get_datausa():
"""Retrieve population on state level from datausa.io
https://datausa.io/about/api/"""
datausa = pd.DataFrame(requests.get('https://datausa.io/api/data?drilldowns=State&measures=Population&year=latest', headers={'User-Agent': ''}).json()['data'])
datausa = datausa[['State', 'Population']]
datausa.columns = ['state', 'population']
datausa['region'] = 'Americas'
datausa['subregion'] = 'Northern America'
return datausa.set_index('state')
wiki_canada = {'Alberta': 4413146, 'British Columbia': 5110917, 'Manitoba': 1377517, 'New Brunswick': 779993, 'Newfoundland and Labrador': 521365, 'Nova Scotia': 977457, 'Ontario': 14711827, 'Prince Edward Island': 158158, 'Quebec': 8537674, 'Saskatchewan': 1181666}
canada = pd.DataFrame({'population': wiki_canada, 'region': 'Americas', 'subregion': 'Northern America'})
wiki_australia = {'Australian Capital Territory': 426709, 'New South Wales': 8089526, 'Northern Territory': 245869, 'Queensland': 5095100, 'South Australia': 1751693, 'Tasmania': 534281, 'Victoria': 6594804, 'Western Australia': 2621680}
australia = pd.DataFrame({'population': wiki_australia, 'region': 'Oceania', 'subregion': 'Australia and New Zealand'})
wiki_china = {'Anhui': 62550000, 'Beijing': 21710000, 'Chongqing': 30750000, 'Fujian': 39110000, 'Gansu': 26260000, 'Guangdong': 111690000, 'Guangxi': 48850000, 'Guizhou': 35550000, 'Hainan': 9170000, 'Hebei': 75200000, 'Heilongjiang': 37890000, 'Henan': 95590000, 'Hubei': 59020000, 'Hunan': 68600000, 'Inner Mongolia': 25290000, 'Jiangsu': 80290000, 'Jiangxi': 46220000, 'Jilin': 27170000, 'Liaoning': 43690000, 'Ningxia': 6820000, 'Qinghai': 5980000, 'Shaanxi': 38350000, 'Shandong': 100060000, 'Shanghai': 24180000, 'Shanxi': 36820000, 'Sichuan': 83020000, 'Tianjin': 15570000, 'Tibet': 3370000, 'Xinjiang': 24450000, 'Yunnan': 48010000, 'Zhejiang': 56570000}
china = pd.DataFrame({'population': wiki_china, 'region': 'Asia', 'subregion': 'Eastern Asia'})
wiki_channel_islands = {'Channel Islands': 170499}
channel_islands = pd.DataFrame({'population': wiki_channel_islands, 'region': 'Europe', 'subregion': 'Northern Europe'})
wiki_diamond_princess = {'Diamond Princess': 3711}
diamond_princess = pd.DataFrame({'population': wiki_diamond_princess, 'region': 'Asia', 'subregion': 'Eastern Asia'})
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv', parse_dates=['Date'])
train.columns = ['id', 'province_state', 'country_region', 'date', 'confirmed', 'fatal']
train['country_region'].update(train['country_region'].str.replace('Georgia', 'Sakartvelo'))
train['entity'] = train['province_state'].where(~train['province_state'].isna(), train['country_region'])
countries = train['entity'].unique()
features = get_restcountries(countries)[['region', 'subregion', 'population']]
for chunk in [get_datausa(), canada, australia, china, channel_islands, diamond_princess]:
features = features.combine_first(chunk)
features
covid = train[['date', 'entity', 'confirmed', 'fatal']].join(features, on='entity')
covid['confirmed'] = covid.groupby('entity')['confirmed'].cummax()
covid['fatal'] = covid.groupby('entity')['fatal'].cummax()
covid[['confirmed', 'fatal', 'population']] = covid[['confirmed', 'fatal', 'population']].fillna(0).astype('int')
covid.sample(20) | code |
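# Added sketch, not part of the recorded cells: with `population` merged in,
# the natural derived features are per-capita rates, e.g. confirmed cases per
# 100k inhabitants. Assuming the `covid` frame built above:
covid_rates = covid[covid['population'] > 0].copy()
covid_rates['confirmed_per_100k'] = covid_rates['confirmed'] / covid_rates['population'] * 1e5
covid_rates['fatality_rate'] = covid_rates['fatal'] / covid_rates['confirmed'].clip(lower=1)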
32071115/cell_15 | [
"text_html_output_1.png"
] | from tqdm.notebook import tqdm
import pandas as pd
import requests
def get_restcountries(countries):
"""Retrieve all available fields from restcountries API
https://github.com/apilayer/restcountries#response-example"""
api = 'https://restcountries.eu/rest/v2'
rdfs = []
for country in tqdm(countries):
r = requests.get(f'{api}/name/{country}?fullText=true').json()
if len(r) != 1:
r = requests.get(f'{api}/name/{country}?fullText=false').json()
if len(r) != 1:
try:
                    alpha3 = {'Channel Islands': None, 'Congo (Brazzaville)': 'COG', 'Congo (Kinshasa)': 'COD', 'Czechia': 'CZE', 'Diamond Princess': None, 'Iran': 'IRN', 'Korea, South': 'KOR', 'North Macedonia': 'MKD', 'St Martin': 'MAF', 'Taiwan*': 'TWN', 'Virgin Islands': 'VIR'}
r = requests.get(f'{api}/alpha/{alpha3[country]}')
r = [r.json()] if r.status_code == 200 else []
except:
r = []
rdf = pd.DataFrame(r)
rdf['country'] = country
rdfs.append(rdf)
return pd.concat(rdfs, sort=False).set_index('country')
def get_datausa():
"""Retrieve population on state level from datausa.io
https://datausa.io/about/api/"""
datausa = pd.DataFrame(requests.get('https://datausa.io/api/data?drilldowns=State&measures=Population&year=latest', headers={'User-Agent': ''}).json()['data'])
datausa = datausa[['State', 'Population']]
datausa.columns = ['state', 'population']
datausa['region'] = 'Americas'
datausa['subregion'] = 'Northern America'
return datausa.set_index('state')
wiki_canada = {'Alberta': 4413146, 'British Columbia': 5110917, 'Manitoba': 1377517, 'New Brunswick': 779993, 'Newfoundland and Labrador': 521365, 'Nova Scotia': 977457, 'Ontario': 14711827, 'Prince Edward Island': 158158, 'Quebec': 8537674, 'Saskatchewan': 1181666}
canada = pd.DataFrame({'population': wiki_canada, 'region': 'Americas', 'subregion': 'Northern America'})
wiki_australia = {'Australian Capital Territory': 426709, 'New South Wales': 8089526, 'Northern Territory': 245869, 'Queensland': 5095100, 'South Australia': 1751693, 'Tasmania': 534281, 'Victoria': 6594804, 'Western Australia': 2621680}
australia = pd.DataFrame({'population': wiki_australia, 'region': 'Oceania', 'subregion': 'Australia and New Zealand'})
wiki_china = {'Anhui': 62550000, 'Beijing': 21710000, 'Chongqing': 30750000, 'Fujian': 39110000, 'Gansu': 26260000, 'Guangdong': 111690000, 'Guangxi': 48850000, 'Guizhou': 35550000, 'Hainan': 9170000, 'Hebei': 75200000, 'Heilongjiang': 37890000, 'Henan': 95590000, 'Hubei': 59020000, 'Hunan': 68600000, 'Inner Mongolia': 25290000, 'Jiangsu': 80290000, 'Jiangxi': 46220000, 'Jilin': 27170000, 'Liaoning': 43690000, 'Ningxia': 6820000, 'Qinghai': 5980000, 'Shaanxi': 38350000, 'Shandong': 100060000, 'Shanghai': 24180000, 'Shanxi': 36820000, 'Sichuan': 83020000, 'Tianjin': 15570000, 'Tibet': 3370000, 'Xinjiang': 24450000, 'Yunnan': 48010000, 'Zhejiang': 56570000}
china = pd.DataFrame({'population': wiki_china, 'region': 'Asia', 'subregion': 'Eastern Asia'})
wiki_channel_islands = {'Channel Islands': 170499}
channel_islands = pd.DataFrame({'population': wiki_channel_islands, 'region': 'Europe', 'subregion': 'Northern Europe'})
wiki_diamond_princess = {'Diamond Princess': 3711}
diamond_princess = pd.DataFrame({'population': wiki_diamond_princess, 'region': 'Asia', 'subregion': 'Eastern Asia'})
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv', parse_dates=['Date'])
train.columns = ['id', 'province_state', 'country_region', 'date', 'confirmed', 'fatal']
train['country_region'].update(train['country_region'].str.replace('Georgia', 'Sakartvelo'))
train['entity'] = train['province_state'].where(~train['province_state'].isna(), train['country_region'])
countries = train['entity'].unique()
features = get_restcountries(countries)[['region', 'subregion', 'population']]
for chunk in [get_datausa(), canada, australia, china, channel_islands, diamond_princess]:
features = features.combine_first(chunk)
features
covid = train[['date', 'entity', 'confirmed', 'fatal']].join(features, on='entity')
covid['confirmed'] = covid.groupby('entity')['confirmed'].cummax()
covid['fatal'] = covid.groupby('entity')['fatal'].cummax()
covid[['confirmed', 'fatal', 'population']] = covid[['confirmed', 'fatal', 'population']].fillna(0).astype('int')
covid.sample(20)
covid.groupby('entity').max().pivot_table(index='region', aggfunc='sum', margins=True) | code |
32071115/cell_17 | [
"text_plain_output_1.png"
] | from tqdm.notebook import tqdm
import pandas as pd
import requests
def get_restcountries(countries):
"""Retrieve all available fields from restcountries API
https://github.com/apilayer/restcountries#response-example"""
api = 'https://restcountries.eu/rest/v2'
rdfs = []
for country in tqdm(countries):
r = requests.get(f'{api}/name/{country}?fullText=true').json()
if len(r) != 1:
r = requests.get(f'{api}/name/{country}?fullText=false').json()
if len(r) != 1:
try:
                    alpha3 = {'Channel Islands': None, 'Congo (Brazzaville)': 'COG', 'Congo (Kinshasa)': 'COD', 'Czechia': 'CZE', 'Diamond Princess': None, 'Iran': 'IRN', 'Korea, South': 'KOR', 'North Macedonia': 'MKD', 'St Martin': 'MAF', 'Taiwan*': 'TWN', 'Virgin Islands': 'VIR'}
r = requests.get(f'{api}/alpha/{alpha3[country]}')
r = [r.json()] if r.status_code == 200 else []
except:
r = []
rdf = pd.DataFrame(r)
rdf['country'] = country
rdfs.append(rdf)
return pd.concat(rdfs, sort=False).set_index('country')
def get_datausa():
"""Retrieve population on state level from datausa.io
https://datausa.io/about/api/"""
datausa = pd.DataFrame(requests.get('https://datausa.io/api/data?drilldowns=State&measures=Population&year=latest', headers={'User-Agent': ''}).json()['data'])
datausa = datausa[['State', 'Population']]
datausa.columns = ['state', 'population']
datausa['region'] = 'Americas'
datausa['subregion'] = 'Northern America'
return datausa.set_index('state')
wiki_canada = {'Alberta': 4413146, 'British Columbia': 5110917, 'Manitoba': 1377517, 'New Brunswick': 779993, 'Newfoundland and Labrador': 521365, 'Nova Scotia': 977457, 'Ontario': 14711827, 'Prince Edward Island': 158158, 'Quebec': 8537674, 'Saskatchewan': 1181666}
canada = pd.DataFrame({'population': wiki_canada, 'region': 'Americas', 'subregion': 'Northern America'})
wiki_australia = {'Australian Capital Territory': 426709, 'New South Wales': 8089526, 'Northern Territory': 245869, 'Queensland': 5095100, 'South Australia': 1751693, 'Tasmania': 534281, 'Victoria': 6594804, 'Western Australia': 2621680}
australia = pd.DataFrame({'population': wiki_australia, 'region': 'Oceania', 'subregion': 'Australia and New Zealand'})
wiki_china = {'Anhui': 62550000, 'Beijing': 21710000, 'Chongqing': 30750000, 'Fujian': 39110000, 'Gansu': 26260000, 'Guangdong': 111690000, 'Guangxi': 48850000, 'Guizhou': 35550000, 'Hainan': 9170000, 'Hebei': 75200000, 'Heilongjiang': 37890000, 'Henan': 95590000, 'Hubei': 59020000, 'Hunan': 68600000, 'Inner Mongolia': 25290000, 'Jiangsu': 80290000, 'Jiangxi': 46220000, 'Jilin': 27170000, 'Liaoning': 43690000, 'Ningxia': 6820000, 'Qinghai': 5980000, 'Shaanxi': 38350000, 'Shandong': 100060000, 'Shanghai': 24180000, 'Shanxi': 36820000, 'Sichuan': 83020000, 'Tianjin': 15570000, 'Tibet': 3370000, 'Xinjiang': 24450000, 'Yunnan': 48010000, 'Zhejiang': 56570000}
china = pd.DataFrame({'population': wiki_china, 'region': 'Asia', 'subregion': 'Eastern Asia'})
wiki_channel_islands = {'Channel Islands': 170499}
channel_islands = pd.DataFrame({'population': wiki_channel_islands, 'region': 'Europe', 'subregion': 'Northern Europe'})
wiki_diamond_princess = {'Diamond Princess': 3711}
diamond_princess = pd.DataFrame({'population': wiki_diamond_princess, 'region': 'Asia', 'subregion': 'Eastern Asia'})
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv', parse_dates=['Date'])
train.columns = ['id', 'province_state', 'country_region', 'date', 'confirmed', 'fatal']
train['country_region'].update(train['country_region'].str.replace('Georgia', 'Sakartvelo'))
train['entity'] = train['province_state'].where(~train['province_state'].isna(), train['country_region'])
countries = train['entity'].unique()
features = get_restcountries(countries)[['region', 'subregion', 'population']]
for chunk in [get_datausa(), canada, australia, china, channel_islands, diamond_princess]:
features = features.combine_first(chunk)
features
covid = train[['date', 'entity', 'confirmed', 'fatal']].join(features, on='entity')
covid['confirmed'] = covid.groupby('entity')['confirmed'].cummax()
covid['fatal'] = covid.groupby('entity')['fatal'].cummax()
covid[['confirmed', 'fatal', 'population']] = covid[['confirmed', 'fatal', 'population']].fillna(0).astype('int')
covid.sample(20)
covid.groupby('entity').max().pivot_table(index='region', aggfunc='sum', margins=True)
covid.to_csv('covid.csv', index=False)
covid['date'].max() | code |
32071115/cell_12 | [
"text_html_output_1.png"
] | from tqdm.notebook import tqdm
import pandas as pd
import requests
def get_restcountries(countries):
"""Retrieve all available fields from restcountries API
https://github.com/apilayer/restcountries#response-example"""
api = 'https://restcountries.eu/rest/v2'
rdfs = []
for country in tqdm(countries):
r = requests.get(f'{api}/name/{country}?fullText=true').json()
if len(r) != 1:
r = requests.get(f'{api}/name/{country}?fullText=false').json()
if len(r) != 1:
try:
                    alpha3 = {'Channel Islands': None, 'Congo (Brazzaville)': 'COG', 'Congo (Kinshasa)': 'COD', 'Czechia': 'CZE', 'Diamond Princess': None, 'Iran': 'IRN', 'Korea, South': 'KOR', 'North Macedonia': 'MKD', 'St Martin': 'MAF', 'Taiwan*': 'TWN', 'Virgin Islands': 'VIR'}
r = requests.get(f'{api}/alpha/{alpha3[country]}')
r = [r.json()] if r.status_code == 200 else []
except:
r = []
rdf = pd.DataFrame(r)
rdf['country'] = country
rdfs.append(rdf)
return pd.concat(rdfs, sort=False).set_index('country')
def get_datausa():
"""Retrieve population on state level from datausa.io
https://datausa.io/about/api/"""
datausa = pd.DataFrame(requests.get('https://datausa.io/api/data?drilldowns=State&measures=Population&year=latest', headers={'User-Agent': ''}).json()['data'])
datausa = datausa[['State', 'Population']]
datausa.columns = ['state', 'population']
datausa['region'] = 'Americas'
datausa['subregion'] = 'Northern America'
return datausa.set_index('state')
wiki_canada = {'Alberta': 4413146, 'British Columbia': 5110917, 'Manitoba': 1377517, 'New Brunswick': 779993, 'Newfoundland and Labrador': 521365, 'Nova Scotia': 977457, 'Ontario': 14711827, 'Prince Edward Island': 158158, 'Quebec': 8537674, 'Saskatchewan': 1181666}
canada = pd.DataFrame({'population': wiki_canada, 'region': 'Americas', 'subregion': 'Northern America'})
wiki_australia = {'Australian Capital Territory': 426709, 'New South Wales': 8089526, 'Northern Territory': 245869, 'Queensland': 5095100, 'South Australia': 1751693, 'Tasmania': 534281, 'Victoria': 6594804, 'Western Australia': 2621680}
australia = pd.DataFrame({'population': wiki_australia, 'region': 'Oceania', 'subregion': 'Australia and New Zealand'})
wiki_china = {'Anhui': 62550000, 'Beijing': 21710000, 'Chongqing': 30750000, 'Fujian': 39110000, 'Gansu': 26260000, 'Guangdong': 111690000, 'Guangxi': 48850000, 'Guizhou': 35550000, 'Hainan': 9170000, 'Hebei': 75200000, 'Heilongjiang': 37890000, 'Henan': 95590000, 'Hubei': 59020000, 'Hunan': 68600000, 'Inner Mongolia': 25290000, 'Jiangsu': 80290000, 'Jiangxi': 46220000, 'Jilin': 27170000, 'Liaoning': 43690000, 'Ningxia': 6820000, 'Qinghai': 5980000, 'Shaanxi': 38350000, 'Shandong': 100060000, 'Shanghai': 24180000, 'Shanxi': 36820000, 'Sichuan': 83020000, 'Tianjin': 15570000, 'Tibet': 3370000, 'Xinjiang': 24450000, 'Yunnan': 48010000, 'Zhejiang': 56570000}
china = pd.DataFrame({'population': wiki_china, 'region': 'Asia', 'subregion': 'Eastern Asia'})
wiki_channel_islands = {'Channel Islands': 170499}
channel_islands = pd.DataFrame({'population': wiki_channel_islands, 'region': 'Europe', 'subregion': 'Northern Europe'})
wiki_diamond_princess = {'Diamond Princess': 3711}
diamond_princess = pd.DataFrame({'population': wiki_diamond_princess, 'region': 'Asia', 'subregion': 'Eastern Asia'})
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv', parse_dates=['Date'])
train.columns = ['id', 'province_state', 'country_region', 'date', 'confirmed', 'fatal']
train['country_region'].update(train['country_region'].str.replace('Georgia', 'Sakartvelo'))
train['entity'] = train['province_state'].where(~train['province_state'].isna(), train['country_region'])
countries = train['entity'].unique()
features = get_restcountries(countries)[['region', 'subregion', 'population']]
for chunk in [get_datausa(), canada, australia, china, channel_islands, diamond_princess]:
features = features.combine_first(chunk)
features | code |
331254/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib
from sklearn.linear_model import LinearRegression
frame = pd.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
plt.figure(figsize=(45, 10))
sns.stripplot('year', 'Fatalities', data=frame) | code |
331254/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib
from sklearn.linear_model import LinearRegression
frame = pd.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
frame.head() | code |
331254/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib
from sklearn.linear_model import LinearRegression
frame = pd.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
plt.figure(figsize=(45, 10))
sns.barplot('year', 'Aboard', data=frame) | code |
331254/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib
from sklearn.linear_model import LinearRegression
frame = pd.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
plt.figure(figsize=(45, 10))
sns.barplot('year', 'Fatalities', data=frame) | code |
331254/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib
from sklearn.linear_model import LinearRegression
frame = pd.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
plt.figure(figsize=(100, 10))
sns.barplot('year', 'Aboard', data=frame, color='blue')
bottom_plot = sns.barplot('year', 'Fatalities', data=frame, color='red')
bottom_plot.set_ylabel('mean(Fatalities) and mean(Aboard)')
bottom_plot.set_xlabel('year') | code |
331254/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib
from sklearn.linear_model import LinearRegression
frame = pd.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
frame['year'].head() | code |
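# Added sketch, not part of the recorded cells: the plots above group by a
# 'year' column that no recorded cell creates; it is presumably derived from
# the file's Date column in an elided cell. A minimal version of that step:
import pandas as pd

frame = pd.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
frame['year'] = pd.to_datetime(frame['Date'], errors='coerce').dt.year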
105192049/cell_4 | [
"text_plain_output_1.png"
] | from gekko import GEKKO
from gekko import GEKKO
from gekko import GEKKO
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
import time
import time
import numpy as np
import pandas as pd
from gekko import GEKKO
import time
m = GEKKO()
m.options.SOLVER = 1
m.solver_options = ['minlp_maximum_iterations 500', 'minlp_max_iter_with_int_sol 10', 'minlp_as_nlp 0', 'nlp_maximum_iterations 50', 'minlp_branch_method 1', 'minlp_integer_tol 0.05', 'minlp_gap_tol 0.01']
df = pd.read_csv('../input/junkchanged/cloulet_dataset.csv')
max_elementb = df['Bandwidth'].max()
max_elementc = df['Capacity'].max()
start_time = time.time()
main_allocation = m.Var(value=max_elementc, lb=max_elementc, ub=max_elementc)
others_allocation = m.Var(value=max_elementc, lb=0, ub=max_elementb)
b = m.Var(value=max_elementb, lb=max_elementb, ub=max_elementb)
m.Equation(main_allocation * others_allocation <= b)
m.Maximize(main_allocation + others_allocation)
m.solve(disp=False)
import numpy as np
import pandas as pd
from gekko import GEKKO
import time
m = GEKKO()
m.options.SOLVER = 1
m.solver_options = ['minlp_maximum_iterations 50000']
df = pd.read_csv('../input/junkchanged/cloulet_dataset.csv')
bw = df['Bandwidth']
cp = df['Capacity']
n = len(bw)
start_time = time.time()
x = m.Array(m.Var, n)
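# replace each array entry with an integer allocation variable bounded by cloudlet i's capacity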
for i in range(n):
    x[i] = m.Var(lb=0, ub=cp[i], integer=True)
s = m.sum([x[i] for i in range(n)])
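# x[i]*s - x[i]*x[i] equals x[i]*(s - x[i]), i.e. cloudlet i's allocation times everyone else's; each product is capped by bw[i]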
for i in range(n):
    m.Equation(x[i] * s - x[i] * x[i] <= bw[i])
m.Maximize(s)
m.solve(disp=False)
import numpy as np
import pandas as pd
from gekko import GEKKO
import time
m = GEKKO()
m.options.SOLVER = 1
m.solver_options = ['minlp_maximum_iterations 50000']
df = pd.read_csv('../input/junkchanged/cloulet_dataset.csv')
bw = df['Bandwidth']
cp = df['Capacity']
print(bw)
print(cp)
n = len(bw)
start_time = time.time()
x = m.Array(m.Var, n)
for i in range(n):
    x[i] = m.Var(lb=0, ub=cp[i], integer=True)
s = 25.0
xsum = m.sum([x[i] for i in range(n)])
traffic = m.sum([x[i] * x[i] for i in range(n)])
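# variant with the total allocation pinned at s = 25: maximize the summed squared traffic under the same bandwidth limits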
for i in range(n):
    m.Equation(x[i] * s - x[i] * x[i] <= bw[i])
m.Equation(xsum == s)
m.Maximize(traffic)
m.solve(disp=False)
for i in range(n):
    print(x[i].value, ' capacity ', cp[i], ' bandwidth ', bw[i])
print('execution time', time.time() - start_time) | code |
105192049/cell_2 | [
"text_plain_output_1.png"
] | from gekko import GEKKO
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
m = GEKKO()
m.options.SOLVER = 1
m.solver_options = ['minlp_maximum_iterations 500', 'minlp_max_iter_with_int_sol 10', 'minlp_as_nlp 0', 'nlp_maximum_iterations 50', 'minlp_branch_method 1', 'minlp_integer_tol 0.05', 'minlp_gap_tol 0.01']
df = pd.read_csv('../input/junkchanged/cloulet_dataset.csv')
max_elementb = df['Bandwidth'].max()
max_elementc = df['Capacity'].max()
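# the largest bandwidth and capacity in the dataset serve as fixed bounds for this two-variable model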
print('----', max_elementb, max_elementc)
start_time = time.time()
main_allocation = m.Var(value=max_elementc, lb=max_elementc, ub=max_elementc)
others_allocation = m.Var(value=max_elementc, lb=0, ub=max_elementb)
b = m.Var(value=max_elementb, lb=max_elementb, ub=max_elementb)
m.Equation(main_allocation * others_allocation <= b)
m.Maximize(main_allocation + others_allocation)
m.solve(disp=False)
print('main_allocation: ' + str(main_allocation.value))
print('others_allocation: ' + str(others_allocation.value))
print('Total acceptance: ' + str(-m.options.objfcnval))
print('--- %s seconds ---' % (time.time() - start_time)) | code |
105192049/cell_1 | [
"text_plain_output_1.png"
] | !pip install gekko | code |
105192049/cell_3 | [
"text_plain_output_1.png"
] | from gekko import GEKKO
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
m = GEKKO()
m.options.SOLVER = 1
m.solver_options = ['minlp_maximum_iterations 500', 'minlp_max_iter_with_int_sol 10', 'minlp_as_nlp 0', 'nlp_maximum_iterations 50', 'minlp_branch_method 1', 'minlp_integer_tol 0.05', 'minlp_gap_tol 0.01']
df = pd.read_csv('../input/junkchanged/cloulet_dataset.csv')
max_elementb = df['Bandwidth'].max()
max_elementc = df['Capacity'].max()
start_time = time.time()
main_allocation = m.Var(value=max_elementc, lb=max_elementc, ub=max_elementc)
others_allocation = m.Var(value=max_elementc, lb=0, ub=max_elementb)
b = m.Var(value=max_elementb, lb=max_elementb, ub=max_elementb)
m.Equation(main_allocation * others_allocation <= b)
m.Maximize(main_allocation + others_allocation)
m.solve(disp=False)
import numpy as np
import pandas as pd
from gekko import GEKKO
import time
m = GEKKO()
m.options.SOLVER = 1
m.solver_options = ['minlp_maximum_iterations 50000']
df = pd.read_csv('../input/junkchanged/cloulet_dataset.csv')
bw = df['Bandwidth']
cp = df['Capacity']
print(bw)
print(cp)
n = len(bw)
start_time = time.time()
x = m.Array(m.Var, n)
for i in range(n):
    x[i] = m.Var(lb=0, ub=cp[i], integer=True)
s = m.sum([x[i] for i in range(n)])
for i in range(n):
    m.Equation(x[i] * s - x[i] * x[i] <= bw[i])
m.Maximize(s)
m.solve(disp=False)
print('total allocation', s.value)
for i in range(n):
    print(x[i].value, ' capacity ', cp[i], ' bandwidth ', bw[i])
print('execution time', time.time() - start_time) | code |
49120672/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from skimage.io.collection import ImageCollection
import numpy as np # linear algebra
ic = ImageCollection('../input/cassava-leaf-disease-classification/train_images/1000*.jpg')
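# ImageCollection loads the matching images lazily, one at a time, as the loop below iterates over them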
i = 0
for pic in ic:
    print('{} \nPic type: {} \nPic shape: {} \n\n'.format(i, type(pic), np.shape(pic)))
    i += 1 | code |
49120672/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
disease_numbers = pd.read_json('../input/cassava-leaf-disease-classification/label_num_to_disease_map.json', orient='index')
sample_submission = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
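# train.csv pairs each image_id with an integer disease label; the JSON file maps those labels to disease names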
print(train.info())
print('\nDescription:\n', train.describe()) | code |
49120672/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
disease_numbers = pd.read_json('../input/cassava-leaf-disease-classification/label_num_to_disease_map.json', orient='index')
sample_submission = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
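# inspect class balance: per-label counts followed by a normalized histogram of the labels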
print(train.groupby('label').count())
# distplot is deprecated in recent seaborn; histplot with stat='density' reproduces the normalized histogram
ax = sns.histplot(train['label'], bins=5, stat='density', edgecolor='k') | code |
49120672/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
49120672/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
disease_numbers = pd.read_json('../input/cassava-leaf-disease-classification/label_num_to_disease_map.json', orient='index')
sample_submission = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
print('The image IDs are stored as {}.'.format(type(train['image_id'][1]))) | code |
49120672/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
disease_numbers = pd.read_json('../input/cassava-leaf-disease-classification/label_num_to_disease_map.json', orient='index')
sample_submission = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
print('Disease Numbers:\n {} \n\nSample_Submission:\n {} \n\ntrain.csv:\n {}'.format(disease_numbers, sample_submission, train)) | code |