path: stringlengths 13-17
screenshot_names: sequencelengths 1-873
code: stringlengths 0-40.4k
cell_type: stringclasses 1 value
104115135/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)

def diagnostic_plots(df, variable, target):
    pass

corr = df.corr()
plt.figure(figsize=(20, 9))
k = 12
cols = corr.nlargest(k, 'quality')['quality'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values, cmap='Blues')
plt.show()
code
104115135/cell_30
[ "text_plain_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)

def diagnostic_plots(df, variable, target):
    pass

corr = df.corr()
plt.figure(figsize=(20, 9))
k = 12  # number of variables for heatmap
cols = corr.nlargest(k, 'quality')['quality'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values, cmap="Blues")
plt.show()

df.isnull().sum()

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

df.loc[detect_outliers(df, df.columns[:-1])]
code
104115135/cell_29
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)

def diagnostic_plots(df, variable, target):
    pass

corr = df.corr()
plt.figure(figsize=(20, 9))
k = 12  # number of variables for heatmap
cols = corr.nlargest(k, 'quality')['quality'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values, cmap="Blues")
plt.show()

df.isnull().sum()
diagnostic_plots(df, 'fixed acidity', 'quality')
code
104115135/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.head(10)
code
104115135/cell_45
[ "text_plain_output_1.png" ]
from collections import Counter
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
import feature_engine.transformation as vt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)

def diagnostic_plots(df, variable, target):
    pass

corr = df.corr()
plt.figure(figsize=(20, 9))
k = 12  # number of variables for heatmap
cols = corr.nlargest(k, 'quality')['quality'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values, cmap="Blues")
plt.show()

df.isnull().sum()

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

df.loc[detect_outliers(df, df.columns[:-1])]

cols = ['fixed acidity', 'volatile acidity', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'sulphates', 'alcohol']
lt = vt.LogTransformer(variables=cols)
lt.fit(df)
df = lt.transform(df)
X_train, X_test, y_train, y_test = train_test_split(df.drop('quality', axis=1), df['quality'], test_size=0.3, random_state=0)
(X_train.shape, X_test.shape)
code
104115135/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)

def diagnostic_plots(df, variable, target):
    pass

for variable in df:
    diagnostic_plots(df, variable, 'quality')
code
104115135/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
code
104115135/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()

plt.figure(1, figsize=(10, 10))
df['quality'].value_counts().plot.pie(autopct='%1.1f%%')
plt.show()
code
104115135/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)
code
104115135/cell_3
[ "image_output_1.png" ]
pip install feature-engine
code
104115135/cell_35
[ "text_html_output_1.png" ]
from collections import Counter
import feature_engine.transformation as vt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)

def diagnostic_plots(df, variable, target):
    pass

corr = df.corr()
plt.figure(figsize=(20, 9))
k = 12  # number of variables for heatmap
cols = corr.nlargest(k, 'quality')['quality'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values, cmap="Blues")
plt.show()

df.isnull().sum()

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

df.loc[detect_outliers(df, df.columns[:-1])]

cols = ['fixed acidity', 'volatile acidity', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'sulphates', 'alcohol']
lt = vt.LogTransformer(variables=cols)
lt.fit(df)
code
104115135/cell_43
[ "text_plain_output_1.png" ]
from collections import Counter
import feature_engine.transformation as vt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)

def diagnostic_plots(df, variable, target):
    pass

corr = df.corr()
plt.figure(figsize=(20, 9))
k = 12  # number of variables for heatmap
cols = corr.nlargest(k, 'quality')['quality'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values, cmap="Blues")
plt.show()

df.isnull().sum()

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

df.loc[detect_outliers(df, df.columns[:-1])]

cols = ['fixed acidity', 'volatile acidity', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'sulphates', 'alcohol']
lt = vt.LogTransformer(variables=cols)
lt.fit(df)
df = lt.transform(df)
df.head()
code
104115135/cell_31
[ "image_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)

def diagnostic_plots(df, variable, target):
    pass

corr = df.corr()
plt.figure(figsize=(20, 9))
k = 12  # number of variables for heatmap
cols = corr.nlargest(k, 'quality')['quality'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values, cmap="Blues")
plt.show()

df.isnull().sum()

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

df.loc[detect_outliers(df, df.columns[:-1])]
diagnostic_plots(df, 'fixed acidity', 'quality')
code
104115135/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)

def diagnostic_plots(df, variable, target):
    pass

corr = df.corr()
plt.figure(figsize=(20, 9))
k = 12  # number of variables for heatmap
cols = corr.nlargest(k, 'quality')['quality'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values, cmap="Blues")
plt.show()

df.isnull().sum()
code
104115135/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
code
104115135/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.info()
code
72073117/cell_4
[ "text_plain_output_1.png" ]
from kaggle_datasets import KaggleDatasets

datasets = KaggleDatasets()
GCS_DS_PATH_TRAIN = datasets.get_gcs_path('des-train-non-ls')
GCS_DS_PATH_TRAIN
GCS_DS_PATH_TRAIN_LS = datasets.get_gcs_path('des-train-ls')
GCS_DS_PATH_TRAIN_LS
code
72073117/cell_6
[ "text_plain_output_1.png" ]
from kaggle_datasets import KaggleDatasets

datasets = KaggleDatasets()
GCS_DS_PATH_TRAIN = datasets.get_gcs_path('des-train-non-ls')
GCS_DS_PATH_TRAIN
GCS_DS_PATH_TRAIN_LS = datasets.get_gcs_path('des-train-ls')
GCS_DS_PATH_TRAIN_LS
GCS_DS_PATH_TEST = datasets.get_gcs_path('des-test-non-ls')
GCS_DS_PATH_TEST
GCS_DS_PATH_TEST_LS = datasets.get_gcs_path('des-test-ls')
GCS_DS_PATH_TEST_LS
code
72073117/cell_7
[ "text_plain_output_1.png" ]
from kaggle_datasets import KaggleDatasets

datasets = KaggleDatasets()
GCS_DS_PATH_TRAIN = datasets.get_gcs_path('des-train-non-ls')
GCS_DS_PATH_TRAIN
GCS_DS_PATH_TRAIN_LS = datasets.get_gcs_path('des-train-ls')
GCS_DS_PATH_TRAIN_LS
GCS_DS_PATH_TEST = datasets.get_gcs_path('des-test-non-ls')
GCS_DS_PATH_TEST
GCS_DS_PATH_TEST_LS = datasets.get_gcs_path('des-test-ls')
GCS_DS_PATH_TEST_LS
GCS_DS_PATH_VAL = datasets.get_gcs_path('des-val-non-ls')
GCS_DS_PATH_VAL
code
72073117/cell_8
[ "text_plain_output_1.png" ]
from kaggle_datasets import KaggleDatasets

datasets = KaggleDatasets()
GCS_DS_PATH_TRAIN = datasets.get_gcs_path('des-train-non-ls')
GCS_DS_PATH_TRAIN
GCS_DS_PATH_TRAIN_LS = datasets.get_gcs_path('des-train-ls')
GCS_DS_PATH_TRAIN_LS
GCS_DS_PATH_TEST = datasets.get_gcs_path('des-test-non-ls')
GCS_DS_PATH_TEST
GCS_DS_PATH_TEST_LS = datasets.get_gcs_path('des-test-ls')
GCS_DS_PATH_TEST_LS
GCS_DS_PATH_VAL = datasets.get_gcs_path('des-val-non-ls')
GCS_DS_PATH_VAL
GCS_DS_PATH_VAL_LS = datasets.get_gcs_path('des-val-ls')
GCS_DS_PATH_VAL_LS
code
72073117/cell_3
[ "text_plain_output_1.png" ]
from kaggle_datasets import KaggleDatasets

datasets = KaggleDatasets()
GCS_DS_PATH_TRAIN = datasets.get_gcs_path('des-train-non-ls')
GCS_DS_PATH_TRAIN
code
72073117/cell_5
[ "text_plain_output_1.png" ]
from kaggle_datasets import KaggleDatasets

datasets = KaggleDatasets()
GCS_DS_PATH_TRAIN = datasets.get_gcs_path('des-train-non-ls')
GCS_DS_PATH_TRAIN
GCS_DS_PATH_TRAIN_LS = datasets.get_gcs_path('des-train-ls')
GCS_DS_PATH_TRAIN_LS
GCS_DS_PATH_TEST = datasets.get_gcs_path('des-test-non-ls')
GCS_DS_PATH_TEST
code
34122297/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
DF = pd.concat([df, dft])
survivors = DF[DF.Survived == 1]
women_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'female')]
percent_women_survivors = len(women_survivors) / len(survivors)
men_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'male')]
percent_men_survivors = len(men_survivors) / len(survivors)
df.isna().sum()
dft.isna().sum()
df2 = df.copy()
df2.Age = df.Age.fillna(df.Age.mean())
dft2 = dft.copy()
dft2.Age = dft.Age.fillna(dft.Age.mean())
dft2.Fare = dft.Fare.fillna(dft.Fare.mean())
df3 = df2.copy()
df3.Sex = df3.Sex.map({'male': 0, 'female': 1})
dft3 = dft2.copy()
dft3.Sex = dft3.Sex.map({'male': 0, 'female': 1})
df4 = pd.get_dummies(df3)
dft4 = pd.get_dummies(dft3)
X_train = df4.drop(columns=['PassengerId', 'Survived'])
X_test = dft4.drop(columns=['PassengerId'])
y_train = df4['Survived']
X_train.head(6)
code
34122297/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
df.isna().sum()
dft.isna().sum()
df2 = df.copy()
df2.Age = df.Age.fillna(df.Age.mean())
dft2 = dft.copy()
dft2.Age = dft.Age.fillna(dft.Age.mean())
dft2.Fare = dft.Fare.fillna(dft.Fare.mean())
df2.head(6)
code
34122297/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
df.head(6)
code
34122297/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
DF = pd.concat([df, dft])
survivors = DF[DF.Survived == 1]
women_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'female')]
percent_women_survivors = len(women_survivors) / len(survivors)
men_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'male')]
percent_men_survivors = len(men_survivors) / len(survivors)
class1_survivors = DF[(DF.Survived == 1) & (DF.Pclass == 1)]
percent_class1_survivors = len(class1_survivors) / len(survivors)
class2_survivors = DF[(DF.Survived == 1) & (DF.Pclass == 2)]
percent_class2_survivors = len(class2_survivors) / len(survivors)
class3_survivors = DF[(DF.Survived == 1) & (DF.Pclass == 3)]
percent_class3_survivors = len(class3_survivors) / len(survivors)
print(percent_class1_survivors, percent_class2_survivors, percent_class3_survivors)
code
34122297/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.head(6)
code
34122297/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
DF = pd.concat([df, dft])
survivors = DF[DF.Survived == 1]
women_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'female')]
percent_women_survivors = len(women_survivors) / len(survivors)
men_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'male')]
percent_men_survivors = len(men_survivors) / len(survivors)
df.isna().sum()
dft.isna().sum()
df2 = df.copy()
df2.Age = df.Age.fillna(df.Age.mean())
dft2 = dft.copy()
dft2.Age = dft.Age.fillna(dft.Age.mean())
dft2.Fare = dft.Fare.fillna(dft.Fare.mean())
df3 = df2.copy()
df3.Sex = df3.Sex.map({'male': 0, 'female': 1})
dft3 = dft2.copy()
dft3.Sex = dft3.Sex.map({'male': 0, 'female': 1})
df4 = pd.get_dummies(df3)
dft4 = pd.get_dummies(dft3)
df4.head(6)
code
34122297/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34122297/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
df.isna().sum()
code
34122297/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft.isna().sum()
code
34122297/cell_15
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
DF = pd.concat([df, dft])
survivors = DF[DF.Survived == 1]
women_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'female')]
percent_women_survivors = len(women_survivors) / len(survivors)
men_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'male')]
percent_men_survivors = len(men_survivors) / len(survivors)
df.isna().sum()
dft.isna().sum()
df2 = df.copy()
df2.Age = df.Age.fillna(df.Age.mean())
dft2 = dft.copy()
dft2.Age = dft.Age.fillna(dft.Age.mean())
dft2.Fare = dft.Fare.fillna(dft.Fare.mean())
df3 = df2.copy()
df3.Sex = df3.Sex.map({'male': 0, 'female': 1})
dft3 = dft2.copy()
dft3.Sex = dft3.Sex.map({'male': 0, 'female': 1})
df4 = pd.get_dummies(df3)
dft4 = pd.get_dummies(dft3)
X_train = df4.drop(columns=['PassengerId', 'Survived'])
X_test = dft4.drop(columns=['PassengerId'])
y_train = df4['Survived']

from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, StratifiedKFold

sc = StandardScaler()
X_train[['Age', 'Fare']] = sc.fit_transform(X_train[['Age', 'Fare']])
X_test[['Age', 'Fare']] = sc.transform(X_test[['Age', 'Fare']])
skf = StratifiedKFold(n_splits=4, shuffle=True)
clf = LogisticRegression(max_iter=1000, C=0.1)
scores = cross_val_score(clf, X_train, y_train, cv=skf)

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

clf2 = RandomForestClassifier()
param_grid = {'criterion': ['gini', 'entropy'], 'min_impurity_decrease': np.linspace(0, 0.001, 100)}
gcv = GridSearchCV(clf2, param_grid=param_grid, cv=skf)
gcv.fit(X_train, y_train)
print(gcv.best_params_)
print(gcv.best_score_)
code
34122297/cell_16
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
DF = pd.concat([df, dft])
survivors = DF[DF.Survived == 1]
women_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'female')]
percent_women_survivors = len(women_survivors) / len(survivors)
men_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'male')]
percent_men_survivors = len(men_survivors) / len(survivors)
df.isna().sum()
dft.isna().sum()
df2 = df.copy()
df2.Age = df.Age.fillna(df.Age.mean())
dft2 = dft.copy()
dft2.Age = dft.Age.fillna(dft.Age.mean())
dft2.Fare = dft.Fare.fillna(dft.Fare.mean())
df3 = df2.copy()
df3.Sex = df3.Sex.map({'male': 0, 'female': 1})
dft3 = dft2.copy()
dft3.Sex = dft3.Sex.map({'male': 0, 'female': 1})
df4 = pd.get_dummies(df3)
dft4 = pd.get_dummies(dft3)
X_train = df4.drop(columns=['PassengerId', 'Survived'])
X_test = dft4.drop(columns=['PassengerId'])
y_train = df4['Survived']

from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, StratifiedKFold

sc = StandardScaler()
X_train[['Age', 'Fare']] = sc.fit_transform(X_train[['Age', 'Fare']])
X_test[['Age', 'Fare']] = sc.transform(X_test[['Age', 'Fare']])
skf = StratifiedKFold(n_splits=4, shuffle=True)
clf = LogisticRegression(max_iter=1000, C=0.1)
scores = cross_val_score(clf, X_train, y_train, cv=skf)

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

clf2 = RandomForestClassifier()
param_grid = {'criterion': ['gini', 'entropy'], 'min_impurity_decrease': np.linspace(0, 0.001, 100)}
gcv = GridSearchCV(clf2, param_grid=param_grid, cv=skf)
gcv.fit(X_train, y_train)
predictions = gcv.predict(X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('my_submission.csv', index=False)
print('Your submission was successfully saved!')
code
34122297/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
test_data.head(6)
code
34122297/cell_14
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
DF = pd.concat([df, dft])
survivors = DF[DF.Survived == 1]
women_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'female')]
percent_women_survivors = len(women_survivors) / len(survivors)
men_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'male')]
percent_men_survivors = len(men_survivors) / len(survivors)
df.isna().sum()
dft.isna().sum()
df2 = df.copy()
df2.Age = df.Age.fillna(df.Age.mean())
dft2 = dft.copy()
dft2.Age = dft.Age.fillna(dft.Age.mean())
dft2.Fare = dft.Fare.fillna(dft.Fare.mean())
df3 = df2.copy()
df3.Sex = df3.Sex.map({'male': 0, 'female': 1})
dft3 = dft2.copy()
dft3.Sex = dft3.Sex.map({'male': 0, 'female': 1})
df4 = pd.get_dummies(df3)
dft4 = pd.get_dummies(dft3)
X_train = df4.drop(columns=['PassengerId', 'Survived'])
X_test = dft4.drop(columns=['PassengerId'])
y_train = df4['Survived']

from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, StratifiedKFold

sc = StandardScaler()
X_train[['Age', 'Fare']] = sc.fit_transform(X_train[['Age', 'Fare']])
X_test[['Age', 'Fare']] = sc.transform(X_test[['Age', 'Fare']])
skf = StratifiedKFold(n_splits=4, shuffle=True)
clf = LogisticRegression(max_iter=1000, C=0.1)
scores = cross_val_score(clf, X_train, y_train, cv=skf)
print(scores)
print(scores.mean())
code
34122297/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
df.isna().sum()
dft.isna().sum()
df2 = df.copy()
df2.Age = df.Age.fillna(df.Age.mean())
dft2 = dft.copy()
dft2.Age = dft.Age.fillna(dft.Age.mean())
dft2.Fare = dft.Fare.fillna(dft.Fare.mean())
df3 = df2.copy()
df3.Sex = df3.Sex.map({'male': 0, 'female': 1})
dft3 = dft2.copy()
dft3.Sex = dft3.Sex.map({'male': 0, 'female': 1})
df3.head(6)
code
34122297/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
DF = pd.concat([df, dft])
survivors = DF[DF.Survived == 1]
women_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'female')]
percent_women_survivors = len(women_survivors) / len(survivors)
men_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'male')]
percent_men_survivors = len(men_survivors) / len(survivors)
df.isna().sum()
dft.isna().sum()
df2 = df.copy()
df2.Age = df.Age.fillna(df.Age.mean())
dft2 = dft.copy()
dft2.Age = dft.Age.fillna(dft.Age.mean())
dft2.Fare = dft.Fare.fillna(dft.Fare.mean())
df3 = df2.copy()
df3.Sex = df3.Sex.map({'male': 0, 'female': 1})
dft3 = dft2.copy()
dft3.Sex = dft3.Sex.map({'male': 0, 'female': 1})
df4 = pd.get_dummies(df3)
dft4 = pd.get_dummies(dft3)
dft4.head(6)
code
34122297/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
df = train_data.drop(columns=['Name', 'Ticket', 'Cabin'])
dft = test_data.drop(columns=['Name', 'Ticket', 'Cabin'])
DF = pd.concat([df, dft])
survivors = DF[DF.Survived == 1]
women_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'female')]
percent_women_survivors = len(women_survivors) / len(survivors)
men_survivors = DF[(DF.Survived == 1) & (DF.Sex == 'male')]
percent_men_survivors = len(men_survivors) / len(survivors)
print(percent_women_survivors, percent_men_survivors)
code
88100838/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/test.csv')
train_df = train_df.drop(['Id'], axis=1)
test_df = test_df.drop(['Id'], axis=1)
labels = train_df['SalePrice']
data = pd.concat([train_df, test_df], ignore_index=True)
data = data.drop('SalePrice', 1)
data = data.drop('PoolQC', 1)
data = data.drop('Fence', 1)
data = data.drop('MiscFeature', 1)
data = data.drop('Alley', 1)
data.dtypes.value_counts()
code
88100838/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/test.csv')
train_df = train_df.drop(['Id'], axis=1)
test_df = test_df.drop(['Id'], axis=1)
labels = train_df['SalePrice']
data = pd.concat([train_df, test_df], ignore_index=True)
data = data.drop('SalePrice', 1)
print(f'Shape of train set: {train_df.shape}')
print(f'Shape of test set: {test_df.shape}')
code
88100838/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/test.csv')
train_df = train_df.drop(['Id'], axis=1)
test_df = test_df.drop(['Id'], axis=1)
labels = train_df['SalePrice']
data = pd.concat([train_df, test_df], ignore_index=True)
data = data.drop('SalePrice', 1)
train_df.columns
code
88100838/cell_19
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from numpy import linalg as LA  # LA.eig is used below; this alias comes from the setup cell

train_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/test.csv')
train_df = train_df.drop(['Id'], axis=1)
test_df = test_df.drop(['Id'], axis=1)
labels = train_df['SalePrice']
data = pd.concat([train_df, test_df], ignore_index=True)
data = data.drop('SalePrice', 1)
nans = pd.isnull(data).sum()
nans[nans > 0]
data = data.drop('PoolQC', 1)
data = data.drop('Fence', 1)
data = data.drop('MiscFeature', 1)
data = data.drop('Alley', 1)
data.dtypes.value_counts()
all_columns = data.columns.values
non_categorical = ['LotFrontage', 'LotArea', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'GarageArea', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea', 'MiscVal']
categorical = [value for value in all_columns if value not in non_categorical]
data = pd.get_dummies(data)
data.fillna(0)
data[data == -np.inf] = 0

def avg(x):
    s = np.sum(x, axis=0)
    s = s / x.shape[0]
    return s

def std(x):
    m = avg(x)
    s = np.subtract(x, m)
    s = np.power(s, 2)
    c = s.shape
    s = np.sum(s, axis=0)
    s = np.divide(s, c[0] - 1)
    s = np.sqrt(s)
    return s

def standardize(x):
    A = avg(x)
    S = std(x)
    x = np.subtract(x, A)
    x = np.divide(x, S)
    return x

data_transformed = data.to_numpy()
t = standardize(data_transformed)

def cov(x):
    cov = np.dot(x.T, x)
    cov = np.divide(cov, x.shape[0])
    return cov

tr = cov(t)
e, v = LA.eig(tr)
print(e)
print(v)
code
88100838/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import warnings
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn import linear_model
from sklearn.metrics import make_scorer
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn import svm
from sklearn.metrics import r2_score
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import tensorflow as tf
import seaborn
import warnings
from numpy import linalg as LA

warnings.filterwarnings('ignore')
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
88100838/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/test.csv')
train_df = train_df.drop(['Id'], axis=1)
test_df = test_df.drop(['Id'], axis=1)
labels = train_df['SalePrice']
data = pd.concat([train_df, test_df], ignore_index=True)
data = data.drop('SalePrice', 1)
nans = pd.isnull(data).sum()
nans[nans > 0]
code
88100838/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/neolen-house-price-prediction/test.csv')
train_df = train_df.drop(['Id'], axis=1)
test_df = test_df.drop(['Id'], axis=1)
labels = train_df['SalePrice']
data = pd.concat([train_df, test_df], ignore_index=True)
data = data.drop('SalePrice', 1)
train_df.head()
code
332331/cell_2
[ "text_html_output_1.png" ]
from subprocess import check_output
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.describe()
code
332331/cell_1
[ "text_html_output_1.png" ]
from subprocess import check_output
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
code
332331/cell_3
[ "text_html_output_1.png" ]
from subprocess import check_output
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train['Age'] = train['Age'].fillna(train['Age'].median())
train.describe()
code
130013533/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras import models from keras.backend import set_session from skimage.transform import resize from skimage.transform import resize from tensorflow.keras.optimizers import Adam import cv2 import datetime import datetime import datetime import matplotlib.image as mpimg import numpy as np import os import pickle import tensorflow as tf import time import numpy as np import pandas as pd from matplotlib import pyplot as plt import pickle import os import csv import keras import tensorflow as tf from keras import backend from keras.backend import set_session from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, BatchNormalization from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Dropout, LeakyReLU, Conv2DTranspose, ReLU from tensorflow.keras.optimizers import Adam from skimage.transform import resize from keras.layers import Reshape from keras import layers import datetime from keras import initializers config = tf.compat.v1.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.9 config.gpu_options.visible_device_list = '0' set_session(tf.compat.v1.Session(config=config)) path = '/kaggle/input/face-mask-dataset-1' xname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_mask1.pickle' yname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_out_mask1.pickle' pickle_in = open(os.path.join(path, xname), 'rb') x = pickle.load(pickle_in) pickle_in = open(os.path.join(path, yname), 'rb') y = pickle.load(pickle_in) x = resize(x, (len(x), 64, 64, 1), anti_aliasing=False) y = resize(y, (len(y), 64, 64, 1), anti_aliasing=False) from keras import models from keras.models import model_from_json model = models.load_model('/kaggle/input/weight/results/results/500_mg_04-07-20_47.h5') model_json = model.to_json() with open('dcgan.json', 'w') as json_file: json_file.write(model_json) with open('/kaggle/input/build-in-models/dcgan.json', 'r') as json_file: json_savedModel = json_file.read() generator = tf.keras.models.model_from_json(json_savedModel) generator.load_weights('/kaggle/input/build-in-models/250_wg_12-07-12_42.h5/250_wg_12-07-12_42.h5') generator.compile(loss='mean_squared_error', optimizer=Adam(lr=2e-05)) # Making predictions and drawing them. 
# First row: Occluded images # Second row: Ground Truth images # Third row: Predictions import datetime plot_path = "./" a = 8690 b = 8700 pred=generator.predict(x[a:b]) fig = plt.figure(figsize = (20,10)) for ctr in range(10): fig.add_subplot(3,10,ctr+1) plt.imshow(np.reshape(x[a + ctr],(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(10 + ctr + 1)) plt.imshow(np.reshape(y[a + ctr]/255,(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(20 + ctr + 1)) plt.imshow(np.reshape(pred[ctr],(64,64)), cmap = "gray") plt.savefig(os.path.join(plot_path,str(datetime.datetime.now().strftime('%m-%d-%H:%M')))) import cv2 import matplotlib.image as mpimg img = mpimg.imread('/kaggle/input/real-time-face/WithMask/5)Thivagaran_surgical1.png') img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) img.shape from skimage.transform import resize img = resize(img, (64, 64), anti_aliasing=False) import datetime plot_path = './' img = np.expand_dims(img, axis=-1) img = np.expand_dims(img, axis=0) import time start_time = time.time() pred = generator.predict(img) inference_time = time.time() - start_time pred = np.reshape(pred, (64, 64)) pred = cv2.cvtColor(pred, cv2.COLOR_GRAY2RGB) plt.imshow(pred) print(pred.shape) print('inference time', inference_time)
code
130013533/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras import models
from keras.backend import set_session
from tensorflow.keras.optimizers import Adam
import tensorflow as tf

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import pickle
import os
import csv
import keras
import tensorflow as tf
from keras import backend
from keras.backend import set_session
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Dropout, LeakyReLU, Conv2DTranspose, ReLU
from tensorflow.keras.optimizers import Adam
from skimage.transform import resize
from keras.layers import Reshape
from keras import layers
import datetime
from keras import initializers

config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
config.gpu_options.visible_device_list = '0'
set_session(tf.compat.v1.Session(config=config))

from keras import models
from keras.models import model_from_json

model = models.load_model('/kaggle/input/weight/results/results/500_mg_04-07-20_47.h5')
model_json = model.to_json()
with open('dcgan.json', 'w') as json_file:
    json_file.write(model_json)
with open('/kaggle/input/build-in-models/dcgan.json', 'r') as json_file:
    json_savedModel = json_file.read()
generator = tf.keras.models.model_from_json(json_savedModel)
generator.load_weights('/kaggle/input/build-in-models/250_wg_12-07-12_42.h5/250_wg_12-07-12_42.h5')
generator.compile(loss='mean_squared_error', optimizer=Adam(lr=2e-05))
print('Model compiled')
code
130013533/cell_6
[ "text_plain_output_1.png" ]
from keras import models from keras.backend import set_session from skimage.transform import resize from skimage.transform import resize from tensorflow.keras.optimizers import Adam import cv2 import datetime import datetime import matplotlib.image as mpimg import numpy as np import os import pickle import tensorflow as tf import numpy as np import pandas as pd from matplotlib import pyplot as plt import pickle import os import csv import keras import tensorflow as tf from keras import backend from keras.backend import set_session from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, BatchNormalization from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Dropout, LeakyReLU, Conv2DTranspose, ReLU from tensorflow.keras.optimizers import Adam from skimage.transform import resize from keras.layers import Reshape from keras import layers import datetime from keras import initializers config = tf.compat.v1.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.9 config.gpu_options.visible_device_list = '0' set_session(tf.compat.v1.Session(config=config)) path = '/kaggle/input/face-mask-dataset-1' xname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_mask1.pickle' yname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_out_mask1.pickle' pickle_in = open(os.path.join(path, xname), 'rb') x = pickle.load(pickle_in) pickle_in = open(os.path.join(path, yname), 'rb') y = pickle.load(pickle_in) x = resize(x, (len(x), 64, 64, 1), anti_aliasing=False) y = resize(y, (len(y), 64, 64, 1), anti_aliasing=False) from keras import models from keras.models import model_from_json model = models.load_model('/kaggle/input/weight/results/results/500_mg_04-07-20_47.h5') model_json = model.to_json() with open('dcgan.json', 'w') as json_file: json_file.write(model_json) with open('/kaggle/input/build-in-models/dcgan.json', 'r') as json_file: json_savedModel = json_file.read() generator = tf.keras.models.model_from_json(json_savedModel) generator.load_weights('/kaggle/input/build-in-models/250_wg_12-07-12_42.h5/250_wg_12-07-12_42.h5') generator.compile(loss='mean_squared_error', optimizer=Adam(lr=2e-05)) # Making predictions and drawing them. # First row: Occluded images # Second row: Ground Truth images # Third row: Predictions import datetime plot_path = "./" a = 8690 b = 8700 pred=generator.predict(x[a:b]) fig = plt.figure(figsize = (20,10)) for ctr in range(10): fig.add_subplot(3,10,ctr+1) plt.imshow(np.reshape(x[a + ctr],(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(10 + ctr + 1)) plt.imshow(np.reshape(y[a + ctr]/255,(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(20 + ctr + 1)) plt.imshow(np.reshape(pred[ctr],(64,64)), cmap = "gray") plt.savefig(os.path.join(plot_path,str(datetime.datetime.now().strftime('%m-%d-%H:%M')))) import cv2 import matplotlib.image as mpimg img = mpimg.imread('/kaggle/input/real-time-face/WithMask/5)Thivagaran_surgical1.png') img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) img.shape plt.imshow(img) from skimage.transform import resize img = resize(img, (64, 64), anti_aliasing=False) plt.imshow(img, cmap='gray')
code
130013533/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
from skimage.transform import resize
import os
import pickle

path = '/kaggle/input/face-mask-dataset-1'
xname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_mask1.pickle'
yname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_out_mask1.pickle'
pickle_in = open(os.path.join(path, xname), 'rb')
x = pickle.load(pickle_in)
pickle_in = open(os.path.join(path, yname), 'rb')
y = pickle.load(pickle_in)
print(x.shape)
print(y.shape)
print(type(x))
x = resize(x, (len(x), 64, 64, 1), anti_aliasing=False)
y = resize(y, (len(y), 64, 64, 1), anti_aliasing=False)
print(x.shape)
print(y.shape)
code
130013533/cell_11
[ "text_plain_output_1.png" ]
from PIL import Image from keras import models from keras.backend import set_session from skimage.transform import resize from skimage.transform import resize from tensorflow.keras.optimizers import Adam import cv2 import datetime import datetime import datetime import matplotlib.image as mpimg import numpy as np import os import pickle import tensorflow as tf import tensorflow as tf import time import time import numpy as np import pandas as pd from matplotlib import pyplot as plt import pickle import os import csv import keras import tensorflow as tf from keras import backend from keras.backend import set_session from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, BatchNormalization from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Dropout, LeakyReLU, Conv2DTranspose, ReLU from tensorflow.keras.optimizers import Adam from skimage.transform import resize from keras.layers import Reshape from keras import layers import datetime from keras import initializers config = tf.compat.v1.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.9 config.gpu_options.visible_device_list = '0' set_session(tf.compat.v1.Session(config=config)) path = '/kaggle/input/face-mask-dataset-1' xname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_mask1.pickle' yname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_out_mask1.pickle' pickle_in = open(os.path.join(path, xname), 'rb') x = pickle.load(pickle_in) pickle_in = open(os.path.join(path, yname), 'rb') y = pickle.load(pickle_in) x = resize(x, (len(x), 64, 64, 1), anti_aliasing=False) y = resize(y, (len(y), 64, 64, 1), anti_aliasing=False) from keras import models from keras.models import model_from_json model = models.load_model('/kaggle/input/weight/results/results/500_mg_04-07-20_47.h5') model_json = model.to_json() with open('dcgan.json', 'w') as json_file: json_file.write(model_json) with open('/kaggle/input/build-in-models/dcgan.json', 'r') as json_file: json_savedModel = json_file.read() generator = tf.keras.models.model_from_json(json_savedModel) generator.load_weights('/kaggle/input/build-in-models/250_wg_12-07-12_42.h5/250_wg_12-07-12_42.h5') generator.compile(loss='mean_squared_error', optimizer=Adam(lr=2e-05)) # Making predictions and drawing them. 
# First row: Occluded images # Second row: Ground Truth images # Third row: Predictions import datetime plot_path = "./" a = 8690 b = 8700 pred=generator.predict(x[a:b]) fig = plt.figure(figsize = (20,10)) for ctr in range(10): fig.add_subplot(3,10,ctr+1) plt.imshow(np.reshape(x[a + ctr],(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(10 + ctr + 1)) plt.imshow(np.reshape(y[a + ctr]/255,(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(20 + ctr + 1)) plt.imshow(np.reshape(pred[ctr],(64,64)), cmap = "gray") plt.savefig(os.path.join(plot_path,str(datetime.datetime.now().strftime('%m-%d-%H:%M')))) import cv2 import matplotlib.image as mpimg img = mpimg.imread('/kaggle/input/real-time-face/WithMask/5)Thivagaran_surgical1.png') img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) img.shape from skimage.transform import resize img = resize(img, (64, 64), anti_aliasing=False) import datetime plot_path = './' img = np.expand_dims(img, axis=-1) img = np.expand_dims(img, axis=0) import time start_time = time.time() pred = generator.predict(img) inference_time = time.time() - start_time pred = np.reshape(pred, (64, 64)) pred = cv2.cvtColor(pred, cv2.COLOR_GRAY2RGB) from PIL import Image import tensorflow as tf import time print('in_predict') with open('/kaggle/input/build-in-models/model.json', 'r') as json_file: json_savedModel = json_file.read() generator = tf.keras.models.model_from_json(json_savedModel) generator.load_weights('/kaggle/input/build-in-models/model.h5') generator.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print('Model compiled') low_resolution_image = cv2.imread('/kaggle/input/objects-image/compress_image_32/compress_image_32/image00000.png') if low_resolution_image.shape >= (32, 32, 3): low_resolution_image = cv2.resize(low_resolution_image, (32, 32)) else: print('error') low_resolution_image = cv2.cvtColor(low_resolution_image, cv2.COLOR_BGR2RGB) low_resolution_image = low_resolution_image / 255.0 low_resolution_image = np.expand_dims(low_resolution_image, axis=0) start_time = time.time() high_resolution_image = generator.predict(low_resolution_image) inference_time = time.time() - start_time high_resolution_image = np.squeeze(high_resolution_image, axis=0) high_resolution_image = cv2.normalize(high_resolution_image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) img = Image.fromarray(high_resolution_image) plt.imshow(high_resolution_image) img.save('high_resolution_image.png') print('Inference time: {} seconds'.format(inference_time))
code
130013533/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras import models from keras.backend import set_session from skimage.transform import resize from skimage.transform import resize from tensorflow.keras.optimizers import Adam import cv2 import datetime import datetime import datetime import matplotlib.image as mpimg import numpy as np import os import pickle import tensorflow as tf import numpy as np import pandas as pd from matplotlib import pyplot as plt import pickle import os import csv import keras import tensorflow as tf from keras import backend from keras.backend import set_session from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, BatchNormalization from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Dropout, LeakyReLU, Conv2DTranspose, ReLU from tensorflow.keras.optimizers import Adam from skimage.transform import resize from keras.layers import Reshape from keras import layers import datetime from keras import initializers config = tf.compat.v1.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.9 config.gpu_options.visible_device_list = '0' set_session(tf.compat.v1.Session(config=config)) path = '/kaggle/input/face-mask-dataset-1' xname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_mask1.pickle' yname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_out_mask1.pickle' pickle_in = open(os.path.join(path, xname), 'rb') x = pickle.load(pickle_in) pickle_in = open(os.path.join(path, yname), 'rb') y = pickle.load(pickle_in) x = resize(x, (len(x), 64, 64, 1), anti_aliasing=False) y = resize(y, (len(y), 64, 64, 1), anti_aliasing=False) from keras import models from keras.models import model_from_json model = models.load_model('/kaggle/input/weight/results/results/500_mg_04-07-20_47.h5') model_json = model.to_json() with open('dcgan.json', 'w') as json_file: json_file.write(model_json) with open('/kaggle/input/build-in-models/dcgan.json', 'r') as json_file: json_savedModel = json_file.read() generator = tf.keras.models.model_from_json(json_savedModel) generator.load_weights('/kaggle/input/build-in-models/250_wg_12-07-12_42.h5/250_wg_12-07-12_42.h5') generator.compile(loss='mean_squared_error', optimizer=Adam(lr=2e-05)) # Making predictions and drawing them. # First row: Occluded images # Second row: Ground Truth images # Third row: Predictions import datetime plot_path = "./" a = 8690 b = 8700 pred=generator.predict(x[a:b]) fig = plt.figure(figsize = (20,10)) for ctr in range(10): fig.add_subplot(3,10,ctr+1) plt.imshow(np.reshape(x[a + ctr],(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(10 + ctr + 1)) plt.imshow(np.reshape(y[a + ctr]/255,(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(20 + ctr + 1)) plt.imshow(np.reshape(pred[ctr],(64,64)), cmap = "gray") plt.savefig(os.path.join(plot_path,str(datetime.datetime.now().strftime('%m-%d-%H:%M')))) import cv2 import matplotlib.image as mpimg img = mpimg.imread('/kaggle/input/real-time-face/WithMask/5)Thivagaran_surgical1.png') img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) img.shape from skimage.transform import resize img = resize(img, (64, 64), anti_aliasing=False) import datetime plot_path = './' img = np.expand_dims(img, axis=-1) print(img.shape)
code
130013533/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras import models from keras.backend import set_session from skimage.transform import resize from skimage.transform import resize from tensorflow.keras.optimizers import Adam import cv2 import datetime import datetime import datetime import matplotlib.image as mpimg import numpy as np import os import pickle import tensorflow as tf import numpy as np import pandas as pd from matplotlib import pyplot as plt import pickle import os import csv import keras import tensorflow as tf from keras import backend from keras.backend import set_session from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, BatchNormalization from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Dropout, LeakyReLU, Conv2DTranspose, ReLU from tensorflow.keras.optimizers import Adam from skimage.transform import resize from keras.layers import Reshape from keras import layers import datetime from keras import initializers config = tf.compat.v1.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.9 config.gpu_options.visible_device_list = '0' set_session(tf.compat.v1.Session(config=config)) path = '/kaggle/input/face-mask-dataset-1' xname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_mask1.pickle' yname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_out_mask1.pickle' pickle_in = open(os.path.join(path, xname), 'rb') x = pickle.load(pickle_in) pickle_in = open(os.path.join(path, yname), 'rb') y = pickle.load(pickle_in) x = resize(x, (len(x), 64, 64, 1), anti_aliasing=False) y = resize(y, (len(y), 64, 64, 1), anti_aliasing=False) from keras import models from keras.models import model_from_json model = models.load_model('/kaggle/input/weight/results/results/500_mg_04-07-20_47.h5') model_json = model.to_json() with open('dcgan.json', 'w') as json_file: json_file.write(model_json) with open('/kaggle/input/build-in-models/dcgan.json', 'r') as json_file: json_savedModel = json_file.read() generator = tf.keras.models.model_from_json(json_savedModel) generator.load_weights('/kaggle/input/build-in-models/250_wg_12-07-12_42.h5/250_wg_12-07-12_42.h5') generator.compile(loss='mean_squared_error', optimizer=Adam(lr=2e-05)) # Making predictions and drawing them. # First row: Occluded images # Second row: Ground Truth images # Third row: Predictions import datetime plot_path = "./" a = 8690 b = 8700 pred=generator.predict(x[a:b]) fig = plt.figure(figsize = (20,10)) for ctr in range(10): fig.add_subplot(3,10,ctr+1) plt.imshow(np.reshape(x[a + ctr],(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(10 + ctr + 1)) plt.imshow(np.reshape(y[a + ctr]/255,(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(20 + ctr + 1)) plt.imshow(np.reshape(pred[ctr],(64,64)), cmap = "gray") plt.savefig(os.path.join(plot_path,str(datetime.datetime.now().strftime('%m-%d-%H:%M')))) import cv2 import matplotlib.image as mpimg img = mpimg.imread('/kaggle/input/real-time-face/WithMask/5)Thivagaran_surgical1.png') img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) img.shape from skimage.transform import resize img = resize(img, (64, 64), anti_aliasing=False) import datetime plot_path = './' img = np.expand_dims(img, axis=-1) img = np.expand_dims(img, axis=0) print(img.shape)
code
130013533/cell_5
[ "text_plain_output_1.png" ]
from keras import models from keras.backend import set_session from skimage.transform import resize from tensorflow.keras.optimizers import Adam import datetime import datetime import numpy as np import os import pickle import tensorflow as tf import numpy as np import pandas as pd from matplotlib import pyplot as plt import pickle import os import csv import keras import tensorflow as tf from keras import backend from keras.backend import set_session from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, BatchNormalization from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Dropout, LeakyReLU, Conv2DTranspose, ReLU from tensorflow.keras.optimizers import Adam from skimage.transform import resize from keras.layers import Reshape from keras import layers import datetime from keras import initializers config = tf.compat.v1.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.9 config.gpu_options.visible_device_list = '0' set_session(tf.compat.v1.Session(config=config)) path = '/kaggle/input/face-mask-dataset-1' xname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_mask1.pickle' yname = '/kaggle/input/face-mask-dataset-1/celebA_real_with_out_mask1.pickle' pickle_in = open(os.path.join(path, xname), 'rb') x = pickle.load(pickle_in) pickle_in = open(os.path.join(path, yname), 'rb') y = pickle.load(pickle_in) x = resize(x, (len(x), 64, 64, 1), anti_aliasing=False) y = resize(y, (len(y), 64, 64, 1), anti_aliasing=False) from keras import models from keras.models import model_from_json model = models.load_model('/kaggle/input/weight/results/results/500_mg_04-07-20_47.h5') model_json = model.to_json() with open('dcgan.json', 'w') as json_file: json_file.write(model_json) with open('/kaggle/input/build-in-models/dcgan.json', 'r') as json_file: json_savedModel = json_file.read() generator = tf.keras.models.model_from_json(json_savedModel) generator.load_weights('/kaggle/input/build-in-models/250_wg_12-07-12_42.h5/250_wg_12-07-12_42.h5') generator.compile(loss='mean_squared_error', optimizer=Adam(lr=2e-05)) import datetime plot_path = './' a = 8690 b = 8700 pred = generator.predict(x[a:b]) fig = plt.figure(figsize=(20, 10)) for ctr in range(10): fig.add_subplot(3, 10, ctr + 1) plt.imshow(np.reshape(x[a + ctr], (64, 64)), cmap='gray') for ctr in range(10): fig.add_subplot(3, 10, 10 + ctr + 1) plt.imshow(np.reshape(y[a + ctr] / 255, (64, 64)), cmap='gray') for ctr in range(10): fig.add_subplot(3, 10, 20 + ctr + 1) plt.imshow(np.reshape(pred[ctr], (64, 64)), cmap='gray') plt.savefig(os.path.join(plot_path, str(datetime.datetime.now().strftime('%m-%d-%H:%M'))))
code
122252967/cell_4
[ "text_plain_output_1.png" ]
a = [1, 2, 3, 4, 5, 6, 7, 8, 9] print(a * 2) print(a + a)
code
122252967/cell_6
[ "text_plain_output_1.png" ]
b = {'한국': '서울', '중국': '베이징', '일본': '도쿄', '미국': '워싱턴'} for country in b: print(f'{country}의 수도는 {b[country]} 이다')
code
122252967/cell_2
[ "text_plain_output_1.png" ]
x = '안녕하세요' y = '반갑습니다' print(type(x)) print(x + y) print(x, y) print(x, y, sep=',')
code
122252967/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
c = set([1, 3, 5, 7, 9]) d = set([1, 2, 4, 6, 8]) print(c & d) print(c | d) print(c - d)
code
122252967/cell_14
[ "text_plain_output_1.png" ]
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = {'한국': '서울', '중국': '베이징', '일본': '도쿄', '미국': '워싱턴'}

class Person:

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def get_name(self):
        return self.name

    def get_age(self):
        return self.age
g = Person('Dave', 27)
h = Person('Tom', 32)
print(f'{g.get_name()} is {g.get_age()} years old')
print(f'{h.get_name()} is {h.get_age()} years old')
code
122252967/cell_10
[ "text_plain_output_1.png" ]
e = ((0, 1), (2, 3), (4, 5)) f = (0, 1, 2, 3, 4, 5) print(4 in e) print(4 in f)
code
122252967/cell_12
[ "text_plain_output_1.png" ]
x = '안녕하세요' y = '반갑습니다' def number(x): if x % 2 == 1: return 'odd' else: return 'even' num = [3, 6, 9] [number(x) for x in num]
code
72105169/cell_16
[ "text_plain_output_1.png" ]
from pathlib import Path from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split, KFold import lightgbm as lgbm import numpy as np import optuna import pandas as pd path = Path('/kaggle/input/house-prices-advanced-regression-techniques/') train_ = pd.read_csv(path.joinpath('train.csv')) test_ = pd.read_csv(path.joinpath('test.csv')) categorical_feature = [] for col in train_.columns.tolist(): if train_[col].dtype == object: categorical_feature.append(col) nan_train_cols = train_.columns[1:-1][train_.iloc[:, 1:-1].isnull().sum() > 0].tolist() for col in nan_train_cols: if col in categorical_feature: train_.loc[train_[col].isna(), col] = train_.loc[~train_[col].isna(), col].mode().values[0] else: train_.loc[train_[col].isna(), col] = train_.loc[~train_[col].isna(), col].mean() nan_test_cols = test_.columns[1:-1][test_.iloc[:, 1:-1].isnull().sum() > 0].tolist() for col in nan_test_cols: if col in categorical_feature: test_.loc[test_[col].isna(), col] = test_.loc[~test_[col].isna(), col].mode().values[0] else: test_.loc[test_[col].isna(), col] = test_.loc[~test_[col].isna(), col].mean() columns_drop = ['GarageCars', 'GarageYrBlt', 'GrLivArea', 'Id', 'YearRemodAdd'] train_.drop(columns=columns_drop, inplace=True) test_.drop(columns=columns_drop, inplace=True) all_ = pd.concat([train_, test_]) dumy = pd.get_dummies(all_[categorical_feature]) all_ = pd.concat([all_.loc[:, ~all_.columns.isin(categorical_feature)], dumy], axis=1) train_data = all_.iloc[0:1460, :] test_data = all_.iloc[1460:, :] test_data.drop(columns=['SalePrice'], inplace=True) feature_cols = train_data.columns.tolist() feature_cols.remove('SalePrice') train_data['SalePrice'] = np.log(train_data['SalePrice']) train_data, validation_data = train_test_split(train_data, test_size=0.2, random_state=42) train_data.reset_index(drop=True, inplace=True) validation_data.reset_index(drop=True, inplace=True) baseline = lgbm.LGBMRegressor() baseline.fit(train_data[feature_cols], train_data['SalePrice']) baseline_val_y = baseline.predict(validation_data[feature_cols]) base_line_score = np.sqrt(mean_squared_error(baseline_val_y, validation_data['SalePrice'].values)) def objective(trial, x_train, y_train, x_valid, y_valid): train_d = lgbm.Dataset(x_train, y_train) val_d = lgbm.Dataset(x_valid, y_valid) param = {'objective': 'regression', 'metric': 'rmse', 'verbosity': -1, 'boosting_type': trial.suggest_categorical('boosting_type', ['gbdt', 'rf', 'dart']), 'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-08, 10.0), 'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-08, 10.0), 'num_leaves': trial.suggest_int('num_leaves', 2, 10000), 'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 500)} gbm = lgbm.train(param, train_d, valid_sets=val_d, verbose_eval=100) off = gbm.predict(x_valid) error = mean_squared_error(y_valid, off) return np.sqrt(error) x_train, x_val = (train_data[feature_cols].values, validation_data[feature_cols].values) y_train, y_val = (train_data['SalePrice'].values, validation_data['SalePrice'].values) study = optuna.create_study(direction='minimize') study.optimize(lambda trial: objective(trial, x_train, y_train, x_val, y_val), n_trials=1000)
code
72105169/cell_17
[ "text_plain_output_100.png", "text_plain_output_334.png", "application_vnd.jupyter.stderr_output_145.png", "text_plain_output_770.png", "application_vnd.jupyter.stderr_output_791.png", "text_plain_output_640.png", "application_vnd.jupyter.stderr_output_493.png", "text_plain_output_822.png", "application_vnd.jupyter.stderr_output_667.png", "application_vnd.jupyter.stderr_output_289.png", "text_plain_output_586.png", "application_vnd.jupyter.stderr_output_313.png", "text_plain_output_522.png", "application_vnd.jupyter.stderr_output_373.png", "text_plain_output_84.png", "text_plain_output_624.png", "text_plain_output_322.png", "application_vnd.jupyter.stderr_output_529.png", "application_vnd.jupyter.stderr_output_27.png", "application_vnd.jupyter.stderr_output_115.png", "text_plain_output_826.png", "text_plain_output_828.png", "application_vnd.jupyter.stderr_output_207.png", "application_vnd.jupyter.stderr_output_341.png", "text_plain_output_824.png", "application_vnd.jupyter.stderr_output_723.png", "text_plain_output_608.png", "text_plain_output_56.png", "text_plain_output_158.png", "text_plain_output_218.png", "text_plain_output_264.png", "application_vnd.jupyter.stderr_output_35.png", "application_vnd.jupyter.stderr_output_637.png", "text_plain_output_282.png", "text_plain_output_396.png", "application_vnd.jupyter.stderr_output_745.png", "text_plain_output_232.png", "application_vnd.jupyter.stderr_output_499.png", "text_plain_output_362.png", "application_vnd.jupyter.stderr_output_77.png", "text_plain_output_258.png", "application_vnd.jupyter.stderr_output_417.png", "text_plain_output_452.png", "text_plain_output_130.png", "text_plain_output_598.png", "text_plain_output_490.png", "application_vnd.jupyter.stderr_output_461.png", "text_plain_output_790.png", "application_vnd.jupyter.stderr_output_205.png", "application_vnd.jupyter.stderr_output_203.png", "application_vnd.jupyter.stderr_output_575.png", "text_plain_output_462.png", "text_plain_output_286.png", "application_vnd.jupyter.stderr_output_185.png", "application_vnd.jupyter.stderr_output_227.png", "text_plain_output_750.png", "text_plain_output_262.png", "application_vnd.jupyter.stderr_output_287.png", "application_vnd.jupyter.stderr_output_9.png", "text_plain_output_278.png", "text_plain_output_588.png", "text_plain_output_254.png", "application_vnd.jupyter.stderr_output_553.png", "application_vnd.jupyter.stderr_output_335.png", "application_vnd.jupyter.stderr_output_215.png", "application_vnd.jupyter.stderr_output_691.png", "application_vnd.jupyter.stderr_output_283.png", "text_plain_output_570.png", "application_vnd.jupyter.stderr_output_815.png", "application_vnd.jupyter.stderr_output_643.png", "text_plain_output_674.png", "application_vnd.jupyter.stderr_output_631.png", "application_vnd.jupyter.stderr_output_449.png", "text_plain_output_98.png", "text_plain_output_718.png", "text_plain_output_236.png", "application_vnd.jupyter.stderr_output_223.png", "application_vnd.jupyter.stderr_output_767.png", "text_plain_output_756.png", "application_vnd.jupyter.stderr_output_435.png", "application_vnd.jupyter.stderr_output_219.png", "text_plain_output_678.png", "text_plain_output_688.png", "application_vnd.jupyter.stderr_output_279.png", "application_vnd.jupyter.stderr_output_771.png", "application_vnd.jupyter.stderr_output_81.png", "application_vnd.jupyter.stderr_output_111.png", "text_plain_output_614.png", "text_plain_output_768.png", "text_plain_output_420.png", "text_plain_output_514.png", 
"application_vnd.jupyter.stderr_output_53.png", "application_vnd.jupyter.stderr_output_605.png", "application_vnd.jupyter.stderr_output_131.png", "application_vnd.jupyter.stderr_output_695.png", "application_vnd.jupyter.stderr_output_437.png", "text_plain_output_284.png", "application_vnd.jupyter.stderr_output_545.png", "text_plain_output_576.png", "text_plain_output_78.png", "application_vnd.jupyter.stderr_output_99.png", "text_plain_output_106.png", "text_plain_output_138.png", "text_plain_output_670.png", "application_vnd.jupyter.stderr_output_385.png", "text_plain_output_544.png", "text_plain_output_192.png", "text_plain_output_426.png", "application_vnd.jupyter.stderr_output_183.png", "application_vnd.jupyter.stderr_output_181.png", "application_vnd.jupyter.stderr_output_299.png", "application_vnd.jupyter.stderr_output_141.png", "text_plain_output_184.png", "application_vnd.jupyter.stderr_output_807.png", "application_vnd.jupyter.stderr_output_737.png", "text_plain_output_274.png", "text_plain_output_172.png", "text_plain_output_664.png", "application_vnd.jupyter.stderr_output_297.png", "text_plain_output_716.png", "application_vnd.jupyter.stderr_output_837.png", "text_plain_output_736.png", "application_vnd.jupyter.stderr_output_93.png", "text_plain_output_332.png", "application_vnd.jupyter.stderr_output_563.png", "text_plain_output_684.png", "text_plain_output_774.png", "text_plain_output_256.png", "application_vnd.jupyter.stderr_output_651.png", "application_vnd.jupyter.stderr_output_641.png", "text_plain_output_90.png", "application_vnd.jupyter.stderr_output_713.png", "application_vnd.jupyter.stderr_output_471.png", "application_vnd.jupyter.stderr_output_655.png", "text_plain_output_642.png", "text_plain_output_550.png", "application_vnd.jupyter.stderr_output_123.png", "application_vnd.jupyter.stderr_output_465.png", "text_plain_output_48.png", "text_plain_output_388.png", "application_vnd.jupyter.stderr_output_391.png", "text_plain_output_422.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_126.png", "text_plain_output_676.png", "application_vnd.jupyter.stderr_output_765.png", "application_vnd.jupyter.stderr_output_731.png", "text_plain_output_704.png", "application_vnd.jupyter.stderr_output_355.png", "application_vnd.jupyter.stderr_output_421.png", "text_plain_output_776.png", "text_plain_output_492.png", "application_vnd.jupyter.stderr_output_431.png", "text_plain_output_272.png", "application_vnd.jupyter.stderr_output_73.png", "application_vnd.jupyter.stderr_output_137.png", "application_vnd.jupyter.stderr_output_823.png", "application_vnd.jupyter.stderr_output_133.png", "text_plain_output_748.png", "text_plain_output_474.png", "application_vnd.jupyter.stderr_output_165.png", "application_vnd.jupyter.stderr_output_715.png", "application_vnd.jupyter.stderr_output_381.png", "application_vnd.jupyter.stderr_output_75.png", "text_plain_output_482.png", "text_plain_output_316.png", "text_plain_output_634.png", "text_plain_output_656.png", "application_vnd.jupyter.stderr_output_627.png", "application_vnd.jupyter.stderr_output_585.png", "application_vnd.jupyter.stderr_output_365.png", "application_vnd.jupyter.stderr_output_693.png", "text_plain_output_390.png", "application_vnd.jupyter.stderr_output_513.png", "application_vnd.jupyter.stderr_output_593.png", "application_vnd.jupyter.stderr_output_653.png", "text_plain_output_198.png", "application_vnd.jupyter.stderr_output_321.png", 
"application_vnd.jupyter.stderr_output_629.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_467.png", "text_plain_output_548.png", "application_vnd.jupyter.stderr_output_407.png", "text_plain_output_178.png", "text_plain_output_226.png", "text_plain_output_154.png", "text_plain_output_234.png", "text_plain_output_404.png", "text_plain_output_114.png", "application_vnd.jupyter.stderr_output_537.png", "application_vnd.jupyter.stderr_output_447.png", "text_plain_output_494.png", "application_vnd.jupyter.stderr_output_11.png", "application_vnd.jupyter.stderr_output_361.png", "application_vnd.jupyter.stderr_output_155.png", "text_plain_output_470.png", "text_plain_output_496.png", "text_plain_output_836.png", "text_plain_output_70.png", "text_plain_output_712.png", "text_plain_output_484.png", "application_vnd.jupyter.stderr_output_649.png", "text_plain_output_44.png", "application_vnd.jupyter.stderr_output_423.png", "application_vnd.jupyter.stderr_output_277.png", "application_vnd.jupyter.stderr_output_291.png", "text_plain_output_796.png", "application_vnd.jupyter.stderr_output_231.png", "application_vnd.jupyter.stderr_output_317.png", "application_vnd.jupyter.stderr_output_65.png", "text_plain_output_546.png", "text_plain_output_540.png", "application_vnd.jupyter.stderr_output_813.png", "application_vnd.jupyter.stderr_output_785.png", "application_vnd.jupyter.stderr_output_443.png", "application_vnd.jupyter.stderr_output_235.png", "text_plain_output_504.png", "text_plain_output_86.png", "text_plain_output_244.png", "text_plain_output_118.png", "application_vnd.jupyter.stderr_output_453.png", "application_vnd.jupyter.stderr_output_751.png", "application_vnd.jupyter.stderr_output_179.png", "application_vnd.jupyter.stderr_output_143.png", "application_vnd.jupyter.stderr_output_409.png", "application_vnd.jupyter.stderr_output_615.png", "text_plain_output_40.png", "application_vnd.jupyter.stderr_output_817.png", "text_plain_output_74.png", "application_vnd.jupyter.stderr_output_171.png", "text_plain_output_734.png", "text_plain_output_190.png", "text_plain_output_302.png", "text_plain_output_604.png", "text_plain_output_340.png", "application_vnd.jupyter.stderr_output_351.png", "application_vnd.jupyter.stderr_output_105.png", "application_vnd.jupyter.stderr_output_763.png", "text_plain_output_20.png", "application_vnd.jupyter.stderr_output_275.png", "application_vnd.jupyter.stderr_output_345.png", "application_vnd.jupyter.stderr_output_495.png", "application_vnd.jupyter.stderr_output_729.png", "text_plain_output_706.png", "text_plain_output_102.png", "application_vnd.jupyter.stderr_output_749.png", "application_vnd.jupyter.stderr_output_577.png", "text_plain_output_686.png", "text_plain_output_802.png", "application_vnd.jupyter.stderr_output_439.png", "text_plain_output_414.png", "application_vnd.jupyter.stderr_output_371.png", "text_plain_output_510.png", "text_plain_output_222.png", "application_vnd.jupyter.stderr_output_795.png", "application_vnd.jupyter.stderr_output_779.png", "application_vnd.jupyter.stderr_output_747.png", "application_vnd.jupyter.stderr_output_253.png", "application_vnd.jupyter.stderr_output_803.png", "text_plain_output_530.png", "text_plain_output_144.png", "application_vnd.jupyter.stderr_output_633.png", "application_vnd.jupyter.stderr_output_389.png", "application_vnd.jupyter.stderr_output_489.png", "application_vnd.jupyter.stderr_output_323.png", "application_vnd.jupyter.stderr_output_387.png", "text_plain_output_132.png", 
"text_plain_output_60.png", "application_vnd.jupyter.stderr_output_393.png", "application_vnd.jupyter.stderr_output_623.png", "text_plain_output_764.png", "application_vnd.jupyter.stderr_output_31.png", "text_plain_output_502.png", "text_plain_output_794.png", "application_vnd.jupyter.stderr_output_125.png", "application_vnd.jupyter.stderr_output_809.png", "text_plain_output_596.png", "text_plain_output_564.png", "text_plain_output_552.png", "text_plain_output_720.png", "text_plain_output_654.png", "text_plain_output_330.png", "text_plain_output_638.png", "text_plain_output_434.png", "application_vnd.jupyter.stderr_output_113.png", "text_plain_output_68.png", "text_plain_output_618.png", "text_plain_output_64.png", "application_vnd.jupyter.stderr_output_755.png", "application_vnd.jupyter.stderr_output_221.png", "application_vnd.jupyter.stderr_output_599.png", "application_vnd.jupyter.stderr_output_305.png", "text_plain_output_818.png", "text_plain_output_532.png", "application_vnd.jupyter.stderr_output_497.png", "text_plain_output_200.png", "text_plain_output_666.png", "application_vnd.jupyter.stderr_output_383.png", "application_vnd.jupyter.stderr_output_33.png", "application_vnd.jupyter.stderr_output_519.png", "text_plain_output_746.png", "text_plain_output_628.png", "text_plain_output_398.png", "application_vnd.jupyter.stderr_output_725.png", "text_plain_output_312.png", "text_plain_output_248.png", "application_vnd.jupyter.stderr_output_245.png", "text_plain_output_318.png", "application_vnd.jupyter.stderr_output_25.png", "text_plain_output_808.png", "text_plain_output_690.png", "text_plain_output_52.png", "text_plain_output_758.png", "application_vnd.jupyter.stderr_output_699.png", "application_vnd.jupyter.stderr_output_419.png", "application_vnd.jupyter.stderr_output_697.png", "text_plain_output_572.png", "text_plain_output_594.png", "text_plain_output_66.png", "application_vnd.jupyter.stderr_output_609.png", "text_plain_output_446.png", "application_vnd.jupyter.stderr_output_403.png", "application_vnd.jupyter.stderr_output_249.png", "application_vnd.jupyter.stderr_output_229.png", "application_vnd.jupyter.stderr_output_263.png", "text_plain_output_380.png", "text_plain_output_692.png", "text_plain_output_442.png", "application_vnd.jupyter.stderr_output_273.png", "application_vnd.jupyter.stderr_output_525.png", "application_vnd.jupyter.stderr_output_135.png", "text_plain_output_300.png", "application_vnd.jupyter.stderr_output_769.png", "text_plain_output_660.png", "application_vnd.jupyter.stderr_output_555.png", "application_vnd.jupyter.stderr_output_211.png", "application_vnd.jupyter.stderr_output_821.png", "application_vnd.jupyter.stderr_output_517.png", "application_vnd.jupyter.stderr_output_777.png", "application_vnd.jupyter.stderr_output_503.png", "text_plain_output_476.png", "application_vnd.jupyter.stderr_output_515.png", "application_vnd.jupyter.stderr_output_757.png", "application_vnd.jupyter.stderr_output_463.png", "text_plain_output_740.png", "text_plain_output_518.png", "application_vnd.jupyter.stderr_output_285.png", "application_vnd.jupyter.stderr_output_177.png", "text_plain_output_14.png", "application_vnd.jupyter.stderr_output_527.png", "application_vnd.jupyter.stderr_output_665.png", "text_plain_output_32.png", "text_plain_output_516.png", "text_plain_output_304.png", "text_plain_output_88.png", "text_plain_output_240.png", "application_vnd.jupyter.stderr_output_89.png", "text_plain_output_140.png", "text_plain_output_606.png", 
"application_vnd.jupyter.stderr_output_269.png", "text_plain_output_376.png", "text_plain_output_280.png", "text_plain_output_728.png", "application_vnd.jupyter.stderr_output_535.png", "text_plain_output_242.png", "text_plain_output_460.png", "application_vnd.jupyter.stderr_output_189.png", "text_plain_output_160.png", "text_plain_output_58.png", "text_plain_output_680.png", "application_vnd.jupyter.stderr_output_149.png", "text_plain_output_622.png", "application_vnd.jupyter.stderr_output_91.png", "text_plain_output_708.png", "application_vnd.jupyter.stderr_output_239.png", "text_plain_output_260.png", "application_vnd.jupyter.stderr_output_95.png", "application_vnd.jupyter.stderr_output_541.png", "text_plain_output_294.png", "application_vnd.jupyter.stderr_output_559.png", "application_vnd.jupyter.stderr_output_827.png", "application_vnd.jupyter.stderr_output_583.png", "text_plain_output_392.png", "text_plain_output_320.png", "application_vnd.jupyter.stderr_output_67.png", "application_vnd.jupyter.stderr_output_237.png", "application_vnd.jupyter.stderr_output_339.png", "application_vnd.jupyter.stderr_output_689.png", "text_plain_output_386.png", "text_plain_output_438.png", "text_plain_output_76.png", "text_plain_output_108.png", "application_vnd.jupyter.stderr_output_337.png", "application_vnd.jupyter.stderr_output_481.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_10.png", "application_vnd.jupyter.stderr_output_539.png", "text_plain_output_700.png", "text_plain_output_276.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_71.png", "text_plain_output_326.png", "text_plain_output_744.png", "text_plain_output_578.png", "application_vnd.jupyter.stderr_output_259.png", "text_plain_output_170.png", "text_plain_output_92.png", "application_vnd.jupyter.stderr_output_819.png", "text_plain_output_658.png", "text_plain_output_120.png", "application_vnd.jupyter.stderr_output_293.png", "application_vnd.jupyter.stderr_output_709.png", "text_plain_output_24.png", "application_vnd.jupyter.stderr_output_257.png", "application_vnd.jupyter.stderr_output_775.png", "application_vnd.jupyter.stderr_output_23.png", "application_vnd.jupyter.stderr_output_159.png", "application_vnd.jupyter.stderr_output_799.png", "application_vnd.jupyter.stderr_output_735.png", "text_plain_output_344.png", "application_vnd.jupyter.stderr_output_325.png", "application_vnd.jupyter.stderr_output_663.png", "application_vnd.jupyter.stderr_output_247.png", "application_vnd.jupyter.stderr_output_657.png", "text_plain_output_104.png", "text_plain_output_784.png", "text_plain_output_270.png", "text_plain_output_466.png", "application_vnd.jupyter.stderr_output_675.png", "text_plain_output_568.png", "application_vnd.jupyter.stderr_output_677.png", "application_vnd.jupyter.stderr_output_59.png", "text_plain_output_134.png", "application_vnd.jupyter.stderr_output_589.png", "text_plain_output_288.png", "application_vnd.jupyter.stderr_output_197.png", "application_vnd.jupyter.stderr_output_369.png", "application_vnd.jupyter.stderr_output_459.png", "text_plain_output_488.png", "text_plain_output_18.png", "text_plain_output_266.png", "text_plain_output_208.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_646.png", "text_plain_output_766.png", "application_vnd.jupyter.stderr_output_441.png", "application_vnd.jupyter.stderr_output_543.png", "application_vnd.jupyter.stderr_output_485.png", "application_vnd.jupyter.stderr_output_83.png", 
"text_plain_output_96.png", "application_vnd.jupyter.stderr_output_19.png", "text_plain_output_418.png", "text_plain_output_180.png", "text_plain_output_556.png", "text_plain_output_210.png", "text_plain_output_112.png", "application_vnd.jupyter.stderr_output_505.png", "application_vnd.jupyter.stderr_output_281.png", "text_plain_output_152.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_127.png", "text_plain_output_798.png", "application_vnd.jupyter.stderr_output_705.png", "application_vnd.jupyter.stderr_output_661.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_290.png", "application_vnd.jupyter.stderr_output_571.png", "application_vnd.jupyter.stderr_output_587.png", "text_plain_output_506.png", "application_vnd.jupyter.stderr_output_195.png", "application_vnd.jupyter.stderr_output_331.png", "application_vnd.jupyter.stderr_output_561.png", "application_vnd.jupyter.stderr_output_721.png", "text_plain_output_324.png", "text_plain_output_22.png", "text_plain_output_188.png", "text_plain_output_366.png", "application_vnd.jupyter.stderr_output_327.png", "application_vnd.jupyter.stderr_output_5.png", "application_vnd.jupyter.stderr_output_261.png", "text_plain_output_328.png", "application_vnd.jupyter.stderr_output_483.png", "text_plain_output_730.png", "application_vnd.jupyter.stderr_output_301.png", "text_plain_output_368.png", "application_vnd.jupyter.stderr_output_347.png", "text_plain_output_372.png", "application_vnd.jupyter.stderr_output_411.png", "application_vnd.jupyter.stderr_output_687.png", "text_plain_output_542.png", "text_plain_output_146.png", "text_plain_output_754.png", "text_plain_output_454.png", "text_plain_output_806.png", "text_plain_output_814.png", "application_vnd.jupyter.stderr_output_241.png", "application_vnd.jupyter.stderr_output_405.png", "application_vnd.jupyter.stderr_output_475.png", "application_vnd.jupyter.stderr_output_163.png", "text_plain_output_338.png", "application_vnd.jupyter.stderr_output_647.png", "application_vnd.jupyter.stderr_output_151.png", "text_plain_output_512.png", "application_vnd.jupyter.stderr_output_617.png", "application_vnd.jupyter.stderr_output_103.png", "application_vnd.jupyter.stderr_output_109.png", "text_plain_output_738.png", "text_plain_output_382.png", "text_plain_output_38.png", "text_plain_output_682.png", "application_vnd.jupyter.stderr_output_367.png", "text_plain_output_528.png", "text_plain_output_648.png", "text_plain_output_214.png", "text_plain_output_166.png", "text_plain_output_358.png", "application_vnd.jupyter.stderr_output_97.png", "application_vnd.jupyter.stderr_output_473.png", "text_plain_output_726.png", "text_plain_output_714.png", "application_vnd.jupyter.stderr_output_395.png", "text_plain_output_314.png", "text_plain_output_592.png", "text_plain_output_410.png", "application_vnd.jupyter.stderr_output_669.png", "text_plain_output_432.png", "application_vnd.jupyter.stderr_output_201.png", "application_vnd.jupyter.stderr_output_307.png", "text_plain_output_308.png", "application_vnd.jupyter.stderr_output_673.png", "text_plain_output_16.png", "application_vnd.jupyter.stderr_output_551.png", "text_plain_output_174.png", "text_plain_output_812.png", "text_plain_output_212.png", "text_plain_output_652.png", "text_plain_output_644.png", "text_plain_output_230.png", "application_vnd.jupyter.stderr_output_15.png", "application_vnd.jupyter.stderr_output_603.png", "text_plain_output_430.png", "text_plain_output_742.png", "text_plain_output_630.png", 
"text_plain_output_778.png", "application_vnd.jupyter.stderr_output_591.png", "application_vnd.jupyter.stderr_output_579.png", "text_plain_output_378.png", "text_plain_output_580.png", "application_vnd.jupyter.stderr_output_679.png", "text_plain_output_206.png", "application_vnd.jupyter.stderr_output_671.png", "application_vnd.jupyter.stderr_output_479.png", "text_plain_output_788.png", "application_vnd.jupyter.stderr_output_611.png", "application_vnd.jupyter.stderr_output_739.png", "text_plain_output_732.png", "application_vnd.jupyter.stderr_output_635.png", "text_plain_output_8.png", "text_plain_output_122.png", "application_vnd.jupyter.stderr_output_193.png", "text_plain_output_384.png", "application_vnd.jupyter.stderr_output_17.png", "text_plain_output_498.png", "text_plain_output_662.png", "text_plain_output_780.png", "application_vnd.jupyter.stderr_output_87.png", "text_plain_output_182.png", "text_plain_output_26.png", "application_vnd.jupyter.stderr_output_619.png", "application_vnd.jupyter.stderr_output_711.png", "text_plain_output_830.png", "application_vnd.jupyter.stderr_output_187.png", "text_plain_output_554.png", "text_plain_output_536.png", "text_plain_output_620.png", "text_plain_output_406.png", "application_vnd.jupyter.stderr_output_445.png", "text_plain_output_310.png", "text_plain_output_760.png", "text_plain_output_456.png", "application_vnd.jupyter.stderr_output_477.png", "text_plain_output_558.png", "application_vnd.jupyter.stderr_output_455.png", "text_plain_output_668.png", "application_vnd.jupyter.stderr_output_469.png", "text_plain_output_702.png", "text_plain_output_724.png", "text_plain_output_220.png", "application_vnd.jupyter.stderr_output_117.png", "text_plain_output_834.png", "application_vnd.jupyter.stderr_output_625.png", "application_vnd.jupyter.stderr_output_413.png", "text_plain_output_238.png", "text_plain_output_520.png", "text_plain_output_616.png", "application_vnd.jupyter.stderr_output_401.png", "text_plain_output_34.png", "text_plain_output_612.png", "text_plain_output_832.png", "application_vnd.jupyter.stderr_output_789.png", "application_vnd.jupyter.stderr_output_531.png", "text_plain_output_346.png", "text_plain_output_168.png", "text_plain_output_800.png", "application_vnd.jupyter.stderr_output_69.png", "application_vnd.jupyter.stderr_output_487.png", "text_plain_output_394.png", "text_plain_output_204.png", "text_plain_output_350.png", "application_vnd.jupyter.stderr_output_41.png", "text_plain_output_636.png", "application_vnd.jupyter.stderr_output_157.png", "text_plain_output_42.png", "text_plain_output_110.png", "text_plain_output_508.png", "application_vnd.jupyter.stderr_output_805.png", "text_plain_output_468.png", "text_plain_output_370.png", "application_vnd.jupyter.stderr_output_377.png", "application_vnd.jupyter.stderr_output_727.png", "text_plain_output_224.png", "application_vnd.jupyter.stderr_output_167.png", "application_vnd.jupyter.stderr_output_797.png", "application_vnd.jupyter.stderr_output_79.png", "application_vnd.jupyter.stderr_output_825.png", "application_vnd.jupyter.stderr_output_49.png", "text_plain_output_696.png", "application_vnd.jupyter.stderr_output_743.png", "application_vnd.jupyter.stderr_output_753.png", "text_plain_output_816.png", "application_vnd.jupyter.stderr_output_333.png", "application_vnd.jupyter.stderr_output_793.png", "application_vnd.jupyter.stderr_output_569.png", "application_vnd.jupyter.stderr_output_63.png", "text_plain_output_610.png", "application_vnd.jupyter.stderr_output_47.png", 
"application_vnd.jupyter.stderr_output_621.png", "application_vnd.jupyter.stderr_output_607.png", "application_vnd.jupyter.stderr_output_681.png", "application_vnd.jupyter.stderr_output_57.png", "application_vnd.jupyter.stderr_output_363.png", "text_plain_output_632.png", "text_plain_output_626.png", "application_vnd.jupyter.stderr_output_169.png", "text_plain_output_450.png", "application_vnd.jupyter.stderr_output_547.png", "application_vnd.jupyter.stderr_output_801.png", "text_plain_output_252.png", "application_vnd.jupyter.stderr_output_683.png", "application_vnd.jupyter.stderr_output_415.png", "application_vnd.jupyter.stderr_output_343.png", "text_plain_output_296.png", "text_plain_output_672.png", "application_vnd.jupyter.stderr_output_567.png", "text_plain_output_28.png", "text_plain_output_72.png", "application_vnd.jupyter.stderr_output_173.png", "application_vnd.jupyter.stderr_output_511.png", "application_vnd.jupyter.stderr_output_319.png", "application_vnd.jupyter.stderr_output_191.png", "application_vnd.jupyter.stderr_output_399.png", "text_plain_output_820.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_602.png", "text_plain_output_246.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_433.png", "text_plain_output_772.png", "application_vnd.jupyter.stderr_output_213.png", "application_vnd.jupyter.stderr_output_829.png", "application_vnd.jupyter.stderr_output_613.png", "text_plain_output_710.png", "text_plain_output_500.png", "application_vnd.jupyter.stderr_output_349.png", "application_vnd.jupyter.stderr_output_645.png", "application_vnd.jupyter.stderr_output_397.png", "application_vnd.jupyter.stderr_output_491.png", "text_plain_output_562.png", "application_vnd.jupyter.stderr_output_429.png", "text_plain_output_196.png", "text_plain_output_342.png", "application_vnd.jupyter.stderr_output_835.png", "application_vnd.jupyter.stderr_output_295.png", "application_vnd.jupyter.stderr_output_29.png", "application_vnd.jupyter.stderr_output_101.png", "application_vnd.jupyter.stderr_output_359.png", "application_vnd.jupyter.stderr_output_267.png", "application_vnd.jupyter.stderr_output_225.png", "application_vnd.jupyter.stderr_output_209.png", "text_plain_output_786.png", "text_plain_output_650.png", "application_vnd.jupyter.stderr_output_139.png", "application_vnd.jupyter.stderr_output_717.png", "text_plain_output_150.png", "application_vnd.jupyter.stderr_output_1.png", "application_vnd.jupyter.stderr_output_533.png", "text_plain_output_752.png", "text_plain_output_176.png", "application_vnd.jupyter.stderr_output_217.png", "text_plain_output_584.png", "application_vnd.jupyter.stderr_output_61.png", "text_plain_output_186.png", "application_vnd.jupyter.stderr_output_51.png", "application_vnd.jupyter.stderr_output_311.png", "text_plain_output_228.png", "application_vnd.jupyter.stderr_output_831.png", "text_plain_output_478.png", "text_plain_output_762.png", "text_plain_output_412.png", "application_vnd.jupyter.stderr_output_353.png", "text_plain_output_268.png", "text_plain_output_436.png", "text_plain_output_354.png", "text_plain_output_360.png", "text_plain_output_82.png", "text_plain_output_356.png", "text_plain_output_202.png", "text_plain_output_698.png", "application_vnd.jupyter.stderr_output_161.png", "application_vnd.jupyter.stderr_output_379.png", "text_plain_output_336.png", "application_vnd.jupyter.stderr_output_427.png", "text_plain_output_80.png", "text_plain_output_94.png", "text_plain_output_164.png", 
"application_vnd.jupyter.stderr_output_707.png", "text_plain_output_534.png", "application_vnd.jupyter.stderr_output_233.png", "text_plain_output_444.png", "application_vnd.jupyter.stderr_output_153.png", "text_plain_output_216.png", "text_plain_output_124.png", "application_vnd.jupyter.stderr_output_45.png", "text_plain_output_148.png", "text_plain_output_694.png", "application_vnd.jupyter.stderr_output_659.png", "text_plain_output_402.png", "text_plain_output_722.png", "text_plain_output_424.png", "text_plain_output_486.png", "application_vnd.jupyter.stderr_output_639.png", "text_plain_output_250.png", "application_vnd.jupyter.stderr_output_425.png", "application_vnd.jupyter.stderr_output_509.png", "application_vnd.jupyter.stderr_output_175.png", "text_plain_output_560.png", "text_plain_output_526.png", "text_plain_output_400.png", "text_plain_output_804.png", "text_plain_output_524.png", "application_vnd.jupyter.stderr_output_601.png", "application_vnd.jupyter.stderr_output_457.png", "text_plain_output_538.png", "text_plain_output_12.png", "application_vnd.jupyter.stderr_output_773.png", "application_vnd.jupyter.stderr_output_39.png", "text_plain_output_838.png", "text_plain_output_408.png", "application_vnd.jupyter.stderr_output_119.png", "application_vnd.jupyter.stderr_output_581.png", "application_vnd.jupyter.stderr_output_781.png", "application_vnd.jupyter.stderr_output_741.png", "application_vnd.jupyter.stderr_output_787.png", "application_vnd.jupyter.stderr_output_309.png", "application_vnd.jupyter.stderr_output_107.png", "text_plain_output_428.png", "application_vnd.jupyter.stderr_output_255.png", "application_vnd.jupyter.stderr_output_21.png", "application_vnd.jupyter.stderr_output_523.png", "application_vnd.jupyter.stderr_output_733.png", "application_vnd.jupyter.stderr_output_719.png", "application_vnd.jupyter.stderr_output_43.png", "application_vnd.jupyter.stderr_output_357.png", "application_vnd.jupyter.stderr_output_595.png", "text_plain_output_416.png", "application_vnd.jupyter.stderr_output_265.png", "text_plain_output_194.png", "application_vnd.jupyter.stderr_output_685.png", "application_vnd.jupyter.stderr_output_85.png", "application_vnd.jupyter.stderr_output_521.png", "text_plain_output_782.png", "text_plain_output_62.png", "text_plain_output_480.png", "text_plain_output_810.png", "application_vnd.jupyter.stderr_output_597.png", "application_vnd.jupyter.stderr_output_761.png", "application_vnd.jupyter.stderr_output_783.png", "text_plain_output_440.png", "text_plain_output_458.png", "application_vnd.jupyter.stderr_output_549.png", "application_vnd.jupyter.stderr_output_271.png", "application_vnd.jupyter.stderr_output_55.png", "application_vnd.jupyter.stderr_output_501.png", "text_plain_output_464.png", "application_vnd.jupyter.stderr_output_303.png", "text_plain_output_156.png", "application_vnd.jupyter.stderr_output_147.png", "application_vnd.jupyter.stderr_output_375.png", "application_vnd.jupyter.stderr_output_811.png", "text_plain_output_298.png", "application_vnd.jupyter.stderr_output_121.png", "text_plain_output_348.png", "application_vnd.jupyter.stderr_output_451.png", "text_plain_output_448.png", "text_plain_output_364.png", "application_vnd.jupyter.stderr_output_703.png", "application_vnd.jupyter.stderr_output_329.png", "text_plain_output_792.png", "application_vnd.jupyter.stderr_output_243.png", "text_plain_output_352.png", "application_vnd.jupyter.stderr_output_759.png", "application_vnd.jupyter.stderr_output_199.png", "text_plain_output_374.png", 
"application_vnd.jupyter.stderr_output_565.png", "application_vnd.jupyter.stderr_output_573.png", "text_plain_output_472.png", "application_vnd.jupyter.stderr_output_129.png", "text_plain_output_566.png", "text_plain_output_600.png", "application_vnd.jupyter.stderr_output_507.png", "application_vnd.jupyter.stderr_output_251.png", "application_vnd.jupyter.stderr_output_557.png", "text_plain_output_292.png", "application_vnd.jupyter.stderr_output_315.png", "application_vnd.jupyter.stderr_output_37.png", "application_vnd.jupyter.stderr_output_701.png", "text_plain_output_574.png", "text_plain_output_582.png", "text_plain_output_306.png", "application_vnd.jupyter.stderr_output_833.png", "text_plain_output_46.png" ]
from pathlib import Path from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split, KFold import lightgbm as lgbm import numpy as np import optuna import pandas as pd path = Path('/kaggle/input/house-prices-advanced-regression-techniques/') train_ = pd.read_csv(path.joinpath('train.csv')) test_ = pd.read_csv(path.joinpath('test.csv')) categorical_feature = [] for col in train_.columns.tolist(): if train_[col].dtype == object: categorical_feature.append(col) nan_train_cols = train_.columns[1:-1][train_.iloc[:, 1:-1].isnull().sum() > 0].tolist() for col in nan_train_cols: if col in categorical_feature: train_.loc[train_[col].isna(), col] = train_.loc[~train_[col].isna(), col].mode().values[0] else: train_.loc[train_[col].isna(), col] = train_.loc[~train_[col].isna(), col].mean() nan_test_cols = test_.columns[1:-1][test_.iloc[:, 1:-1].isnull().sum() > 0].tolist() for col in nan_test_cols: if col in categorical_feature: test_.loc[test_[col].isna(), col] = test_.loc[~test_[col].isna(), col].mode().values[0] else: test_.loc[test_[col].isna(), col] = test_.loc[~test_[col].isna(), col].mean() columns_drop = ['GarageCars', 'GarageYrBlt', 'GrLivArea', 'Id', 'YearRemodAdd'] train_.drop(columns=columns_drop, inplace=True) test_.drop(columns=columns_drop, inplace=True) all_ = pd.concat([train_, test_]) dumy = pd.get_dummies(all_[categorical_feature]) all_ = pd.concat([all_.loc[:, ~all_.columns.isin(categorical_feature)], dumy], axis=1) train_data = all_.iloc[0:1460, :] test_data = all_.iloc[1460:, :] test_data.drop(columns=['SalePrice'], inplace=True) feature_cols = train_data.columns.tolist() feature_cols.remove('SalePrice') train_data['SalePrice'] = np.log(train_data['SalePrice']) train_data, validation_data = train_test_split(train_data, test_size=0.2, random_state=42) train_data.reset_index(drop=True, inplace=True) validation_data.reset_index(drop=True, inplace=True) baseline = lgbm.LGBMRegressor() baseline.fit(train_data[feature_cols], train_data['SalePrice']) baseline_val_y = baseline.predict(validation_data[feature_cols]) base_line_score = np.sqrt(mean_squared_error(baseline_val_y, validation_data['SalePrice'].values)) def objective(trial, x_train, y_train, x_valid, y_valid): train_d = lgbm.Dataset(x_train, y_train) val_d = lgbm.Dataset(x_valid, y_valid) param = {'objective': 'regression', 'metric': 'rmse', 'verbosity': -1, 'boosting_type': trial.suggest_categorical('boosting_type', ['gbdt', 'rf', 'dart']), 'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-08, 10.0), 'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-08, 10.0), 'num_leaves': trial.suggest_int('num_leaves', 2, 10000), 'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 500)} gbm = lgbm.train(param, train_d, valid_sets=val_d, verbose_eval=100) off = gbm.predict(x_valid) error = mean_squared_error(y_valid, off) return np.sqrt(error) x_train, x_val = (train_data[feature_cols].values, validation_data[feature_cols].values) y_train, y_val = (train_data['SalePrice'].values, validation_data['SalePrice'].values) study = optuna.create_study(direction='minimize') study.optimize(lambda trial: objective(trial, x_train, y_train, x_val, y_val), n_trials=1000) param = {'objective': 'regression', 'metric': 'rmse', 'verbosity': -1, 'boosting_type': 'gbdt'} 
param.update(study.best_trial.params)
print(param)
code
72105169/cell_14
[ "text_plain_output_1.png" ]
from pathlib import Path from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split, KFold import lightgbm as lgbm import numpy as np import pandas as pd path = Path('/kaggle/input/house-prices-advanced-regression-techniques/') train_ = pd.read_csv(path.joinpath('train.csv')) test_ = pd.read_csv(path.joinpath('test.csv')) categorical_feature = [] for col in train_.columns.tolist(): if train_[col].dtype == object: categorical_feature.append(col) nan_train_cols = train_.columns[1:-1][train_.iloc[:, 1:-1].isnull().sum() > 0].tolist() for col in nan_train_cols: if col in categorical_feature: train_.loc[train_[col].isna(), col] = train_.loc[~train_[col].isna(), col].mode().values[0] else: train_.loc[train_[col].isna(), col] = train_.loc[~train_[col].isna(), col].mean() nan_test_cols = test_.columns[1:-1][test_.iloc[:, 1:-1].isnull().sum() > 0].tolist() for col in nan_test_cols: if col in categorical_feature: test_.loc[test_[col].isna(), col] = test_.loc[~test_[col].isna(), col].mode().values[0] else: test_.loc[test_[col].isna(), col] = test_.loc[~test_[col].isna(), col].mean() columns_drop = ['GarageCars', 'GarageYrBlt', 'GrLivArea', 'Id', 'YearRemodAdd'] train_.drop(columns=columns_drop, inplace=True) test_.drop(columns=columns_drop, inplace=True) all_ = pd.concat([train_, test_]) dumy = pd.get_dummies(all_[categorical_feature]) all_ = pd.concat([all_.loc[:, ~all_.columns.isin(categorical_feature)], dumy], axis=1) train_data = all_.iloc[0:1460, :] test_data = all_.iloc[1460:, :] test_data.drop(columns=['SalePrice'], inplace=True) feature_cols = train_data.columns.tolist() feature_cols.remove('SalePrice') train_data['SalePrice'] = np.log(train_data['SalePrice']) train_data, validation_data = train_test_split(train_data, test_size=0.2, random_state=42) train_data.reset_index(drop=True, inplace=True) validation_data.reset_index(drop=True, inplace=True) baseline = lgbm.LGBMRegressor() baseline.fit(train_data[feature_cols], train_data['SalePrice']) baseline_val_y = baseline.predict(validation_data[feature_cols]) base_line_score = np.sqrt(mean_squared_error(baseline_val_y, validation_data['SalePrice'].values)) print(f'The base line score is : {base_line_score}')
code
73077056/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from netmiko import ConnectHandler import os from netmiko import ConnectHandler import os os.environ['NET_TEXTFSM'] = '/opt/conda/lib/python3.7/site-packages/ntc_templates/templates' linux = {'device_type': 'linux', 'host': '3.89.45.60', 'username': 'kevin', 'password': 'S!mpl312'} c = ConnectHandler(**linux) r = c.send_command('arp -a', use_textfsm=True) print(r) print(r[0]['ip_address']) for item in r: print(item) print(item['ip_address']) "\nEXPECTED OUTPUT:\n[{'rev_dns': '_gateway', 'ip_address': '172.30.1.1', 'mac_address': '0e:18:8d:7f:b8:65', 'hw_type': 'ether', 'interface': 'eth0'}]\n" '\nChassis type: ASR1004 \nSlot: R0, ASR1000-RP1 \n Running state : ok, active\n Internal state : online\n Internal operational state : ok\n Physical insert detect time : 00:00:45 (2w5d ago)\n Software declared up time : 00:00:45 (2w5d ago)\n CPLD version : 07062111\n Firmware version : 12.2(33r)XNC\nSlot: F0, ASR1000-ESP10 \n Running state : ok, active\n Internal state : online\n Internal operational state : ok\n Physical insert detect time : 00:00:45 (2w5d ago)\n Software declared up time : 00:03:15 (2w5d ago)\n Hardware ready signal time : 00:00:46 (2w5d ago)\n Packet ready signal time : 00:04:00 (2w5d ago)\n CPLD version : 07091401\n Firmware version : 12.2(33r)XNC\nSlot: P0, ASR1004-PWR-AC\n State : ok\n Physical insert detect time : 00:03:08 (2w5d ago)\nSlot: P1, ASR1004-PWR-AC\n State : ok\n Physical insert d\n'
code
73077056/cell_1
[ "text_plain_output_1.png" ]
!pip install netmiko
code
73077056/cell_3
[ "text_plain_output_1.png" ]
!pip install ntc_templates
code
34125991/cell_21
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import Dense from tensorflow.keras.layers import LSTM from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import pandas as pd df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01') df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True) df['DATE'] = df['DATE'].astype('datetime64[ns]') df.set_index('DATE', drop=True, inplace=True) test_size = 18 test_ind = len(df) - 18 train = df.iloc[:test_ind] test = df.iloc[test_ind:] scaler = MinMaxScaler() scaler.fit(train) scaled_train = scaler.transform(train) scaled_test = scaler.transform(test) length = 12 generator = TimeseriesGenerator(scaled_train, scaled_train, length=length, batch_size=1) X, y = generator[0] n_features = 1 def testmodel(): model = Sequential() model.add(LSTM(units=100, activation='relu', input_shape=(length, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') return model modeltest = testmodel() full_scaler = MinMaxScaler() scaled_full_data = full_scaler.fit_transform(df) length = 12 generator_final = TimeseriesGenerator(scaled_full_data, scaled_full_data, length=length, batch_size=1) def finalmodel(): model = Sequential() model.add(LSTM(100, activation='relu', input_shape=(length, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') return model modelfinal = finalmodel() modelfinal.fit_generator(generator_final, epochs=10)
code
34125991/cell_13
[ "text_html_output_2.png", "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.layers import Dense from tensorflow.keras.layers import LSTM from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import pandas as pd df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01') df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True) df['DATE'] = df['DATE'].astype('datetime64[ns]') df.set_index('DATE', drop=True, inplace=True) test_size = 18 test_ind = len(df) - 18 train = df.iloc[:test_ind] test = df.iloc[test_ind:] scaler = MinMaxScaler() scaler.fit(train) scaled_train = scaler.transform(train) scaled_test = scaler.transform(test) length = 12 generator = TimeseriesGenerator(scaled_train, scaled_train, length=length, batch_size=1) X, y = generator[0] n_features = 1 def testmodel(): model = Sequential() model.add(LSTM(units=100, activation='relu', input_shape=(length, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') return model modeltest = testmodel() early_stop = EarlyStopping(monitor='val_loss', patience=2) validation_generator = TimeseriesGenerator(scaled_test, scaled_test, length=length, batch_size=1) modeltest.fit_generator(generator, epochs=20, validation_data=validation_generator, callbacks=[early_stop])
code
34125991/cell_25
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.layers import Dense from tensorflow.keras.layers import LSTM from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot as plt import numpy as np import pandas as pd df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01') df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True) df['DATE'] = df['DATE'].astype('datetime64[ns]') df.set_index('DATE', drop=True, inplace=True) test_size = 18 test_ind = len(df) - 18 train = df.iloc[:test_ind] test = df.iloc[test_ind:] scaler = MinMaxScaler() scaler.fit(train) scaled_train = scaler.transform(train) scaled_test = scaler.transform(test) length = 12 generator = TimeseriesGenerator(scaled_train, scaled_train, length=length, batch_size=1) X, y = generator[0] n_features = 1 def testmodel(): model = Sequential() model.add(LSTM(units=100, activation='relu', input_shape=(length, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') return model modeltest = testmodel() early_stop = EarlyStopping(monitor='val_loss', patience=2) validation_generator = TimeseriesGenerator(scaled_test, scaled_test, length=length, batch_size=1) modeltest.fit_generator(generator, epochs=20, validation_data=validation_generator, callbacks=[early_stop]) losses = pd.DataFrame(modeltest.history.history) test_predictions = [] first_eval_batch = scaled_train[-length:] current_batch = first_eval_batch.reshape((1, length, n_features)) for i in range(len(test)): current_pred = modeltest.predict(current_batch)[0] test_predictions.append(current_pred) current_batch = np.append(current_batch[:, 1:, :], [[current_pred]], axis=1) true_predictions = scaler.inverse_transform(test_predictions) test['Predictions'] = true_predictions full_scaler = MinMaxScaler() scaled_full_data = full_scaler.fit_transform(df) length = 12 generator_final = TimeseriesGenerator(scaled_full_data, scaled_full_data, length=length, batch_size=1) def finalmodel(): model = Sequential() model.add(LSTM(100, activation='relu', input_shape=(length, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') return model modelfinal = finalmodel() modelfinal.fit_generator(generator_final, epochs=10) forecast = [] periods = 36 first_eval_batch = scaled_full_data[-length:] current_batch = first_eval_batch.reshape((1, length, n_features)) for i in range(periods): current_pred = modelfinal.predict(current_batch)[0] forecast.append(current_pred) current_batch = np.append(current_batch[:, 1:, :], [[current_pred]], axis=1) forecast = scaler.inverse_transform(forecast) forecast_index = pd.date_range(start='2020-04-01', periods=periods, freq='MS') forecast_df = pd.DataFrame(data=forecast, index=forecast_index, columns=['Forecast']) #plot the entire dataset and predictions ax = df.plot() forecast_df.plot(ax=ax) ax = df.plot() 
forecast_df.plot(ax=ax)
plt.xlim('2017-01-01', '2023-4-01')
code
34125991/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01') df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True) df['DATE'] = df['DATE'].astype('datetime64[ns]') df.set_index('DATE', drop=True, inplace=True) print('total entries = ', len(df)) test_size = 18 test_ind = len(df) - 18 train = df.iloc[:test_ind] test = df.iloc[test_ind:] display(train.head()) print('Train shape : ', train.shape) display(test.head()) print('Train shape : ', test.shape)
code
34125991/cell_23
[ "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.layers import Dense from tensorflow.keras.layers import LSTM from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot as plt import numpy as np import pandas as pd df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01') df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True) df['DATE'] = df['DATE'].astype('datetime64[ns]') df.set_index('DATE', drop=True, inplace=True) test_size = 18 test_ind = len(df) - 18 train = df.iloc[:test_ind] test = df.iloc[test_ind:] scaler = MinMaxScaler() scaler.fit(train) scaled_train = scaler.transform(train) scaled_test = scaler.transform(test) length = 12 generator = TimeseriesGenerator(scaled_train, scaled_train, length=length, batch_size=1) X, y = generator[0] n_features = 1 def testmodel(): model = Sequential() model.add(LSTM(units=100, activation='relu', input_shape=(length, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') return model modeltest = testmodel() early_stop = EarlyStopping(monitor='val_loss', patience=2) validation_generator = TimeseriesGenerator(scaled_test, scaled_test, length=length, batch_size=1) modeltest.fit_generator(generator, epochs=20, validation_data=validation_generator, callbacks=[early_stop]) losses = pd.DataFrame(modeltest.history.history) test_predictions = [] first_eval_batch = scaled_train[-length:] current_batch = first_eval_batch.reshape((1, length, n_features)) for i in range(len(test)): current_pred = modeltest.predict(current_batch)[0] test_predictions.append(current_pred) current_batch = np.append(current_batch[:, 1:, :], [[current_pred]], axis=1) true_predictions = scaler.inverse_transform(test_predictions) test['Predictions'] = true_predictions full_scaler = MinMaxScaler() scaled_full_data = full_scaler.fit_transform(df) length = 12 generator_final = TimeseriesGenerator(scaled_full_data, scaled_full_data, length=length, batch_size=1) def finalmodel(): model = Sequential() model.add(LSTM(100, activation='relu', input_shape=(length, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') return model modelfinal = finalmodel() modelfinal.fit_generator(generator_final, epochs=10) forecast = [] periods = 36 first_eval_batch = scaled_full_data[-length:] current_batch = first_eval_batch.reshape((1, length, n_features)) for i in range(periods): current_pred = modelfinal.predict(current_batch)[0] forecast.append(current_pred) current_batch = np.append(current_batch[:, 1:, :], [[current_pred]], axis=1) forecast = scaler.inverse_transform(forecast) forecast_index = pd.date_range(start='2020-04-01', periods=periods, freq='MS') forecast_df = pd.DataFrame(data=forecast, index=forecast_index, columns=['Forecast']) display(forecast_df.head())
code
34125991/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import pandas as pd
df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01')
df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True)
df['DATE'] = df['DATE'].astype('datetime64[ns]')
df.set_index('DATE', drop=True, inplace=True)
test_size = 18
test_ind = len(df) - 18
train = df.iloc[:test_ind]
test = df.iloc[test_ind:]
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
length = 12
generator = TimeseriesGenerator(scaled_train, scaled_train, length=length, batch_size=1)
X, y = generator[0]
print(f'Given the Array: \n{X.flatten()}')
print(f'Predict this y: \n {y}')
code
34125991/cell_16
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01')
df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True)
df['DATE'] = df['DATE'].astype('datetime64[ns]')
df.set_index('DATE', drop=True, inplace=True)
test_size = 18
test_ind = len(df) - 18
train = df.iloc[:test_ind]
test = df.iloc[test_ind:]
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
length = 12
generator = TimeseriesGenerator(scaled_train, scaled_train, length=length, batch_size=1)
X, y = generator[0]
n_features = 1

def testmodel():
    model = Sequential()
    model.add(LSTM(units=100, activation='relu', input_shape=(length, n_features)))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    return model
modeltest = testmodel()
early_stop = EarlyStopping(monitor='val_loss', patience=2)
validation_generator = TimeseriesGenerator(scaled_test, scaled_test, length=length, batch_size=1)
modeltest.fit_generator(generator, epochs=20, validation_data=validation_generator, callbacks=[early_stop])
losses = pd.DataFrame(modeltest.history.history)
test_predictions = []
first_eval_batch = scaled_train[-length:]
current_batch = first_eval_batch.reshape((1, length, n_features))
for i in range(len(test)):
    current_pred = modeltest.predict(current_batch)[0]
    test_predictions.append(current_pred)
    current_batch = np.append(current_batch[:, 1:, :], [[current_pred]], axis=1)
true_predictions = scaler.inverse_transform(test_predictions)
test['Predictions'] = true_predictions
display(test.head())
test.plot()
plt.show()
code
34125991/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01')
df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True)
df['DATE'] = df['DATE'].astype('datetime64[ns]')
df.set_index('DATE', drop=True, inplace=True)
print(df.info())
display(df.head())
df.plot(figsize=(12, 6))
code
34125991/cell_24
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01')
df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True)
df['DATE'] = df['DATE'].astype('datetime64[ns]')
df.set_index('DATE', drop=True, inplace=True)
test_size = 18
test_ind = len(df) - 18
train = df.iloc[:test_ind]
test = df.iloc[test_ind:]
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
length = 12
generator = TimeseriesGenerator(scaled_train, scaled_train, length=length, batch_size=1)
X, y = generator[0]
n_features = 1

def testmodel():
    model = Sequential()
    model.add(LSTM(units=100, activation='relu', input_shape=(length, n_features)))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    return model
modeltest = testmodel()
early_stop = EarlyStopping(monitor='val_loss', patience=2)
validation_generator = TimeseriesGenerator(scaled_test, scaled_test, length=length, batch_size=1)
modeltest.fit_generator(generator, epochs=20, validation_data=validation_generator, callbacks=[early_stop])
losses = pd.DataFrame(modeltest.history.history)
test_predictions = []
first_eval_batch = scaled_train[-length:]
current_batch = first_eval_batch.reshape((1, length, n_features))
for i in range(len(test)):
    current_pred = modeltest.predict(current_batch)[0]
    test_predictions.append(current_pred)
    current_batch = np.append(current_batch[:, 1:, :], [[current_pred]], axis=1)
true_predictions = scaler.inverse_transform(test_predictions)
test['Predictions'] = true_predictions
full_scaler = MinMaxScaler()
scaled_full_data = full_scaler.fit_transform(df)
length = 12
generator_final = TimeseriesGenerator(scaled_full_data, scaled_full_data, length=length, batch_size=1)

def finalmodel():
    model = Sequential()
    model.add(LSTM(100, activation='relu', input_shape=(length, n_features)))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    return model
modelfinal = finalmodel()
modelfinal.fit_generator(generator_final, epochs=10)
forecast = []
periods = 36
first_eval_batch = scaled_full_data[-length:]
current_batch = first_eval_batch.reshape((1, length, n_features))
for i in range(periods):
    current_pred = modelfinal.predict(current_batch)[0]
    forecast.append(current_pred)
    current_batch = np.append(current_batch[:, 1:, :], [[current_pred]], axis=1)
forecast = scaler.inverse_transform(forecast)
forecast_index = pd.date_range(start='2020-04-01', periods=periods, freq='MS')
forecast_df = pd.DataFrame(data=forecast, index=forecast_index, columns=['Forecast'])
ax = df.plot()
forecast_df.plot(ax=ax)
code
34125991/cell_14
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01')
df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True)
df['DATE'] = df['DATE'].astype('datetime64[ns]')
df.set_index('DATE', drop=True, inplace=True)
test_size = 18
test_ind = len(df) - 18
train = df.iloc[:test_ind]
test = df.iloc[test_ind:]
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
length = 12
generator = TimeseriesGenerator(scaled_train, scaled_train, length=length, batch_size=1)
X, y = generator[0]
n_features = 1

def testmodel():
    model = Sequential()
    model.add(LSTM(units=100, activation='relu', input_shape=(length, n_features)))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    return model
modeltest = testmodel()
early_stop = EarlyStopping(monitor='val_loss', patience=2)
validation_generator = TimeseriesGenerator(scaled_test, scaled_test, length=length, batch_size=1)
modeltest.fit_generator(generator, epochs=20, validation_data=validation_generator, callbacks=[early_stop])
losses = pd.DataFrame(modeltest.history.history)
losses.plot()
plt.show()
code
34125991/cell_10
[ "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import pandas as pd
df = pd.read_csv('https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=MRTSSM448USN&scale=left&cosd=1992-01-01&coed=2020-03-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2009-06-01&line_index=1&transformation=lin&vintage_date=2020-05-16&revision_date=2020-05-16&nd=1992-01-01')
df.rename(columns={'MRTSSM448USN': 'Sales'}, inplace=True)
df['DATE'] = df['DATE'].astype('datetime64[ns]')
df.set_index('DATE', drop=True, inplace=True)
test_size = 18
test_ind = len(df) - 18
train = df.iloc[:test_ind]
test = df.iloc[test_ind:]
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
length = 12
generator = TimeseriesGenerator(scaled_train, scaled_train, length=length, batch_size=1)
X, y = generator[0]
n_features = 1

def testmodel():
    model = Sequential()
    model.add(LSTM(units=100, activation='relu', input_shape=(length, n_features)))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    return model
modeltest = testmodel()
print(modeltest.summary())
code
18108171/cell_13
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
param = {'n_neighbors': [5, 10, 15, 20, 25, 30], 'p': [2, 3, 4, 5, 6]}
gsc = GridSearchCV(knn, param, cv=5, refit=True)
gsc.fit(X_train, y_train)
gsc.best_estimator_
code
18108171/cell_11
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
param = {'n_neighbors': [5, 10, 15, 20, 25, 30], 'p': [2, 3, 4, 5, 6]}
gsc = GridSearchCV(knn, param, cv=5, refit=True)
gsc.fit(X_train, y_train)
code
18108171/cell_19
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(6, 6))
cm = confusion_matrix(y_test, grid_predict)
sns.set(font_scale=1.25)
sns.heatmap(cm, annot=True, fmt='g', cbar=False, cmap='Blues')
plt.title('Confusion matrix')
code
18108171/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report
print(classification_report(y_test, grid_predict))
code
18108171/cell_14
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
param = {'n_neighbors': [5, 10, 15, 20, 25, 30], 'p': [2, 3, 4, 5, 6]}
gsc = GridSearchCV(knn, param, cv=5, refit=True)
gsc.fit(X_train, y_train)
gsc.best_estimator_
gsc.best_params_
code
18108171/cell_10
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
code
90153636/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import linear_model
import pandas as pd
df = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/toyota.csv')
X = df[['year', 'mileage']]
Y = df['price']
regr = linear_model.LinearRegression()
regr.fit(X.values, Y)
prediction = regr.predict([[2020, 20000]])
y_hat = regr.predict(X)
y_hat
code
90153636/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/toyota.csv')
import sklearn
from sklearn.linear_model import LinearRegression
len(df)
code
90153636/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/toyota.csv')
X = df[['year', 'mileage']]
Y = df['price']
X.head()
code
90153636/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/toyota.csv')
df.head()
code
90153636/cell_11
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
import pandas as pd
df = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/toyota.csv')
X = df[['year', 'mileage']]
Y = df['price']
regr = linear_model.LinearRegression()
regr.fit(X.values, Y)
prediction = regr.predict([[2020, 20000]])
y_hat = regr.predict(X)
y_hat
regr.score(X, Y)
code
90153636/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/toyota.csv')
X = df[['year', 'mileage']]
Y = df['price']
Y.head()
code
90153636/cell_8
[ "text_html_output_1.png" ]
from sklearn import linear_model
import pandas as pd
df = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/toyota.csv')
X = df[['year', 'mileage']]
Y = df['price']
regr = linear_model.LinearRegression()
regr.fit(X.values, Y)
print('intercept :', regr.intercept_)
print('coefficient :', regr.coef_)
print("Prediction : ['Year', 'Mileage']")
prediction = regr.predict([[2020, 20000]])
print('Price Prediction : ', prediction)
code
90153636/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/toyota.csv')
sns.scatterplot(x=df['year'], y=df['price'], hue=df['fuelType'], data=df)
code
90153636/cell_10
[ "text_html_output_1.png" ]
from sklearn import linear_model
import pandas as pd
df = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/toyota.csv')
X = df[['year', 'mileage']]
Y = df['price']
regr = linear_model.LinearRegression()
regr.fit(X.values, Y)
prediction = regr.predict([[2020, 20000]])
y_hat = regr.predict(X)
y_hat
dc = pd.concat([df[0:].reset_index(), pd.Series(y_hat, name='predicted')], axis='columns')
dc
code
49118983/cell_42
[ "text_plain_output_1.png" ]
import tensorflow as tf
import tensorflow.keras.layers as L
import tensorflow.keras.models as M
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
with tpu_strategy.scope():
    def make_ann(n_in):
        inp = L.Input(shape=(n_in,), name='inp')
        d1 = L.Dense(100, activation='relu', name='d1')(inp)
        d2 = L.Dense(100, activation='relu', name='d2')(d1)
        preds = L.Dense(1, activation='sigmoid', name='preds')(d2)
        model = M.Model(inp, preds, name='ANN')
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        return model
code
49118983/cell_21
[ "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import gc
import tensorflow as tf
import tensorflow.keras.models as M
import tensorflow.keras.layers as L
import riiideducation
INPUT_DIR = '/kaggle/input/riiid-test-answer-prediction/'
TRAIN_FILE = os.path.join(INPUT_DIR, 'train.csv')
TEST_FILE = os.path.join(INPUT_DIR, 'test.csv')
QUES_FILE = os.path.join(INPUT_DIR, 'questions.csv')
LEC_FILE = os.path.join(INPUT_DIR, 'lectures.csv')
tr = pd.read_csv(TRAIN_FILE, usecols=[1, 2, 3, 4, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})

def ds_to_pickle(ds, ds_file, pkl_file):
    ds.to_pickle(pkl_file)
    del ds
    return pd.read_pickle('tr.pkl')
tr = ds_to_pickle(tr, TRAIN_FILE, 'tr.pkl')
total_num_users = tr.user_id.unique().size
unique_user_ids = list(tr.user_id.unique())
total_num_ques = tr.loc[tr.content_type_id == 0].content_id.unique().size
unique_ques = list(tr.loc[tr.content_type_id == 0].content_id.unique())
num_ques_per_user = pd.DataFrame({'user_id': list(tr.loc[tr.content_type_id == 0].user_id.unique()), 'num_ques_answered': list(tr.loc[tr.content_type_id == 0].user_id.value_counts())})
num_ques_answered = num_ques_per_user.sort_values('num_ques_answered')['num_ques_answered'].to_frame(name='num_ques_answered')
print(num_ques_answered.min(), num_ques_answered.max())
code
49118983/cell_25
[ "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import gc
import tensorflow as tf
import tensorflow.keras.models as M
import tensorflow.keras.layers as L
import riiideducation
INPUT_DIR = '/kaggle/input/riiid-test-answer-prediction/'
TRAIN_FILE = os.path.join(INPUT_DIR, 'train.csv')
TEST_FILE = os.path.join(INPUT_DIR, 'test.csv')
QUES_FILE = os.path.join(INPUT_DIR, 'questions.csv')
LEC_FILE = os.path.join(INPUT_DIR, 'lectures.csv')
tr = pd.read_csv(TRAIN_FILE, usecols=[1, 2, 3, 4, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})

def ds_to_pickle(ds, ds_file, pkl_file):
    ds.to_pickle(pkl_file)
    del ds
    return pd.read_pickle('tr.pkl')
tr = ds_to_pickle(tr, TRAIN_FILE, 'tr.pkl')
total_num_users = tr.user_id.unique().size
unique_user_ids = list(tr.user_id.unique())
total_num_ques = tr.loc[tr.content_type_id == 0].content_id.unique().size
unique_ques = list(tr.loc[tr.content_type_id == 0].content_id.unique())
num_ques_per_user = pd.DataFrame({'user_id': list(tr.loc[tr.content_type_id == 0].user_id.unique()), 'num_ques_answered': list(tr.loc[tr.content_type_id == 0].user_id.value_counts())})
num_ques_answered = num_ques_per_user.sort_values('num_ques_answered')['num_ques_answered'].to_frame(name='num_ques_answered')

def remove_user_by_num_ques_ans(num_ques_ans_thresh=100, tr=None):
    num_ques_ans_filtered = num_ques_answered.loc[num_ques_answered.num_ques_answered > num_ques_ans_thresh].rename(columns={'num_ques_answered': 'num_ques_answered_gt_' + str(num_ques_ans_thresh)})
    num_ques_per_user_gt_thresh = num_ques_per_user.loc[num_ques_per_user.num_ques_answered > num_ques_ans_thresh].rename(columns={'num_ques_answered': 'num_ques_answered_gt' + str(num_ques_ans_thresh)})
    new_tr = tr[tr['user_id'].isin(list(num_ques_per_user_gt_thresh['user_id']))]
    return (num_ques_per_user_gt_thresh, new_tr)
num_ques_answered_gt_100, tr_user_ques_gt_100 = remove_user_by_num_ques_ans(100, tr=tr)
code
49118983/cell_34
[ "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import gc
import tensorflow as tf
import tensorflow.keras.models as M
import tensorflow.keras.layers as L
import riiideducation
INPUT_DIR = '/kaggle/input/riiid-test-answer-prediction/'
TRAIN_FILE = os.path.join(INPUT_DIR, 'train.csv')
TEST_FILE = os.path.join(INPUT_DIR, 'test.csv')
QUES_FILE = os.path.join(INPUT_DIR, 'questions.csv')
LEC_FILE = os.path.join(INPUT_DIR, 'lectures.csv')
tr = pd.read_csv(TRAIN_FILE, usecols=[1, 2, 3, 4, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})

def ds_to_pickle(ds, ds_file, pkl_file):
    ds.to_pickle(pkl_file)
    del ds
    return pd.read_pickle('tr.pkl')
tr = ds_to_pickle(tr, TRAIN_FILE, 'tr.pkl')
total_num_users = tr.user_id.unique().size
unique_user_ids = list(tr.user_id.unique())
total_num_ques = tr.loc[tr.content_type_id == 0].content_id.unique().size
unique_ques = list(tr.loc[tr.content_type_id == 0].content_id.unique())
num_ques_per_user = pd.DataFrame({'user_id': list(tr.loc[tr.content_type_id == 0].user_id.unique()), 'num_ques_answered': list(tr.loc[tr.content_type_id == 0].user_id.value_counts())})
num_ques_answered = num_ques_per_user.sort_values('num_ques_answered')['num_ques_answered'].to_frame(name='num_ques_answered')
new_num_rows = len(tr_user_ques_gt_100.index)
old_num_rows = len(tr.index)
tr_user_ques_gt_100.to_pickle('tr_user_ans_gt_100_ques.pkl')
tr = tr_user_ques_gt_100
TIME_MEAN = tr.prior_question_elapsed_time.median()
TIME_MIN = tr.prior_question_elapsed_time.min()
TIME_MAX = tr.prior_question_elapsed_time.max()
print(TIME_MEAN, TIME_MAX, TIME_MIN)
map_prior = {True: 1, False: 0}
code
49118983/cell_33
[ "text_plain_output_1.png" ]
piv1 = tr.loc[tr.answered_correctly != -1].groupby('content_id')['answered_correctly'].mean().reset_index()
piv1.columns = ['content_id', 'content_emb']
piv3 = tr.loc[tr.answered_correctly != -1].groupby('user_id')['answered_correctly'].mean().reset_index()
piv3.columns = ['user_id', 'user_emb']
code