path: stringlengths 13-17
screenshot_names: sequencelengths 1-873
code: stringlengths 0-40.4k
cell_type: stringclasses (1 value)
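For orientation, a minimal sketch of loading records that follow this schema, assuming they are exported as JSON Lines with the four columns above; the file name cells.jsonl is hypothetical.

import pandas as pd

df = pd.read_json('cells.jsonl', lines=True)  # one record per line: path, screenshot_names, code, cell_type
print(df[['path', 'cell_type']].head())       # e.g. '33096987/cell_20', 'code'
print(df['code'].str.len().max())             # code fields run up to ~40.4k characters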
33096987/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns
train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
33096987/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns
train_df.head()
code
33096987/cell_39
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def barplot(variable):
    """
    input : variable example: "Sex"
    output : barplot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plothist(variable):
    pass

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Fare'].isnull()]
code
33096987/cell_26
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
code
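A minimal, self-contained sketch of the Tukey fences that detect_outliers applies per feature (flag values below q1 - 1.5 * IQR or above q3 + 1.5 * IQR); the toy values are hypothetical.

import numpy as np

values = np.array([22, 24, 25, 26, 27, 29, 31, 95])  # hypothetical ages; 95 is extreme
q1, q3 = np.percentile(values, [25, 75])
step = 1.5 * (q3 - q1)
outliers = values[(values < q1 - step) | (values > q3 + step)]
print(outliers)  # the fences flag only 95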
33096987/cell_48
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def barplot(variable):
    """
    input : variable example: "Sex"
    output : barplot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plothist(variable):
    pass

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
list1 = ['SibSp', 'Age', 'Fare', 'Parch', 'Survived']
g = sns.factorplot(x="SibSp", y="Survived", data=train_df, kind="bar", size=6)
g.set_ylabels("Survived Probability")
g = sns.factorplot(x='Parch', y='Survived', data=train_df, kind='bar', size=6)
g.set_ylabels('Survived Probability')
code
33096987/cell_2
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33096987/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns
train_df.describe()
code
33096987/cell_45
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def barplot(variable):
    """
    input : variable example: "Sex"
    output : barplot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plothist(variable):
    pass

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
list1 = ['SibSp', 'Age', 'Fare', 'Parch', 'Survived']
g = sns.factorplot(x='SibSp', y='Survived', data=train_df, kind='bar', size=6)
g.set_ylabels('Survived Probability')
code
33096987/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def barplot(variable):
    """
    input : variable example: "Sex"
    output : barplot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plothist(variable):
    pass

numericVar = ['Fare', 'Age', 'PassengerId']
for i in numericVar:
    plothist(i)
code
33096987/cell_32
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
code
33096987/cell_51
[ "text_plain_output_1.png", "image_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def barplot(variable):
    """
    input : variable example: "Sex"
    output : barplot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plothist(variable):
    pass

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
list1 = ['SibSp', 'Age', 'Fare', 'Parch', 'Survived']
g = sns.factorplot(x="SibSp", y="Survived", data=train_df, kind="bar", size=6)
g.set_ylabels("Survived Probability")
g = sns.factorplot(x="Parch", y="Survived", data=train_df, kind="bar", size=6)
g.set_ylabels("Survived Probability")
g = sns.factorplot(x='Pclass', y='Survived', data=train_df, kind='bar', size=6)
g.set_ylabels('Survived Probability')
code
33096987/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns
category2 = ['Cabin', 'Name', 'Ticket']
for i in category2:
    print(f'{train_df[i].value_counts()} \n')
code
33096987/cell_35
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def barplot(variable):
    """
    input : variable example: "Sex"
    output : barplot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plothist(variable):
    pass

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df.boxplot(column='Fare', by='Embarked')
plt.show()
code
33096987/cell_31
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
code
33096987/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def barplot(variable):
    """
    input : variable example: "Sex"
    output : barplot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

category1 = ['Survived', 'Sex', 'Pclass', 'Embarked', 'SibSp', 'Parch']
for i in category1:
    barplot(i)
code
33096987/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns
train_df[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
33096987/cell_37
[ "text_plain_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def barplot(variable):
    """
    input : variable example: "Sex"
    output : barplot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plothist(variable):
    pass

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Fare'].isnull()]
code
33096987/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns
code
33096987/cell_36
[ "text_plain_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def barplot(variable):
    """
    input : variable example: "Sex"
    output : barplot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plothist(variable):
    pass

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df['Embarked'] = train_df['Embarked'].fillna('C')
train_df[train_df['Embarked'].isnull()]
code
90148984/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
len(y)
code
90148984/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
len(df)
code
90148984/cell_25
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
X_test = df.drop(columns=['price'])[:10]
X_test
y_hat = model.predict(X_test)
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model.score(X, y)
code
90148984/cell_23
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
X_test = df.drop(columns=['price'])[:10]
X_test
y_hat = model.predict(X_test)
dc = pd.concat([df[:10].reset_index(), pd.Series(y_hat, name='predicted')], axis='columns')
dc
code
90148984/cell_30
[ "text_html_output_1.png" ]
import numpy as np
import seaborn as sns
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
with sns.plotting_context("notebook", font_scale=2.5):
    g = sns.pairplot(dataset[['sqft_lot', 'sqft_above', 'price', 'sqft_living', 'bedrooms']], hue='bedrooms', palette='tab20', height=6)
    g.set(xticklabels=[])
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X_test = df.drop(columns=['price'])[:10]
X_test
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X_b = np.c_[np.ones((10, 1)), X]
X = X.to_numpy()
y = y.to_numpy()
sns.lmplot(x='sqft_living', y='price', data=df, ci=None)
code
90148984/cell_20
[ "text_plain_output_1.png" ]
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X_test = df.drop(columns=['price'])[:10]
X_test
code
90148984/cell_6
[ "text_plain_output_1.png" ]
import seaborn as sns
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
sns.lineplot(x='yr_built', y='sqft_living', data=df, ci=None)
code
90148984/cell_29
[ "text_html_output_1.png" ]
import numpy as np
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X_test = df.drop(columns=['price'])[:10]
X_test
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X_b = np.c_[np.ones((10, 1)), X]  # prepend a bias column; 18 features + bias = 19 columns
X = X.to_numpy()
y = y.to_numpy().reshape(-1, 1)  # column vector, so X_b.dot(theta) - y stays (10, 1); a flat y broadcasts to (10, 10)
eta = 0.1
n_iterations = 10
m = 100  # note: the gradient divides by 100 although only 10 samples are used
theta = np.random.randn(19, 1)
for iteration in range(n_iterations):
    gradients = 2 / m * X_b.T.dot(X_b.dot(theta) - y)
    theta = theta - eta * gradients
theta
code
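For comparison, a minimal sketch of the closed-form least-squares solution that the gradient-descent loop above approximates, on hypothetical toy data where y = 2x exactly:

import numpy as np

X = np.array([[1.0], [2.0], [3.0]])              # one feature, three samples
y = np.array([[2.0], [4.0], [6.0]])              # column vector, shape (3, 1)
X_b = np.c_[np.ones((3, 1)), X]                  # prepend bias column
theta = np.linalg.pinv(X_b.T @ X_b) @ X_b.T @ y  # normal equation
print(theta.ravel())                             # approximately [0, 2]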
90148984/cell_2
[ "text_plain_output_1.png" ]
dataset.columns
code
90148984/cell_19
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
code
90148984/cell_1
[ "text_html_output_1.png" ]
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LinearRegression
dataset = pd.read_csv('../input/kc-house-data/kc_house_data.csv')
dataset.head()
code
90148984/cell_7
[ "text_plain_output_1.png" ]
import seaborn as sns
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
sns.lmplot(x='bedrooms', y='price', data=df, ci=None)
code
90148984/cell_18
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
code
90148984/cell_28
[ "text_plain_output_1.png" ]
import numpy as np
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X_test = df.drop(columns=['price'])[:10]
X_test
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X_b = np.c_[np.ones((10, 1)), X]
X = X.to_numpy()
y = y.to_numpy()
len(y)
code
90148984/cell_8
[ "text_plain_output_1.png" ]
import seaborn as sns
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
with sns.plotting_context('notebook', font_scale=2.5):
    g = sns.pairplot(dataset[['sqft_lot', 'sqft_above', 'price', 'sqft_living', 'bedrooms']], hue='bedrooms', palette='tab20', height=6)
    g.set(xticklabels=[])
code
90148984/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X.head()
code
90148984/cell_16
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
code
90148984/cell_3
[ "text_plain_output_1.png" ]
dataset.columns
print(dataset.dtypes)
code
90148984/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
code
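A minimal sketch of what model.score returns for a regressor: the coefficient of determination R^2 = 1 - SS_res / SS_tot, computed here by hand on hypothetical arrays.

import numpy as np

y_true = np.array([3.0, 5.0, 7.0])
y_pred = np.array([2.8, 5.1, 7.2])
ss_res = np.sum((y_true - y_pred) ** 2)          # residual sum of squares
ss_tot = np.sum((y_true - y_true.mean()) ** 2)   # total sum of squares
print(1 - ss_res / ss_tot)                       # ~0.989, identical to sklearn's r2_score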
90148984/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
y.head()
code
90148984/cell_22
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
X_test = df.drop(columns=['price'])[:10]
X_test
y_hat = model.predict(X_test)
y_hat
code
90148984/cell_10
[ "text_plain_output_1.png" ]
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
code
90148984/cell_27
[ "text_plain_output_1.png" ]
import numpy as np
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X_test = df.drop(columns=['price'])[:10]
X_test
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X_b = np.c_[np.ones((10, 1)), X]
X = X.to_numpy()
y = y.to_numpy()
len(X)
code
90148984/cell_12
[ "text_plain_output_1.png" ]
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
len(X)
code
90148984/cell_5
[ "text_plain_output_1.png" ]
import seaborn as sns
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
sns.lmplot(x='price', y='sqft_living', data=df, ci=None)
code
50212838/cell_13
[ "text_html_output_1.png" ]
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
import networkx as nx
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
data['date_added'] = pd.to_datetime(data['date_added'])
data['year'] = data['date_added'].dt.year
data['month'] = data['date_added'].dt.month
data['day'] = data['date_added'].dt.day
data['directors'] = data['director'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['actors'] = data['cast'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['categories'] = data['listed_in'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['countries'] = data['country'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.cluster import MiniBatchKMeans
text_content = data['description']
vector = TfidfVectorizer(max_df=0.3, min_df=1, stop_words='english', lowercase=True, use_idf=True, norm=u'l2', smooth_idf=True)
tfidf = vector.fit_transform(text_content)
kmeans = MiniBatchKMeans(n_clusters=200)
kmeans.fit(tfidf)
centers = kmeans.cluster_centers_.argsort()[:, ::-1]
terms = vector.get_feature_names()
request_transform = vector.transform(data['description'])
data['cluster'] = kmeans.predict(request_transform)

def find_similar(tfidf_matrix, index, top_n=5):
    cosine_similarities = linear_kernel(tfidf_matrix[index:index + 1], tfidf_matrix).flatten()
    related_docs_indices = [i for i in cosine_similarities.argsort()[::-1] if i != index]
    return [index for index in related_docs_indices][0:top_n]

G = nx.Graph(label='NETFLIX')
for i, row in data.iterrows():
    G.add_node(row['title'], key=row['show_id'], label='MOVIE', mtype=row['type'], rating=row['rating'])
    for j in row['actors']:
        G.add_node(j, label='PERSON')
        G.add_edge(row['title'], j, label='ACTED_IN')
    for j in row['directors']:
        G.add_node(j, label='PERSON')
        G.add_edge(row['title'], j, label='DIRECTED')
    for j in row['categories']:
        G.add_node(j, label='CAT')
        G.add_edge(row['title'], j, label='CAT_IN')
    for j in row['countries']:
        G.add_node(j, label='COUNTRY')
        G.add_edge(row['title'], j, label='COUNTRY_IN')
for i, row in data.iterrows():
    similar = find_similar(tfidf, i, top_n=5)
    for e in similar:
        G.add_edge(row['title'], data['title'].loc[e], label='SIMILAR_TO')
G.number_of_nodes()
code
50212838/cell_9
[ "text_plain_output_1.png" ]
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
data['date_added'] = pd.to_datetime(data['date_added'])
data['year'] = data['date_added'].dt.year
data['month'] = data['date_added'].dt.month
data['day'] = data['date_added'].dt.day
data['directors'] = data['director'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['actors'] = data['cast'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['categories'] = data['listed_in'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['countries'] = data['country'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.cluster import MiniBatchKMeans
text_content = data['description']
vector = TfidfVectorizer(max_df=0.3, min_df=1, stop_words='english', lowercase=True, use_idf=True, norm=u'l2', smooth_idf=True)
tfidf = vector.fit_transform(text_content)
kmeans = MiniBatchKMeans(n_clusters=200)
kmeans.fit(tfidf)
centers = kmeans.cluster_centers_.argsort()[:, ::-1]
terms = vector.get_feature_names()
request_transform = vector.transform(data['description'])
data['cluster'] = kmeans.predict(request_transform)
print(request_transform)
code
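A minimal sketch of how linear_kernel on L2-normalised TF-IDF rows yields cosine similarities, which the find_similar helper in the neighbouring cells uses to rank related descriptions; the toy corpus is hypothetical.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

docs = ["a spy thriller in berlin", "a berlin spy story", "a cooking show"]
tfidf = TfidfVectorizer().fit_transform(docs)     # rows are L2-normalised by default
sims = linear_kernel(tfidf[0:1], tfidf).flatten() # dot products = cosine similarities
print(sims.argsort()[::-1])                       # doc 0 first (itself), then doc 1, then doc 2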
50212838/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50212838/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
data['date_added'] = pd.to_datetime(data['date_added'])
data['year'] = data['date_added'].dt.year
data['month'] = data['date_added'].dt.month
data['day'] = data['date_added'].dt.day
data['directors'] = data['director'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['actors'] = data['cast'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['categories'] = data['listed_in'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['countries'] = data['country'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data.head()
code
50212838/cell_8
[ "image_output_1.png" ]
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
data['date_added'] = pd.to_datetime(data['date_added'])
data['year'] = data['date_added'].dt.year
data['month'] = data['date_added'].dt.month
data['day'] = data['date_added'].dt.day
data['directors'] = data['director'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['actors'] = data['cast'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['categories'] = data['listed_in'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['countries'] = data['country'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.cluster import MiniBatchKMeans
text_content = data['description']
vector = TfidfVectorizer(max_df=0.3, min_df=1, stop_words='english', lowercase=True, use_idf=True, norm=u'l2', smooth_idf=True)
tfidf = vector.fit_transform(text_content)
kmeans = MiniBatchKMeans(n_clusters=200)
kmeans.fit(tfidf)
centers = kmeans.cluster_centers_.argsort()[:, ::-1]
terms = vector.get_feature_names()
request_transform = vector.transform(data['description'])
data['cluster'] = kmeans.predict(request_transform)
data['cluster'].value_counts().head()
code
50212838/cell_16
[ "text_plain_output_1.png" ]
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
data['date_added'] = pd.to_datetime(data['date_added'])
data['year'] = data['date_added'].dt.year
data['month'] = data['date_added'].dt.month
data['day'] = data['date_added'].dt.day
data['directors'] = data['director'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['actors'] = data['cast'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['categories'] = data['listed_in'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['countries'] = data['country'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.cluster import MiniBatchKMeans
text_content = data['description']
vector = TfidfVectorizer(max_df=0.3, min_df=1, stop_words='english', lowercase=True, use_idf=True, norm=u'l2', smooth_idf=True)
tfidf = vector.fit_transform(text_content)
kmeans = MiniBatchKMeans(n_clusters=200)
kmeans.fit(tfidf)
centers = kmeans.cluster_centers_.argsort()[:, ::-1]
terms = vector.get_feature_names()
request_transform = vector.transform(data['description'])
data['cluster'] = kmeans.predict(request_transform)

def find_similar(tfidf_matrix, index, top_n=5):
    cosine_similarities = linear_kernel(tfidf_matrix[index:index + 1], tfidf_matrix).flatten()
    related_docs_indices = [i for i in cosine_similarities.argsort()[::-1] if i != index]
    return [index for index in related_docs_indices][0:top_n]

G = nx.Graph(label='NETFLIX')
for i, row in data.iterrows():
    G.add_node(row['title'], key=row['show_id'], label='MOVIE', mtype=row['type'], rating=row['rating'])
    for j in row['actors']:
        G.add_node(j, label='PERSON')
        G.add_edge(row['title'], j, label='ACTED_IN')
    for j in row['directors']:
        G.add_node(j, label='PERSON')
        G.add_edge(row['title'], j, label='DIRECTED')
    for j in row['categories']:
        G.add_node(j, label='CAT')
        G.add_edge(row['title'], j, label='CAT_IN')
    for j in row['countries']:
        G.add_node(j, label='COUNTRY')
        G.add_edge(row['title'], j, label='COUNTRY_IN')
for i, row in data.iterrows():
    similar = find_similar(tfidf, i, top_n=5)
    for e in similar:
        G.add_edge(row['title'], data['title'].loc[e], label='SIMILAR_TO')
G.number_of_nodes()
G.number_of_edges()

def get_all_adj_nodes(list_in):
    sub_graph = set()
    for m in list_in:
        sub_graph.add(m)
        for e in G.neighbors(m):
            sub_graph.add(e)
    return list(sub_graph)

def draw_sub_graph(sub_graph):
    subgraph = G.subgraph(sub_graph)
    colors = []
    for e in subgraph.nodes():
        if G.nodes[e]['label'] == 'MOVIE':
            colors.append('blue')
        elif G.nodes[e]['label'] == 'PERSON':
            colors.append('red')
        elif G.nodes[e]['label'] == 'CAT':
            colors.append('green')
        elif G.nodes[e]['label'] == 'COUNTRY':
            colors.append('yellow')
        elif G.nodes[e]['label'] == 'SIMILAR_TO':
            colors.append('orange')

list_in = ["Ocean's Twelve", "Ocean's Thirteen"]
plt.style.use('seaborn')
plt.rcParams['figure.figsize'] = [14, 14]
sub_graph = get_all_adj_nodes(list_in)
draw_sub_graph(sub_graph)
code
50212838/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
data.head()
code
50212838/cell_14
[ "text_html_output_1.png" ]
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
import networkx as nx
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
data['date_added'] = pd.to_datetime(data['date_added'])
data['year'] = data['date_added'].dt.year
data['month'] = data['date_added'].dt.month
data['day'] = data['date_added'].dt.day
data['directors'] = data['director'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['actors'] = data['cast'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['categories'] = data['listed_in'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['countries'] = data['country'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.cluster import MiniBatchKMeans
text_content = data['description']
vector = TfidfVectorizer(max_df=0.3, min_df=1, stop_words='english', lowercase=True, use_idf=True, norm=u'l2', smooth_idf=True)
tfidf = vector.fit_transform(text_content)
kmeans = MiniBatchKMeans(n_clusters=200)
kmeans.fit(tfidf)
centers = kmeans.cluster_centers_.argsort()[:, ::-1]
terms = vector.get_feature_names()
request_transform = vector.transform(data['description'])
data['cluster'] = kmeans.predict(request_transform)

def find_similar(tfidf_matrix, index, top_n=5):
    cosine_similarities = linear_kernel(tfidf_matrix[index:index + 1], tfidf_matrix).flatten()
    related_docs_indices = [i for i in cosine_similarities.argsort()[::-1] if i != index]
    return [index for index in related_docs_indices][0:top_n]

G = nx.Graph(label='NETFLIX')
for i, row in data.iterrows():
    G.add_node(row['title'], key=row['show_id'], label='MOVIE', mtype=row['type'], rating=row['rating'])
    for j in row['actors']:
        G.add_node(j, label='PERSON')
        G.add_edge(row['title'], j, label='ACTED_IN')
    for j in row['directors']:
        G.add_node(j, label='PERSON')
        G.add_edge(row['title'], j, label='DIRECTED')
    for j in row['categories']:
        G.add_node(j, label='CAT')
        G.add_edge(row['title'], j, label='CAT_IN')
    for j in row['countries']:
        G.add_node(j, label='COUNTRY')
        G.add_edge(row['title'], j, label='COUNTRY_IN')
for i, row in data.iterrows():
    similar = find_similar(tfidf, i, top_n=5)
    for e in similar:
        G.add_edge(row['title'], data['title'].loc[e], label='SIMILAR_TO')
G.number_of_nodes()
G.number_of_edges()
code
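A minimal sketch of the labelled-graph pattern used above: titles and attribute values become nodes, typed edges connect them, and neighbors() recovers everything attached to a title. The data is hypothetical.

import networkx as nx

G = nx.Graph(label='DEMO')
G.add_node('Movie A', label='MOVIE')
for actor in ['Actor 1', 'Actor 2']:
    G.add_node(actor, label='PERSON')
    G.add_edge('Movie A', actor, label='ACTED_IN')
print(list(G.neighbors('Movie A')))              # ['Actor 1', 'Actor 2']
print(G.number_of_nodes(), G.number_of_edges())  # 3 2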
50212838/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
data['date_added'] = pd.to_datetime(data['date_added'])
data['year'] = data['date_added'].dt.year
data['month'] = data['date_added'].dt.month
data['day'] = data['date_added'].dt.day
data['directors'] = data['director'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['actors'] = data['cast'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['categories'] = data['listed_in'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
data['countries'] = data['country'].apply(lambda x: [] if pd.isna(x) else [i.strip() for i in x.split(',')])
print(data['cluster'])
code
50212838/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
data.describe()
code
1008986/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
import itertools

def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment='center', color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

def show_data(cm, title='Confusion matrix', print_res=0):
    tp = cm[1, 1]
    fn = cm[1, 0]
    fp = cm[0, 1]
    tn = cm[0, 0]
    return (tp / (tp + fp), tp / (tp + fn), fp / (fp + tn))

df = pd.read_csv('../input/creditcard.csv')  # was '..input/creditcard.csv', which cannot resolve
y = np.array(df.Class.tolist())
df = df.drop('Class', 1)
df = df.drop('Time', 1)
df['Amount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1, 1))
X = np.array(df.values)  # .values instead of DataFrame.as_matrix(), removed in newer pandas
lrn = LogisticRegression()
skf = StratifiedKFold(n_splits=5, shuffle=True)
for train_index, test_index in skf.split(X, y):
    X_train, y_train = X[train_index], y[train_index]
    X_test, y_test = X[test_index], y[test_index]
    break
lrn.fit(X_train, y_train)
y_pred = lrn.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
if lrn.classes_[0] == 1:
    cm = np.array([[cm[1, 1], cm[1, 0]], [cm[0, 1], cm[0, 0]]])
plot_confusion_matrix(cm, ['0', '1'])
pr, tpr, fpr = show_data(cm, print_res=1)
code
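A minimal sketch of why StratifiedKFold is used here: each fold preserves the class ratio, which matters when positives are rare, as with fraud labels. The data is hypothetical.

import numpy as np
from sklearn.model_selection import StratifiedKFold

y = np.array([0] * 95 + [1] * 5)   # 5% positives
X = np.zeros((100, 2))
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
for train_idx, test_idx in skf.split(X, y):
    print(y[test_idx].sum(), 'positive(s) in a test fold of', len(test_idx))  # 1 in 20 every time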
1008986/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
import itertools

def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment='center', color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

def show_data(cm, title='Confusion matrix', print_res=0):
    tp = cm[1, 1]
    fn = cm[1, 0]
    fp = cm[0, 1]
    tn = cm[0, 0]
    return (tp / (tp + fp), tp / (tp + fn), fp / (fp + tn))

df = pd.read_csv('../input/creditcard.csv')  # was '..input/creditcard.csv', which cannot resolve
print(df.head(3))
y = np.array(df.Class.tolist())
df = df.drop('Class', 1)
df = df.drop('Time', 1)
df['Amount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1, 1))
X = np.array(df.values)  # .values instead of DataFrame.as_matrix(), removed in newer pandas
code
1008986/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
import itertools

def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment='center', color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

def show_data(cm, title='Confusion matrix', print_res=0):
    tp = cm[1, 1]
    fn = cm[1, 0]
    fp = cm[0, 1]
    tn = cm[0, 0]
    return (tp / (tp + fp), tp / (tp + fn), fp / (fp + tn))

df = pd.read_csv('../input/creditcard.csv')  # was '..input/creditcard.csv', which cannot resolve
y = np.array(df.Class.tolist())
df = df.drop('Class', 1)
df = df.drop('Time', 1)
df['Amount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1, 1))
X = np.array(df.values)  # .values instead of DataFrame.as_matrix(), removed in newer pandas
lrn = LogisticRegression()
skf = StratifiedKFold(n_splits=5, shuffle=True)
for train_index, test_index in skf.split(X, y):
    X_train, y_train = X[train_index], y[train_index]
    X_test, y_test = X[test_index], y[test_index]
    break
lrn.fit(X_train, y_train)
y_pred = lrn.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
if lrn.classes_[0] == 1:
    cm = np.array([[cm[1, 1], cm[1, 0]], [cm[0, 1], cm[0, 0]]])
plot_confusion_matrix(cm, ['0', '1'])
pr, tpr, fpr = show_data(cm, print_res=1)

def ROC(X, y, c, r):
    dic_weight = {1: len(y) / (r * np.sum(y)), 0: len(y) / (len(y) - r * np.sum(y))}
    lrn = LogisticRegression(penalty='l2', C=c, class_weight=dic_weight)
    N = 5
    N_iter = 7
    mean_tpr = 0.0
    mean_thresh = 0.0
    mean_fpr = np.linspace(0, 1, 50000)
    for it in range(N_iter):
        skf = StratifiedKFold(n_splits=N, shuffle=True)
        for train_index, test_index in skf.split(X, y):
            X_train, y_train = X[train_index], y[train_index]
            X_test, y_test = X[test_index], y[test_index]
            lrn.fit(X_train, y_train)
            y_prob = lrn.predict_proba(X_test)[:, lrn.classes_[1]]
            fpr, tpr, thresholds = roc_curve(y_test, y_prob)
            mean_tpr += np.interp(mean_fpr, fpr, tpr)
            mean_thresh += np.interp(mean_fpr, fpr, thresholds)
            mean_tpr[0] = 0.0
    mean_tpr /= N * N_iter
    mean_thresh /= N * N_iter
    mean_tpr[-1] = 1.0
    return (mean_fpr, mean_tpr, roc_auc_score(y_test, y_prob), mean_thresh)

N = np.arange(10, 80, 2)
cm = {}
for n in N:
    cm[n] = 0.0
lrn = LogisticRegression(penalty='l2', C=1, class_weight='balanced')
N_Kfold = 5
N_iter = 5
for it in range(N_iter):
    skf = StratifiedKFold(n_splits=N_Kfold, shuffle=True)
    for train_index, test_index in skf.split(X, y):
        X_train, y_train = X[train_index], y[train_index]
        X_test, y_test = X[test_index], y[test_index]
        lrn.fit(X_train, y_train)
        y_prob = lrn.predict_proba(X_test)[:, lrn.classes_[1]]
        for n in N:
            thresh = 1 - np.power(10.0, -(n / 10))
            y_pred = np.zeros(len(y_prob))
            for j in range(len(y_prob)):
                if y_prob[j] > thresh:
                    y_pred[j] = 1
            B = confusion_matrix(y_test, y_pred)
            if lrn.classes_[0] == 1:
                B = np.array([[B[1, 1], B[1, 0]], [B[0, 1], B[0, 0]]])
            cm[n] += B
for n in N:
    cm[n] = cm[n] // (N_Kfold * N_iter)
code
1008986/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
import itertools

def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment='center', color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

def show_data(cm, title='Confusion matrix', print_res=0):
    tp = cm[1, 1]
    fn = cm[1, 0]
    fp = cm[0, 1]
    tn = cm[0, 0]
    return (tp / (tp + fp), tp / (tp + fn), fp / (fp + tn))

df = pd.read_csv('../input/creditcard.csv')  # was '..input/creditcard.csv', which cannot resolve
y = np.array(df.Class.tolist())
df = df.drop('Class', 1)
df = df.drop('Time', 1)
df['Amount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1, 1))
X = np.array(df.values)  # .values instead of DataFrame.as_matrix(), removed in newer pandas
print('Fraction of frauds: {:.5f}'.format(np.sum(y) / len(y)))
code
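A minimal sketch of the class-weight dictionary that the ROC helper in the neighbouring cells builds: with r = 1 it follows the same n_samples-over-class-count idea as sklearn's 'balanced' heuristic, and larger r weights the rare positive class more heavily. The numbers are hypothetical.

import numpy as np

y = np.array([0] * 998 + [1] * 2)  # hypothetical 0.2% positive rate
r = 3
weights = {1: len(y) / (r * y.sum()), 0: len(y) / (len(y) - r * y.sum())}
print(weights)  # {1: ~166.7, 0: ~1.006}: each positive counts roughly 166x in the loss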
1008986/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
import itertools

def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment='center', color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

def show_data(cm, title='Confusion matrix', print_res=0):
    tp = cm[1, 1]
    fn = cm[1, 0]
    fp = cm[0, 1]
    tn = cm[0, 0]
    return (tp / (tp + fp), tp / (tp + fn), fp / (fp + tn))

df = pd.read_csv('../input/creditcard.csv')  # was '..input/creditcard.csv', which cannot resolve
y = np.array(df.Class.tolist())
df = df.drop('Class', 1)
df = df.drop('Time', 1)
df['Amount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1, 1))
X = np.array(df.values)  # .values instead of DataFrame.as_matrix(), removed in newer pandas
lrn = LogisticRegression()
skf = StratifiedKFold(n_splits=5, shuffle=True)
for train_index, test_index in skf.split(X, y):
    X_train, y_train = X[train_index], y[train_index]
    X_test, y_test = X[test_index], y[test_index]
    break
lrn.fit(X_train, y_train)
y_pred = lrn.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
if lrn.classes_[0] == 1:
    cm = np.array([[cm[1, 1], cm[1, 0]], [cm[0, 1], cm[0, 0]]])
plot_confusion_matrix(cm, ['0', '1'])
pr, tpr, fpr = show_data(cm, print_res=1)

def ROC(X, y, c, r):
    dic_weight = {1: len(y) / (r * np.sum(y)), 0: len(y) / (len(y) - r * np.sum(y))}
    lrn = LogisticRegression(penalty='l2', C=c, class_weight=dic_weight)
    N = 5
    N_iter = 7
    mean_tpr = 0.0
    mean_thresh = 0.0
    mean_fpr = np.linspace(0, 1, 50000)
    for it in range(N_iter):
        skf = StratifiedKFold(n_splits=N, shuffle=True)
        for train_index, test_index in skf.split(X, y):
            X_train, y_train = X[train_index], y[train_index]
            X_test, y_test = X[test_index], y[test_index]
            lrn.fit(X_train, y_train)
            y_prob = lrn.predict_proba(X_test)[:, lrn.classes_[1]]
            fpr, tpr, thresholds = roc_curve(y_test, y_prob)
            mean_tpr += np.interp(mean_fpr, fpr, tpr)
            mean_thresh += np.interp(mean_fpr, fpr, thresholds)
            mean_tpr[0] = 0.0
    mean_tpr /= N * N_iter
    mean_thresh /= N * N_iter
    mean_tpr[-1] = 1.0
    return (mean_fpr, mean_tpr, roc_auc_score(y_test, y_prob), mean_thresh)

def plot_roc(X, y, list_par_1, par_1='C', par_2=1):
    f = plt.figure(figsize=(12, 8))
    for p in list_par_1:
        if par_1 == 'C':
            c = p
            r = par_2
        else:
            r = p
            c = par_2
        list_FP, list_TP, AUC, mean_thresh = ROC(X, y, c, r)
        plt.plot(list_FP, list_TP, label='C = {}, r = {}, TPR(3e-4) = {:.4f}'.format(c, r, list_TP[10]))
    plt.legend(title='values', loc='lower right')
    plt.xlim(0, 0.001)  # we are only interested in small values of FPR
    plt.ylim(0.5, 0.9)
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.title('ROC detail')
    plt.axvline(3e-4, color='b', linestyle='dashed', linewidth=2)
    plt.show()
    plt.close()

plot_roc(X, y, [0.001, 0.01, 0.1, 1, 10, 100], 'C', 1)
code
1008986/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)  # draw the matrix first; colorbar() needs a mappable
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment='center', color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

def show_data(cm, title='Confusion matrix', print_res=0):
    # Return (precision, recall/TPR, FPR) from a 2x2 confusion matrix.
    tp = cm[1, 1]
    fn = cm[1, 0]
    fp = cm[0, 1]
    tn = cm[0, 0]
    if print_res:
        print(title)
        print('Precision = {:.3f}'.format(tp / (tp + fp)))
        print('Recall (TPR) = {:.3f}'.format(tp / (tp + fn)))
        print('FPR = {:.5f}'.format(fp / (fp + tn)))
    return (tp / (tp + fp), tp / (tp + fn), fp / (fp + tn))

df = pd.read_csv('../input/creditcard.csv')
y = np.array(df.Class.tolist())
df = df.drop('Class', axis=1)
df = df.drop('Time', axis=1)
df['Amount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1, 1))
X = df.to_numpy()  # as_matrix() was removed from pandas
lrn = LogisticRegression()
skf = StratifiedKFold(n_splits=5, shuffle=True)
for train_index, test_index in skf.split(X, y):
    # take only the first fold as a quick stratified train/test split
    X_train, y_train = X[train_index], y[train_index]
    X_test, y_test = X[test_index], y[test_index]
    break
lrn.fit(X_train, y_train)
y_pred = lrn.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
if lrn.classes_[0] == 1:
    cm = np.array([[cm[1, 1], cm[1, 0]], [cm[0, 1], cm[0, 0]]])
plot_confusion_matrix(cm, ['0', '1'])
pr, tpr, fpr = show_data(cm, print_res=1)

def ROC(X, y, c, r):
    # r scales the weight given to the positive (fraud) class; r = 1 is balanced
    dic_weight = {1: len(y) / (r * np.sum(y)), 0: len(y) / (len(y) - r * np.sum(y))}
    lrn = LogisticRegression(penalty='l2', C=c, class_weight=dic_weight)
    N = 5
    N_iter = 7
    mean_tpr = 0.0
    mean_thresh = 0.0
    mean_fpr = np.linspace(0, 1, 50000)
    for it in range(N_iter):
        skf = StratifiedKFold(n_splits=N, shuffle=True)
        for train_index, test_index in skf.split(X, y):
            X_train, y_train = X[train_index], y[train_index]
            X_test, y_test = X[test_index], y[test_index]
            lrn.fit(X_train, y_train)
            y_prob = lrn.predict_proba(X_test)[:, 1]  # classes_ are sorted, so the positive class is column 1
            fpr, tpr, thresholds = roc_curve(y_test, y_prob)
            mean_tpr += np.interp(mean_fpr, fpr, tpr)
            mean_thresh += np.interp(mean_fpr, fpr, thresholds)
            mean_tpr[0] = 0.0
    mean_tpr /= N * N_iter
    mean_thresh /= N * N_iter
    mean_tpr[-1] = 1.0
    # note: the AUC below is computed from the last fold only
    return (mean_fpr, mean_tpr, roc_auc_score(y_test, y_prob), mean_thresh)

def plot_roc(X, y, list_par_1, par_1='C', par_2=1):
    f = plt.figure(figsize=(12, 8))
    for p in list_par_1:
        if par_1 == 'C':
            c = p
            r = par_2
        else:
            r = p
            c = par_2
        list_FP, list_TP, AUC, mean_thresh = ROC(X, y, c, r)
        idx = np.searchsorted(list_FP, 0.0003)  # grid index of FPR = 3e-4, matching the label below
        plt.plot(list_FP, list_TP, label='C = {}, r = {}, TPR(3e-4) = {:.4f}'.format(c, r, list_TP[idx]))
    plt.legend(title='values', loc='lower right')
    plt.xlim(0, 0.001)  # we are only interested in small values of FPR
    plt.ylim(0.5, 0.9)
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.title('ROC detail')
    plt.axvline(3e-4, color='b', linestyle='dashed', linewidth=2)
    plt.show()
    plt.close()

plot_roc(X, y, [1, 3, 10, 30, 100], 'r', 1)
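# Aside (a sketch, not part of the original analysis): for r = 1 the dic_weight
# used in ROC() is proportional to sklearn's built-in 'balanced' scheme,
# len(y) / (n_classes * np.bincount(y)); since the model carries an L2 penalty,
# rescaling all class weights by a common factor behaves like rescaling C.
balanced_like = {1: len(y) / np.sum(y), 0: len(y) / (len(y) - np.sum(y))}  # dic_weight at r = 1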
code
1008986/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)  # draw the matrix first; colorbar() needs a mappable
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment='center', color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

def show_data(cm, title='Confusion matrix', print_res=0):
    # Return (precision, recall/TPR, FPR) from a 2x2 confusion matrix.
    tp = cm[1, 1]
    fn = cm[1, 0]
    fp = cm[0, 1]
    tn = cm[0, 0]
    if print_res:
        print(title)
        print('Precision = {:.3f}'.format(tp / (tp + fp)))
        print('Recall (TPR) = {:.3f}'.format(tp / (tp + fn)))
        print('FPR = {:.5f}'.format(fp / (fp + tn)))
    return (tp / (tp + fp), tp / (tp + fn), fp / (fp + tn))

df = pd.read_csv('../input/creditcard.csv')
y = np.array(df.Class.tolist())
df = df.drop('Class', axis=1)
df = df.drop('Time', axis=1)
df['Amount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1, 1))
X = df.to_numpy()  # as_matrix() was removed from pandas
lrn = LogisticRegression()
skf = StratifiedKFold(n_splits=5, shuffle=True)
for train_index, test_index in skf.split(X, y):
    # take only the first fold as a quick stratified train/test split
    X_train, y_train = X[train_index], y[train_index]
    X_test, y_test = X[test_index], y[test_index]
    break
lrn.fit(X_train, y_train)
y_pred = lrn.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
if lrn.classes_[0] == 1:
    cm = np.array([[cm[1, 1], cm[1, 0]], [cm[0, 1], cm[0, 0]]])
plot_confusion_matrix(cm, ['0', '1'])
pr, tpr, fpr = show_data(cm, print_res=1)

def ROC(X, y, c, r):
    # r scales the weight given to the positive (fraud) class; r = 1 is balanced
    dic_weight = {1: len(y) / (r * np.sum(y)), 0: len(y) / (len(y) - r * np.sum(y))}
    lrn = LogisticRegression(penalty='l2', C=c, class_weight=dic_weight)
    N = 5
    N_iter = 7
    mean_tpr = 0.0
    mean_thresh = 0.0
    mean_fpr = np.linspace(0, 1, 50000)
    for it in range(N_iter):
        skf = StratifiedKFold(n_splits=N, shuffle=True)
        for train_index, test_index in skf.split(X, y):
            X_train, y_train = X[train_index], y[train_index]
            X_test, y_test = X[test_index], y[test_index]
            lrn.fit(X_train, y_train)
            y_prob = lrn.predict_proba(X_test)[:, 1]  # classes_ are sorted, so the positive class is column 1
            fpr, tpr, thresholds = roc_curve(y_test, y_prob)
            mean_tpr += np.interp(mean_fpr, fpr, tpr)
            mean_thresh += np.interp(mean_fpr, fpr, thresholds)
            mean_tpr[0] = 0.0
    mean_tpr /= N * N_iter
    mean_thresh /= N * N_iter
    mean_tpr[-1] = 1.0
    # note: the AUC below is computed from the last fold only
    return (mean_fpr, mean_tpr, roc_auc_score(y_test, y_prob), mean_thresh)

def plot_roc(X, y, list_par_1, par_1='C', par_2=1):
    f = plt.figure(figsize=(12, 8))
    for p in list_par_1:
        if par_1 == 'C':
            c = p
            r = par_2
        else:
            r = p
            c = par_2
        list_FP, list_TP, AUC, mean_thresh = ROC(X, y, c, r)
        idx = np.searchsorted(list_FP, 0.0003)  # grid index of FPR = 3e-4, matching the label below
        plt.plot(list_FP, list_TP, label='C = {}, r = {}, TPR(3e-4) = {:.4f}'.format(c, r, list_TP[idx]))
    plt.legend(title='values', loc='lower right')
    plt.xlim(0, 0.001)  # we are only interested in small values of FPR
    plt.ylim(0.5, 0.9)
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.title('ROC detail')
    plt.axvline(3e-4, color='b', linestyle='dashed', linewidth=2)
    plt.show()
    plt.close()

N = np.arange(10, 80, 2)
cm = {}
for n in N:
    cm[n] = 0.0
lrn = LogisticRegression(penalty='l2', C=1, class_weight='balanced')
N_Kfold = 5
N_iter = 5
for it in range(N_iter):
    skf = StratifiedKFold(n_splits=N_Kfold, shuffle=True)
    for train_index, test_index in skf.split(X, y):
        X_train, y_train = X[train_index], y[train_index]
        X_test, y_test = X[test_index], y[test_index]
        lrn.fit(X_train, y_train)
        y_prob = lrn.predict_proba(X_test)[:, 1]  # column of the positive class
        for n in N:
            thresh = 1 - np.power(10.0, -(n / 10))
            y_pred = np.zeros(len(y_prob))
            for j in range(len(y_prob)):
                if y_prob[j] > thresh:
                    y_pred[j] = 1
            B = confusion_matrix(y_test, y_pred)
            if lrn.classes_[0] == 1:
                B = np.array([[B[1, 1], B[1, 0]], [B[0, 1], B[0, 0]]])
            cm[n] += B
for n in N:
    cm[n] = cm[n] // (N_Kfold * N_iter)  # average the confusion counts over all folds and repeats
PR = []
TPR = []
FPR = []
THRESH = N
for n in N:
    pr, tpr, fpr = show_data(cm[n], title='Results for threshold = 1-10^-{:.1f}'.format(n / 10))
    PR.append(pr)
    TPR.append(tpr)
    FPR.append(-np.log(max(fpr, 1e-12)) / 10)  # guard against log(0) when there are no false positives
g = plt.figure(figsize=(12, 8))
plt.plot(THRESH, PR, label='Precision')
plt.plot(THRESH, TPR, label='Recall (TPR)')
plt.plot(THRESH, FPR, label='-log(FPR)/10')
plt.axhline(-np.log(0.0003) / 10, color='b', linestyle='dashed', linewidth=2)
plt.title('Evaluation of the classifier')
plt.legend(loc='lower right')
plt.xlabel('-log(1-thresh)/log(10)')
plt.ylim(0.55, 0.9)
plt.show()
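# Aside (a sketch): the innermost thresholding loop above is equivalent to a
# single vectorized comparison; `y_prob` and `thresh` here are simply the
# values left over from the final sweep iteration.
y_pred_vec = (y_prob > thresh).astype(float)  # same result as the element-wise loop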
code
90112109/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sb
data = pd.read_csv('../input/insurance/insurance.csv')
data.nunique()
data.isnull().sum()
data.corr()
cor = data.corr()
data2 = data.drop(['children', 'region'], axis=1)
sb.relplot(x='age', y='charges', hue='smoker', data=data2)
code
90112109/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sb
data = pd.read_csv('../input/insurance/insurance.csv')
data.nunique()
data.isnull().sum()
data.corr()
cor = data.corr()
sb.heatmap(cor, xticklabels=cor.columns, yticklabels=cor.columns, annot=True)
code
90112109/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/insurance/insurance.csv')
data.info()
code
90112109/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/insurance/insurance.csv')
data.nunique()
code
90112109/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/insurance/insurance.csv')
data.nunique()
data.isnull().sum()
code
90112109/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/insurance/insurance.csv')
data.nunique()
data.isnull().sum()
data.corr()
code
90112109/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/insurance/insurance.csv')
data.head()
code
90112109/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sb
data = pd.read_csv('../input/insurance/insurance.csv')
data.nunique()
data.isnull().sum()
data.corr()
cor = data.corr()
data2 = data.drop(['children', 'region'], axis=1)
sb.displot(data2['charges'])
code
90112109/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sb
data = pd.read_csv('../input/insurance/insurance.csv')
data.nunique()
data.isnull().sum()
data.corr()
cor = data.corr()
sb.pairplot(data)
code
90112109/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sb
data = pd.read_csv('../input/insurance/insurance.csv')
data.nunique()
data.isnull().sum()
data.corr()
cor = data.corr()
data2 = data.drop(['children', 'region'], axis=1)
data2.head()
code
90112109/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/insurance/insurance.csv')
data.describe()
code
73074228/cell_13
[ "text_html_output_1.png" ]
from pathlib import Path
import pandas as pd
INPUT = Path('../input/tabular-playground-series-aug-2021')
train = pd.read_csv(INPUT / 'train.csv')
test = pd.read_csv(INPUT / 'test.csv')
train.shape
train.info()
code
73074228/cell_20
[ "text_plain_output_1.png" ]
from pathlib import Path
import pandas as pd
INPUT = Path('../input/tabular-playground-series-aug-2021')
train = pd.read_csv(INPUT / 'train.csv')
test = pd.read_csv(INPUT / 'test.csv')
train.shape
train.isnull().any().sum()
y = train['loss']
y.head()
code
73074228/cell_55
[ "text_html_output_1.png" ]
from pathlib import Path
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, KFold
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
import numpy as np
import pandas as pd
INPUT = Path('../input/tabular-playground-series-aug-2021')
train = pd.read_csv(INPUT / 'train.csv')
test = pd.read_csv(INPUT / 'test.csv')
train.shape
train.isnull().any().sum()
test.isnull().any().sum()
ss_features = [col for col in test.columns if 'f' in col]
ss = StandardScaler()
train[ss_features] = ss.fit_transform(train[ss_features])
test[ss_features] = ss.transform(test[ss_features])
X = train
kf = KFold(n_splits=10, shuffle=True, random_state=42)
for fold, (train_indices, valid_indices) in enumerate(kf.split(X=X)):
    X.loc[valid_indices, 'kfold'] = fold
useful_features = [col for col in X.columns if col not in ('loss', 'kfold')]
xgb_params = {'learning_rate': 0.013222817649672616, 'n_estimators': 12462, 'max_depth': 5, 'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor'}
xgb = XGBRegressor(**xgb_params)
xgb_predictions = []
for fold in range(10):
    xtrain = X[X.kfold != fold].reset_index(drop=True)
    xvalid = X[X.kfold == fold].reset_index(drop=True)
    ytrain = xtrain.loss
    yvalid = xvalid.loss
    xtrain = xtrain[useful_features]
    xvalid = xvalid[useful_features]
    model = xgb
    model.fit(xtrain, ytrain, early_stopping_rounds=10, eval_set=[(xvalid, yvalid)], verbose=1000)
    preds_valid = model.predict(xvalid)
    test_preds = model.predict(test)
    xgb_predictions.append(test_preds)
final_predictions = xgb_predictions
predictions = np.mean(np.column_stack(final_predictions), axis=1)
submission = pd.read_csv(INPUT / 'sample_submission.csv')
submission['loss'] = predictions
submission.to_csv('submission.csv', index=False)
submission
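# Aside (a sketch): stacking the per-fold test predictions column-wise and
# averaging along axis 1, as above, is the same as a plain mean over the fold axis:
mean_check = np.mean(np.array(xgb_predictions), axis=0)  # identical values to `predictions`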
code
73074228/cell_54
[ "text_plain_output_1.png" ]
from pathlib import Path
import pandas as pd
INPUT = Path('../input/tabular-playground-series-aug-2021')
train = pd.read_csv(INPUT / 'train.csv')
test = pd.read_csv(INPUT / 'test.csv')
submission = pd.read_csv(INPUT / 'sample_submission.csv')
submission.head()
code
73074228/cell_11
[ "text_plain_output_1.png" ]
from pathlib import Path
import pandas as pd
INPUT = Path('../input/tabular-playground-series-aug-2021')
train = pd.read_csv(INPUT / 'train.csv')
test = pd.read_csv(INPUT / 'test.csv')
train.shape
train.head()
code
73074228/cell_32
[ "text_plain_output_1.png" ]
from pathlib import Path
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, KFold
import pandas as pd
INPUT = Path('../input/tabular-playground-series-aug-2021')
train = pd.read_csv(INPUT / 'train.csv')
test = pd.read_csv(INPUT / 'test.csv')
train.shape
train.isnull().any().sum()
X = train
kf = KFold(n_splits=10, shuffle=True, random_state=42)
for fold, (train_indices, valid_indices) in enumerate(kf.split(X=X)):
    X.loc[valid_indices, 'kfold'] = fold
X.head()
code
73074228/cell_8
[ "text_html_output_1.png" ]
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
from lightgbm import LGBMRegressor
code
73074228/cell_15
[ "text_plain_output_1.png" ]
from pathlib import Path
import pandas as pd
INPUT = Path('../input/tabular-playground-series-aug-2021')
train = pd.read_csv(INPUT / 'train.csv')
test = pd.read_csv(INPUT / 'test.csv')
test.isnull().any().sum()
code
73074228/cell_46
[ "text_html_output_1.png" ]
from pathlib import Path
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, KFold
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
import pandas as pd
INPUT = Path('../input/tabular-playground-series-aug-2021')
train = pd.read_csv(INPUT / 'train.csv')
test = pd.read_csv(INPUT / 'test.csv')
train.shape
train.isnull().any().sum()
test.isnull().any().sum()
ss_features = [col for col in test.columns if 'f' in col]
ss = StandardScaler()
train[ss_features] = ss.fit_transform(train[ss_features])
test[ss_features] = ss.transform(test[ss_features])
X = train
kf = KFold(n_splits=10, shuffle=True, random_state=42)
for fold, (train_indices, valid_indices) in enumerate(kf.split(X=X)):
    X.loc[valid_indices, 'kfold'] = fold
useful_features = [col for col in X.columns if col not in ('loss', 'kfold')]
xgb_params = {'learning_rate': 0.013222817649672616, 'n_estimators': 12462, 'max_depth': 5, 'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor'}
xgb = XGBRegressor(**xgb_params)
xgb_predictions = []
for fold in range(10):
    xtrain = X[X.kfold != fold].reset_index(drop=True)
    xvalid = X[X.kfold == fold].reset_index(drop=True)
    ytrain = xtrain.loss
    yvalid = xvalid.loss
    xtrain = xtrain[useful_features]
    xvalid = xvalid[useful_features]
    model = xgb
    model.fit(xtrain, ytrain, early_stopping_rounds=10, eval_set=[(xvalid, yvalid)], verbose=1000)
    preds_valid = model.predict(xvalid)
    test_preds = model.predict(test)
    xgb_predictions.append(test_preds)
    print(f'fold: {fold}, rmse: {mean_squared_error(yvalid, preds_valid, squared=False)}')
code
73074228/cell_14
[ "text_plain_output_1.png" ]
from pathlib import Path
import pandas as pd
INPUT = Path('../input/tabular-playground-series-aug-2021')
train = pd.read_csv(INPUT / 'train.csv')
test = pd.read_csv(INPUT / 'test.csv')
train.shape
train.isnull().any().sum()
code
73074228/cell_12
[ "text_html_output_1.png" ]
from pathlib import Path
import pandas as pd
INPUT = Path('../input/tabular-playground-series-aug-2021')
train = pd.read_csv(INPUT / 'train.csv')
test = pd.read_csv(INPUT / 'test.csv')
test.head()
code
17137542/cell_21
[ "image_output_1.png" ]
from sklearn import manifold
from sklearn.cluster import KMeans
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
dist = []
for i in range(2, 20):
    km = KMeans(n_clusters=i, n_init=10, max_iter=500, random_state=0)
    km.fit(df.iloc[:, 2:-3])
    dist.append(km.inertia_)
km = KMeans(n_clusters=5, n_init=10, max_iter=300, random_state=0)
df['class'] = km.fit_predict(df.iloc[:, 2:-3])
df['class'].values
mds = manifold.MDS(n_components=2, dissimilarity='euclidean', random_state=0)
pos = mds.fit_transform(df.iloc[:, 2:-4])
col = ['orange', 'green', 'blue', 'purple', 'red']
chars = '^<>vo+d'
c_flag = 0
labels = df['Distillery']
plt.rcParams['font.size'] = 15
for label, x, y, c in zip(labels, pos[:, 0], pos[:, 1], df['class']):
    if c == c_flag:
        c_flag = c_flag + 1
    plt.annotate(label, xy=(x, y))
df.query('Distillery == "GlenSpey" or Distillery == "Miltonduff"')
df.query('Distillery == "GlenSpey" or Distillery == "Glendronach"')
tree = DecisionTreeClassifier(criterion='gini', max_depth=5, random_state=1, min_samples_leaf=5)
X_train = df.iloc[:, 2:-4]
y_train = df['class']
tree.fit(X_train, y_train)
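# Aside (a sketch, assuming scikit-learn >= 0.21): the fitted rules can also be
# inspected as plain text, without graphviz:
from sklearn.tree import export_text
print(export_text(tree, feature_names=list(df.columns[2:-4])))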
code
17137542/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
df.head()
code
17137542/cell_23
[ "text_html_output_1.png" ]
from IPython.display import Image, display_png
from pydotplus import graph_from_dot_data
from sklearn import manifold
from sklearn.cluster import KMeans
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
dist = []
for i in range(2, 20):
    km = KMeans(n_clusters=i, n_init=10, max_iter=500, random_state=0)
    km.fit(df.iloc[:, 2:-3])
    dist.append(km.inertia_)
km = KMeans(n_clusters=5, n_init=10, max_iter=300, random_state=0)
df['class'] = km.fit_predict(df.iloc[:, 2:-3])
df['class'].values
mds = manifold.MDS(n_components=2, dissimilarity='euclidean', random_state=0)
pos = mds.fit_transform(df.iloc[:, 2:-4])
col = ['orange', 'green', 'blue', 'purple', 'red']
chars = '^<>vo+d'
c_flag = 0
labels = df['Distillery']
plt.rcParams['font.size'] = 15
for label, x, y, c in zip(labels, pos[:, 0], pos[:, 1], df['class']):
    if c == c_flag:
        c_flag = c_flag + 1
    plt.annotate(label, xy=(x, y))
df.query('Distillery == "GlenSpey" or Distillery == "Miltonduff"')
df.query('Distillery == "GlenSpey" or Distillery == "Glendronach"')
tree = DecisionTreeClassifier(criterion='gini', max_depth=5, random_state=1, min_samples_leaf=5)
X_train = df.iloc[:, 2:-4]
y_train = df['class']
tree.fit(X_train, y_train)
dot_data = export_graphviz(tree, filled=True, rounded=True, class_names=['Class 1', 'Class 2', 'Class 3', 'Class 4', 'Class 5'], feature_names=df.columns[2:-4].values, out_file=None)
graph = graph_from_dot_data(dot_data)
graph.write_png('tree.png')
display_png(Image('tree.png'))
code
17137542/cell_6
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
df.info()
code
17137542/cell_26
[ "text_html_output_1.png" ]
from IPython.display import Image, display_png
from pydotplus import graph_from_dot_data
from pyproj import Proj, transform
from sklearn import manifold
from sklearn.cluster import KMeans
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
import folium
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
dist = []
for i in range(2, 20):
    km = KMeans(n_clusters=i, n_init=10, max_iter=500, random_state=0)
    km.fit(df.iloc[:, 2:-3])
    dist.append(km.inertia_)
km = KMeans(n_clusters=5, n_init=10, max_iter=300, random_state=0)
df['class'] = km.fit_predict(df.iloc[:, 2:-3])
df['class'].values
mds = manifold.MDS(n_components=2, dissimilarity='euclidean', random_state=0)
pos = mds.fit_transform(df.iloc[:, 2:-4])
col = ['orange', 'green', 'blue', 'purple', 'red']
chars = '^<>vo+d'
c_flag = 0
labels = df['Distillery']
plt.rcParams['font.size'] = 15
for label, x, y, c in zip(labels, pos[:, 0], pos[:, 1], df['class']):
    if c == c_flag:
        c_flag = c_flag + 1
    plt.annotate(label, xy=(x, y))
df.query('Distillery == "GlenSpey" or Distillery == "Miltonduff"')
df.query('Distillery == "GlenSpey" or Distillery == "Glendronach"')
tree = DecisionTreeClassifier(criterion='gini', max_depth=5, random_state=1, min_samples_leaf=5)
X_train = df.iloc[:, 2:-4]
y_train = df['class']
tree.fit(X_train, y_train)
dot_data = export_graphviz(tree, filled=True, rounded=True, class_names=['Class 1', 'Class 2', 'Class 3', 'Class 4', 'Class 5'], feature_names=df.columns[2:-4].values, out_file=None)
graph = graph_from_dot_data(dot_data)
graph.write_png('tree.png')
map_whisky = folium.Map(location=[57.49952, -2.77639], zoom_start=9)
inProj = Proj(init='epsg:27700')
outProj = Proj(init='epsg:4326')
for label, lon, lat, c in zip(labels, df['Latitude'], df['Longitude'], df['class']):
    lat2, lon2 = transform(inProj, outProj, lon, lat)
    folium.Marker([lon2, lat2], popup=label, icon=folium.Icon(color=col[c])).add_to(map_whisky)
map_whisky
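# Aside (a sketch, assuming pyproj >= 2.x, where Proj(init=...) is deprecated):
# the same OSGB36 -> WGS84 conversion can be written with the Transformer API:
# from pyproj import Transformer
# osgb_to_wgs84 = Transformer.from_crs('epsg:27700', 'epsg:4326', always_xy=True)
# lon_deg, lat_deg = osgb_to_wgs84.transform(easting, northing)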
code
17137542/cell_2
[ "text_plain_output_1.png" ]
!pip install pydotplus import pandas as pd import numpy as np from sklearn.cluster import KMeans import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeClassifier from IPython.display import Image, display_png from pydotplus import graph_from_dot_data from sklearn.tree import export_graphviz from sklearn import manifold import folium from pyproj import Proj, transform
code
17137542/cell_18
[ "image_output_1.png" ]
from sklearn import manifold
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
dist = []
for i in range(2, 20):
    km = KMeans(n_clusters=i, n_init=10, max_iter=500, random_state=0)
    km.fit(df.iloc[:, 2:-3])
    dist.append(km.inertia_)
km = KMeans(n_clusters=5, n_init=10, max_iter=300, random_state=0)
df['class'] = km.fit_predict(df.iloc[:, 2:-3])
df['class'].values
mds = manifold.MDS(n_components=2, dissimilarity='euclidean', random_state=0)
pos = mds.fit_transform(df.iloc[:, 2:-4])
col = ['orange', 'green', 'blue', 'purple', 'red']
chars = '^<>vo+d'
c_flag = 0
labels = df['Distillery']
plt.rcParams['font.size'] = 15
for label, x, y, c in zip(labels, pos[:, 0], pos[:, 1], df['class']):
    if c == c_flag:
        c_flag = c_flag + 1
    plt.annotate(label, xy=(x, y))
df.query('Distillery == "GlenSpey" or Distillery == "Miltonduff"')
df.query('Distillery == "GlenSpey" or Distillery == "Glendronach"')
code
17137542/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
df.describe()
code
17137542/cell_16
[ "text_html_output_1.png" ]
from sklearn import manifold
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
dist = []
for i in range(2, 20):
    km = KMeans(n_clusters=i, n_init=10, max_iter=500, random_state=0)
    km.fit(df.iloc[:, 2:-3])
    dist.append(km.inertia_)
km = KMeans(n_clusters=5, n_init=10, max_iter=300, random_state=0)
df['class'] = km.fit_predict(df.iloc[:, 2:-3])
df['class'].values
mds = manifold.MDS(n_components=2, dissimilarity='euclidean', random_state=0)
pos = mds.fit_transform(df.iloc[:, 2:-4])
col = ['orange', 'green', 'blue', 'purple', 'red']
chars = '^<>vo+d'
c_flag = 0
labels = df['Distillery']
plt.rcParams['font.size'] = 15
for label, x, y, c in zip(labels, pos[:, 0], pos[:, 1], df['class']):
    if c == c_flag:
        c_flag = c_flag + 1
    plt.annotate(label, xy=(x, y))
df.query('Distillery == "GlenSpey" or Distillery == "Miltonduff"')
code
17137542/cell_14
[ "text_plain_output_1.png" ]
from sklearn import manifold
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
dist = []
for i in range(2, 20):
    km = KMeans(n_clusters=i, n_init=10, max_iter=500, random_state=0)
    km.fit(df.iloc[:, 2:-3])
    dist.append(km.inertia_)
km = KMeans(n_clusters=5, n_init=10, max_iter=300, random_state=0)
df['class'] = km.fit_predict(df.iloc[:, 2:-3])
df['class'].values
mds = manifold.MDS(n_components=2, dissimilarity='euclidean', random_state=0)
pos = mds.fit_transform(df.iloc[:, 2:-4])
col = ['orange', 'green', 'blue', 'purple', 'red']
chars = '^<>vo+d'
c_flag = 0
labels = df['Distillery']
plt.figure(figsize=(20, 20), dpi=50)
plt.rcParams['font.size'] = 15
for label, x, y, c in zip(labels, pos[:, 0], pos[:, 1], df['class']):
    if c == c_flag:
        c_flag = c_flag + 1
        plt.scatter(x, y, c=col[c], marker=chars[c], s=100, label='Class ' + str(c + 1))
    else:
        plt.scatter(x, y, c=col[c], marker=chars[c], s=100)
    plt.annotate(label, xy=(x, y))
plt.legend(loc='upper right')
plt.show()
code
17137542/cell_10
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
dist = []
for i in range(2, 20):
    km = KMeans(n_clusters=i, n_init=10, max_iter=500, random_state=0)
    km.fit(df.iloc[:, 2:-3])
    dist.append(km.inertia_)
plt.plot(range(2, 20), dist)
plt.show()
code
17137542/cell_12
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/whisky.csv')
dist = []
for i in range(2, 20):
    km = KMeans(n_clusters=i, n_init=10, max_iter=500, random_state=0)
    km.fit(df.iloc[:, 2:-3])
    dist.append(km.inertia_)
km = KMeans(n_clusters=5, n_init=10, max_iter=300, random_state=0)
df['class'] = km.fit_predict(df.iloc[:, 2:-3])
df['class'].values
code
17144010/cell_13
[ "text_plain_output_1.png" ]
import keras as K
import numpy as np
import tensorflow as tf
np.random.seed(1)
tf.set_random_seed(1)
tf.logging.set_verbosity(tf.logging.ERROR)
discriminator = K.Sequential()
depth = 64
dropout = 0.4
input_shape = (28, 28, 1)
discriminator.add(K.layers.Conv2D(depth * 1, 5, strides=2, input_shape=input_shape, padding='same'))
discriminator.add(K.layers.LeakyReLU(alpha=0.2))
discriminator.add(K.layers.Dropout(dropout))
discriminator.add(K.layers.Conv2D(depth * 2, 5, strides=2, padding='same'))
discriminator.add(K.layers.LeakyReLU(alpha=0.2))
discriminator.add(K.layers.Dropout(dropout))
discriminator.add(K.layers.Conv2D(depth * 4, 5, strides=2, padding='same'))
discriminator.add(K.layers.LeakyReLU(alpha=0.2))
discriminator.add(K.layers.Dropout(dropout))
discriminator.add(K.layers.Conv2D(depth * 8, 5, strides=1, padding='same'))
discriminator.add(K.layers.LeakyReLU(alpha=0.2))
discriminator.add(K.layers.Dropout(dropout))
discriminator.add(K.layers.Flatten())
discriminator.add(K.layers.Dense(1, activation='sigmoid'))
discriminator.summary()
tf.logging.set_verbosity(tf.logging.ERROR)
generator = K.Sequential()
depth = 64 + 64 + 64 + 64
dim = 7
dropout = 0.4
generator.add(K.layers.Dense(dim * dim * depth, input_shape=(100,)))
generator.add(K.layers.BatchNormalization(momentum=0.9))
generator.add(K.layers.ReLU())
generator.add(K.layers.Reshape((dim, dim, depth)))
generator.add(K.layers.Dropout(dropout))
generator.add(K.layers.UpSampling2D())
generator.add(K.layers.Conv2DTranspose(int(depth / 2), 5, padding='same'))
generator.add(K.layers.BatchNormalization(momentum=0.9))
generator.add(K.layers.ReLU())
generator.add(K.layers.UpSampling2D())
generator.add(K.layers.Conv2DTranspose(int(depth / 4), 5, padding='same'))
generator.add(K.layers.BatchNormalization(momentum=0.9))
generator.add(K.layers.ReLU())
generator.add(K.layers.Conv2DTranspose(int(depth / 8), 5, padding='same'))
generator.add(K.layers.BatchNormalization(momentum=0.9))
generator.add(K.layers.ReLU())
generator.add(K.layers.Conv2DTranspose(1, 5, padding='same', activation='sigmoid'))
generator.summary()
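# Aside (a sketch of the usual next step, not shown in this cell; names are
# illustrative): the two networks are typically chained into an adversarial
# stack for training the generator, with the discriminator frozen inside it.
discriminator.trainable = False
adversarial = K.Sequential([generator, discriminator])
adversarial.compile(loss='binary_crossentropy', optimizer=K.optimizers.RMSprop(lr=0.0001))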
code
17144010/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import keras as K
import numpy as np
import tensorflow as tf
np.random.seed(1)
tf.set_random_seed(1)
tf.logging.set_verbosity(tf.logging.ERROR)
discriminator = K.Sequential()
depth = 64
dropout = 0.4
input_shape = (28, 28, 1)
discriminator.add(K.layers.Conv2D(depth * 1, 5, strides=2, input_shape=input_shape, padding='same'))
discriminator.add(K.layers.LeakyReLU(alpha=0.2))
discriminator.add(K.layers.Dropout(dropout))
discriminator.add(K.layers.Conv2D(depth * 2, 5, strides=2, padding='same'))
discriminator.add(K.layers.LeakyReLU(alpha=0.2))
discriminator.add(K.layers.Dropout(dropout))
discriminator.add(K.layers.Conv2D(depth * 4, 5, strides=2, padding='same'))
discriminator.add(K.layers.LeakyReLU(alpha=0.2))
discriminator.add(K.layers.Dropout(dropout))
discriminator.add(K.layers.Conv2D(depth * 8, 5, strides=1, padding='same'))
discriminator.add(K.layers.LeakyReLU(alpha=0.2))
discriminator.add(K.layers.Dropout(dropout))
discriminator.add(K.layers.Flatten())
discriminator.add(K.layers.Dense(1, activation='sigmoid'))
discriminator.summary()
code
17144010/cell_8
[ "text_plain_output_1.png" ]
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import tensorflow as tf
np.random.seed(1)
tf.set_random_seed(1)
x_train = input_data.read_data_sets('mnist', one_hot=True).train.images
x_train = x_train.reshape(-1, 28, 28, 1).astype(np.float32)
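# Note: read_data_sets already returns pixel values scaled to [0, 1] as float32,
# which matches the sigmoid output range of a typical generator, so no extra
# normalization is needed here.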
code
17144010/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import keras as K
import tensorflow as tf
import pandas as pd
import os
from matplotlib import pyplot as plt
import seaborn as sns
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
code
73067082/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code