path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 class: "code")
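The four fields above describe one record per notebook cell. As a minimal sketch of how records with this schema could be read via the Hugging Face `datasets` library — the dataset identifier below is a hypothetical placeholder, not taken from this document:

# Minimal sketch, assuming the records are published as a Hugging Face dataset;
# "user/notebook-screenshots" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("user/notebook-screenshots", split="train")
row = ds[0]
print(row["path"])              # e.g. "105213782/cell_31" (notebook id / cell id)
print(row["screenshot_names"])  # list of rendered-output image names
print(row["cell_type"])         # always "code" in this dataset
print(row["code"][:200])        # the cell's flattened source code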
105213782/cell_31
[ "text_html_output_1.png" ]
X_train
code
105213782/cell_46
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier

# k-NN with the Minkowski metric (p=3), fit on the prepared training split
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
code
105213782/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # plotting
import numpy as np
import pandas as pd
import seaborn as sns  # visualization

data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()

# Undersample the majority (no-stroke) class to balance the target
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
sns.countplot(data=udata, x='stroke')
plt.show()
code
105213782/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn import preprocessing

data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()

# Label-encode the binary categorical columns
label_encoder = preprocessing.LabelEncoder()
data['gender'] = label_encoder.fit_transform(data['gender'])
data['gender'].unique()
data['ever_married'] = label_encoder.fit_transform(data['ever_married'])
data['ever_married'].unique()
code
105213782/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.info()
code
105213782/cell_70
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score  # metrics
from sklearn.metrics import roc_auc_score, roc_curve  # metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier

data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()

# Undersample the majority (no-stroke) class to balance the target
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]

# Logistic regression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)

# k-nearest neighbours
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
Y_pred_knn = knn_classifier.predict(X_test)
prediction = knn_classifier.predict(features)

# Decision tree
dt_classifier = DecisionTreeClassifier()
dt_classifier.fit(X_train, Y_train)
Y_pred_dtc = dt_classifier.predict(X_test)
prediction = dt_classifier.predict(features)

# Gaussian naive Bayes
gnb_classifier = GaussianNB()
gnb_classifier.fit(X_train, Y_train)
Y_pred_gnb = gnb_classifier.predict(X_test)
prediction = gnb_classifier.predict(features)

print('Accuracy:', accuracy_score(Y_test, Y_pred_gnb))
print('Precision', precision_score(Y_test, Y_pred_gnb))
print('Recall', recall_score(Y_test, Y_pred_gnb))
print('F1 score', f1_score(Y_test, Y_pred_gnb))
print('ROC score', roc_auc_score(Y_test, Y_pred_gnb))
code
105213782/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data
code
105213782/cell_36
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression

lr = LogisticRegression()
lr.fit(X_train, Y_train)
code
18117432/cell_21
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()

# Extract each passenger's title from the Name column
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')

# Fold the rare titles into the five main groups
train_df['Initial'].replace(['Dr', 'Mlle', 'Mme', 'Ms', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Other', 'Miss', 'Miss', 'Miss', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df.groupby('Initial')['Age'].mean()

# Impute missing ages with each title group's mean age
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mr'), 'Age'] = 32.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mrs'), 'Age'] = 36
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Master'), 'Age'] = 4.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Miss'), 'Age'] = 22
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Other'), 'Age'] = 44.5

sns.stripplot(x='Initial', y='Age', data=train_df, jitter=True, hue='Survived')
code
18117432/cell_25
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df['Initial'].replace(['Dr', 'Mlle', 'Mme', 'Ms', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Other', 'Miss', 'Miss', 'Miss', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df.groupby('Initial')['Age'].mean()
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mr'), 'Age'] = 32.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mrs'), 'Age'] = 36
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Master'), 'Age'] = 4.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Miss'), 'Age'] = 22
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Other'), 'Age'] = 44.5
train_df['FamilySize'] = train_df['Parch'] + train_df['SibSp']
sns.barplot(x='FamilySize', y='Survived', data=train_df)
code
18117432/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df['Initial'].replace(['Dr', 'Mlle', 'Mme', 'Ms', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Other', 'Miss', 'Miss', 'Miss', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df.groupby('Initial')['Age'].mean()
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mr'), 'Age'] = 32.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mrs'), 'Age'] = 36
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Master'), 'Age'] = 4.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Miss'), 'Age'] = 22
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Other'), 'Age'] = 44.5

f, ax = plt.subplots()
train_df['Survived'].value_counts().plot.pie(explode=[0, 0.05], autopct='%1.1f%%', shadow=True)
ax.set_title('Survived')
ax.set_ylabel('')
plt.show()

f, ax = plt.subplots(1, 1, figsize=(6, 5))
train_df['Embarked'].value_counts().plot.pie(explode=[0, 0, 0], autopct='%1.1f%%', ax=ax)
plt.show()
code
18117432/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df['Initial'].replace(['Dr', 'Mlle', 'Mme', 'Ms', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Other', 'Miss', 'Miss', 'Miss', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df.groupby('Initial')['Age'].mean()
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mr'), 'Age'] = 32.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mrs'), 'Age'] = 36
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Master'), 'Age'] = 4.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Miss'), 'Age'] = 22
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Other'), 'Age'] = 44.5
sns.violinplot(x='Sex', y='Age', data=train_df, hue='Survived', split=True)
code
18117432/cell_6
[ "image_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
train_df.describe()
code
18117432/cell_26
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df['Initial'].replace(['Dr', 'Mlle', 'Mme', 'Ms', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Other', 'Miss', 'Miss', 'Miss', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df.groupby('Initial')['Age'].mean()
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mr'), 'Age'] = 32.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mrs'), 'Age'] = 36
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Master'), 'Age'] = 4.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Miss'), 'Age'] = 22
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Other'), 'Age'] = 44.5
train_df['FamilySize'] = train_df['Parch'] + train_df['SibSp']
sns.pairplot(train_df, hue='Sex', palette='coolwarm')
code
18117432/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df['Initial'].replace(['Dr', 'Mlle', 'Mme', 'Ms', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Other', 'Miss', 'Miss', 'Miss', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df.groupby('Initial')['Age'].mean()
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mr'), 'Age'] = 32.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mrs'), 'Age'] = 36
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Master'), 'Age'] = 4.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Miss'), 'Age'] = 22
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Other'), 'Age'] = 44.5
sns.barplot(x='Sex', y='Survived', data=train_df)
code
18117432/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
train_df.isnull().sum()
code
18117432/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df['Initial'].replace(['Dr', 'Mlle', 'Mme', 'Ms', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Other', 'Miss', 'Miss', 'Miss', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df.groupby('Initial')['Age'].mean()
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mr'), 'Age'] = 32.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mrs'), 'Age'] = 36
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Master'), 'Age'] = 4.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Miss'), 'Age'] = 22
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Other'), 'Age'] = 44.5

f, ax = plt.subplots()
train_df['Survived'].value_counts().plot.pie(explode=[0, 0.05], autopct='%1.1f%%', shadow=True)
ax.set_title('Survived')
ax.set_ylabel('')
plt.show()
code
18117432/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df['Initial'].replace(['Dr', 'Mlle', 'Mme', 'Ms', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Other', 'Miss', 'Miss', 'Miss', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df.groupby('Initial')['Age'].mean()
code
18117432/cell_22
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df['Initial'].replace(['Dr', 'Mlle', 'Mme', 'Ms', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Other', 'Miss', 'Miss', 'Miss', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df.groupby('Initial')['Age'].mean()
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mr'), 'Age'] = 32.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Mrs'), 'Age'] = 36
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Master'), 'Age'] = 4.5
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Miss'), 'Age'] = 22
train_df.loc[train_df.Age.isnull() & (train_df.Initial == 'Other'), 'Age'] = 44.5
sns.factorplot(x='Embarked', y='Age', data=train_df, kind='bar', hue='Survived')
code
18117432/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
code
18117432/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.isnull().sum()
train_df['Initial'] = train_df.Name.str.extract('([A-Za-z]+)\\.')
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
train_df['Initial'].replace(['Dr', 'Mlle', 'Mme', 'Ms', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Other', 'Miss', 'Miss', 'Miss', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
pd.crosstab(train_df.Initial, train_df.Sex).T.style.background_gradient(cmap='gist_rainbow')
code
18117432/cell_5
[ "image_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
display(train_df.head())
print('Shape of Data : ', train_df.shape)
code
2026799/cell_20
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/akosombo.csv')
df.dtypes

# Strip the unit suffixes and cast the string columns to float
df['eta'] = df.eta.map(lambda x: x.split(':')[-1])
df['percentage'] = df['percentage'].apply(lambda x: x.split('%')[0])
df['percentage'] = df['percentage'].astype(float)
df['size'] = df['size'].map(lambda x: x.split('KB')[0])
df['size'] = df['size'].astype(float)
df['eta'] = df['eta'].astype(float)
sns.jointplot(data=df, x='eta', y='size')
code
2026799/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/akosombo.csv')
df.dtypes
df['eta'] = df.eta.map(lambda x: x.split(':')[-1])
df['percentage'] = df['percentage'].apply(lambda x: x.split('%')[0])
df['percentage'] = df['percentage'].astype(float)
df['size'] = df['size'].map(lambda x: x.split('KB')[0])
df['size'] = df['size'].astype(float)
df.head()
code
2026799/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

print(check_output(['ls', '../input']).decode('utf8'))
code
2026799/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/akosombo.csv')
df.dtypes
pd.isnull(df).any()
code
2026799/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/akosombo.csv')
df.dtypes
df['eta'] = df.eta.map(lambda x: x.split(':')[-1])
df['percentage'] = df['percentage'].apply(lambda x: x.split('%')[0])
df['percentage'] = df['percentage'].astype(float)
df['size'] = df['size'].map(lambda x: x.split('KB')[0])
df['size'] = df['size'].astype(float)
df['eta'] = df['eta'].astype(float)
sns.jointplot(data=df, x='speed', y='size')
code
2026799/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/akosombo.csv')
df.dtypes
df['eta'] = df.eta.map(lambda x: x.split(':')[-1])
df['percentage'] = df['percentage'].apply(lambda x: x.split('%')[0])
df['percentage'] = df['percentage'].astype(float)
df['size'] = df['size'].map(lambda x: x.split('KB')[0])
df['size'] = df['size'].astype(float)
df['eta'] = df['eta'].astype(float)
df[df['eta'] == 4]
code
2026799/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/akosombo.csv')
df.describe()
code
2026799/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/akosombo.csv')
df.dtypes
df['eta'] = df.eta.map(lambda x: x.split(':')[-1])
df['percentage'] = df['percentage'].apply(lambda x: x.split('%')[0])
df['percentage'] = df['percentage'].astype(float)
df['size'] = df['size'].map(lambda x: x.split('KB')[0])
df['size'] = df['size'].astype(float)
df['eta'] = df['eta'].astype(float)
df[df['size'] == 9010]
code
2026799/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/akosombo.csv')
df.dtypes
df['eta'] = df.eta.map(lambda x: x.split(':')[-1])
df['percentage'] = df['percentage'].apply(lambda x: x.split('%')[0])
df['percentage'] = df['percentage'].astype(float)
df['size'] = df['size'].map(lambda x: x.split('KB')[0])
df['size'] = df['size'].astype(float)
df['eta'] = df['eta'].astype(float)
df.describe()
code
2026799/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/akosombo.csv')
df.dtypes
code
128024531/cell_42
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns

# Drop recipes whose only review comes from a user with a single review
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)

# Keep only recipes that actually have reviews, then inspect duplicates
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesReviews.nunique()
recipesReviews[recipesReviews.duplicated(subset=['Name', 'AuthorId'], keep=False)].sort_values(by='Name')
code
128024531/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
len(recipes)
code
128024531/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
len(recipe_counts)
code
128024531/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.head(4)
code
128024531/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesNot_in_list = reviews.loc[~reviews['RecipeId'].isin(recipes['RecipeId'])]
len(reviews)
code
128024531/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesNot_in_list = reviews.loc[~reviews['RecipeId'].isin(recipes['RecipeId'])]
recipesNot_in_list.head(5)
code
128024531/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
checkingForRecipe = recipes.loc[recipes['RecipeId'] == 194165]
checkingForRecipe
code
128024531/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
recipes['Name'].nunique()
code
128024531/cell_40
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesReviews.nunique()
code
128024531/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesNot_in_list = reviews.loc[~reviews['RecipeId'].isin(recipes['RecipeId'])]
len(recipesNot_in_list)
code
128024531/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
len(recipesReviews)
code
128024531/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
len(recipesReviews)
code
128024531/cell_41
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesReviews.nunique()
recipesReviews[recipesReviews['Name'].duplicated(keep=False)].sort_values(by='Name').head(10)
code
128024531/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
len(recipes)
code
128024531/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128024531/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
reviews['RecipeId'].nunique()
code
128024531/cell_45
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesNot_in_list = reviews.loc[~reviews['RecipeId'].isin(recipes['RecipeId'])]
reviews = reviews.loc[~reviews['ReviewId'].isin(recipesNot_in_list['ReviewId'])]
reviews.isnull().sum(axis=0)
recipesReviews.nunique()
recipesReviews[recipesReviews.duplicated(subset=['Name', 'AuthorId'], keep=False)].sort_values(by='Name')
duplicates = recipesReviews[recipesReviews.duplicated(subset=['Name', 'AuthorId'], keep=False)]
dropped_duplicates_df = duplicates.drop_duplicates(subset=['Name', 'AuthorId'], keep='first')
temp2 = reviews.loc[reviews['RecipeId'].isin(duplicates['RecipeId'])]
temp2['RecipeId'].value_counts()
code
128024531/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
len(reviews)
code
128024531/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
len(useless_recipes)
code
128024531/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
len(reviews)
code
128024531/cell_38
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesReviews.head(5)
code
128024531/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
reviews.head(4)
code
128024531/cell_46
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesNot_in_list = reviews.loc[~reviews['RecipeId'].isin(recipes['RecipeId'])]
reviews = reviews.loc[~reviews['ReviewId'].isin(recipesNot_in_list['ReviewId'])]
reviews.isnull().sum(axis=0)
recipesReviews.nunique()
recipesReviews[recipesReviews.duplicated(subset=['Name', 'AuthorId'], keep=False)].sort_values(by='Name')
duplicates = recipesReviews[recipesReviews.duplicated(subset=['Name', 'AuthorId'], keep=False)]
dropped_duplicates_df = duplicates.drop_duplicates(subset=['Name', 'AuthorId'], keep='first')
temp2 = reviews.loc[reviews['RecipeId'].isin(duplicates['RecipeId'])]
temp2
code
128024531/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
code
128024531/cell_37
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesNot_in_list = reviews.loc[~reviews['RecipeId'].isin(recipes['RecipeId'])]
reviews = reviews.loc[~reviews['ReviewId'].isin(recipesNot_in_list['ReviewId'])]
reviews.isnull().sum(axis=0)
code
128024531/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
recipe_counts.head(5)
code
128024531/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
code
128024531/cell_36
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

recipes = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/recipes.csv')
reviews = pd.read_csv('/kaggle/input/foodcom-recipes-and-reviews/reviews.csv')
recipes.columns
user_counts = reviews.groupby('AuthorId')['RecipeId'].nunique()
single_recipe_users = user_counts[user_counts == 1].index.tolist()
recipesidss = reviews.loc[reviews['AuthorId'].isin(single_recipe_users), 'RecipeId']
recipe_counts = reviews.loc[reviews['RecipeId'].isin(recipesidss), 'RecipeId'].value_counts()
useless_recipes = recipe_counts[recipe_counts == 1].index.tolist()
reviews = reviews[~reviews['RecipeId'].isin(useless_recipes)]
recipes = recipes[~recipes['RecipeId'].isin(useless_recipes)]
recipes.isnull().sum(axis=0)
recipesReviews = recipes.loc[recipes['RecipeId'].isin(reviews['RecipeId'].unique())]
recipesNot_in_list = reviews.loc[~reviews['RecipeId'].isin(recipes['RecipeId'])]
reviews = reviews.loc[~reviews['ReviewId'].isin(recipesNot_in_list['ReviewId'])]
len(reviews)
code
32070986/cell_21
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import numpy as np
import pandas as pd

train_df = pd.read_csv('../input/imet-2020-fgvc7/train.csv')
train_df['attribute_ids'] = train_df['attribute_ids'].apply(lambda x: x.split(' '))
train_df['id'] = train_df['id'].apply(lambda x: x + '.png')

# One-hot encode the multi-label attribute ids
mlb = MultiLabelBinarizer()
train_df_d = pd.DataFrame(mlb.fit_transform(train_df['attribute_ids']), columns=mlb.classes_, index=train_df.index)
label_names = train_df_d.columns
sam_sub_df = pd.read_csv('../input/imet-2020-fgvc7/sample_submission.csv')
sam_sub_df['id'] = sam_sub_df['id'].apply(lambda x: x + '.png')
probs.shape

# Binarise the predicted probabilities at a mean-based threshold
threshold = probs[0].mean()
labels_01 = (probs > threshold).astype(int)  # np.int is removed in recent NumPy
labels_01
labels_01.shape
sub = pd.DataFrame(labels_01, columns=label_names)
sam_sub_df['id'] = sam_sub_df['id'].str[:-4]
sam_sub_df['attribute_ids'] = sub['attribute_ids']
sam_sub_df.head()
code
32070986/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
probs.shape
code
32070986/cell_4
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import pandas as pd

train_df = pd.read_csv('../input/imet-2020-fgvc7/train.csv')
train_df['attribute_ids'] = train_df['attribute_ids'].apply(lambda x: x.split(' '))
train_df['id'] = train_df['id'].apply(lambda x: x + '.png')
mlb = MultiLabelBinarizer()
train_df_d = pd.DataFrame(mlb.fit_transform(train_df['attribute_ids']), columns=mlb.classes_, index=train_df.index)
print(train_df_d.shape)
train_df_d.head()
code
32070986/cell_20
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import pandas as pd

train_df = pd.read_csv('../input/imet-2020-fgvc7/train.csv')
train_df['attribute_ids'] = train_df['attribute_ids'].apply(lambda x: x.split(' '))
train_df['id'] = train_df['id'].apply(lambda x: x + '.png')
mlb = MultiLabelBinarizer()
train_df_d = pd.DataFrame(mlb.fit_transform(train_df['attribute_ids']), columns=mlb.classes_, index=train_df.index)
sam_sub_df = pd.read_csv('../input/imet-2020-fgvc7/sample_submission.csv')
sam_sub_df['id'] = sam_sub_df['id'].apply(lambda x: x + '.png')
sam_sub_df['id'] = sam_sub_df['id'].str[:-4]
sam_sub_df.head()
code
32070986/cell_2
[ "text_plain_output_1.png" ]
import os
import sys
import numpy as np
import pandas as pd
import tensorflow as tf, tensorflow.keras.backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot as plt

# Make the bundled EfficientNet source importable
sys.path.insert(0, '/kaggle/input/efficientnet-keras-source-code/')
import efficientnet.tfkeras as efn

print(tf.__version__)
print(tf.keras.__version__)
code
32070986/cell_11
[ "text_plain_output_1.png" ]
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_generator = test_datagen.flow_from_dataframe(dataframe=sam_sub_df, directory='../input/imet-2020-fgvc7/test', x_col='id', target_size=(img_size, img_size), batch_size=1, shuffle=False, class_mode=None)
code
32070986/cell_19
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import numpy as np
import pandas as pd

train_df = pd.read_csv('../input/imet-2020-fgvc7/train.csv')
train_df['attribute_ids'] = train_df['attribute_ids'].apply(lambda x: x.split(' '))
train_df['id'] = train_df['id'].apply(lambda x: x + '.png')
mlb = MultiLabelBinarizer()
train_df_d = pd.DataFrame(mlb.fit_transform(train_df['attribute_ids']), columns=mlb.classes_, index=train_df.index)
label_names = train_df_d.columns
sam_sub_df = pd.read_csv('../input/imet-2020-fgvc7/sample_submission.csv')
sam_sub_df['id'] = sam_sub_df['id'].apply(lambda x: x + '.png')
probs.shape
threshold = probs[0].mean()
labels_01 = (probs > threshold).astype(int)  # np.int is removed in recent NumPy
labels_01
labels_01.shape
sub = pd.DataFrame(labels_01, columns=label_names)
sub.head()
code
32070986/cell_7
[ "text_html_output_1.png" ]
import gc

# Free the large one-hot frame before inference
del train_df_d
gc.collect()
code
32070986/cell_18
[ "text_plain_output_1.png" ]
# Build the space-separated attribute string for each prediction row
# (.loc replaces the long-removed pandas .ix indexer)
sub['attribute_ids'] = ''
for col_name in sub.columns:
    sub.loc[sub[col_name] == 1, 'attribute_ids'] = sub['attribute_ids'] + ' ' + col_name
code
32070986/cell_8
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import pandas as pd

train_df = pd.read_csv('../input/imet-2020-fgvc7/train.csv')
train_df['attribute_ids'] = train_df['attribute_ids'].apply(lambda x: x.split(' '))
train_df['id'] = train_df['id'].apply(lambda x: x + '.png')
mlb = MultiLabelBinarizer()
train_df_d = pd.DataFrame(mlb.fit_transform(train_df['attribute_ids']), columns=mlb.classes_, index=train_df.index)
sam_sub_df = pd.read_csv('../input/imet-2020-fgvc7/sample_submission.csv')
sam_sub_df['id'] = sam_sub_df['id'].apply(lambda x: x + '.png')
print(sam_sub_df.shape)
sam_sub_df.head()
code
32070986/cell_15
[ "text_plain_output_1.png" ]
import numpy as np

probs.shape
threshold = probs[0].mean()
labels_01 = (probs > threshold).astype(int)  # np.int is removed in recent NumPy
labels_01
code
32070986/cell_16
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np

probs.shape
threshold = probs[0].mean()
labels_01 = (probs > threshold).astype(int)  # np.int is removed in recent NumPy
labels_01
labels_01.shape
code
32070986/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/imet-2020-fgvc7/train.csv')
train_df['attribute_ids'] = train_df['attribute_ids'].apply(lambda x: x.split(' '))
train_df['id'] = train_df['id'].apply(lambda x: x + '.png')
print(train_df.shape)
train_df.head()
code
32070986/cell_17
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import numpy as np
import pandas as pd

train_df = pd.read_csv('../input/imet-2020-fgvc7/train.csv')
train_df['attribute_ids'] = train_df['attribute_ids'].apply(lambda x: x.split(' '))
train_df['id'] = train_df['id'].apply(lambda x: x + '.png')
mlb = MultiLabelBinarizer()
train_df_d = pd.DataFrame(mlb.fit_transform(train_df['attribute_ids']), columns=mlb.classes_, index=train_df.index)
label_names = train_df_d.columns
sam_sub_df = pd.read_csv('../input/imet-2020-fgvc7/sample_submission.csv')
sam_sub_df['id'] = sam_sub_df['id'].apply(lambda x: x + '.png')
probs.shape
threshold = probs[0].mean()
labels_01 = (probs > threshold).astype(int)  # np.int is removed in recent NumPy
labels_01
labels_01.shape
sub = pd.DataFrame(labels_01, columns=label_names)
print(sub.shape)
sub.head()
code
32070986/cell_14
[ "text_html_output_1.png" ]
probs.shape
probs[0].mean()
code
32070986/cell_22
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import numpy as np
import pandas as pd

train_df = pd.read_csv('../input/imet-2020-fgvc7/train.csv')
train_df['attribute_ids'] = train_df['attribute_ids'].apply(lambda x: x.split(' '))
train_df['id'] = train_df['id'].apply(lambda x: x + '.png')
mlb = MultiLabelBinarizer()
train_df_d = pd.DataFrame(mlb.fit_transform(train_df['attribute_ids']), columns=mlb.classes_, index=train_df.index)
label_names = train_df_d.columns
sam_sub_df = pd.read_csv('../input/imet-2020-fgvc7/sample_submission.csv')
sam_sub_df['id'] = sam_sub_df['id'].apply(lambda x: x + '.png')
probs.shape
threshold = probs[0].mean()
labels_01 = (probs > threshold).astype(int)  # np.int is removed in recent NumPy
labels_01
labels_01.shape
sub = pd.DataFrame(labels_01, columns=label_names)
sam_sub_df['id'] = sam_sub_df['id'].str[:-4]
sam_sub_df['attribute_ids'] = sub['attribute_ids']
sam_sub_df.tail()
code
32070986/cell_12
[ "text_html_output_1.png", "text_plain_output_1.png" ]
test_generator.reset()
probs = model.predict_generator(test_generator, steps=len(test_generator.filenames))
code
32070986/cell_5
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import pandas as pd

train_df = pd.read_csv('../input/imet-2020-fgvc7/train.csv')
train_df['attribute_ids'] = train_df['attribute_ids'].apply(lambda x: x.split(' '))
train_df['id'] = train_df['id'].apply(lambda x: x + '.png')
mlb = MultiLabelBinarizer()
train_df_d = pd.DataFrame(mlb.fit_transform(train_df['attribute_ids']), columns=mlb.classes_, index=train_df.index)
train_df_d[:1][['448', '2429', '782']]
code
90147004/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import seaborn as sns
import plotly.express as px
import time
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error

mse = mean_squared_error

def downcast(df: pd.DataFrame) -> pd.DataFrame:
    # Shrink 64-bit numeric columns to save memory
    float_cols = [c for c in df if df[c].dtype in ['float64']]
    int_cols = [c for c in df if df[c].dtype in ['int64']]
    df[float_cols] = df[float_cols].astype('float32')
    df[int_cols] = df[int_cols].astype('int16')
    return df

def lag_feature(df: pd.DataFrame, lag: int, col: str, merge_cols, fill_value=-10, suffix=''):
    # Add the mean of `col` shifted by `lag` months, keyed on `merge_cols`
    temp = df[merge_cols + [col]]
    temp = temp.groupby(merge_cols).agg({f'{col}': 'mean'}).reset_index()
    new_col_name = f'{col}{suffix}_lag{lag}'
    temp.columns = merge_cols + [new_col_name]
    temp['date_block_num'] += lag
    if new_col_name not in df.columns:
        df = pd.merge(df, temp, on=merge_cols, how='left')
    temp = None
    df[new_col_name] = df[new_col_name].fillna(fill_value).astype('float32')
    return (df, new_col_name)

items = pd.read_csv('../input/data-preprocessing/items.csv')
shops = pd.read_csv('../input/data-preprocessing/shops.csv')
cats = pd.read_csv('../input/data-preprocessing/item_categories.csv')
train = pd.read_csv('../input/data-preprocessing/sales_train.csv')
test = pd.read_csv('../input/data-preprocessing/test.csv').set_index('ID')
dataframes = [train, shops, items, cats]
for d in dataframes:
    d = downcast(d)
tme = pd.read_csv('../input/data-preprocessing/train_monthly_extended.csv')
tme = downcast(tme)
cast_cols = ['item_cnt_month', 'days_with_sales', 'date_block_num']
tme[cast_cols] = tme[cast_cols].astype(int)
tme['item_cnt_month'] = tme['item_cnt_month'].clip(0, 50)
tme.sample(4)
code
90147004/cell_6
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import seaborn as sns
import plotly.express as px
import time
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error

mse = mean_squared_error

def downcast(df: pd.DataFrame) -> pd.DataFrame:
    float_cols = [c for c in df if df[c].dtype in ['float64']]
    int_cols = [c for c in df if df[c].dtype in ['int64']]
    df[float_cols] = df[float_cols].astype('float32')
    df[int_cols] = df[int_cols].astype('int16')
    return df

def lag_feature(df: pd.DataFrame, lag: int, col: str, merge_cols, fill_value=-10, suffix=''):
    temp = df[merge_cols + [col]]
    temp = temp.groupby(merge_cols).agg({f'{col}': 'mean'}).reset_index()
    new_col_name = f'{col}{suffix}_lag{lag}'
    temp.columns = merge_cols + [new_col_name]
    temp['date_block_num'] += lag
    if new_col_name not in df.columns:
        df = pd.merge(df, temp, on=merge_cols, how='left')
    temp = None
    df[new_col_name] = df[new_col_name].fillna(fill_value).astype('float32')
    return (df, new_col_name)

items = pd.read_csv('../input/data-preprocessing/items.csv')
shops = pd.read_csv('../input/data-preprocessing/shops.csv')
cats = pd.read_csv('../input/data-preprocessing/item_categories.csv')
train = pd.read_csv('../input/data-preprocessing/sales_train.csv')
test = pd.read_csv('../input/data-preprocessing/test.csv').set_index('ID')
dataframes = [train, shops, items, cats]
for d in dataframes:
    d = downcast(d)
tme = pd.read_csv('../input/data-preprocessing/train_monthly_extended.csv')
tme = downcast(tme)
cast_cols = ['item_cnt_month', 'days_with_sales', 'date_block_num']
tme[cast_cols] = tme[cast_cols].astype(int)
tme['item_cnt_month'] = tme['item_cnt_month'].clip(0, 50)
tme.sample(4)

print('> Building features based on the lags of item_cnt_month')
cols = []
for lag in [1, 2, 3, 4, 6, 12]:
    t = time.process_time()
    print(f'Processing lag {lag} - filling strategy is for decision trees')
    tme, new_col = lag_feature(tme, lag, 'item_cnt_month', ['date_block_num', 'shop_id', 'item_id'], fill_value=np.nan)
    elapsed_time = time.process_time() - t
    print(f' -- {new_col} took {round(elapsed_time, 1)}')
    cols.append(new_col)

print('> Building (renaming) target')
tme, new_col = lag_feature(tme, -1, 'item_cnt_month', ['date_block_num', 'shop_id', 'item_id'], fill_value=0)
tme = tme.rename(columns={new_col: 'target'})
code
90147004/cell_7
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import seaborn as sns
import plotly.express as px
import time
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error

mse = mean_squared_error

def downcast(df: pd.DataFrame) -> pd.DataFrame:
    float_cols = [c for c in df if df[c].dtype in ['float64']]
    int_cols = [c for c in df if df[c].dtype in ['int64']]
    df[float_cols] = df[float_cols].astype('float32')
    df[int_cols] = df[int_cols].astype('int16')
    return df

def lag_feature(df: pd.DataFrame, lag: int, col: str, merge_cols, fill_value=-10, suffix=''):
    temp = df[merge_cols + [col]]
    temp = temp.groupby(merge_cols).agg({f'{col}': 'mean'}).reset_index()
    new_col_name = f'{col}{suffix}_lag{lag}'
    temp.columns = merge_cols + [new_col_name]
    temp['date_block_num'] += lag
    if new_col_name not in df.columns:
        df = pd.merge(df, temp, on=merge_cols, how='left')
    temp = None
    df[new_col_name] = df[new_col_name].fillna(fill_value).astype('float32')
    return (df, new_col_name)

items = pd.read_csv('../input/data-preprocessing/items.csv')
shops = pd.read_csv('../input/data-preprocessing/shops.csv')
cats = pd.read_csv('../input/data-preprocessing/item_categories.csv')
train = pd.read_csv('../input/data-preprocessing/sales_train.csv')
test = pd.read_csv('../input/data-preprocessing/test.csv').set_index('ID')
dataframes = [train, shops, items, cats]
for d in dataframes:
    d = downcast(d)
tme = pd.read_csv('../input/data-preprocessing/train_monthly_extended.csv')
tme = downcast(tme)
cast_cols = ['item_cnt_month', 'days_with_sales', 'date_block_num']
tme[cast_cols] = tme[cast_cols].astype(int)
tme['item_cnt_month'] = tme['item_cnt_month'].clip(0, 50)
tme.sample(4)

cols = []
for lag in [1, 2, 3, 4, 6, 12]:
    t = time.process_time()
    tme, new_col = lag_feature(tme, lag, 'item_cnt_month', ['date_block_num', 'shop_id', 'item_id'], fill_value=np.nan)
    elapsed_time = time.process_time() - t
    cols.append(new_col)
tme, new_col = lag_feature(tme, -1, 'item_cnt_month', ['date_block_num', 'shop_id', 'item_id'], fill_value=0)
tme = tme.rename(columns={new_col: 'target'})

for lag in [1, 2, 3, 4, 12]:
    t = time.process_time()
    print(f'Processing lag {lag} - filling strategy is for decision trees')
    tme, new_col = lag_feature(tme, lag, 'item_cnt_month', ['date_block_num', 'shop_id'], fill_value=np.nan, suffix='s')
    elapsed_time = time.process_time() - t
    print(f' -- {new_col} took {round(elapsed_time, 1)}')
    cols.append(new_col)
code
90147004/cell_8
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import time import numpy as np import pandas as pd import os import seaborn as sns import plotly.express as px import time import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error mse = mean_squared_error def downcast(df: pd.DataFrame) -> pd.DataFrame: float_cols = [c for c in df if df[c].dtype in ['float64']] int_cols = [c for c in df if df[c].dtype in ['int64']] df[float_cols] = df[float_cols].astype('float32') df[int_cols] = df[int_cols].astype('int16') return df def lag_feature(df: pd.DataFrame, lag: int, col: str, merge_cols, fill_value=-10, suffix=''): temp = df[merge_cols + [col]] temp = temp.groupby(merge_cols).agg({f'{col}': 'mean'}).reset_index() new_col_name = f'{col}{suffix}_lag{lag}' temp.columns = merge_cols + [new_col_name] temp['date_block_num'] += lag if new_col_name not in df.columns: df = pd.merge(df, temp, on=merge_cols, how='left') temp = None df[new_col_name] = df[new_col_name].fillna(fill_value).astype('float32') return (df, new_col_name) items = pd.read_csv('../input/data-preprocessing/items.csv') shops = pd.read_csv('../input/data-preprocessing/shops.csv') cats = pd.read_csv('../input/data-preprocessing/item_categories.csv') train = pd.read_csv('../input/data-preprocessing/sales_train.csv') test = pd.read_csv('../input/data-preprocessing/test.csv').set_index('ID') dataframes = [train, shops, items, cats] for d in dataframes: d = downcast(d) tme = pd.read_csv('../input/data-preprocessing/train_monthly_extended.csv') tme = downcast(tme) cast_cols = ['item_cnt_month', 'days_with_sales', 'date_block_num'] tme[cast_cols] = tme[cast_cols].astype(int) tme['item_cnt_month'] = tme['item_cnt_month'].clip(0, 50) tme.sample(4) cols = [] for lag in [1, 2, 3, 4, 6, 12]: t = time.process_time() tme, new_col = lag_feature(tme, lag, 'item_cnt_month', ['date_block_num', 'shop_id', 'item_id'], fill_value=np.nan) elapsed_time = time.process_time() - t cols.append(new_col) tme, new_col = lag_feature(tme, -1, 'item_cnt_month', ['date_block_num', 'shop_id', 'item_id'], fill_value=0) tme = tme.rename(columns={new_col: 'target'}) for lag in [1, 2, 3, 4, 12]: t = time.process_time() tme, new_col = lag_feature(tme, lag, 'item_cnt_month', ['date_block_num', 'shop_id'], fill_value=np.nan, suffix='s') elapsed_time = time.process_time() - t cols.append(new_col) for lag in [1, 2, 3, 4, 12]: t = time.process_time() print(f'Processing lag {lag} - filling strategy is for decision trees') tme, new_col = lag_feature(tme, lag, 'item_cnt_month', ['date_block_num', 'item_id'], fill_value=np.nan, suffix='i') elapsed_time = time.process_time() - t print(f' -- {new_col} took {round(elapsed_time, 1)}') cols.append(new_col)
code
90147004/cell_3
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import os import seaborn as sns import plotly.express as px import time import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error mse = mean_squared_error def downcast(df: pd.DataFrame) -> pd.DataFrame: float_cols = [c for c in df if df[c].dtype in ['float64']] int_cols = [c for c in df if df[c].dtype in ['int64']] df[float_cols] = df[float_cols].astype('float32') df[int_cols] = df[int_cols].astype('int16') return df def lag_feature(df: pd.DataFrame, lag: int, col: str, merge_cols, fill_value=-10, suffix=''): temp = df[merge_cols + [col]] temp = temp.groupby(merge_cols).agg({f'{col}': 'mean'}).reset_index() new_col_name = f'{col}{suffix}_lag{lag}' temp.columns = merge_cols + [new_col_name] temp['date_block_num'] += lag if new_col_name not in df.columns: df = pd.merge(df, temp, on=merge_cols, how='left') temp = None df[new_col_name] = df[new_col_name].fillna(fill_value).astype('float32') return (df, new_col_name) items = pd.read_csv('../input/data-preprocessing/items.csv') shops = pd.read_csv('../input/data-preprocessing/shops.csv') cats = pd.read_csv('../input/data-preprocessing/item_categories.csv') train = pd.read_csv('../input/data-preprocessing/sales_train.csv') test = pd.read_csv('../input/data-preprocessing/test.csv').set_index('ID') dataframes = [train, shops, items, cats] for d in dataframes: d = downcast(d) train.sample(3)
code
90147004/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.tokenize import RegexpTokenizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics import mean_squared_error import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import os import seaborn as sns import plotly.express as px import time import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error mse = mean_squared_error def downcast(df: pd.DataFrame) -> pd.DataFrame: float_cols = [c for c in df if df[c].dtype in ['float64']] int_cols = [c for c in df if df[c].dtype in ['int64']] df[float_cols] = df[float_cols].astype('float32') df[int_cols] = df[int_cols].astype('int16') return df def lag_feature(df: pd.DataFrame, lag: int, col: str, merge_cols, fill_value=-10, suffix=''): temp = df[merge_cols + [col]] temp = temp.groupby(merge_cols).agg({f'{col}': 'mean'}).reset_index() new_col_name = f'{col}{suffix}_lag{lag}' temp.columns = merge_cols + [new_col_name] temp['date_block_num'] += lag if new_col_name not in df.columns: df = pd.merge(df, temp, on=merge_cols, how='left') temp = None df[new_col_name] = df[new_col_name].fillna(fill_value).astype('float32') return (df, new_col_name) items = pd.read_csv('../input/data-preprocessing/items.csv') shops = pd.read_csv('../input/data-preprocessing/shops.csv') cats = pd.read_csv('../input/data-preprocessing/item_categories.csv') train = pd.read_csv('../input/data-preprocessing/sales_train.csv') test = pd.read_csv('../input/data-preprocessing/test.csv').set_index('ID') dataframes = [train, shops, items, cats] for d in dataframes: d = downcast(d) tme = pd.read_csv('../input/data-preprocessing/train_monthly_extended.csv') tme = downcast(tme) cast_cols = ['item_cnt_month', 'days_with_sales', 'date_block_num'] tme[cast_cols] = tme[cast_cols].astype(int) tme['item_cnt_month'] = tme['item_cnt_month'].clip(0, 50) tme.sample(4) import nltk from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.tokenize import RegexpTokenizer from nltk.corpus import stopwords stop_words = ['per', 'I', 'me', 'the', 'what', 'which', 'having', 'for', 'with', 'of', 'about', 'but', 'if', 'both', 'each', 'any', 'a'] stemmer = SnowballStemmer('english') custom_tokenizer = RegexpTokenizer('\\w+') def manipulate_str(a): a = a.lower() word_list = custom_tokenizer.tokenize(a) stemmed_words = list() for w in word_list: sw = stemmer.stem(w) if w not in stop_words and len(sw) > 2: stemmed_words.append(sw) return ' '.join(set(stemmed_words)) items['item_name_en_tokenized'] = items.item_name_en.apply(lambda x: manipulate_str(x)) from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer(min_df=15, max_features=40, stop_words='english') vectorizer.fit(items['item_name_en_tokenized']) text_features = vectorizer.transform(items['item_name_en_tokenized']) text_features.shape col_names = [f'txt_{c}' for c in vectorizer.get_feature_names_out()] if type(text_features) is not pd.DataFrame: text_features = pd.DataFrame.sparse.from_spmatrix(text_features, columns=col_names) text_features.mean()
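# Illustrative sketch (assumption: the example string is invented, not from the item list):
# manipulate_str lowercases, tokenizes on \w+, drops stop words and any stem shorter than
# 3 characters, then joins the remaining stems. set() makes the token order nondeterministic,
# which is harmless for bag-of-words counting.
print(manipulate_str('The Witcher 3: Wild Hunt [PC]'))  # stems like 'witcher wild hunt', in some order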
code
90147004/cell_14
[ "text_html_output_1.png" ]
from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.tokenize import RegexpTokenizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics import mean_squared_error import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import os import seaborn as sns import plotly.express as px import time import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error mse = mean_squared_error def downcast(df: pd.DataFrame) -> pd.DataFrame: float_cols = [c for c in df if df[c].dtype in ['float64']] int_cols = [c for c in df if df[c].dtype in ['int64']] df[float_cols] = df[float_cols].astype('float32') df[int_cols] = df[int_cols].astype('int16') return df def lag_feature(df: pd.DataFrame, lag: int, col: str, merge_cols, fill_value=-10, suffix=''): temp = df[merge_cols + [col]] temp = temp.groupby(merge_cols).agg({f'{col}': 'mean'}).reset_index() new_col_name = f'{col}{suffix}_lag{lag}' temp.columns = merge_cols + [new_col_name] temp['date_block_num'] += lag if new_col_name not in df.columns: df = pd.merge(df, temp, on=merge_cols, how='left') temp = None df[new_col_name] = df[new_col_name].fillna(fill_value).astype('float32') return (df, new_col_name) items = pd.read_csv('../input/data-preprocessing/items.csv') shops = pd.read_csv('../input/data-preprocessing/shops.csv') cats = pd.read_csv('../input/data-preprocessing/item_categories.csv') train = pd.read_csv('../input/data-preprocessing/sales_train.csv') test = pd.read_csv('../input/data-preprocessing/test.csv').set_index('ID') dataframes = [train, shops, items, cats] for d in dataframes: d = downcast(d) import nltk from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.tokenize import RegexpTokenizer from nltk.corpus import stopwords stop_words = ['per', 'I', 'me', 'the', 'what', 'which', 'having', 'for', 'with', 'of', 'about', 'but', 'if', 'both', 'each', 'any', 'a'] stemmer = SnowballStemmer('english') custom_tokenizer = RegexpTokenizer('\\w+') def manipulate_str(a): a = a.lower() word_list = custom_tokenizer.tokenize(a) stemmed_words = list() for w in word_list: sw = stemmer.stem(w) if w not in stop_words and len(sw) > 2: stemmed_words.append(sw) return ' '.join(set(stemmed_words)) items['item_name_en_tokenized'] = items.item_name_en.apply(lambda x: manipulate_str(x)) from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer(min_df=15, max_features=40, stop_words='english') vectorizer.fit(items['item_name_en_tokenized']) text_features = vectorizer.transform(items['item_name_en_tokenized']) text_features.shape
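# Illustrative sketch (assumption: toy corpus, not from the item names): how min_df and
# max_features prune the vocabulary. 'ps4' and 'dvd' occur in only one document, so
# min_df=2 removes them; max_features then caps whatever survives.
_v = CountVectorizer(min_df=2, max_features=3)
_m = _v.fit_transform(['game pc', 'game ps4', 'game pc dvd'])
print(_v.get_feature_names_out())  # e.g. ['game' 'pc']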
code
90147004/cell_10
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import time
import numpy as np
import pandas as pd
import os
import seaborn as sns
import plotly.express as px
import time
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
mse = mean_squared_error

def downcast(df: pd.DataFrame) -> pd.DataFrame:
    float_cols = [c for c in df if df[c].dtype in ['float64']]
    int_cols = [c for c in df if df[c].dtype in ['int64']]
    df[float_cols] = df[float_cols].astype('float32')
    df[int_cols] = df[int_cols].astype('int16')
    return df

def lag_feature(df: pd.DataFrame, lag: int, col: str, merge_cols, fill_value=-10, suffix=''):
    temp = df[merge_cols + [col]]
    temp = temp.groupby(merge_cols).agg({f'{col}': 'mean'}).reset_index()
    new_col_name = f'{col}{suffix}_lag{lag}'
    temp.columns = merge_cols + [new_col_name]
    temp['date_block_num'] += lag
    if new_col_name not in df.columns:
        df = pd.merge(df, temp, on=merge_cols, how='left')
    temp = None
    df[new_col_name] = df[new_col_name].fillna(fill_value).astype('float32')
    return (df, new_col_name)

items = pd.read_csv('../input/data-preprocessing/items.csv')
shops = pd.read_csv('../input/data-preprocessing/shops.csv')
cats = pd.read_csv('../input/data-preprocessing/item_categories.csv')
train = pd.read_csv('../input/data-preprocessing/sales_train.csv')
test = pd.read_csv('../input/data-preprocessing/test.csv').set_index('ID')
dataframes = [train, shops, items, cats]
for d in dataframes:
    d = downcast(d)
tme = pd.read_csv('../input/data-preprocessing/train_monthly_extended.csv')
tme = downcast(tme)
cast_cols = ['item_cnt_month', 'days_with_sales', 'date_block_num']
tme[cast_cols] = tme[cast_cols].astype(int)
tme['item_cnt_month'] = tme['item_cnt_month'].clip(0, 50)
tme.sample(4)
cols = []
for lag in [1, 2, 3, 4, 6, 12]:
    t = time.process_time()
    tme, new_col = lag_feature(tme, lag, 'item_cnt_month', ['date_block_num', 'shop_id', 'item_id'], fill_value=np.nan)
    elapsed_time = time.process_time() - t
    cols.append(new_col)
tme, new_col = lag_feature(tme, -1, 'item_cnt_month', ['date_block_num', 'shop_id', 'item_id'], fill_value=0)
tme = tme.rename(columns={new_col: 'target'})
for lag in [1, 2, 3, 4, 12]:
    t = time.process_time()
    tme, new_col = lag_feature(tme, lag, 'item_cnt_month', ['date_block_num', 'shop_id'], fill_value=np.nan, suffix='s')
    elapsed_time = time.process_time() - t
    cols.append(new_col)
for lag in [1, 2, 3, 4, 12]:
    t = time.process_time()
    tme, new_col = lag_feature(tme, lag, 'item_cnt_month', ['date_block_num', 'item_id'], fill_value=np.nan, suffix='i')
    elapsed_time = time.process_time() - t
    cols.append(new_col)
# Momentum-style features derived from the lags (ratios are NaN/inf where a lag is missing or zero).
tme['avg_item_3mo'] = ((tme['item_cnt_month_lag1'] + tme['item_cnt_month_lag2'] + tme['item_cnt_month_lag3']) / 3).astype(np.float16)
tme['diff_1yr'] = (tme['item_cnt_month_lag1'] - tme['item_cnt_month_lag12']).astype(np.float16)
tme['roc_1_2'] = tme['item_cnt_month_lag1'] / tme['item_cnt_month_lag2']
tme['roc_2_3'] = tme['item_cnt_month_lag1'] / tme['item_cnt_month_lag3']
tme['roc_1_4'] = tme['item_cnt_month_lag1'] / tme['item_cnt_month_lag4']
tme['roc_1_12'] = tme['item_cnt_month_lag1'] / tme['item_cnt_month_lag12']
tme['diff_12_34'] = tme['item_cnt_month_lag1'] + tme['item_cnt_month_lag2'] - tme['item_cnt_month_lag3'] - tme['item_cnt_month_lag4']
tme['month_num'] = (1 + tme['date_block_num'] % 12).astype(np.uint8)
tme['daydiff'] = (tme['days_no_sales_beginning'] - tme['days_with_sales']).astype(np.uint8)
print('How long does a given item stay in a shop?')
tme['item_age'] = (tme['date_block_num'] - tme.groupby('item_id')['date_block_num'].transform('min')).astype('int8')
tme['item_age'] = tme['item_age'].clip(0, 25)
tme['item_age_in_shop'] = (tme['date_block_num'] - tme.groupby(['item_id', 'shop_id'])['date_block_num'].transform('min')).astype('int8').clip(0, 25)
# NB: despite the '80' in the name, this flag marks rows with item_price_avg > 1000.
tme['price_greater_than80'] = (tme['item_price_avg'] > 1000).astype(int)
tme['price_var_within_month'] = (tme['item_price_max'] - tme['item_price_min']) / tme['item_price_avg']
tme['price_max_avg_within_month'] = tme['item_price_max'] - tme['item_price_avg']
for lag in [1, 2, 3]:
    t = time.process_time()
    print(f'Processing lag {lag} - filling strategy is for decision trees')
    tme, new_col = lag_feature(tme, lag, 'price_var_within_month', ['date_block_num', 'item_id'], fill_value=np.nan, suffix='p')
    elapsed_time = time.process_time() - t
    print(f' -- {new_col} took {round(elapsed_time, 1)}')
    cols.append(new_col)
tme.sample(4)
code
90147004/cell_5
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import os import seaborn as sns import plotly.express as px import time import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error mse = mean_squared_error def downcast(df: pd.DataFrame) -> pd.DataFrame: float_cols = [c for c in df if df[c].dtype in ['float64']] int_cols = [c for c in df if df[c].dtype in ['int64']] df[float_cols] = df[float_cols].astype('float32') df[int_cols] = df[int_cols].astype('int16') return df def lag_feature(df: pd.DataFrame, lag: int, col: str, merge_cols, fill_value=-10, suffix=''): temp = df[merge_cols + [col]] temp = temp.groupby(merge_cols).agg({f'{col}': 'mean'}).reset_index() new_col_name = f'{col}{suffix}_lag{lag}' temp.columns = merge_cols + [new_col_name] temp['date_block_num'] += lag if new_col_name not in df.columns: df = pd.merge(df, temp, on=merge_cols, how='left') temp = None df[new_col_name] = df[new_col_name].fillna(fill_value).astype('float32') return (df, new_col_name) items = pd.read_csv('../input/data-preprocessing/items.csv') shops = pd.read_csv('../input/data-preprocessing/shops.csv') cats = pd.read_csv('../input/data-preprocessing/item_categories.csv') train = pd.read_csv('../input/data-preprocessing/sales_train.csv') test = pd.read_csv('../input/data-preprocessing/test.csv').set_index('ID') dataframes = [train, shops, items, cats] for d in dataframes: d = downcast(d) tme = pd.read_csv('../input/data-preprocessing/train_monthly_extended.csv') tme = downcast(tme) cast_cols = ['item_cnt_month', 'days_with_sales', 'date_block_num'] tme[cast_cols] = tme[cast_cols].astype(int) tme['item_cnt_month'] = tme['item_cnt_month'].clip(0, 50) tme.sample(4) sns.scatterplot(data=tme.query('item_cnt_month>0 and days_with_sales>=0').sample(7000), x='days_with_sales', y='item_cnt_month', alpha=0.5)
code
129035325/cell_21
[ "text_html_output_1.png" ]
def outlier_removal(dataframe, features): for feature_name in features: Q1 = dataframe[feature_name].quantile(0.25) Q3 = dataframe[feature_name].quantile(0.75) IQR = Q3 - Q1 dataframe = dataframe[(dataframe[feature_name] >= Q1 - 1.5 * IQR) & (dataframe[feature_name] <= Q3 + 1.5 * IQR)] return dataframe
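# Illustrative sketch (assumption: toy numbers, not competition data): with
# x = [10, 11, 12, 13, 500], Q1=11, Q3=13, IQR=2, so the keep-range is [8, 16]
# and the row holding 500 is filtered out.
import pandas as pd
_toy = pd.DataFrame({'x': [10, 11, 12, 13, 500]})
print(outlier_removal(_toy, ['x']))  # four rows remain; 500 is dropped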
code
129035325/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id') submission = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') origin = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv').drop('Row#', axis=1) def info(train): pass train.describe().T
code
129035325/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from colorama import Style, Fore import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px from plotly.subplots import make_subplots import plotly.graph_objects as go from sklearn.model_selection import train_test_split, KFold import optuna from xgboost import XGBRegressor from catboost import CatBoostRegressor from lightgbm import LGBMRegressor from sklearn.metrics import mean_absolute_error from colorama import Style, Fore red = Style.BRIGHT + Fore.RED blu = Style.BRIGHT + Fore.BLUE mgt = Style.BRIGHT + Fore.MAGENTA grn = Style.BRIGHT + Fore.GREEN gld = Style.BRIGHT + Fore.YELLOW res = Style.RESET_ALL TARGET = 'yield'
code
129035325/cell_20
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id') submission = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') origin = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv').drop('Row#', axis=1) def info(train): pass train.describe().T cont_col = [i for (i, j) in zip(test.columns, test.dtypes) if j in ["int", "float"]] fig, axes = plt.subplots(4, 4, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot( ax=ax, data=train, x=cont_col[i], color="#F8766D", label="Train", fill=True ) sns.kdeplot( ax=ax, data=test, x=cont_col[i], color="#00BFC4", label="Test", fill=True ) ax.set_title(f"{cont_col[i]} distribution") fig.tight_layout() plt.legend() fig, axes = plt.subplots(4, 4, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot( ax=ax, data=train, x=cont_col[i], color="#F8766D", label="Train", fill=True ) sns.kdeplot( ax=ax, data=origin, x=cont_col[i], color="#00BFC4", label="Original", fill=True ) ax.set_title(f"{cont_col[i]} distribution") fig.tight_layout() plt.legend() train = train.drop(['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'AverageRainingDays'], axis=1) test = test.drop(['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'AverageRainingDays'], axis=1) cont_col = [i for (i, j) in zip(test.columns, test.dtypes) if j in ["int", "float"]] fig, axes = plt.subplots(3, 3, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot( ax=ax, data=train, x=cont_col[i], color="#F8766D", label="Train", fill=True ) sns.kdeplot( ax=ax, data=test, x=cont_col[i], color="#00BFC4", label="Test", fill=True ) ax.set_title(f"{cont_col[i]} distribution") fig.tight_layout() plt.legend() for col in test.columns: print(col) if test[col].nunique() < 20: print(test[col].value_counts()) print(test.shape)
code
129035325/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
submission = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
origin = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv').drop('Row#', axis=1)

def info(train):
    display(train.head())
    print('*' * 50)
    print(f'SHAPE OF THE DATA: {train.shape}')
    print('*' * 50)
    if train.isnull().sum().sum() == 0:  # total null count across the whole frame
        print('NO NULL VALUES FOUND!')
    else:
        print(f'NULL VALUES: {train.isnull().sum()}')
    train.info()  # .info() prints its report directly
info(train)
code
129035325/cell_19
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id') submission = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') origin = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv').drop('Row#', axis=1) def info(train): pass train.describe().T cont_col = [i for (i, j) in zip(test.columns, test.dtypes) if j in ["int", "float"]] fig, axes = plt.subplots(4, 4, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot( ax=ax, data=train, x=cont_col[i], color="#F8766D", label="Train", fill=True ) sns.kdeplot( ax=ax, data=test, x=cont_col[i], color="#00BFC4", label="Test", fill=True ) ax.set_title(f"{cont_col[i]} distribution") fig.tight_layout() plt.legend() train = train.drop(['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'AverageRainingDays'], axis=1) test = test.drop(['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'AverageRainingDays'], axis=1) cat_feat = list() for col in train.columns: print(col) if train[col].nunique() < 20: cat_feat.append(col) print(train[col].value_counts()) print(train.shape)
code
129035325/cell_18
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id') submission = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') origin = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv').drop('Row#', axis=1) def info(train): pass train.describe().T cont_col = [i for (i, j) in zip(test.columns, test.dtypes) if j in ["int", "float"]] fig, axes = plt.subplots(4, 4, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot( ax=ax, data=train, x=cont_col[i], color="#F8766D", label="Train", fill=True ) sns.kdeplot( ax=ax, data=test, x=cont_col[i], color="#00BFC4", label="Test", fill=True ) ax.set_title(f"{cont_col[i]} distribution") fig.tight_layout() plt.legend() fig, axes = plt.subplots(4, 4, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot( ax=ax, data=train, x=cont_col[i], color="#F8766D", label="Train", fill=True ) sns.kdeplot( ax=ax, data=origin, x=cont_col[i], color="#00BFC4", label="Original", fill=True ) ax.set_title(f"{cont_col[i]} distribution") fig.tight_layout() plt.legend() train = train.drop(['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'AverageRainingDays'], axis=1) test = test.drop(['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'AverageRainingDays'], axis=1) cont_col = [i for i, j in zip(test.columns, test.dtypes) if j in ['int', 'float']] fig, axes = plt.subplots(3, 3, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot(ax=ax, data=train, x=cont_col[i], color='#F8766D', label='Train', fill=True) sns.kdeplot(ax=ax, data=test, x=cont_col[i], color='#00BFC4', label='Test', fill=True) ax.set_title(f'{cont_col[i]} distribution') fig.tight_layout() plt.legend()
code
129035325/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id') submission = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') origin = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv').drop('Row#', axis=1) def info(train): pass train.describe().T cont_col = [i for (i, j) in zip(test.columns, test.dtypes) if j in ["int", "float"]] fig, axes = plt.subplots(4, 4, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot( ax=ax, data=train, x=cont_col[i], color="#F8766D", label="Train", fill=True ) sns.kdeplot( ax=ax, data=test, x=cont_col[i], color="#00BFC4", label="Test", fill=True ) ax.set_title(f"{cont_col[i]} distribution") fig.tight_layout() plt.legend() fig, axes = plt.subplots(4, 4, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot(ax=ax, data=train, x=cont_col[i], color='#F8766D', label='Train', fill=True) sns.kdeplot(ax=ax, data=origin, x=cont_col[i], color='#00BFC4', label='Original', fill=True) ax.set_title(f'{cont_col[i]} distribution') fig.tight_layout() plt.legend()
code
129035325/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id') submission = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') origin = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv').drop('Row#', axis=1) def info(train): pass train.describe().T cont_col = [i for (i, j) in zip(test.columns, test.dtypes) if j in ["int", "float"]] fig, axes = plt.subplots(4, 4, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot( ax=ax, data=train, x=cont_col[i], color="#F8766D", label="Train", fill=True ) sns.kdeplot( ax=ax, data=test, x=cont_col[i], color="#00BFC4", label="Test", fill=True ) ax.set_title(f"{cont_col[i]} distribution") fig.tight_layout() plt.legend() fig, axes = plt.subplots(4, 4, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot( ax=ax, data=train, x=cont_col[i], color="#F8766D", label="Train", fill=True ) sns.kdeplot( ax=ax, data=origin, x=cont_col[i], color="#00BFC4", label="Original", fill=True ) ax.set_title(f"{cont_col[i]} distribution") fig.tight_layout() plt.legend() train = train.drop(['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'AverageRainingDays'], axis=1) test = test.drop(['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'AverageRainingDays'], axis=1) cont_col = [i for (i, j) in zip(test.columns, test.dtypes) if j in ["int", "float"]] fig, axes = plt.subplots(3, 3, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot( ax=ax, data=train, x=cont_col[i], color="#F8766D", label="Train", fill=True ) sns.kdeplot( ax=ax, data=test, x=cont_col[i], color="#00BFC4", label="Test", fill=True ) ax.set_title(f"{cont_col[i]} distribution") fig.tight_layout() plt.legend() cat_feat = list() for col in train.columns: if train[col].nunique() < 20: cat_feat.append(col) def outlier_removal(dataframe, features): for feature_name in features: Q1 = dataframe[feature_name].quantile(0.25) Q3 = dataframe[feature_name].quantile(0.75) IQR = Q3 - Q1 dataframe = dataframe[(dataframe[feature_name] >= Q1 - 1.5 * IQR) & (dataframe[feature_name] <= Q3 + 1.5 * IQR)] return dataframe features = train.columns train = outlier_removal(train, features) for col in cat_feat: q1 = test[col].quantile(0.25) q3 = test[col].quantile(0.75) iqr = q3 - q1 lower_bound = q1 - 1.5 * iqr upper_bound = q3 + 1.5 * iqr mean_value = test[col].mean() test.loc[(test[col] < lower_bound) | (test[col] > upper_bound), col] = mean_value
code
129035325/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id') submission = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') origin = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv').drop('Row#', axis=1) def info(train): pass train.describe().T cont_col = [i for i, j in zip(test.columns, test.dtypes) if j in ['int', 'float']] fig, axes = plt.subplots(4, 4, figsize=(30, 20)) for i, ax in enumerate(axes.flat): sns.kdeplot(ax=ax, data=train, x=cont_col[i], color='#F8766D', label='Train', fill=True) sns.kdeplot(ax=ax, data=test, x=cont_col[i], color='#00BFC4', label='Test', fill=True) ax.set_title(f'{cont_col[i]} distribution') fig.tight_layout() plt.legend()
code
129035325/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id') submission = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') origin = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv').drop('Row#', axis=1)
code
105193974/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv') test = pd.read_csv('../input/feedback-prize-english-language-learning/test.csv') ss = pd.read_csv('../input/feedback-prize-english-language-learning/sample_submission.csv') target_cols = ['cohesion', 'syntax', 'vocabulary', 'phraseology', 'grammar', 'conventions'] train[target_cols].min()
code
105193974/cell_2
[ "text_plain_output_1.png" ]
pwd
code