path
stringlengths 13
17
| screenshot_names
listlengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
73071444/cell_36
|
[
"image_output_1.png"
] |
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data = train_data.drop(['Cabin', 'Ticket', 'PassengerId'], axis=1)
test_data = test_data.drop(['Cabin', 'Ticket'], axis=1)
combine = [train_data, test_data]
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
(pd.crosstab(train_data['Title'], train_data['Sex']), pd.crosstab(test_data['Title'], test_data['Sex']))
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
pd.crosstab(train_data['Title'], train_data['Sex'])
|
code
|
1008801/cell_13
|
[
"text_plain_output_1.png"
] |
import pandas as pd
jan = '../input/1january.csv'
jan_data = pd.read_csv(jan)
jan_data.query('trip_distance == 8000010.0')
jan_data = jan_data[jan_data.trip_distance != 8000010.0]
jan_data['trip_distance'].mean()
|
code
|
1008801/cell_9
|
[
"text_plain_output_1.png"
] |
import pandas as pd
jan = '../input/1january.csv'
jan_data = pd.read_csv(jan)
max(jan_data['trip_distance'])
|
code
|
1008801/cell_19
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
jan = '../input/1january.csv'
jan_data = pd.read_csv(jan)
jan_data.query('trip_distance == 8000010.0')
jan_data = jan_data[jan_data.trip_distance != 8000010.0]
jan_data = jan_data[jan_data.trip_distance < 13.4]
jan_data['trip_distance'].mean()
|
code
|
1008801/cell_18
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
jan = '../input/1january.csv'
jan_data = pd.read_csv(jan)
jan_data.query('trip_distance == 8000010.0')
jan_data = jan_data[jan_data.trip_distance != 8000010.0]
jan_data = jan_data[jan_data.trip_distance < 13.4]
plt.hist(jan_data['trip_distance'], normed=True, bins=[1, 2, 3, 5, 10])
|
code
|
1008801/cell_8
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
jan = '../input/1january.csv'
jan_data = pd.read_csv(jan)
jan_data['trip_distance'][0:10]
|
code
|
1008801/cell_15
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
jan = '../input/1january.csv'
jan_data = pd.read_csv(jan)
jan_data.query('trip_distance == 8000010.0')
jan_data = jan_data[jan_data.trip_distance != 8000010.0]
plt.hist(jan_data['trip_distance'], normed=True, bins=[1, 2, 3, 5, 50])
|
code
|
1008801/cell_10
|
[
"text_plain_output_1.png"
] |
import pandas as pd
jan = '../input/1january.csv'
jan_data = pd.read_csv(jan)
jan_data.query('trip_distance == 8000010.0')
|
code
|
122249728/cell_13
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import os
import pandas as pd
import scipy.stats as stat
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'))
columns_to_drop = ['weight', 'payer_code', 'medical_specialty', 'max_glu_serum', 'A1Cresult']
df = df.drop(columns=columns_to_drop)
df = df.dropna()
missing_values = round(100 * df.isnull().sum() / df.shape[0], 1)
missing_values
def chi_square(df, output, significance_error):
for col in df.columns:
data_crosstab = pd.crosstab(df[col], df[output], margins=True, margins_name='Total')
chi_square = 0
rows = df[col].unique()
columns = df[output].unique()
for i in columns:
for j in rows:
O = data_crosstab[i][j]
E = data_crosstab[i]['Total'] * data_crosstab['Total'][j] / data_crosstab['Total']['Total']
chi_square += (O - E) ** 2 / E
p_value = stat.chi2.sf(chi_square, (len(rows) - 1) * (len(columns) - 1))
dicision = 'rejected' if p_value <= significance_error else 'failed to reject'
chi_square(df, 'readmitted', 0.001)
numerical_features = ['time_in_hospital', 'num_lab_procedures', 'num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']
n_df = df.loc[:, numerical_features].copy()
label_df = df.iloc[:, -1].copy()
id_df = df.iloc[:, :2].copy()
df = df.drop(columns=numerical_features + list(id_df.columns))
df = df.drop(columns='readmitted')
from sklearn.ensemble import RandomForestClassifier as RC
def feature_importance(df, col):
test = pd.get_dummies(df[col])
test = pd.concat([test, df['readmitted']], axis=1)
model = RC(n_estimators=100)
x = test.iloc[:, :-1]
y = test.iloc[:, -1]
model.fit(x, y)
importance = model.feature_importances_
df2 = pd.concat([df, n_df, label_df], axis=1)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
for i in list(df2.keys()):
df2[i] = le.fit_transform(df2[i])
model = RC(n_estimators=100)
x = df2.iloc[:, :-1]
y = df2.iloc[:, -1]
model.fit(x, y)
importance = model.feature_importances_
columns = list(df2.columns[:-1])
pair = []
[pair.append((i, j)) for i, j in zip(importance, columns)]
sorted_pair = sorted(pair)
x = []
y = []
for i in sorted_pair:
x.append(i[0])
y.append(i[1])
plt.figure(figsize=(20, 20))
plt.barh(y, x)
plt.show()
|
code
|
122249728/cell_6
|
[
"text_plain_output_1.png"
] |
import os
import pandas as pd
import scipy.stats as stat
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'))
columns_to_drop = ['weight', 'payer_code', 'medical_specialty', 'max_glu_serum', 'A1Cresult']
df = df.drop(columns=columns_to_drop)
df = df.dropna()
missing_values = round(100 * df.isnull().sum() / df.shape[0], 1)
missing_values
def chi_square(df, output, significance_error):
for col in df.columns:
data_crosstab = pd.crosstab(df[col], df[output], margins=True, margins_name='Total')
chi_square = 0
rows = df[col].unique()
columns = df[output].unique()
for i in columns:
for j in rows:
O = data_crosstab[i][j]
E = data_crosstab[i]['Total'] * data_crosstab['Total'][j] / data_crosstab['Total']['Total']
chi_square += (O - E) ** 2 / E
p_value = stat.chi2.sf(chi_square, (len(rows) - 1) * (len(columns) - 1))
dicision = 'rejected' if p_value <= significance_error else 'failed to reject'
print(f'{col:<25}', f"'{dicision:^20}'", ' -> chisquare-score is:', f'{chi_square:<20}', ' and p value is:', p_value)
chi_square(df, 'readmitted', 0.001)
|
code
|
122249728/cell_19
|
[
"text_plain_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import os
import pandas as pd
import scipy.stats as stat
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'))
columns_to_drop = ['weight', 'payer_code', 'medical_specialty', 'max_glu_serum', 'A1Cresult']
df = df.drop(columns=columns_to_drop)
df = df.dropna()
missing_values = round(100 * df.isnull().sum() / df.shape[0], 1)
missing_values
def chi_square(df, output, significance_error):
for col in df.columns:
data_crosstab = pd.crosstab(df[col], df[output], margins=True, margins_name='Total')
chi_square = 0
rows = df[col].unique()
columns = df[output].unique()
for i in columns:
for j in rows:
O = data_crosstab[i][j]
E = data_crosstab[i]['Total'] * data_crosstab['Total'][j] / data_crosstab['Total']['Total']
chi_square += (O - E) ** 2 / E
p_value = stat.chi2.sf(chi_square, (len(rows) - 1) * (len(columns) - 1))
dicision = 'rejected' if p_value <= significance_error else 'failed to reject'
chi_square(df, 'readmitted', 0.001)
numerical_features = ['time_in_hospital', 'num_lab_procedures', 'num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']
n_df = df.loc[:, numerical_features].copy()
label_df = df.iloc[:, -1].copy()
id_df = df.iloc[:, :2].copy()
df = df.drop(columns=numerical_features + list(id_df.columns))
df = df.drop(columns='readmitted')
from sklearn.ensemble import RandomForestClassifier as RC
def feature_importance(df, col):
test = pd.get_dummies(df[col])
test = pd.concat([test, df['readmitted']], axis=1)
model = RC(n_estimators=100)
x = test.iloc[:, :-1]
y = test.iloc[:, -1]
model.fit(x, y)
importance = model.feature_importances_
df2 = pd.concat([df, n_df, label_df], axis=1)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
for i in list(df2.keys()):
df2[i] = le.fit_transform(df2[i])
model = RC(n_estimators=100)
x = df2.iloc[:, :-1]
y = df2.iloc[:, -1]
model.fit(x, y)
importance = model.feature_importances_
columns = list(df2.columns[:-1])
pair = []
[pair.append((i, j)) for i, j in zip(importance, columns)]
sorted_pair = sorted(pair)
x = []
y = []
for i in sorted_pair:
x.append(i[0])
y.append(i[1])
plt.barh(y, x)
under_failed = y[:21]
df = df.drop(columns=under_failed)
df3 = pd.concat([id_df, df, label_df], axis=1)
df3 = df3.drop(columns='readmitted')
df3.columns
|
code
|
122249728/cell_16
|
[
"text_plain_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import os
import pandas as pd
import scipy.stats as stat
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'))
columns_to_drop = ['weight', 'payer_code', 'medical_specialty', 'max_glu_serum', 'A1Cresult']
df = df.drop(columns=columns_to_drop)
df = df.dropna()
missing_values = round(100 * df.isnull().sum() / df.shape[0], 1)
missing_values
def chi_square(df, output, significance_error):
for col in df.columns:
data_crosstab = pd.crosstab(df[col], df[output], margins=True, margins_name='Total')
chi_square = 0
rows = df[col].unique()
columns = df[output].unique()
for i in columns:
for j in rows:
O = data_crosstab[i][j]
E = data_crosstab[i]['Total'] * data_crosstab['Total'][j] / data_crosstab['Total']['Total']
chi_square += (O - E) ** 2 / E
p_value = stat.chi2.sf(chi_square, (len(rows) - 1) * (len(columns) - 1))
dicision = 'rejected' if p_value <= significance_error else 'failed to reject'
chi_square(df, 'readmitted', 0.001)
numerical_features = ['time_in_hospital', 'num_lab_procedures', 'num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']
n_df = df.loc[:, numerical_features].copy()
label_df = df.iloc[:, -1].copy()
id_df = df.iloc[:, :2].copy()
df = df.drop(columns=numerical_features + list(id_df.columns))
df = df.drop(columns='readmitted')
from sklearn.ensemble import RandomForestClassifier as RC
def feature_importance(df, col):
test = pd.get_dummies(df[col])
test = pd.concat([test, df['readmitted']], axis=1)
model = RC(n_estimators=100)
x = test.iloc[:, :-1]
y = test.iloc[:, -1]
model.fit(x, y)
importance = model.feature_importances_
df2 = pd.concat([df, n_df, label_df], axis=1)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
for i in list(df2.keys()):
df2[i] = le.fit_transform(df2[i])
model = RC(n_estimators=100)
x = df2.iloc[:, :-1]
y = df2.iloc[:, -1]
model.fit(x, y)
importance = model.feature_importances_
columns = list(df2.columns[:-1])
pair = []
[pair.append((i, j)) for i, j in zip(importance, columns)]
sorted_pair = sorted(pair)
x = []
y = []
for i in sorted_pair:
x.append(i[0])
y.append(i[1])
plt.barh(y, x)
under_failed = y[:21]
df = df.drop(columns=under_failed)
df3 = pd.concat([id_df, df, label_df], axis=1)
chi_square(df3, 'readmitted', 0.001)
|
code
|
122249728/cell_3
|
[
"text_plain_output_1.png"
] |
import os
import pandas as pd
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'))
print('The shape of the dataset is {}.\n\n'.format(df.shape))
df.head()
|
code
|
122249728/cell_5
|
[
"text_plain_output_1.png"
] |
import os
import pandas as pd
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'))
columns_to_drop = ['weight', 'payer_code', 'medical_specialty', 'max_glu_serum', 'A1Cresult']
df = df.drop(columns=columns_to_drop)
df = df.dropna()
missing_values = round(100 * df.isnull().sum() / df.shape[0], 1)
missing_values
|
code
|
105191527/cell_42
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
index = india_nutrition_facts[india_nutrition_facts['Sodium (mg)'].isna()].index.tolist()[0]
india_nutrition_facts.loc[index, 'Sodium (mg)'] = 1370.89
food_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('g')]
food_nutrition_facts['Per Serve Size'] = food_nutrition_facts['Per Serve Size'].str.replace(' g', '').astype(float)
food_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (g)'}, inplace=True)
condiment_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] == 'Condiments Menu']
food_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] != 'Condiments Menu']
display(condiment_nutrition_facts)
print(condiment_nutrition_facts.shape)
|
code
|
105191527/cell_21
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
india_nutrition_facts[india_nutrition_facts['Trans fat (g)'] > 10]
|
code
|
105191527/cell_13
|
[
"text_plain_output_1.png"
] |
import pandas as pd
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
india_nutrition_facts.info()
|
code
|
105191527/cell_34
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
df_veg_or_chicken = india_nutrition_facts[india_nutrition_facts['Menu Items'].str.contains('Veg|Chicken')]
chicken_list_index = [104, 109, 111]
veg_list_index = [105, 110, 112]
def highlight_color(s):
if s.name in chicken_list_index:
return ['background-color: #FFC72C'] * 13
elif s.name in veg_list_index:
return ['background-color: #9BEB34'] * 13
else:
return [''] * 13
print('The number of rows: ', df_veg_or_chicken.shape[0])
df_veg_or_chicken.tail(10).style.apply(highlight_color, axis=1)
|
code
|
105191527/cell_23
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
|
code
|
105191527/cell_30
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
india_nutrition_facts[india_nutrition_facts['Sodium (mg)'].isna()]
|
code
|
105191527/cell_40
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
index = india_nutrition_facts[india_nutrition_facts['Sodium (mg)'].isna()].index.tolist()[0]
india_nutrition_facts.loc[index, 'Sodium (mg)'] = 1370.89
food_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('g')]
food_nutrition_facts['Per Serve Size'] = food_nutrition_facts['Per Serve Size'].str.replace(' g', '').astype(float)
food_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (g)'}, inplace=True)
condiment_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] == 'Condiments Menu']
food_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] != 'Condiments Menu']
display(food_nutrition_facts.head())
print(food_nutrition_facts.shape)
|
code
|
105191527/cell_26
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
df_check_ratio = india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')]
df_check_ratio = df_check_ratio.drop(23)
df_check_ratio['Per Serve Size'] = df_check_ratio['Per Serve Size'].str.replace(' g', '').astype(int)
df_check_ratio.rename(columns={'Per Serve Size': 'Per Serve Size (g)'}, inplace=True)
columns = df_check_ratio.columns.tolist()
for c in ['Menu Category', 'Menu Items', 'Added Sugars (g)']:
columns.remove(c)
for c in columns:
df_check_ratio.loc['Ratio', c] = df_check_ratio.loc[25, c] / df_check_ratio.loc[24, c]
fig, ax = plt.subplots(figsize=(15, 6))
plt.rcParams.update({'font.size': 13})
series_check_ratio = df_check_ratio.loc['Ratio', :].dropna()
series_check_ratio = series_check_ratio.apply(lambda x: np.log(x))
series_check_ratio = series_check_ratio.sort_values(ascending=False)
colors = []
for i in range(10):
if i == 0:
colors.append('#DA291C')
elif i >= 7:
colors.append('#1C75DA')
else:
colors.append('lightgray')
ax = sns.barplot(x=series_check_ratio, y=series_check_ratio.index, edgecolor='black', orient='hor', palette=colors)
sns.despine(top=True, right=True, left=True)
ax.tick_params(left=False)
plt.title('The Ratio of Nutrition Contents', fontsize=15, loc='left', y=1.05)
plt.tight_layout()
plt.show()
|
code
|
105191527/cell_48
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
index = india_nutrition_facts[india_nutrition_facts['Sodium (mg)'].isna()].index.tolist()[0]
india_nutrition_facts.loc[index, 'Sodium (mg)'] = 1370.89
food_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('g')]
food_nutrition_facts['Per Serve Size'] = food_nutrition_facts['Per Serve Size'].str.replace(' g', '').astype(float)
food_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (g)'}, inplace=True)
condiment_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] == 'Condiments Menu']
food_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] != 'Condiments Menu']
drink_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('ml')]
drink_nutrition_facts['Per Serve Size'] = drink_nutrition_facts['Per Serve Size'].str.replace(' ml', '').astype(float)
drink_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (ml)'}, inplace=True)
def add_daily_value_features(df, cal):
nutrition_daily_value = ['Energy (kCal)', 'Protein (g)', 'Total fat (g)', 'Sat Fat (g)', 'Trans fat (g)', 'Total carbohydrate (g)', 'Added Sugars (g)']
max_dv_percentage = [None, 20, 30, 10, 1, 50, 2.5]
for i in range(len(nutrition_daily_value)):
if 'Energy' in nutrition_daily_value[i]:
nutrition_dv_name = nutrition_daily_value[i].replace('(kCal)', '')
else:
nutrition_dv_name = nutrition_daily_value[i].replace('(g)', '')
nutrition_dv_name = nutrition_dv_name + '(% Daily Value) - ' + str(cal) + ' kCal'
value = 0.0
if 'Energy' in nutrition_dv_name:
value = df[nutrition_daily_value[i]] / cal * 100
elif 'Protein' in nutrition_dv_name or 'carbohydrate' in nutrition_dv_name:
value = df[nutrition_daily_value[i]] * 4 / (cal * max_dv_percentage[i] / 100) * 100
elif 'Fat' in nutrition_dv_name or 'fat' in nutrition_dv_name:
value = df[nutrition_daily_value[i]] * 9 / (cal * max_dv_percentage[i] / 100) * 100
elif 'Sugars' in nutrition_dv_name:
value = df[nutrition_daily_value[i]] / (cal * max_dv_percentage[i] / 100) * 100
df[nutrition_dv_name] = round(value, 2)
df['Cholesterols (% Daily Value)'] = round(df['Cholesterols (mg)'] / 300 * 100, 2)
df['Sodium (% Daily Value)'] = round(df['Sodium (mg)'] / 2300 * 100, 2)
return df
food_nutrition_facts = add_daily_value_features(food_nutrition_facts, 1500)
drink_nutrition_facts = add_daily_value_features(drink_nutrition_facts, 1500)
condiment_nutrition_facts = add_daily_value_features(condiment_nutrition_facts, 1500)
food_nutrition_facts.head()
|
code
|
105191527/cell_41
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
index = india_nutrition_facts[india_nutrition_facts['Sodium (mg)'].isna()].index.tolist()[0]
india_nutrition_facts.loc[index, 'Sodium (mg)'] = 1370.89
drink_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('ml')]
drink_nutrition_facts['Per Serve Size'] = drink_nutrition_facts['Per Serve Size'].str.replace(' ml', '').astype(float)
drink_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (ml)'}, inplace=True)
display(drink_nutrition_facts.head())
print(drink_nutrition_facts.shape)
|
code
|
105191527/cell_19
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
    """Styler callback: paint the 'Trans fat (g)' column yellow, others blank.

    Applied column-wise to an 8-row ``describe()`` table, so the returned
    list always has 8 entries.
    """
    if s.name == 'Trans fat (g)':
        cell_style = 'background-color: #FFC72C'
    else:
        cell_style = ''
    return [cell_style] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
# Grid of violin plots: one panel per numeric nutrient column.
fig, axs = plt.subplots(figsize=(15, 8))
plt.rcParams.update({'font.size': 9.5})
# Pre-allocate axis slots: 9 grid panels plus 1 manually-placed inset.
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
# 'Trans fat (g)' is plotted separately below so it can be highlighted in red.
num_features.remove('Trans fat (g)')
for i in range(len(num_features)):
    # Fill the 3x5 grid column by column: row = i // 3, col = i % 3.
    ax[i] = plt.subplot2grid((3, 5), (i // 3, i % 3), colspan=1)
    ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient='h', color='#FFC72C')
    plt.title(num_features[i])
# Larger dedicated axes on the right for the trans-fat distribution, in red.
ax[9] = fig.add_axes([0.62, 0.232, 0.38, 0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts['Trans fat (g)'], orient='h', color='#DA291C')
plt.title('Trans fat (g)', fontweight='bold')
plt.suptitle('Distribution of the Content of Each Nutrition', fontsize=15)
plt.tight_layout()
plt.show()
# Free the temporary numeric-only frame.
del df_num_features
|
code
|
105191527/cell_50
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
index = india_nutrition_facts[india_nutrition_facts['Sodium (mg)'].isna()].index.tolist()[0]
india_nutrition_facts.loc[index, 'Sodium (mg)'] = 1370.89
food_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('g')]
food_nutrition_facts['Per Serve Size'] = food_nutrition_facts['Per Serve Size'].str.replace(' g', '').astype(float)
food_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (g)'}, inplace=True)
condiment_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] == 'Condiments Menu']
food_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] != 'Condiments Menu']
drink_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('ml')]
drink_nutrition_facts['Per Serve Size'] = drink_nutrition_facts['Per Serve Size'].str.replace(' ml', '').astype(float)
drink_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (ml)'}, inplace=True)
def add_daily_value_features(df, cal):
    """Add '% Daily Value' columns to *df* for a daily budget of *cal* kCal.

    For each nutrient in ``nutrition_daily_value`` a new column named
    '<Nutrient> (% Daily Value) - <cal> kCal' is added, holding the
    nutrient's share of the calories allotted to it (``max_dv_percentage``
    of *cal*), rounded to 2 decimals.  Cholesterol and sodium use fixed
    absolute limits (300 mg and 2300 mg).  *df* is modified in place and
    also returned.
    """
    # Source columns and, in parallel, the max share of daily calories (in %)
    # for each nutrient; None for Energy, which is compared to *cal* directly.
    nutrition_daily_value = ['Energy (kCal)', 'Protein (g)', 'Total fat (g)', 'Sat Fat (g)', 'Trans fat (g)', 'Total carbohydrate (g)', 'Added Sugars (g)']
    max_dv_percentage = [None, 20, 30, 10, 1, 50, 2.5]
    for i in range(len(nutrition_daily_value)):
        # Build the output column name by dropping the unit suffix.
        if 'Energy' in nutrition_daily_value[i]:
            nutrition_dv_name = nutrition_daily_value[i].replace('(kCal)', '')
        else:
            nutrition_dv_name = nutrition_daily_value[i].replace('(g)', '')
        nutrition_dv_name = nutrition_dv_name + '(% Daily Value) - ' + str(cal) + ' kCal'
        value = 0.0
        # Convert grams to kCal (4 kCal/g for protein & carbs, 9 kCal/g for
        # fats), then express as a percentage of the nutrient's calorie share.
        if 'Energy' in nutrition_dv_name:
            value = df[nutrition_daily_value[i]] / cal * 100
        elif 'Protein' in nutrition_dv_name or 'carbohydrate' in nutrition_dv_name:
            value = df[nutrition_daily_value[i]] * 4 / (cal * max_dv_percentage[i] / 100) * 100
        elif 'Fat' in nutrition_dv_name or 'fat' in nutrition_dv_name:
            value = df[nutrition_daily_value[i]] * 9 / (cal * max_dv_percentage[i] / 100) * 100
        elif 'Sugars' in nutrition_dv_name:
            # NOTE(review): sugars use no kCal/g factor — presumably a
            # grams-based limit of cal*2.5% ; confirm intent.
            value = df[nutrition_daily_value[i]] / (cal * max_dv_percentage[i] / 100) * 100
        df[nutrition_dv_name] = round(value, 2)
    # Cholesterol and sodium limits are absolute, not calorie-dependent.
    df['Cholesterols (% Daily Value)'] = round(df['Cholesterols (mg)'] / 300 * 100, 2)
    df['Sodium (% Daily Value)'] = round(df['Sodium (mg)'] / 2300 * 100, 2)
    return df
food_nutrition_facts = add_daily_value_features(food_nutrition_facts, 1500)
drink_nutrition_facts = add_daily_value_features(drink_nutrition_facts, 1500)
condiment_nutrition_facts = add_daily_value_features(condiment_nutrition_facts, 1500)
condiment_nutrition_facts.head(10)
|
code
|
105191527/cell_52
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
    """Styler callback: paint row index 25 (the corrected record) yellow.

    Applied row-wise to a 13-column frame, so the returned list always has
    13 entries.
    """
    is_target_row = s.name == 25
    return (['background-color: #FFC72C'] if is_target_row else ['']) * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
df_check_ratio = india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')]
df_check_ratio = df_check_ratio.drop(23)
df_check_ratio['Per Serve Size'] = df_check_ratio['Per Serve Size'].str.replace(' g', '').astype(int)
df_check_ratio.rename(columns={'Per Serve Size': 'Per Serve Size (g)'}, inplace=True)
columns = df_check_ratio.columns.tolist()
for c in ['Menu Category', 'Menu Items', 'Added Sugars (g)']:
columns.remove(c)
for c in columns:
df_check_ratio.loc['Ratio', c] = df_check_ratio.loc[25, c] / df_check_ratio.loc[24, c]
fig, ax = plt.subplots(figsize=(15,6))
plt.rcParams.update({'font.size': 13})
series_check_ratio = df_check_ratio.loc["Ratio", :].dropna()
series_check_ratio = series_check_ratio.apply(lambda x: np.log(x))
series_check_ratio = series_check_ratio.sort_values(ascending=False)
colors = []
for i in range(10):
if(i==0):
colors.append("#DA291C")
elif(i>=7):
colors.append("#1C75DA")
else:
colors.append("lightgray")
ax = sns.barplot(x=series_check_ratio, y=series_check_ratio.index, edgecolor='black', orient='hor', palette=colors)
sns.despine(top=True, right=True, left=True)
ax.tick_params(left=False)
plt.title("The Ratio of Nutrition Contents", fontsize=15, loc='left', y=1.05)
plt.tight_layout()
plt.show()
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
index = india_nutrition_facts[india_nutrition_facts['Sodium (mg)'].isna()].index.tolist()[0]
india_nutrition_facts.loc[index, 'Sodium (mg)'] = 1370.89
food_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('g')]
food_nutrition_facts['Per Serve Size'] = food_nutrition_facts['Per Serve Size'].str.replace(' g', '').astype(float)
food_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (g)'}, inplace=True)
condiment_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] == 'Condiments Menu']
food_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] != 'Condiments Menu']
drink_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('ml')]
drink_nutrition_facts['Per Serve Size'] = drink_nutrition_facts['Per Serve Size'].str.replace(' ml', '').astype(float)
drink_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (ml)'}, inplace=True)
def add_daily_value_features(df, cal):
    """Add '% Daily Value' columns to *df* for a daily budget of *cal* kCal.

    For each nutrient in ``nutrition_daily_value`` a new column named
    '<Nutrient> (% Daily Value) - <cal> kCal' is added, holding the
    nutrient's share of the calories allotted to it (``max_dv_percentage``
    of *cal*), rounded to 2 decimals.  Cholesterol and sodium use fixed
    absolute limits (300 mg and 2300 mg).  *df* is modified in place and
    also returned.
    """
    # Source columns and, in parallel, the max share of daily calories (in %)
    # for each nutrient; None for Energy, which is compared to *cal* directly.
    nutrition_daily_value = ['Energy (kCal)', 'Protein (g)', 'Total fat (g)', 'Sat Fat (g)', 'Trans fat (g)', 'Total carbohydrate (g)', 'Added Sugars (g)']
    max_dv_percentage = [None, 20, 30, 10, 1, 50, 2.5]
    for i in range(len(nutrition_daily_value)):
        # Build the output column name by dropping the unit suffix.
        if 'Energy' in nutrition_daily_value[i]:
            nutrition_dv_name = nutrition_daily_value[i].replace('(kCal)', '')
        else:
            nutrition_dv_name = nutrition_daily_value[i].replace('(g)', '')
        nutrition_dv_name = nutrition_dv_name + '(% Daily Value) - ' + str(cal) + ' kCal'
        value = 0.0
        # Convert grams to kCal (4 kCal/g for protein & carbs, 9 kCal/g for
        # fats), then express as a percentage of the nutrient's calorie share.
        if 'Energy' in nutrition_dv_name:
            value = df[nutrition_daily_value[i]] / cal * 100
        elif 'Protein' in nutrition_dv_name or 'carbohydrate' in nutrition_dv_name:
            value = df[nutrition_daily_value[i]] * 4 / (cal * max_dv_percentage[i] / 100) * 100
        elif 'Fat' in nutrition_dv_name or 'fat' in nutrition_dv_name:
            value = df[nutrition_daily_value[i]] * 9 / (cal * max_dv_percentage[i] / 100) * 100
        elif 'Sugars' in nutrition_dv_name:
            # NOTE(review): sugars use no kCal/g factor — presumably a
            # grams-based limit of cal*2.5% ; confirm intent.
            value = df[nutrition_daily_value[i]] / (cal * max_dv_percentage[i] / 100) * 100
        df[nutrition_dv_name] = round(value, 2)
    # Cholesterol and sodium limits are absolute, not calorie-dependent.
    df['Cholesterols (% Daily Value)'] = round(df['Cholesterols (mg)'] / 300 * 100, 2)
    df['Sodium (% Daily Value)'] = round(df['Sodium (mg)'] / 2300 * 100, 2)
    return df
food_nutrition_facts = add_daily_value_features(food_nutrition_facts, 1500)
drink_nutrition_facts = add_daily_value_features(drink_nutrition_facts, 1500)
condiment_nutrition_facts = add_daily_value_features(condiment_nutrition_facts, 1500)
def clean_dv_features(features):
    """Strip the '% Daily Value' / ' - 1500 kCal' boilerplate from names.

    E.g. 'Energy (% Daily Value) - 1500 kCal' -> 'Energy'.  Prints the
    cleaned list and returns it.
    """
    cleaned = []
    for name in features:
        name = name.replace('% Daily Value', '')
        name = name.replace(' - 1500 kCal', '')
        cleaned.append(name.strip(' ()'))
    print(cleaned)
    return cleaned
df_food = food_nutrition_facts.copy()
df_drink = drink_nutrition_facts.copy()
df_condiment = condiment_nutrition_facts.copy()
df_food = df_food.drop('Per Serve Size (g)', axis=1)
df_drink = df_drink.drop('Per Serve Size (ml)', axis=1)
df_condiment = df_condiment.drop('Per Serve Size (g)', axis=1)
daily_value_features = [x for x in df_food.columns.tolist() if 'Daily Value' in x]
clean_daily_value_features = clean_dv_features(daily_value_features)
required_nutritions = ['Energy (% Daily Value) - 1500 kCal', 'Protein (% Daily Value) - 1500 kCal', 'Total fat (% Daily Value) - 1500 kCal', 'Total carbohydrate (% Daily Value) - 1500 kCal']
clean_required_nutritions = clean_dv_features(required_nutritions)
restricted_nutritions = ['Sat Fat (% Daily Value) - 1500 kCal', 'Trans fat (% Daily Value) - 1500 kCal', 'Added Sugars (% Daily Value) - 1500 kCal', 'Cholesterols (% Daily Value)', 'Sodium (% Daily Value)']
clean_restricted_nutritions = clean_dv_features(restricted_nutritions)
num_features = df_food.select_dtypes(include=np.number).columns.tolist()
|
code
|
105191527/cell_49
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
index = india_nutrition_facts[india_nutrition_facts['Sodium (mg)'].isna()].index.tolist()[0]
india_nutrition_facts.loc[index, 'Sodium (mg)'] = 1370.89
food_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('g')]
food_nutrition_facts['Per Serve Size'] = food_nutrition_facts['Per Serve Size'].str.replace(' g', '').astype(float)
food_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (g)'}, inplace=True)
condiment_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] == 'Condiments Menu']
food_nutrition_facts = food_nutrition_facts[food_nutrition_facts['Menu Category'] != 'Condiments Menu']
drink_nutrition_facts = india_nutrition_facts[india_nutrition_facts['Per Serve Size'].str.endswith('ml')]
drink_nutrition_facts['Per Serve Size'] = drink_nutrition_facts['Per Serve Size'].str.replace(' ml', '').astype(float)
drink_nutrition_facts.rename(columns={'Per Serve Size': 'Per Serve Size (ml)'}, inplace=True)
def add_daily_value_features(df, cal):
    """Add '% Daily Value' columns to *df* for a daily budget of *cal* kCal.

    For each nutrient in ``nutrition_daily_value`` a new column named
    '<Nutrient> (% Daily Value) - <cal> kCal' is added, holding the
    nutrient's share of the calories allotted to it (``max_dv_percentage``
    of *cal*), rounded to 2 decimals.  Cholesterol and sodium use fixed
    absolute limits (300 mg and 2300 mg).  *df* is modified in place and
    also returned.
    """
    # Source columns and, in parallel, the max share of daily calories (in %)
    # for each nutrient; None for Energy, which is compared to *cal* directly.
    nutrition_daily_value = ['Energy (kCal)', 'Protein (g)', 'Total fat (g)', 'Sat Fat (g)', 'Trans fat (g)', 'Total carbohydrate (g)', 'Added Sugars (g)']
    max_dv_percentage = [None, 20, 30, 10, 1, 50, 2.5]
    for i in range(len(nutrition_daily_value)):
        # Build the output column name by dropping the unit suffix.
        if 'Energy' in nutrition_daily_value[i]:
            nutrition_dv_name = nutrition_daily_value[i].replace('(kCal)', '')
        else:
            nutrition_dv_name = nutrition_daily_value[i].replace('(g)', '')
        nutrition_dv_name = nutrition_dv_name + '(% Daily Value) - ' + str(cal) + ' kCal'
        value = 0.0
        # Convert grams to kCal (4 kCal/g for protein & carbs, 9 kCal/g for
        # fats), then express as a percentage of the nutrient's calorie share.
        if 'Energy' in nutrition_dv_name:
            value = df[nutrition_daily_value[i]] / cal * 100
        elif 'Protein' in nutrition_dv_name or 'carbohydrate' in nutrition_dv_name:
            value = df[nutrition_daily_value[i]] * 4 / (cal * max_dv_percentage[i] / 100) * 100
        elif 'Fat' in nutrition_dv_name or 'fat' in nutrition_dv_name:
            value = df[nutrition_daily_value[i]] * 9 / (cal * max_dv_percentage[i] / 100) * 100
        elif 'Sugars' in nutrition_dv_name:
            # NOTE(review): sugars use no kCal/g factor — presumably a
            # grams-based limit of cal*2.5% ; confirm intent.
            value = df[nutrition_daily_value[i]] / (cal * max_dv_percentage[i] / 100) * 100
        df[nutrition_dv_name] = round(value, 2)
    # Cholesterol and sodium limits are absolute, not calorie-dependent.
    df['Cholesterols (% Daily Value)'] = round(df['Cholesterols (mg)'] / 300 * 100, 2)
    df['Sodium (% Daily Value)'] = round(df['Sodium (mg)'] / 2300 * 100, 2)
    return df
food_nutrition_facts = add_daily_value_features(food_nutrition_facts, 1500)
drink_nutrition_facts = add_daily_value_features(drink_nutrition_facts, 1500)
condiment_nutrition_facts = add_daily_value_features(condiment_nutrition_facts, 1500)
drink_nutrition_facts.head()
|
code
|
105191527/cell_32
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.startswith('Piri piri Mc Spicy')]
|
code
|
105191527/cell_28
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
|
code
|
105191527/cell_14
|
[
"text_html_output_1.png"
] |
import pandas as pd
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
|
code
|
105191527/cell_37
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
def highlight_color(s):
return ['background-color: #FFC72C'] * 8 if s.name == 'Trans fat (g)' else [''] * 8
india_nutrition_facts.describe().style.apply(highlight_color, axis=0)
fig, axs = plt.subplots(figsize=(15,8))
plt.rcParams.update({'font.size': 9.5})
ax = [None for _ in range(10)]
df_num_features = india_nutrition_facts.select_dtypes(include=[np.number])
num_features = df_num_features.columns.tolist()
num_features.remove("Trans fat (g)")
for i in range(len(num_features)):
ax[i] = plt.subplot2grid((3,5), (i//3,i%3), colspan=1)
ax[i] = sns.violinplot(data=india_nutrition_facts[num_features[i]], orient="h", color='#FFC72C')
plt.title(num_features[i])
ax[9] = fig.add_axes([0.62,0.232,0.38,0.5])
ax[9] = sns.violinplot(data=india_nutrition_facts["Trans fat (g)"], orient="h", color='#DA291C')
plt.title("Trans fat (g)", fontweight="bold")
plt.suptitle("Distribution of the Content of Each Nutrition", fontsize=15)
plt.tight_layout()
plt.show()
del df_num_features
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
for c in ['Trans fat (g)', 'Total carbohydrate (g)', 'Cholesterols (mg)', 'Sat Fat (g)']:
india_nutrition_facts.loc[25, c] = india_nutrition_facts.loc[24, c] * 5 / 3
def highlight_color(s):
return ['background-color: #FFC72C'] * 13 if s.name == 25 else [''] * 13
india_nutrition_facts[india_nutrition_facts['Menu Items'].str.endswith('Chicken Strips')].style.apply(highlight_color, axis=1)
index = india_nutrition_facts[india_nutrition_facts['Sodium (mg)'].isna()].index.tolist()[0]
india_nutrition_facts.loc[index, 'Sodium (mg)'] = 1370.89
india_nutrition_facts.info()
|
code
|
105191527/cell_12
|
[
"text_html_output_1.png"
] |
import pandas as pd
india_nutrition_facts = pd.read_csv('../input/mcdonalds-india-menu-nutrition-facts/India_Menu.csv')
india_nutrition_facts.head()
|
code
|
16137293/cell_11
|
[
"text_html_output_1.png"
] |
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import gc
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
y = data.deal_probability.copy()
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=23)
del data
gc.collect()
predictors = ['num_desc_punct', 'num_words_title', 'words_vs_unique_title', 'num_unique_words_title', 'words_vs_unique_description', 'num_unique_words_description', 'num_words_description', 'price', 'item_seq_number', 'Day of Month', 'weekday']
categorical = ['image_top_1', 'param_1', 'param_2', 'param_3', 'city', 'region', 'category_name', 'parent_category_name', 'user_type']
predictors = predictors + categorical
for feature in categorical:
print(f'Transforming {feature}...')
encoder = LabelEncoder()
X_train[feature].fillna('unknown', inplace=True)
X_test[feature].fillna('unknown', inplace=True)
encoder.fit(X_train[feature].append(X_test[feature]).astype(str))
X_train[feature] = encoder.transform(X_train[feature].astype(str))
X_test[feature] = encoder.transform(X_test[feature].astype(str))
|
code
|
16137293/cell_18
|
[
"text_plain_output_1.png"
] |
# Avito demand prediction: bag-of-words text features + LightGBM regressor.
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import metrics  # FIX: was never imported; final RMSE line raised NameError
import gc
import lightgbm as lgb
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy
# Load the training data and split off a 20% holdout for validation.
data = pd.read_csv('../input/train.csv')
y = data.deal_probability.copy()
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=23)
del data
gc.collect()
# Title: raw counts of 1-2 grams; fit on train+test so both share a vocabulary.
count_vectorizer_title = CountVectorizer(stop_words=stopwords.words('russian'), lowercase=True, ngram_range=(1, 2), max_features=8000)
title_counts = count_vectorizer_title.fit_transform(X_train['title'].append(X_test['title']))
train_title_counts = title_counts[:len(X_train)]
test_title_counts = title_counts[len(X_train):]
# Description: TF-IDF weighted 1-2 grams, larger vocabulary.
count_vectorizer_desc = TfidfVectorizer(stop_words=stopwords.words('russian'), lowercase=True, ngram_range=(1, 2), max_features=17000)
desc_counts = count_vectorizer_desc.fit_transform(X_train['description'].append(X_test['description']))
train_desc_counts = desc_counts[:len(X_train)]
test_desc_counts = desc_counts[len(X_train):]
(train_title_counts.shape, train_desc_counts.shape)
# Hand-engineered numeric features plus label-encoded categoricals.
# NOTE(review): the engineered columns (num_desc_punct etc.) are assumed to
# exist already on X_train/X_test — produced by an earlier cell; confirm.
predictors = ['num_desc_punct', 'num_words_title', 'words_vs_unique_title', 'num_unique_words_title', 'words_vs_unique_description', 'num_unique_words_description', 'num_words_description', 'price', 'item_seq_number', 'Day of Month', 'weekday']
categorical = ['image_top_1', 'param_1', 'param_2', 'param_3', 'city', 'region', 'category_name', 'parent_category_name', 'user_type']
predictors = predictors + categorical
# Log-transform price (0.001 offset guards against log(0)) and fill gaps.
X_train['price'] = np.log(X_train['price'] + 0.001)
X_train['price'].fillna(-1, inplace=True)
X_train['image_top_1'].fillna(-1, inplace=True)
X_test['price'] = np.log(X_test['price'] + 0.001)
X_test['price'].fillna(-1, inplace=True)
X_test['image_top_1'].fillna(-1, inplace=True)
feature_names = np.hstack([count_vectorizer_desc.get_feature_names(), count_vectorizer_title.get_feature_names(), predictors])
# Stack sparse text matrices with the dense predictor columns.
test = scipy.sparse.hstack([test_desc_counts, test_title_counts, X_test.loc[:, predictors]], format='csr')
train = scipy.sparse.hstack([train_desc_counts, train_title_counts, X_train.loc[:, predictors]], format='csr')
# (redundant second 'import lightgbm as lgb' removed; imported at top of cell)
lgbm_params = {'objective': 'regression', 'metric': 'rmse', 'num_leaves': 300, 'learning_rate': 0.02, 'feature_fraction': 0.6, 'bagging_fraction': 0.8, 'verbosity': -1}
lgtrain = lgb.Dataset(train, y_train, feature_name=list(feature_names), categorical_feature=categorical)
lgvalid = lgb.Dataset(test, y_test, feature_name=list(feature_names), categorical_feature=categorical)
lgb_clf = lgb.train(lgbm_params, lgtrain, num_boost_round=5000, valid_sets=[lgtrain, lgvalid], valid_names=['train', 'valid'], early_stopping_rounds=50, verbose_eval=100)
print('Model Evaluation Stage')
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, lgb_clf.predict(test))))
|
code
|
16137293/cell_8
|
[
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] |
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
import gc
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
y = data.deal_probability.copy()
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=23)
del data
gc.collect()
count_vectorizer_title = CountVectorizer(stop_words=stopwords.words('russian'), lowercase=True, ngram_range=(1, 2), max_features=8000)
title_counts = count_vectorizer_title.fit_transform(X_train['title'].append(X_test['title']))
train_title_counts = title_counts[:len(X_train)]
test_title_counts = title_counts[len(X_train):]
count_vectorizer_desc = TfidfVectorizer(stop_words=stopwords.words('russian'), lowercase=True, ngram_range=(1, 2), max_features=17000)
desc_counts = count_vectorizer_desc.fit_transform(X_train['description'].append(X_test['description']))
train_desc_counts = desc_counts[:len(X_train)]
test_desc_counts = desc_counts[len(X_train):]
(train_title_counts.shape, train_desc_counts.shape)
|
code
|
16137293/cell_15
|
[
"text_plain_output_1.png"
] |
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
import gc
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
y = data.deal_probability.copy()
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=23)
del data
gc.collect()
count_vectorizer_title = CountVectorizer(stop_words=stopwords.words('russian'), lowercase=True, ngram_range=(1, 2), max_features=8000)
title_counts = count_vectorizer_title.fit_transform(X_train['title'].append(X_test['title']))
train_title_counts = title_counts[:len(X_train)]
test_title_counts = title_counts[len(X_train):]
count_vectorizer_desc = TfidfVectorizer(stop_words=stopwords.words('russian'), lowercase=True, ngram_range=(1, 2), max_features=17000)
desc_counts = count_vectorizer_desc.fit_transform(X_train['description'].append(X_test['description']))
train_desc_counts = desc_counts[:len(X_train)]
test_desc_counts = desc_counts[len(X_train):]
(train_title_counts.shape, train_desc_counts.shape)
predictors = ['num_desc_punct', 'num_words_title', 'words_vs_unique_title', 'num_unique_words_title', 'words_vs_unique_description', 'num_unique_words_description', 'num_words_description', 'price', 'item_seq_number', 'Day of Month', 'weekday']
categorical = ['image_top_1', 'param_1', 'param_2', 'param_3', 'city', 'region', 'category_name', 'parent_category_name', 'user_type']
predictors = predictors + categorical
X_train['price'] = np.log(X_train['price'] + 0.001)
X_train['price'].fillna(-1, inplace=True)
X_train['image_top_1'].fillna(-1, inplace=True)
X_test['price'] = np.log(X_test['price'] + 0.001)
X_test['price'].fillna(-1, inplace=True)
X_test['image_top_1'].fillna(-1, inplace=True)
feature_names = np.hstack([count_vectorizer_desc.get_feature_names(), count_vectorizer_title.get_feature_names(), predictors])
print('Number of features:', len(feature_names))
|
code
|
16137293/cell_3
|
[
"text_plain_output_1.png"
] |
from sklearn.model_selection import train_test_split
import gc
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
y = data.deal_probability.copy()
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=23)
del data
gc.collect()
X_train.head()
|
code
|
1010626/cell_6
|
[
"image_output_2.png",
"image_output_1.png"
] |
from glob import glob
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from glob import glob
basepath = '../input/train/'
all_cervix_images = []
for path in glob(basepath + '*'):
cervix_type = path.split('/')[-1]
cervix_images = glob(basepath + cervix_type + '/*')
all_cervix_images = all_cervix_images + cervix_images
all_cervix_images = pd.DataFrame({'imagepath': all_cervix_images})
all_cervix_images['filetype'] = all_cervix_images.apply(lambda row: row.imagepath.split('.')[-1], axis=1)
all_cervix_images['type'] = all_cervix_images.apply(lambda row: row.imagepath.split('/')[-2], axis=1)
i = 1
fig = plt.figure(figsize=(12, 8))
for t in all_cervix_images['type'].unique()[:1]:
ax = fig.add_subplot(1, 3, i)
f = all_cervix_images[all_cervix_images['type'] == t]['imagepath'].values[0]
img = cv2.imread(f)
Z = img.reshape((-1, 3))
Z = np.float32(Z)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
'\n Right now the mask has an either in or out policy \n '
K = 8
ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape(img.shape)
plt.imshow(res2, cmap='gray')
plt.show()
plt.title('sample for cervix {}'.format(t))
"\n screen_res = 1280, 720\n scale_width = screen_res[0] / img.shape[1]\n scale_height = screen_res[1] / img.shape[0]\n scale = min(scale_width, scale_height)\n window_width = int(img.shape[1] * scale)\n window_height = int(img.shape[0] * scale)\n cv2.namedWindow('dst_rt', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('dst_rt', window_width, window_height)\n cv2.imshow('dst_rt', res2)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n "
|
code
|
1010626/cell_3
|
[
"text_plain_output_1.png"
] |
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from skimage.io import imread, imshow
import cv2
from subprocess import check_output
print(check_output(['ls', '../input/train']).decode('utf8'))
|
code
|
1010626/cell_5
|
[
"text_html_output_1.png"
] |
from glob import glob
import pandas as pd
from glob import glob
basepath = '../input/train/'
all_cervix_images = []
for path in glob(basepath + '*'):
cervix_type = path.split('/')[-1]
cervix_images = glob(basepath + cervix_type + '/*')
all_cervix_images = all_cervix_images + cervix_images
all_cervix_images = pd.DataFrame({'imagepath': all_cervix_images})
all_cervix_images['filetype'] = all_cervix_images.apply(lambda row: row.imagepath.split('.')[-1], axis=1)
all_cervix_images['type'] = all_cervix_images.apply(lambda row: row.imagepath.split('/')[-2], axis=1)
all_cervix_images.head()
|
code
|
72075238/cell_25
|
[
"image_output_1.png"
] |
import numpy as np
import pandas as pd
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
numeric_column_names = non_numeric_train.columns.tolist()
non_numeric_train.OverallQual.unique()
|
code
|
72075238/cell_4
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
test_dataset.tail()
|
code
|
72075238/cell_23
|
[
"image_output_1.png"
] |
import numpy as np
import pandas as pd
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
numeric_column_names = non_numeric_train.columns.tolist()
correlation = []
for item in numeric_column_names:
correlation.append(non_numeric_train[item].corr(non_numeric_train['SalePrice']))
correlation_list_df = pd.DataFrame({'column': numeric_column_names, 'correlation': correlation})
correlation_list_df = correlation_list_df.sort_values(by='correlation', ascending=False)
print(correlation_list_df)
|
code
|
72075238/cell_6
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
|
code
|
72075238/cell_29
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
numeric_column_names = non_numeric_train.columns.tolist()
correlation = []
for item in numeric_column_names:
correlation.append(non_numeric_train[item].corr(non_numeric_train['SalePrice']))
correlation_list_df = pd.DataFrame({'column': numeric_column_names, 'correlation': correlation})
correlation_list_df = correlation_list_df.sort_values(by='correlation', ascending=False)
plt.xticks(rotation=90)
non_numeric_train.OverallQual.unique()
quality_pivot = non_numeric_train.pivot_table(index='OverallQual', values='SalePrice', aggfunc=np.median)
quality_pivot.plot(kind='bar', color='green')
plt.xlabel('Overall Quality')
plt.ylabel('Median')
plt.xticks(rotation=0)
plt.show()
|
code
|
72075238/cell_39
|
[
"image_output_1.png"
] |
import numpy as np
import pandas as pd
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
numeric_column_names = non_numeric_train.columns.tolist()
correlation = []
for item in numeric_column_names:
correlation.append(non_numeric_train[item].corr(non_numeric_train['SalePrice']))
correlation_list_df = pd.DataFrame({'column': numeric_column_names, 'correlation': correlation})
correlation_list_df = correlation_list_df.sort_values(by='correlation', ascending=False)
submission = pd.DataFrame()
submission['Id'] = test_dataset.Id
submission.head()
|
code
|
72075238/cell_19
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.subplots(figsize=(19, 4))
sns.barplot(x=non_numeric_train['OverallQual'], y=non_numeric_train['SalePrice'])
plt.xticks(rotation=90)
plt.show()
|
code
|
72075238/cell_7
|
[
"text_html_output_1.png"
] |
import pandas as pd
import seaborn as sns
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
sns.displot(train_dataset['SalePrice'])
|
code
|
72075238/cell_15
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
plt.xticks(rotation=90)
plt.subplots(figsize=(19, 4))
sns.barplot(x=non_numeric_train['SaleType'], y=non_numeric_train['SalePrice'])
plt.xticks(rotation=90)
plt.show()
|
code
|
72075238/cell_3
|
[
"image_output_1.png"
] |
import pandas as pd
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.head()
|
code
|
72075238/cell_17
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.subplots(figsize=(19, 4))
sns.barplot(x=train_dataset['Neighborhood'], y=non_numeric_train['SalePrice'])
plt.xticks(rotation=90)
plt.show()
|
code
|
72075238/cell_35
|
[
"text_plain_output_1.png"
] |
from sklearn.linear_model import LinearRegression
clf = LinearRegression(fit_intercept=False, normalize=False, n_jobs=-1)
model = clf.fit(X_train, y_train)
print(model.score(X_test, y_test))
|
code
|
72075238/cell_24
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
numeric_column_names = non_numeric_train.columns.tolist()
correlation = []
for item in numeric_column_names:
correlation.append(non_numeric_train[item].corr(non_numeric_train['SalePrice']))
correlation_list_df = pd.DataFrame({'column': numeric_column_names, 'correlation': correlation})
correlation_list_df = correlation_list_df.sort_values(by='correlation', ascending=False)
plt.subplots(figsize=(19, 4))
sns.barplot(x=correlation_list_df['column'], y=correlation_list_df['correlation'])
plt.xticks(rotation=90)
plt.ylabel('Correlation', fontsize=13)
plt.xlabel('Columns', fontsize=13)
plt.title('Correlation of numeric columns with SalePrice')
plt.show()
|
code
|
72075238/cell_14
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
plt.subplots(figsize=(19, 4))
sns.barplot(x=non_numeric_train['YrSold'], y=non_numeric_train['SalePrice'])
plt.xticks(rotation=90)
plt.show()
|
code
|
72075238/cell_27
|
[
"image_output_1.png"
] |
import numpy as np
import pandas as pd
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
numeric_column_names = non_numeric_train.columns.tolist()
non_numeric_train.OverallQual.unique()
quality_pivot = non_numeric_train.pivot_table(index='OverallQual', values='SalePrice', aggfunc=np.median)
quality_pivot
|
code
|
72075238/cell_37
|
[
"text_html_output_1.png"
] |
from sklearn.linear_model import LinearRegression
clf = LinearRegression(fit_intercept=False, normalize=False, n_jobs=-1)
model = clf.fit(X_train, y_train)
result = model.predict(X_test)
result.shape
|
code
|
72075238/cell_12
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
train_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test_dataset = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train_dataset.fillna(0, inplace=True)
test_dataset.fillna(0, inplace=True)
train_dataset.isnull().sum()
copy_train_dataset = train_dataset.copy()
def non_numeric_data(df):
columns = df.columns.values
for column in columns:
text_digit = {}
def convert_to_int(key):
return text_digit[key]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_content = df[column].values.tolist()
unique_elements = set(column_content)
x = 0
for unique in unique_elements:
if unique not in text_digit:
text_digit[unique] = x
x = x + 1
df[column] = list(map(convert_to_int, df[column]))
return df
non_numeric_train = non_numeric_data(copy_train_dataset)
non_numeric_train.head()
|
code
|
88083684/cell_9
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tydiqa-bengali-telugu/tydiqa_secondary.csv')
df
df.columns
temp = df.copy()
temp['language'] = [i.split('-')[0] for i in df['id']]
temp['language'].nunique()
|
code
|
88083684/cell_4
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tydiqa-bengali-telugu/tydiqa_secondary.csv')
df
df.columns
df['id']
|
code
|
88083684/cell_2
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tydiqa-bengali-telugu/tydiqa_secondary.csv')
df
|
code
|
88083684/cell_11
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tydiqa-bengali-telugu/tydiqa_secondary.csv')
df
df.columns
temp = df.copy()
temp['language'] = [i.split('-')[0] for i in df['id']]
temp
|
code
|
88083684/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
88083684/cell_7
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tydiqa-bengali-telugu/tydiqa_secondary.csv')
df
df.columns
temp = df.copy()
temp['language'] = [i.split('-')[0] for i in df['id']]
temp['language']
|
code
|
88083684/cell_3
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tydiqa-bengali-telugu/tydiqa_secondary.csv')
df
df.columns
|
code
|
88083684/cell_14
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tydiqa-bengali-telugu/tydiqa_secondary.csv')
df
df.columns
temp = df.copy()
temp['language'] = [i.split('-')[0] for i in df['id']]
temp.columns
out = temp[temp['language'] == 'telugu']
out
|
code
|
88083684/cell_10
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tydiqa-bengali-telugu/tydiqa_secondary.csv')
df
df.columns
temp = df.copy()
temp['language'] = [i.split('-')[0] for i in df['id']]
temp['language'].value_counts()
|
code
|
88083684/cell_12
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tydiqa-bengali-telugu/tydiqa_secondary.csv')
df
df.columns
temp = df.copy()
temp['language'] = [i.split('-')[0] for i in df['id']]
temp.columns
|
code
|
88083684/cell_5
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tydiqa-bengali-telugu/tydiqa_secondary.csv')
df
df.columns
df['id'][100].split('-')[0]
|
code
|
88081459/cell_21
|
[
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report, accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import plot_confusion_matrix
tfidf_vec = TfidfVectorizer().fit(X_train)
X_train_vec, X_test_vec = (tfidf_vec.transform(X_train), tfidf_vec.transform(X_test))
model = MultinomialNB()
model.fit(X_train_vec, y_train)
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
model = RandomForestClassifier()
model.fit(X_train_vec, y_train)
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train_vec, y_train)
print(classification_report(y_test, model.predict(X_test_vec)))
plot_confusion_matrix(model, X_test_vec, y_test)
|
code
|
88081459/cell_13
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
spam_len = sms.loc[sms['label_num'] == 1, 'Length']
ham_len = sms.loc[sms['label_num'] == 0, 'Length']
spam_len.mean()
sns.countplot(spam_len)
|
code
|
88081459/cell_9
|
[
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
print('Average Length of a text is', round(sms['Length'].mean()))
print('Standard deviation of length is', round(sms['Length'].std()))
|
code
|
88081459/cell_4
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
sms.head()
|
code
|
88081459/cell_20
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report, accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report, accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import plot_confusion_matrix
tfidf_vec = TfidfVectorizer().fit(X_train)
X_train_vec, X_test_vec = (tfidf_vec.transform(X_train), tfidf_vec.transform(X_test))
model = MultinomialNB()
model.fit(X_train_vec, y_train)
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
model = RandomForestClassifier()
model.fit(X_train_vec, y_train)
print(classification_report(y_test, model.predict(X_test_vec)))
plot_confusion_matrix(model, X_test_vec, y_test)
|
code
|
88081459/cell_6
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
sns.countplot(sms['label'], palette=sns.color_palette('Set2'))
|
code
|
88081459/cell_2
|
[
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms.head()
|
code
|
88081459/cell_11
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
spam_len = sms.loc[sms['label_num'] == 1, 'Length']
ham_len = sms.loc[sms['label_num'] == 0, 'Length']
spam_len.mean()
|
code
|
88081459/cell_19
|
[
"text_plain_output_1.png"
] |
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report, accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report, accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import plot_confusion_matrix
tfidf_vec = TfidfVectorizer().fit(X_train)
X_train_vec, X_test_vec = (tfidf_vec.transform(X_train), tfidf_vec.transform(X_test))
model = MultinomialNB()
model.fit(X_train_vec, y_train)
print(classification_report(y_test, model.predict(X_test_vec)))
plot_confusion_matrix(model, X_test_vec, y_test)
|
code
|
88081459/cell_7
|
[
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
sms['label'].value_counts()
|
code
|
88081459/cell_18
|
[
"text_plain_output_1.png"
] |
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
spam_len = sms.loc[sms['label_num'] == 1, 'Length']
ham_len = sms.loc[sms['label_num'] == 0, 'Length']
from sklearn.model_selection import train_test_split
X, y = (np.asanyarray(sms['text']), np.asanyarray(sms['label_num']))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=24)
(len(X_train), len(X_test))
|
code
|
88081459/cell_8
|
[
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
# Load and clean the SMS corpus, then add target and length columns.
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms = sms.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'])
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
# Distribution of message lengths across the whole corpus.
sns.countplot(sms['Length'], palette=sns.color_palette('Set2'))
|
code
|
88081459/cell_15
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
# Prepare the SMS corpus: tidy columns, numeric target, message length.
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms = sms.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'])
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
# Message-length series split by class.
spam_len = sms.loc[sms['label_num'] == 1, 'Length']
ham_len = sms.loc[sms['label_num'] == 0, 'Length']
spam_len.mean()
ham_len.mean()
# Length distribution of the ham messages.
sns.countplot(ham_len)
|
code
|
88081459/cell_16
|
[
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Build the cleaned SMS frame and split message lengths by class.
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms = sms.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'])
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
spam_len = sms.loc[sms['label_num'] == 1, 'Length']
ham_len = sms.loc[sms['label_num'] == 0, 'Length']
ham_len.mean()
# Summarise the ham message-length distribution.
print('Average Length of a text of ham mail is', round(ham_len.mean()))
print('Standard deviation of length of ham mail is', round(ham_len.std()))
|
code
|
88081459/cell_3
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the SMS spam data, rename the opaque columns, drop the empty ones,
# and preview the first rows.
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms = sms.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'])
sms.head()
|
code
|
88081459/cell_17
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
# Clean the SMS corpus and split message lengths by class label.
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms = sms.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'])
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
spam_len = sms.loc[sms['label_num'] == 1, 'Length']
ham_len = sms.loc[sms['label_num'] == 0, 'Length']
# Translation table built once at module level: no character remapping, just
# deletion of all ASCII punctuation. (The original rebuilt the table on every
# call — wasteful under Series.apply — and passed a redundant ' ' -> ' '
# mapping; behavior is unchanged.)
_PUNC_TABLE = str.maketrans('', '', string.punctuation)

def remove_punc(test_str):
    """Return *test_str* with every ASCII punctuation character removed."""
    return test_str.translate(_PUNC_TABLE)
# Strip punctuation from every message in place and display the result.
sms['text'] = sms['text'].map(remove_punc)
sms['text']
|
code
|
88081459/cell_14
|
[
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Rebuild the cleaned frame and report spam message-length statistics.
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms = sms.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'])
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
spam_len = sms.loc[sms['label_num'] == 1, 'Length']
ham_len = sms.loc[sms['label_num'] == 0, 'Length']
spam_len.mean()
print('Average Length of a text of spam mail is', round(spam_len.mean()))
print('Standard deviation of length of spam mail is', round(spam_len.std()))
|
code
|
88081459/cell_12
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Average character length of ham messages in the cleaned corpus.
sms = pd.read_csv('/kaggle/input/sms-spam-collection-dataset/spam.csv', encoding='latin-1')
sms = sms.rename(columns={'v1': 'label', 'v2': 'text'})
sms = sms.drop(columns=['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'])
sms['label_num'] = sms['label'].map({'ham': 0, 'spam': 1})
sms['Length'] = sms['text'].apply(len)
spam_len = sms.loc[sms['label_num'] == 1, 'Length']
ham_len = sms.loc[sms['label_num'] == 0, 'Length']
ham_len.mean()
|
code
|
105197650/cell_13
|
[
"text_html_output_1.png"
] |
from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
# Load the monthly beer production series and index it by month.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
# Hold out the last 12 months for evaluation.
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
# Seasonal ARIMA with a 12-month seasonal period.
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
# Forecast the held-out year and plot it against the actual values.
arima_pred = arima_result.predict(start=len(train_data), end=len(df) - 1, typ='levels').rename('ARIMA Predictions')
arima_pred
test_data['Monthly beer production'].plot(figsize=(16, 5), legend=True)
arima_pred.plot(legend=True)
# BUG FIX: the original did `plt.xlabel = 'Dates'`, which overwrites the
# pyplot functions instead of labelling the axes; call them properly, and
# after plotting so the labels actually appear.
plt.xlabel('Dates')
plt.ylabel('Total Production')
|
code
|
105197650/cell_6
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
# Plot the full monthly beer production series.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
plt.figure(figsize=(18, 9))
plt.plot(df.index, df['Monthly beer production'], linestyle='-')
# BUG FIX: `plt.xlabel = 'Dates'` assigned a string over the pyplot function
# (labelling nothing and breaking later calls); invoke the functions instead.
plt.xlabel('Dates')
plt.ylabel('Total Production')
plt.show()
|
code
|
105197650/cell_2
|
[
"image_output_1.png"
] |
import pandas as pd
# Load the electricity production time series and preview the first rows.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
data.head()
|
code
|
105197650/cell_11
|
[
"text_html_output_1.png"
] |
from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
# Fit a seasonal ARIMA to all but the last 12 months of beer production and
# show the model summary.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
# BUG FIX: removed `plt.xlabel = 'Dates'` / `plt.ylabel = ...` — assigning
# strings to these names overwrites the pyplot functions (breaking later
# labelling calls) and had no effect here, since this cell draws no plot.
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
|
code
|
105197650/cell_19
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
# Split off the last 12 months as a hold-out set and display it.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
# BUG FIX: removed `plt.xlabel = 'Dates'` / `plt.ylabel = ...` — the
# assignments clobber the matplotlib functions and label nothing.
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
test_data
|
code
|
105197650/cell_1
|
[
"text_plain_output_1.png"
] |
import pandas as pd
from pandas.plotting import autocorrelation_plot
from pandas import DataFrame
from pandas import concat
import numpy as np
from math import sqrt
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.stattools import pacf
from statsmodels.tsa.arima_model import ARIMA
from scipy.stats import boxcox
import seaborn as sns
sns.set_style('whitegrid')
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
from matplotlib import colors
import os
# List every file shipped with the Kaggle input directory.
for root, _, files in os.walk('/kaggle/input'):
    for fname in files:
        print(os.path.join(root, fname))
|
code
|
105197650/cell_18
|
[
"text_plain_output_4.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] |
from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
# Fit the seasonal ARIMA, forecast the hold-out year, and attach the
# forecasts to the test frame.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
# BUG FIX: removed the `plt.xlabel = ...` assignments (they clobber pyplot).
train_data = df[:len(df) - 12]
# .copy() so the column assignment below writes to an independent frame and
# does not raise pandas' SettingWithCopyWarning (visible in the original run).
test_data = df[len(df) - 12:].copy()
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
arima_pred = arima_result.predict(start=len(train_data), end=len(df) - 1, typ='levels').rename('ARIMA Predictions')
arima_pred
test_data['ARIMA_Predictions'] = arima_pred
|
code
|
105197650/cell_8
|
[
"text_html_output_1.png"
] |
from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib.pyplot as plt
import pandas as pd
# Additive seasonal decomposition of the beer production series.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
# BUG FIX: removed `plt.xlabel = 'Dates'` / `plt.ylabel = ...` — these
# assignments overwrite the pyplot functions rather than labelling anything.
plt.figure(figsize=(25, 5))
a = seasonal_decompose(df['Monthly beer production'], model='add')
# Seasonal, trend and residual components side by side.
plt.subplot(1, 3, 1)
plt.plot(a.seasonal)
plt.subplot(1, 3, 2)
plt.plot(a.trend)
plt.subplot(1, 3, 3)
plt.plot(a.resid)
plt.show()
|
code
|
105197650/cell_15
|
[
"image_output_1.png"
] |
from statsmodels.tools.eval_measures import rmse
from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
# Evaluate the ARIMA forecasts on the hold-out year with RMSE/MSE, shown
# alongside the series mean for scale.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
# BUG FIX: removed the `plt.xlabel = ...` assignments (they clobber pyplot).
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
arima_pred = arima_result.predict(start=len(train_data), end=len(df) - 1, typ='levels').rename('ARIMA Predictions')
arima_pred
arima_rmse_error = rmse(test_data['Monthly beer production'], arima_pred)
arima_mse_error = arima_rmse_error ** 2
mean_value = df['Monthly beer production'].mean()
print(f'MSE Error: {arima_mse_error}\nRMSE Error: {arima_rmse_error}\nMean: {mean_value}')
|
code
|
105197650/cell_3
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.head()
|
code
|
105197650/cell_17
|
[
"image_output_1.png"
] |
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
# Forecast the hold-out year and plot forecasts against the true values.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
a = seasonal_decompose(df['Monthly beer production'], model='add')
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
arima_pred = arima_result.predict(start=len(train_data), end=len(df) - 1, typ='levels').rename('ARIMA Predictions')
arima_pred
plt.figure(figsize=(10, 6))
plt.plot(test_data, label='true values', color='blue')
plt.plot(arima_pred, label='forecasts', color='orange')
plt.title('ARIMA Model', size=14)
# BUG FIX: the cell originally did `plt.xlabel = 'Dates'` before plotting,
# which replaces the pyplot functions; label the axes with real calls after
# plotting so the labels show up.
plt.xlabel('Dates')
plt.ylabel('Total Production')
plt.legend(loc='upper left')
plt.show()
|
code
|
105197650/cell_12
|
[
"text_html_output_1.png"
] |
from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
# Fit the seasonal ARIMA and produce forecasts for the hold-out year.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
# BUG FIX: removed `plt.xlabel = 'Dates'` / `plt.ylabel = ...` — assigning
# strings to these names clobbers the pyplot functions and labels nothing.
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
arima_pred = arima_result.predict(start=len(train_data), end=len(df) - 1, typ='levels').rename('ARIMA Predictions')
arima_pred
|
code
|
105197650/cell_5
|
[
"image_output_1.png"
] |
import pandas as pd
# Load both series; parse the beer-production months and use them as the index.
data = pd.read_csv('../input/time-series-datasets/Electric_Production.csv')
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df['Month'] = pd.to_datetime(df['Month'])
df = df.set_index('Month')
df.head()
|
code
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.