column             type             values
path               stringlengths    13 to 17
screenshot_names   sequencelengths  1 to 873
code               stringlengths    0 to 40.4k
cell_type          stringclasses    1 value
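Each record below is one Kaggle notebook cell: its path (notebook id / cell name), the names of its rendered output screenshots, its source code, and its cell type (always "code"). As a minimal sketch of how records with this schema might be iterated using the Hugging Face datasets library; the dataset id below is a placeholder, not the real repository name:

from datasets import load_dataset

# Placeholder dataset id for illustration only; substitute the actual repository.
ds = load_dataset('user/notebook-cells', split='train')
for record in ds.select(range(3)):
    print(record['path'], record['screenshot_names'])
    print(record['code'][:80])  # first 80 characters of the cell source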
106198328/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

stock_data = pd.read_csv('../input/ibovespa-index/ibovespa_indexq.csv')
stock_data.info()
code
106198328/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

stock_data = pd.read_csv('../input/ibovespa-index/ibovespa_indexq.csv')
stock_data.isna().mean()
round(stock_data.isna().mean().sum(), 2)
stock_data.isna().sum().sum()
stock_data.isnull().sum()
perc = 1
min_count = int((100 - perc) / 100 * stock_data.shape[0] + 1)
stock_data = stock_data.dropna(axis=1, thresh=min_count)
stock_data.isna().mean().sum()
for col in stock_data:
    stock_data.loc[stock_data[col].isnull(), col] = stock_data[col].mean()
stock_data.isnull().sum()
code
106198328/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

stock_data = pd.read_csv('../input/ibovespa-index/ibovespa_indexq.csv')
stock_data.isna().mean()
round(stock_data.isna().mean().sum(), 2)
stock_data.isna().sum().sum()
stock_data.isnull().sum()
perc = 1
min_count = int((100 - perc) / 100 * stock_data.shape[0] + 1)
stock_data = stock_data.dropna(axis=1, thresh=min_count)
stock_data.isna().mean().sum()
code
106198328/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

stock_data = pd.read_csv('../input/ibovespa-index/ibovespa_indexq.csv')
stock_data.isna().mean()
round(stock_data.isna().mean().sum(), 2)
code
106198328/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

stock_data = pd.read_csv('../input/ibovespa-index/ibovespa_indexq.csv')
stock_data.isna().mean()
round(stock_data.isna().mean().sum(), 2)
stock_data.isna().sum().sum()
stock_data.isnull().sum()
code
106198328/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

stock_data = pd.read_csv('../input/ibovespa-index/ibovespa_indexq.csv')
stock_data
code
130014120/cell_4
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

train_file_path = '../input/house-prices-advanced-regression-techniques/train.csv'  # defined in cell_3 of this notebook
ds_df = pd.read_csv(train_file_path)
print('Full train dataset shape is {}'.format(ds_df.shape))
print('Dataset head:')
ds_df.head(10)
code
130014120/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

train_file_path = '../input/house-prices-advanced-regression-techniques/train.csv'  # defined in cell_3 of this notebook
ds_df = pd.read_csv(train_file_path)
(ds_df['SalePrice'].max() - ds_df['SalePrice'].min()) / ds_df.shape[0]
code
130014120/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train_file_path = '../input/house-prices-advanced-regression-techniques/train.csv'  # defined in cell_3 of this notebook
ds_df = pd.read_csv(train_file_path)
(ds_df['SalePrice'].max() - ds_df['SalePrice'].min()) / ds_df.shape[0]
sns.kdeplot(data=ds_df['SalePrice'])
code
130014120/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns

train_file_path = '../input/house-prices-advanced-regression-techniques/train.csv'  # defined in cell_3 of this notebook
ds_df = pd.read_csv(train_file_path)
(ds_df['SalePrice'].max() - ds_df['SalePrice'].min()) / ds_df.shape[0]
sns.kdeplot(np.log10(ds_df['SalePrice']))
code
130014120/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import math

train_file_path = '../input/house-prices-advanced-regression-techniques/train.csv'
test_file_path = '../input/house-prices-advanced-regression-techniques/test.csv'
print('Done')
code
130014120/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train_file_path = '../input/house-prices-advanced-regression-techniques/train.csv'  # defined in cell_3 of this notebook
ds_df = pd.read_csv(train_file_path)
ds_df['SalePrice'].isna().sum()
code
122249621/cell_9
[ "text_plain_output_1.png" ]
! pip install google-colab
code
122249621/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/data/BBox_List_2017.csv')
df_train = df_train.rename(columns={'Image Index': 'filename', 'Bbox [x': 'x', 'h]': 'h', 'Finding Label': 'class'})
df_train = df_train.drop(['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'], axis=1)
df_train.head()
code
122249621/cell_2
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/data/BBox_List_2017.csv')
df_train.head()
code
122249621/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/data/BBox_List_2017.csv')
df_train = df_train.rename(columns={'Image Index': 'filename', 'Bbox [x': 'x', 'h]': 'h', 'Finding Label': 'class'})
df_train = df_train.drop(['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'], axis=1)
df_train['bbox'] = df_train[['x', 'y', 'w', 'h']].apply(list, axis=1)
df_train = df_train.drop(columns=['x', 'y', 'w', 'h']).groupby('filename', as_index=False).agg(list)
df_train.head(1)
code
122249621/cell_3
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/data/BBox_List_2017.csv')
print(f"number of images: {df_train['Image Index'].nunique()}")
df_train.head(1)
code
106192159/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
code
106192159/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt[['Pid1_', 'Pid2_']] = tt['PassengerId'].str.split('_', expand=True).astype('int')
tt
code
106192159/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
code
106192159/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
categorical = tt.select_dtypes(['object', 'category']).columns.to_list()
categorical
code
106192159/cell_30
[ "text_html_output_1.png" ]
from feature_engine.imputation import MeanMedianImputer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
categorical = tt.select_dtypes(['object', 'category']).columns.to_list()
categorical
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
median_imputer = MeanMedianImputer(imputation_method='median', variables=numerical)
median_imputer.fit(tt[numerical])
code
106192159/cell_33
[ "image_output_1.png" ]
from feature_engine.imputation import MeanMedianImputer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
# engineered in cells 13, 15 and 16 of this notebook; repeated here so the cell runs standalone
tt[['Pid1_', 'Pid2_']] = tt['PassengerId'].str.split('_', expand=True).astype('int')
tt['sum_exp_'] = tt['RoomService'] + tt['FoodCourt'] + tt['ShoppingMall'] + tt['Spa'] + tt['VRDeck']
tt['mean_exp_'] = tt['sum_exp_'] / tt['Pid2_']
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
categorical = tt.select_dtypes(['object', 'category']).columns.to_list()
categorical
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
median_imputer = MeanMedianImputer(imputation_method='median', variables=numerical)
median_imputer.fit(tt[numerical])
tt_num = median_imputer.transform(tt[numerical])
tt_num
from feature_engine import transformation as vt
power_tf = vt.YeoJohnsonTransformer(variables=['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'sum_exp_', 'mean_exp_'])
power_tf.fit(tt_num)
tt_num = power_tf.transform(tt_num)
tt_num
code
106192159/cell_20
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
code
106192159/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_train.isnull().sum()
df_train.isnull().mean() * 100
code
106192159/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
# engineered in cells 13, 15 and 16 of this notebook; repeated here so the cell runs standalone
tt[['Pid1_', 'Pid2_']] = tt['PassengerId'].str.split('_', expand=True).astype('int')
tt['sum_exp_'] = tt['RoomService'] + tt['FoodCourt'] + tt['ShoppingMall'] + tt['Spa'] + tt['VRDeck']
tt['mean_exp_'] = tt['sum_exp_'] / tt['Pid2_']
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
categorical = tt.select_dtypes(['object', 'category']).columns.to_list()
categorical
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
columns = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'sum_exp_', 'mean_exp_']
q, r = divmod(len(columns), 2)
fig, ax = plt.subplots(q, 2, figsize=(18, 10))
for i in range(0, len(columns)):
    q, r = divmod(i, 2)
    sns.kdeplot(data=tt[numerical], x=columns[i], ax=ax[q, r])
plt.show()
columns = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'sum_exp_', 'mean_exp_']
q, r = divmod(len(columns), 2)
fig, ax = plt.subplots(q, 2, figsize=(18, 10))
for i in range(0, len(columns)):
    q, r = divmod(i, 2)
    sns.boxplot(data=tt[numerical], x=columns[i], ax=ax[q, r])
plt.show()
code
106192159/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
categorical = tt.select_dtypes(['object', 'category']).columns.to_list()
categorical
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt[numerical]
code
106192159/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
for i in tt.columns:
    print('{} ------------------------------------> {}'.format(i, tt[i].nunique()))
code
106192159/cell_1
[ "text_plain_output_1.png" ]
!pip install feature_engine
code
106192159/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_test.isnull().mean() * 100
code
106192159/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
code
106192159/cell_32
[ "image_output_1.png" ]
from feature_engine.imputation import MeanMedianImputer
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
# engineered in cells 13, 15 and 16 of this notebook; repeated here so the cell runs standalone
tt[['Pid1_', 'Pid2_']] = tt['PassengerId'].str.split('_', expand=True).astype('int')
tt['sum_exp_'] = tt['RoomService'] + tt['FoodCourt'] + tt['ShoppingMall'] + tt['Spa'] + tt['VRDeck']
tt['mean_exp_'] = tt['sum_exp_'] / tt['Pid2_']
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
categorical = tt.select_dtypes(['object', 'category']).columns.to_list()
categorical
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
columns = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'sum_exp_', 'mean_exp_']
q, r = divmod(len(columns), 2)
fig, ax = plt.subplots(q, 2, figsize=(18, 10))
for i in range(0, len(columns)):
    q, r = divmod(i, 2)
    sns.kdeplot(data=tt[numerical], x=columns[i], ax=ax[q, r])
plt.show()
columns = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'sum_exp_', 'mean_exp_']
q, r = divmod(len(columns), 2)
fig, ax = plt.subplots(q, 2, figsize=(18, 10))
for i in range(0, len(columns)):
    q, r = divmod(i, 2)
    sns.boxplot(data=tt[numerical], x=columns[i], ax=ax[q, r])
plt.show()
median_imputer = MeanMedianImputer(imputation_method='median', variables=numerical)
median_imputer.fit(tt[numerical])
tt_num = median_imputer.transform(tt[numerical])
tt_num
columns = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'sum_exp_', 'mean_exp_']
q, r = divmod(len(columns), 2)
fig, ax = plt.subplots(q, 2, figsize=(18, 10))
for i in range(0, len(columns)):
    q, r = divmod(i, 2)
    sns.kdeplot(data=tt, x=columns[i], ax=ax[q, r])
    sns.kdeplot(data=tt_num, x=columns[i], ax=ax[q, r], color='red')
plt.show()
code
106192159/cell_28
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
# engineered in cells 13, 15 and 16 of this notebook; repeated here so the cell runs standalone
tt[['Pid1_', 'Pid2_']] = tt['PassengerId'].str.split('_', expand=True).astype('int')
tt['sum_exp_'] = tt['RoomService'] + tt['FoodCourt'] + tt['ShoppingMall'] + tt['Spa'] + tt['VRDeck']
tt['mean_exp_'] = tt['sum_exp_'] / tt['Pid2_']
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
categorical = tt.select_dtypes(['object', 'category']).columns.to_list()
categorical
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
columns = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'sum_exp_', 'mean_exp_']
q, r = divmod(len(columns), 2)
fig, ax = plt.subplots(q, 2, figsize=(18, 10))
for i in range(0, len(columns)):
    q, r = divmod(i, 2)
    sns.kdeplot(data=tt[numerical], x=columns[i], ax=ax[q, r])
plt.show()
code
106192159/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['sum_exp_'] = tt['RoomService'] + tt['FoodCourt'] + tt['ShoppingMall'] + tt['Spa'] + tt['VRDeck']
tt
code
106192159/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
# Pid2_ and sum_exp_ are created in cells 13 and 15 of this notebook; repeated here so the cell runs standalone
tt[['Pid1_', 'Pid2_']] = tt['PassengerId'].str.split('_', expand=True).astype('int')
tt['sum_exp_'] = tt['RoomService'] + tt['FoodCourt'] + tt['ShoppingMall'] + tt['Spa'] + tt['VRDeck']
tt['mean_exp_'] = tt['sum_exp_'] / tt['Pid2_']
tt
code
106192159/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
code
106192159/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
code
106192159/cell_31
[ "text_plain_output_1.png" ]
from feature_engine.imputation import MeanMedianImputer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
categorical = tt.select_dtypes(['object', 'category']).columns.to_list()
categorical
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
median_imputer = MeanMedianImputer(imputation_method='median', variables=numerical)
median_imputer.fit(tt[numerical])
tt_num = median_imputer.transform(tt[numerical])
tt_num
code
106192159/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
categorical = tt.select_dtypes(['object', 'category']).columns.to_list()
categorical
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
code
106192159/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt[['Fname_', 'Lname_']] = tt['Name'].str.split(' ', expand=True)
tt
code
106192159/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
code
106192159/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt['Age_cat_'] = pd.cut(tt.Age, bins=[0, 5, 12, 18, 50, 150], labels=['Toddler/Baby', 'Child', 'Teen', 'Adult', 'Elderly'])
tt['Age_cat_']
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt.isnull().mean() * 100
tt = tt.set_index('PassengerId')
tt
categorical = tt.select_dtypes(['object', 'category']).columns.to_list()
categorical
numerical = tt.select_dtypes(exclude=['object', 'category']).columns.to_list()
numerical
tt[numerical].columns
code
106192159/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_test = pd.read_csv('../input/spaceship-titanic/test.csv')
df_test
df_train.isnull().sum()
df_train.isnull().mean() * 100
df_test.isnull().mean() * 100
tt = pd.concat([df_train.drop('Transported', axis=1), df_test])
tt
tt[['Cabin1_', 'Cabin2_', 'Cabin3_']] = tt['Cabin'].str.split('/', expand=True)
tt
code
106192159/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/spaceship-titanic/train.csv')
df_train
df_train.isnull().sum()
code
88102916/cell_21
[ "text_plain_output_1.png" ]
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
dataset.plot(kind='box', subplots=True, layout=(3, 3), figsize=(15, 8))
plt.show()
code
88102916/cell_13
[ "text_plain_output_1.png" ]
import csv
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
code
88102916/cell_9
[ "text_plain_output_1.png" ]
import csv
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
code
88102916/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
corr = dataset.corr()
dataset.head()
code
88102916/cell_34
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import Normalizer
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
corr = dataset.corr()
X = dataset.drop('Outcome', axis=1).values
y = dataset['Outcome'].values
print(Normalizer().fit_transform(X)[0])
code
88102916/cell_30
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
corr = dataset.corr()
X = dataset.drop('Outcome', axis=1).values
y = dataset['Outcome'].values
mms = MinMaxScaler(feature_range=(0, 1))
mms.fit(X)
X_scaled = mms.transform(X)
print(X[0])  # same row before and after scaling
print(X_scaled[0])
code
88102916/cell_20
[ "text_html_output_1.png" ]
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
dataset.plot(kind='kde', subplots=True, layout=(3, 3), figsize=(15, 8))
code
88102916/cell_40
[ "text_plain_output_1.png" ]
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
corr = dataset.corr()
X = dataset.drop('Outcome', axis=1).values
y = dataset['Outcome'].values
dataset.head()
code
88102916/cell_29
[ "image_output_1.png" ]
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
corr = dataset.corr()
X = dataset.drop('Outcome', axis=1).values
y = dataset['Outcome'].values
X[0]
code
88102916/cell_26
[ "image_output_1.png" ]
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
corr = dataset.corr()
X = dataset.drop('Outcome', axis=1).values
y = dataset['Outcome'].values
print(X.shape)
print(y.shape)
code
88102916/cell_41
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import SelectKBest, chi2
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
raw = open(path, 'rt', encoding='utf8')
header = raw.readline()
data = np.loadtxt(raw, delimiter=',')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
corr = dataset.corr()
X = dataset.drop('Outcome', axis=1).values
y = dataset['Outcome'].values
names = dataset.columns[:-1]
skb = SelectKBest(score_func=chi2, k=5)
skb.fit(X, y)
argsort = np.argsort(skb.scores_)[::-1]
print(skb.scores_)
print(names[argsort][:5])
code
88102916/cell_2
[ "text_plain_output_1.png" ]
import os
import csv
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
88102916/cell_19
[ "text_html_output_1.png" ]
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
dataset.hist(figsize=(15, 8))
code
88102916/cell_7
[ "text_plain_output_1.png" ]
import csv
import numpy as np  # linear algebra

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
raw = open(path, 'rt', encoding='utf8')
header = raw.readline()
data = np.loadtxt(raw, delimiter=',')
print(data.shape)
print(header)
code
88102916/cell_32
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
corr = dataset.corr()
X = dataset.drop('Outcome', axis=1).values
y = dataset['Outcome'].values
ss = StandardScaler()
ss.fit(X)
X_ss = ss.transform(X)
print(X[0])
print(X_ss[0])
code
88102916/cell_15
[ "text_plain_output_1.png" ]
import csv
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset['Outcome'].value_counts()
code
88102916/cell_16
[ "text_html_output_1.png" ]
import csv
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
code
88102916/cell_17
[ "text_plain_output_1.png" ]
import csv
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
code
88102916/cell_14
[ "text_plain_output_1.png" ]
import csv
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.describe()
code
88102916/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
corr = dataset.corr()
plt.figure(figsize=(8, 8))
sns.heatmap(corr, xticklabels=dataset.columns, yticklabels=dataset.columns)
plt.show()
code
88102916/cell_37
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import Binarizer
import csv
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.dtypes
dataset.corr()
dataset.skew()
corr = dataset.corr()
X = dataset.drop('Outcome', axis=1).values
y = dataset['Outcome'].values
print(Binarizer().fit_transform(X)[0])
code
88102916/cell_12
[ "text_plain_output_1.png" ]
import csv
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
data = list(reader)
data = np.array(data).astype('float')
dataset = pd.read_csv(path)
dataset.shape
dataset.head(10)
code
88102916/cell_5
[ "text_html_output_1.png" ]
import csv
import numpy as np  # linear algebra

path = '/kaggle/input/pima-indians-diabetes-database/diabetes.csv'
raw = open(path, 'rt', encoding='utf8')
reader = csv.reader(raw, delimiter=',')
header = next(reader)
print(header)
data = list(reader)
data = np.array(data).astype('float')
print(data[0])
print(data.shape)
code
121152041/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
data_2022.isnull().sum()
data_2022.dtypes
data_2022 = data_2022.round(2)
data_2022.dtypes
data_2022.corr().style.background_gradient(cmap='coolwarm', axis=None)
data_2022.head(5)
code
121152041/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
data_2022.isnull().sum()
data_2022.dtypes
code
121152041/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
code
121152041/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.head(10)
code
121152041/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
data_2022.isnull().sum()
data_2022.dtypes
data_2022 = data_2022.round(2)
data_2022.dtypes
code
121152041/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
121152041/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
code
121152041/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
data_2022.isnull().sum()
data_2022.dtypes
data_2022 = data_2022.round(2)
data_2022.dtypes
data_2022.corr().style.background_gradient(cmap='coolwarm', axis=None)
columns_to_drop = ['Whisker-high', 'Whisker-low', 'Dystopia (1.83) + residual', 'Explained by: Generosity', 'Explained by: Perceptions of corruption']
data_2022.drop(columns_to_drop, axis='columns', inplace=True)
column_rename = {'Explained by: GDP per capita': 'GDP_per_capita', 'Explained by: Social support': 'Social_support', 'Explained by: Healthy life expectancy': 'life_expectancy', 'Explained by: Freedom to make life choices': 'Freedom', 'Explained by: Generosity': 'Generosity', 'Happiness score': 'Happiness_score_2022'}
data_2022.rename(columns=column_rename, inplace=True)
data_2022
data_2022.sort_values(by='RANK', inplace=True)
plt.figure(figsize=(11, 9))
sns.barplot(y=data_2022['Country'][:10], x=data_2022['Happiness_score_2022'][:10], palette='inferno')
plt.show()
code
121152041/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
print(data_2022.shape)
data_2022.isnull().sum()
code
121152041/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
data_2022.isnull().sum()
data_2022.dtypes
data_2022 = data_2022.round(2)
data_2022.dtypes
data_2022.corr().style.background_gradient(cmap='coolwarm', axis=None)
columns_to_drop = ['Whisker-high', 'Whisker-low', 'Dystopia (1.83) + residual', 'Explained by: Generosity', 'Explained by: Perceptions of corruption']
data_2022.drop(columns_to_drop, axis='columns', inplace=True)
column_rename = {'Explained by: GDP per capita': 'GDP_per_capita', 'Explained by: Social support': 'Social_support', 'Explained by: Healthy life expectancy': 'life_expectancy', 'Explained by: Freedom to make life choices': 'Freedom', 'Explained by: Generosity': 'Generosity', 'Happiness score': 'Happiness_score_2022'}
data_2022.rename(columns=column_rename, inplace=True)
data_2022
code
121152041/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
data_2022.isnull().sum()
data_2022.dtypes
data_2022 = data_2022.round(2)
data_2022.dtypes
data_2022.corr().style.background_gradient(cmap='coolwarm', axis=None)
columns_to_drop = ['Whisker-high', 'Whisker-low', 'Dystopia (1.83) + residual', 'Explained by: Generosity', 'Explained by: Perceptions of corruption']
data_2022.drop(columns_to_drop, axis='columns', inplace=True)
column_rename = {'Explained by: GDP per capita': 'GDP_per_capita', 'Explained by: Social support': 'Social_support', 'Explained by: Healthy life expectancy': 'life_expectancy', 'Explained by: Freedom to make life choices': 'Freedom', 'Explained by: Generosity': 'Generosity', 'Happiness score': 'Happiness_score_2022'}
data_2022.rename(columns=column_rename, inplace=True)
data_2022
data_2022.sort_values(by='RANK', inplace=True)
data_2022.head(10)
code
121152041/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
data_2022.isnull().sum()
data_2022.dtypes
data_2022 = data_2022.round(2)
data_2022.dtypes
data_2022.corr().style.background_gradient(cmap='coolwarm', axis=None)
columns_to_drop = ['Whisker-high', 'Whisker-low', 'Dystopia (1.83) + residual', 'Explained by: Generosity', 'Explained by: Perceptions of corruption']
data_2022.drop(columns_to_drop, axis='columns', inplace=True)
column_rename = {'Explained by: GDP per capita': 'GDP_per_capita', 'Explained by: Social support': 'Social_support', 'Explained by: Healthy life expectancy': 'life_expectancy', 'Explained by: Freedom to make life choices': 'Freedom', 'Explained by: Generosity': 'Generosity', 'Happiness score': 'Happiness_score_2022'}
data_2022.rename(columns=column_rename, inplace=True)
data_2022
data_2022.sort_values(by='RANK', inplace=True)
data_2022.head(5)
code
121152041/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
data_2022.isnull().sum()
data_2022.dtypes
data_2022 = data_2022.round(2)
data_2022.dtypes
data_2022.corr().style.background_gradient(cmap='coolwarm', axis=None)
columns_to_drop = ['Whisker-high', 'Whisker-low', 'Dystopia (1.83) + residual', 'Explained by: Generosity', 'Explained by: Perceptions of corruption']
data_2022.drop(columns_to_drop, axis='columns', inplace=True)
data_2022.head(5)
code
121152041/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filepath_1 = '/kaggle/input/world-happiness-report-2022/World Happiness Report 2022.csv'
data_2022 = pd.read_csv(filepath_1)
data_2022.shape
data_2022.notnull()
data_2022.isnull().sum()
data_2022.dtypes
data_2022 = data_2022.round(2)
data_2022.dtypes
data_2022.corr().style.background_gradient(cmap='coolwarm', axis=None)
code
1004254/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import ggplot
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
menu = pd.read_csv('../input/menu.csv')
menu.head(5)
df = menu
code
1004254/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/menu.csv')  # loaded as menu/df in cell_2 of this notebook; repeated so this cell runs standalone
df.sort_values(by='Protein', ascending=False).head(10)
code
1004254/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

menu = pd.read_csv('../input/menu.csv')  # loaded in cell_2 of this notebook; repeated so this cell runs standalone
print(menu.describe())
code
1004254/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/menu.csv')  # loaded as menu/df in cell_2 of this notebook
# 'Protein/Sugar' is assumed to be Protein divided by Sugars; its actual definition is not included in this dump
df['Protein/Sugar'] = df['Protein'] / df['Sugars']
df.sort_values(by='Protein', ascending=False).head(10)
df.sort_values(by='Protein/Sugar', ascending=False).head(10)
code
1004254/cell_5
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/menu.csv')  # loaded as menu/df in cell_2 of this notebook; repeated so this cell runs standalone
pd.pivot_table(df, index=['Category'], values=['Protein'], aggfunc=np.max).plot(kind='bar')
code
106199411/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def read_data(path: str, files: list):
    dataframes = []
    for file in files:
        dataframes.append(pd.read_csv(path + file))
    return dataframes

path = '../input/competitive-data-science-predict-future-sales/'
files = ['sales_train.csv', 'items.csv', 'shops.csv', 'item_categories.csv', 'test.csv']
sales_train, items, shops, item_categories, test = read_data(path, files)

def remove_outliers(df: pd.DataFrame, max_price: int, max_cnt: int):
    df = df[df['item_price'] > 0]
    df = df[df['item_price'] < max_price]
    df = df[df['item_cnt_day'] > 0]
    df = df[df['item_cnt_day'] < max_cnt]
    return df

sales_train = remove_outliers(sales_train, 50000, 1000)

def remove_shop_duplicates(df_train: pd.DataFrame, df_test: pd.DataFrame, shop_dups: dict):
    for shop1, shop2 in shop_dups.items():
        df_train.loc[df_train['shop_id'] == shop1, 'shop_id'] = shop2
        df_test.loc[df_test['shop_id'] == shop1, 'shop_id'] = shop2

shop_dups = {0: 57, 1: 58, 10: 11, 39: 40}
remove_shop_duplicates(sales_train, test, shop_dups)
shps = list(sales_train.shop_id.unique())
shps.sort()
fig = plt.figure(figsize=(20, 50))
for i in range(len(shps)):
    plt.subplot(12, 5, i + 1)
    plt.plot(sales_train[sales_train.shop_id == shps[i]].groupby(['date_block_num'])['item_cnt_day'].sum())
    plt.title('Shop {} sales'.format(shps[i]))
shops_cluster = []
for shop in shps:
    temp = [shop]
    for month in range(34):
        temp += [sales_train[(sales_train.shop_id == shop) & (sales_train.date_block_num == month)]['item_cnt_day'].sum()]
    shops_cluster.append(np.array(temp))
shops_cluster = pd.DataFrame(np.vstack(shops_cluster), columns=['shop'] + ['{}'.format(i) for i in range(34)])
shops_cluster
cat_sales = sales_train.merge(items[['item_id', 'item_category_id']], on='item_id', how='left')
# FeatureClustering is not defined in any cell of this dump; a hedged sketch follows this record
fc = FeatureClustering('item_category_id', cat_sales, 34)
fc.plot_graphs(cat_sales)
fc.create_data(cat_sales, StandardScaler())
fc.show_metrics()
fc.plot_centres(4)
fc.plot_clusters()
code
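The cell above calls a FeatureClustering helper that no cell in this dump defines. A minimal sketch of what it might look like, reconstructed by analogy with the per-shop clustering cells of the same notebook; the method names and signatures come from the call sites, but the bodies are assumptions, not the notebook's actual code.

import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
from tslearn.clustering import TimeSeriesKMeans

class FeatureClustering:
    """Cluster total monthly sales series grouped by one categorical feature."""

    def __init__(self, feature, df, n_months):
        self.feature = feature
        self.n_months = n_months
        self.values = sorted(df[feature].unique())
        self.scaled = None
        self.model = None

    def _series(self, df):
        # One row per feature value: total sales in each of the n_months months.
        pivot = (df.groupby([self.feature, 'date_block_num'])['item_cnt_day'].sum()
                   .unstack(fill_value=0)
                   .reindex(index=self.values, columns=range(self.n_months), fill_value=0))
        return pivot.to_numpy(dtype=float)

    def plot_graphs(self, df):
        series = self._series(df)
        plt.figure(figsize=(20, 50))
        for i, v in enumerate(self.values):
            plt.subplot((len(self.values) + 4) // 5, 5, i + 1)
            plt.plot(series[i])
            plt.title('{} {} sales'.format(self.feature, v))
        plt.show()

    def create_data(self, df, scaler):
        # Scale each series, mirroring the per-shop scaling above.
        self.scaled = scaler.fit_transform(self._series(df).T).T

    def show_metrics(self, k_max=10):
        distortions, silhouette = ([], [])
        for k in range(1, k_max):
            m = TimeSeriesKMeans(n_clusters=k, metric='dtw', n_jobs=6, max_iter=10)
            m.fit(self.scaled)
            distortions.append(m.inertia_)
            if k > 1:
                silhouette.append(silhouette_score(self.scaled, m.labels_))
        plt.plot(range(1, k_max), distortions, 'bx-')
        plt.title('Elbow Method')
        plt.show()
        plt.plot(range(2, k_max), silhouette, 'bx-')
        plt.title('Silhouette')
        plt.show()

    def plot_centres(self, n_clusters):
        self.model = TimeSeriesKMeans(n_clusters=n_clusters, metric='dtw', n_jobs=6, max_iter=10)
        self.model.fit(self.scaled)
        for c in range(n_clusters):
            plt.plot(self.model.cluster_centers_[c, :, 0].T, label=c)
        plt.title('Cluster centroids')
        plt.legend()
        plt.show()

    def plot_clusters(self):
        labels = self.model.predict(self.scaled)
        for c in range(self.model.n_clusters):
            plt.figure(figsize=(12, 6))
            for series in self.scaled[labels == c]:
                plt.plot(series)
            plt.title('Cluster {}'.format(c))
            plt.show()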
106199411/cell_9
[ "image_output_1.png" ]
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from tslearn.clustering import TimeSeriesKMeans
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def read_data(path: str, files: list):
    dataframes = []
    for file in files:
        dataframes.append(pd.read_csv(path + file))
    return dataframes

path = '../input/competitive-data-science-predict-future-sales/'
files = ['sales_train.csv', 'items.csv', 'shops.csv', 'item_categories.csv', 'test.csv']
sales_train, items, shops, item_categories, test = read_data(path, files)

def remove_outliers(df: pd.DataFrame, max_price: int, max_cnt: int):
    df = df[df['item_price'] > 0]
    df = df[df['item_price'] < max_price]
    df = df[df['item_cnt_day'] > 0]
    df = df[df['item_cnt_day'] < max_cnt]
    return df

sales_train = remove_outliers(sales_train, 50000, 1000)

def remove_shop_duplicates(df_train: pd.DataFrame, df_test: pd.DataFrame, shop_dups: dict):
    for shop1, shop2 in shop_dups.items():
        df_train.loc[df_train['shop_id'] == shop1, 'shop_id'] = shop2
        df_test.loc[df_test['shop_id'] == shop1, 'shop_id'] = shop2

shop_dups = {0: 57, 1: 58, 10: 11, 39: 40}
remove_shop_duplicates(sales_train, test, shop_dups)
shps = list(sales_train.shop_id.unique())
shps.sort()
fig = plt.figure(figsize=(20, 50))
for i in range(len(shps)):
    plt.subplot(12, 5, i + 1)
    plt.plot(sales_train[sales_train.shop_id == shps[i]].groupby(['date_block_num'])['item_cnt_day'].sum())
    plt.title('Shop {} sales'.format(shps[i]))
shops_cluster = []
for shop in shps:
    temp = [shop]
    for month in range(34):
        temp += [sales_train[(sales_train.shop_id == shop) & (sales_train.date_block_num == month)]['item_cnt_day'].sum()]
    shops_cluster.append(np.array(temp))
shops_cluster = pd.DataFrame(np.vstack(shops_cluster), columns=['shop'] + ['{}'.format(i) for i in range(34)])
shops_cluster
scaler = StandardScaler()
shops_scaled = scaler.fit_transform(shops_cluster.iloc[:, 1:].T).T
distortions = []
silhouette = []
K = range(1, 10)
for k in K:
    kmeanModel = TimeSeriesKMeans(n_clusters=k, metric='dtw', n_jobs=6, max_iter=10)
    kmeanModel.fit(shops_scaled)
    distortions.append(kmeanModel.inertia_)
    if k > 1:
        silhouette.append(silhouette_score(shops_scaled, kmeanModel.labels_))
n_clusters = 4
ts_kmeans_dtw = TimeSeriesKMeans(n_clusters=n_clusters, metric='dtw', n_jobs=6, max_iter=10)
ts_kmeans_dtw.fit(shops_scaled)
plt.figure(figsize=(12, 8))
for cluster_number in range(n_clusters):
    plt.plot(ts_kmeans_dtw.cluster_centers_[cluster_number, :, 0].T, label=cluster_number)
plt.title('Cluster centroids')
plt.legend()
plt.show()
code
106199411/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def read_data(path: str, files: list):
    dataframes = []
    for file in files:
        dataframes.append(pd.read_csv(path + file))
    return dataframes

path = '../input/competitive-data-science-predict-future-sales/'
files = ['sales_train.csv', 'items.csv', 'shops.csv', 'item_categories.csv', 'test.csv']
sales_train, items, shops, item_categories, test = read_data(path, files)

def remove_outliers(df: pd.DataFrame, max_price: int, max_cnt: int):
    df = df[df['item_price'] > 0]
    df = df[df['item_price'] < max_price]
    df = df[df['item_cnt_day'] > 0]
    df = df[df['item_cnt_day'] < max_cnt]
    return df

sales_train = remove_outliers(sales_train, 50000, 1000)

def remove_shop_duplicates(df_train: pd.DataFrame, df_test: pd.DataFrame, shop_dups: dict):
    for shop1, shop2 in shop_dups.items():
        df_train.loc[df_train['shop_id'] == shop1, 'shop_id'] = shop2
        df_test.loc[df_test['shop_id'] == shop1, 'shop_id'] = shop2

shop_dups = {0: 57, 1: 58, 10: 11, 39: 40}
remove_shop_duplicates(sales_train, test, shop_dups)
shps = list(sales_train.shop_id.unique())
shps.sort()
fig = plt.figure(figsize=(20, 50))
for i in range(len(shps)):
    plt.subplot(12, 5, i + 1)
    plt.plot(sales_train[sales_train.shop_id == shps[i]].groupby(['date_block_num'])['item_cnt_day'].sum())
    plt.title('Shop {} sales'.format(shps[i]))
shops_cluster = []
for shop in shps:
    temp = [shop]
    for month in range(34):
        temp += [sales_train[(sales_train.shop_id == shop) & (sales_train.date_block_num == month)]['item_cnt_day'].sum()]
    shops_cluster.append(np.array(temp))
shops_cluster = pd.DataFrame(np.vstack(shops_cluster), columns=['shop'] + ['{}'.format(i) for i in range(34)])
shops_cluster
code
106199411/cell_2
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd

def read_data(path: str, files: list):
    dataframes = []
    for file in files:
        dataframes.append(pd.read_csv(path + file))
    return dataframes

path = '../input/competitive-data-science-predict-future-sales/'
files = ['sales_train.csv', 'items.csv', 'shops.csv', 'item_categories.csv', 'test.csv']
sales_train, items, shops, item_categories, test = read_data(path, files)
sales_train.head()
code
106199411/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from tslearn.clustering import TimeSeriesKMeans
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def read_data(path: str, files: list):
    dataframes = []
    for file in files:
        dataframes.append(pd.read_csv(path + file))
    return dataframes

path = '../input/competitive-data-science-predict-future-sales/'
files = ['sales_train.csv', 'items.csv', 'shops.csv', 'item_categories.csv', 'test.csv']
sales_train, items, shops, item_categories, test = read_data(path, files)

def remove_outliers(df: pd.DataFrame, max_price: int, max_cnt: int):
    df = df[df['item_price'] > 0]
    df = df[df['item_price'] < max_price]
    df = df[df['item_cnt_day'] > 0]
    df = df[df['item_cnt_day'] < max_cnt]
    return df

sales_train = remove_outliers(sales_train, 50000, 1000)

def remove_shop_duplicates(df_train: pd.DataFrame, df_test: pd.DataFrame, shop_dups: dict):
    for shop1, shop2 in shop_dups.items():
        df_train.loc[df_train['shop_id'] == shop1, 'shop_id'] = shop2
        df_test.loc[df_test['shop_id'] == shop1, 'shop_id'] = shop2

shop_dups = {0: 57, 1: 58, 10: 11, 39: 40}
remove_shop_duplicates(sales_train, test, shop_dups)
shps = list(sales_train.shop_id.unique())
shps.sort()
fig = plt.figure(figsize=(20, 50))
for i in range(len(shps)):
    plt.subplot(12, 5, i + 1)
    plt.plot(sales_train[sales_train.shop_id == shps[i]].groupby(['date_block_num'])['item_cnt_day'].sum())
    plt.title('Shop {} sales'.format(shps[i]))
shops_cluster = []
for shop in shps:
    temp = [shop]
    for month in range(34):
        temp += [sales_train[(sales_train.shop_id == shop) & (sales_train.date_block_num == month)]['item_cnt_day'].sum()]
    shops_cluster.append(np.array(temp))
shops_cluster = pd.DataFrame(np.vstack(shops_cluster), columns=['shop'] + ['{}'.format(i) for i in range(34)])
shops_cluster
scaler = StandardScaler()
shops_scaled = scaler.fit_transform(shops_cluster.iloc[:, 1:].T).T
distortions = []
silhouette = []
K = range(1, 10)
for k in K:
    kmeanModel = TimeSeriesKMeans(n_clusters=k, metric='dtw', n_jobs=6, max_iter=10)
    kmeanModel.fit(shops_scaled)
    distortions.append(kmeanModel.inertia_)
    if k > 1:
        silhouette.append(silhouette_score(shops_scaled, kmeanModel.labels_))
plt.figure(figsize=(10, 4))
plt.plot(K, distortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('Elbow Method')
plt.show()
plt.figure(figsize=(10, 4))
plt.plot(K[1:], silhouette, 'bx-')
plt.xlabel('k')
plt.ylabel('Silhouette score')
plt.title('Silhouette')
plt.show()
code
106199411/cell_10
[ "text_html_output_1.png" ]
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from tslearn.clustering import TimeSeriesKMeans
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def read_data(path: str, files: list):
    dataframes = []
    for file in files:
        dataframes.append(pd.read_csv(path + file))
    return dataframes

path = '../input/competitive-data-science-predict-future-sales/'
files = ['sales_train.csv', 'items.csv', 'shops.csv', 'item_categories.csv', 'test.csv']
sales_train, items, shops, item_categories, test = read_data(path, files)

def remove_outliers(df: pd.DataFrame, max_price: int, max_cnt: int):
    df = df[df['item_price'] > 0]
    df = df[df['item_price'] < max_price]
    df = df[df['item_cnt_day'] > 0]
    df = df[df['item_cnt_day'] < max_cnt]
    return df

sales_train = remove_outliers(sales_train, 50000, 1000)

def remove_shop_duplicates(df_train: pd.DataFrame, df_test: pd.DataFrame, shop_dups: dict):
    for shop1, shop2 in shop_dups.items():
        df_train.loc[df_train['shop_id'] == shop1, 'shop_id'] = shop2
        df_test.loc[df_test['shop_id'] == shop1, 'shop_id'] = shop2

shop_dups = {0: 57, 1: 58, 10: 11, 39: 40}
remove_shop_duplicates(sales_train, test, shop_dups)
shps = list(sales_train.shop_id.unique())
shps.sort()
fig = plt.figure(figsize=(20, 50))
for i in range(len(shps)):
    plt.subplot(12, 5, i + 1)
    plt.plot(sales_train[sales_train.shop_id == shps[i]].groupby(['date_block_num'])['item_cnt_day'].sum())
    plt.title('Shop {} sales'.format(shps[i]))
shops_cluster = []
for shop in shps:
    temp = [shop]
    for month in range(34):
        temp += [sales_train[(sales_train.shop_id == shop) & (sales_train.date_block_num == month)]['item_cnt_day'].sum()]
    shops_cluster.append(np.array(temp))
shops_cluster = pd.DataFrame(np.vstack(shops_cluster), columns=['shop'] + ['{}'.format(i) for i in range(34)])
shops_cluster
scaler = StandardScaler()
shops_scaled = scaler.fit_transform(shops_cluster.iloc[:, 1:].T).T
distortions = []
silhouette = []
K = range(1, 10)
for k in K:
    kmeanModel = TimeSeriesKMeans(n_clusters=k, metric='dtw', n_jobs=6, max_iter=10)
    kmeanModel.fit(shops_scaled)
    distortions.append(kmeanModel.inertia_)
    if k > 1:
        silhouette.append(silhouette_score(shops_scaled, kmeanModel.labels_))
n_clusters = 4
ts_kmeans_dtw = TimeSeriesKMeans(n_clusters=n_clusters, metric='dtw', n_jobs=6, max_iter=10)
ts_kmeans_dtw.fit(shops_scaled)
shops_cluster['cluster'] = ts_kmeans_dtw.predict(shops_scaled)
for cluster in range(n_clusters):
    print('=================================================================================')
    print(f' Cluster number: {cluster}')
    print('=================================================================================')
    # plot_cluster_shops is not defined in any cell of this dump; a hedged sketch follows this record
    plot_cluster_shops(shops_cluster[shops_cluster.cluster == cluster])
code
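The cell above also calls plot_cluster_shops, which no cell in this dump defines. A minimal sketch under the assumption that it plots the monthly sales series of every shop in the given cluster slice; only the name and the argument come from the call site, the body is a guess.

import matplotlib.pyplot as plt

def plot_cluster_shops(cluster_df):
    # cluster_df rows come from shops_cluster: ['shop', '0' .. '33', 'cluster'].
    plt.figure(figsize=(12, 6))
    for _, row in cluster_df.iterrows():
        plt.plot(row.iloc[1:-1].values, label='Shop {}'.format(int(row['shop'])))
    plt.xlabel('date_block_num')
    plt.ylabel('monthly item_cnt_day')
    plt.legend(ncol=4, fontsize=8)
    plt.show()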
106199411/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

def read_data(path: str, files: list):
    dataframes = []
    for file in files:
        dataframes.append(pd.read_csv(path + file))
    return dataframes

path = '../input/competitive-data-science-predict-future-sales/'
files = ['sales_train.csv', 'items.csv', 'shops.csv', 'item_categories.csv', 'test.csv']
sales_train, items, shops, item_categories, test = read_data(path, files)

def remove_outliers(df: pd.DataFrame, max_price: int, max_cnt: int):
    df = df[df['item_price'] > 0]
    df = df[df['item_price'] < max_price]
    df = df[df['item_cnt_day'] > 0]
    df = df[df['item_cnt_day'] < max_cnt]
    return df

sales_train = remove_outliers(sales_train, 50000, 1000)
shps = list(sales_train.shop_id.unique())
shps.sort()
fig = plt.figure(figsize=(20, 50))
for i in range(len(shps)):
    plt.subplot(12, 5, i + 1)
    plt.plot(sales_train[sales_train.shop_id == shps[i]].groupby(['date_block_num'])['item_cnt_day'].sum())
    plt.title('Shop {} sales'.format(shps[i]))
code
88102789/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns

df = sns.load_dataset('titanic')
sns.countplot(data=df, x='alive', hue='embark_town', palette='deep')
code
88102789/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns

df = sns.load_dataset('titanic')
sns.countplot(data=df, x='alive', hue='class', palette='deep')
code
88102789/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns

df = sns.load_dataset('titanic')
sns.jointplot(data=df, x='age', y='fare', kind='scatter', color='c')
code
88102789/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns

df = sns.load_dataset('titanic')
df.info()
code
88102789/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns

df = sns.load_dataset('titanic')
sns.countplot(data=df, x='class', hue='sex')
code
88102789/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns

df = sns.load_dataset('titanic')
sns.countplot(data=df, x='embark_town', palette='Set2')
code
88102789/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns

df = sns.load_dataset('titanic')
sns.kdeplot(df['age'], shade=True, color='m')
code