Schema (one record per notebook code cell):

  path              string, 13-17 chars (format "<kernel_id>/cell_<n>")
  screenshot_names  sequence of strings, 1-873 items (rendered cell outputs)
  code              string, 0-40.4k chars (the cell's source)
  cell_type         string, 1 distinct class ("code")
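Every record below follows this four-field schema. As a minimal sketch of consuming it, the snippet iterates records assuming they are serialized as JSON Lines; the file name cells.jsonl is hypothetical and stands in for however the dump is actually stored.

import json

# Minimal sketch, assuming JSON Lines serialization; 'cells.jsonl' is a
# hypothetical file name, not part of this dump.
with open('cells.jsonl', encoding='utf-8') as fh:
    for line in fh:
        rec = json.loads(line)
        kernel_id, cell_name = rec['path'].split('/')  # e.g. '73101177', 'cell_8'
        assert rec['cell_type'] == 'code'  # the only cell_type class present
        print(kernel_id, cell_name, len(rec['code']), len(rec['screenshot_names']))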
73101177/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.describe()
code
73101177/cell_16
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)
num_data = train_df.select_dtypes('number')
cat_data = train_df.select_dtypes('object')
num_data.head()
code
73101177/cell_38
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)

def get_unique_sum(cat_list):
    pass

cat_lists = list(train.select_dtypes('object').columns)
get_unique_sum(cat_lists)
X = train.drop(['id', 'target'], axis=1)
y = train['target']
test_df = test.drop('id', axis=1)
train.head()
code
73101177/cell_35
[ "image_output_1.png" ]
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import StandardScaler, RobustScaler, OrdinalEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)

def get_unique_sum(cat_list):
    pass

cat_lists = list(train.select_dtypes('object').columns)
get_unique_sum(cat_lists)
X = train.drop(['id', 'target'], axis=1)
y = train['target']
test_df = test.drop('id', axis=1)
ct = make_column_transformer(
    (OrdinalEncoder(), cat_lists),
    (StandardScaler(), ['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont7', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']),
    (RobustScaler(), ['cont0', 'cont6', 'cont8']),
    remainder='passthrough')
X_train = pd.DataFrame(ct.fit_transform(X))
# transform (not fit_transform) the test set, so the encoders and scalers
# fitted on the training data are reused rather than refitted on test statistics
test = pd.DataFrame(ct.transform(test_df))
test.head()
code
73101177/cell_31
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)

def get_unique_sum(cat_list):
    for i in cat_list:
        print(train[i].unique())

cat_lists = list(train.select_dtypes('object').columns)
get_unique_sum(cat_lists)
code
73101177/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
print(train.shape)
train.head()
code
73101177/cell_36
[ "image_output_1.png" ]
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import StandardScaler, RobustScaler, OrdinalEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)

def get_unique_sum(cat_list):
    pass

cat_lists = list(train.select_dtypes('object').columns)
get_unique_sum(cat_lists)
X = train.drop(['id', 'target'], axis=1)
y = train['target']
test_df = test.drop('id', axis=1)
ct = make_column_transformer(
    (OrdinalEncoder(), cat_lists),
    (StandardScaler(), ['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont7', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']),
    (RobustScaler(), ['cont0', 'cont6', 'cont8']),
    remainder='passthrough')
X_train = pd.DataFrame(ct.fit_transform(X))
# transform (not fit_transform) the test set to avoid refitting on test data
test = pd.DataFrame(ct.transform(test_df))
print(X_train.shape)
print(test.shape)
code
16137929/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
df.university_rating.value_counts()
sns.scatterplot(x='CGPA', y='university_rating', hue='research', data=df)
plt.show()
code
16137929/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
sns.pairplot(data=df, diag_kind='kde')
code
16137929/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
print('Showing Meta Data :')
df.info()
code
16137929/cell_25
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
pd.isnull(df).sum()
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
df.university_rating.value_counts()
pd.DataFrame(df.groupby('university_rating')['GRE'].mean())
pd.DataFrame(df.groupby('university_rating')['TOEFL'].mean())
df.groupby('university_rating')[['SOP', 'LOR', 'CGPA']].mean()
sns.regplot(x='CGPA', y='admit_chance', data=df, line_kws={'color': 'red'})
plt.title('CGPA vs Chance of Admit')
plt.show()
code
16137929/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
pd.isnull(df).sum()
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
df.university_rating.value_counts()
pd.DataFrame(df.groupby('university_rating')['GRE'].mean())
print('Avg. TOEFL scores based on University Ratings')
pd.DataFrame(df.groupby('university_rating')['TOEFL'].mean())
code
16137929/cell_20
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
df.university_rating.value_counts()
# keyword argument; the bare positional form is deprecated in recent seaborn
# (the cell's stderr screenshot suggests it already warned)
sns.countplot(x=df.university_rating)
code
16137929/cell_26
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
pd.isnull(df).sum()
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
df.university_rating.value_counts()
pd.DataFrame(df.groupby('university_rating')['GRE'].mean())
pd.DataFrame(df.groupby('university_rating')['TOEFL'].mean())
df.groupby('university_rating')[['SOP', 'LOR', 'CGPA']].mean()
admt_sort = df.sort_values(by=df.columns[-1], ascending=False)
admt_sort[admt_sort['admit_chance'] > 0.8].mean().reset_index().T
code
16137929/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
df.head()
code
16137929/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
df.university_rating.value_counts()
code
16137929/cell_7
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
type(df)
code
16137929/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
sns.regplot(x='GRE', y='TOEFL', data=df, line_kws={'color': 'red'})
plt.title('GRE Score vs TOEFL Score')
plt.show()
code
16137929/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
print('Descriptive Statistics of our Data:')
df.describe().T
code
16137929/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
df.research.value_counts()
code
16137929/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
print(chances)
# factorplot was renamed catplot in seaborn 0.9 and later removed;
# kind='point' preserves factorplot's old default
sns.catplot(x='research', y='admit_chance', data=df, kind='point')
plt.show()
code
16137929/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
sns.regplot(x='GRE', y='CGPA', data=df, line_kws={'color': 'red'})
plt.title('GRE Score vs CGPA')
plt.show()
code
16137929/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
pd.isnull(df).sum()
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
df.university_rating.value_counts()
pd.DataFrame(df.groupby('university_rating')['GRE'].mean())
pd.DataFrame(df.groupby('university_rating')['TOEFL'].mean())
df.groupby('university_rating')[['SOP', 'LOR', 'CGPA']].mean()
code
16137929/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
# linewidth takes a number, not the string '1'
df[['GRE', 'TOEFL', 'university_rating', 'CGPA', 'SOP', 'LOR', 'research']].hist(
    figsize=(10, 8), bins=15, linewidth=1, edgecolor='black')
plt.tight_layout()
plt.show()
code
16137929/cell_22
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
pd.isnull(df).sum()
plt.tight_layout()
df.research.value_counts()
chances = df.groupby('research')['admit_chance'].median()
df.university_rating.value_counts()
print('Avg. GRE scores based on University Ratings')
pd.DataFrame(df.groupby('university_rating')['GRE'].mean())
code
16137929/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
code
16137929/cell_12
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.describe().T
df.columns
df.columns = ['sno', 'GRE', 'TOEFL', 'university_rating', 'SOP', 'LOR', 'CGPA', 'research', 'admit_chance']
pd.isnull(df).sum()
code
16137929/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/Admission_Predict_Ver1.1.csv')
df.head(10)
code
2034924/cell_21
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
code
2034924/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
df_test = pd.read_csv('../input/test.tsv', sep='\t', index_col=0)
df_test.info()
code
2034924/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
df.groupby('category_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
code
2034924/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
code
2034924/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
code
2034924/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
df.groupby('category_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.brand_name.isnull())
df.brand_name.unique().shape
df.groupby('brand_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
code
2034924/cell_33
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
df.groupby('category_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.brand_name.isnull())
df.brand_name.unique().shape
df.groupby('brand_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.price.isnull())
code
2034924/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
code
2034924/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
df.info()
code
2034924/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
df.groupby('category_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.brand_name.isnull())
df.brand_name.unique().shape
code
2034924/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
df.groupby('category_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.brand_name.isnull())
df.brand_name.unique().shape
df.groupby('brand_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.price.isnull())
df.price.quantile(0.9)
df.price.quantile(0.99)
sum(df.price == 0)
code
2034924/cell_41
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
df.groupby('category_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.brand_name.isnull())
df.brand_name.unique().shape
df.groupby('brand_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.price.isnull())
df.price.quantile(0.9)
df.price.quantile(0.99)
sum(df.price == 0)
plot = np.log10(df.price + 1).hist(bins=20, log=True)
plot.set_ylabel('Count')
plot.set_xlabel('log10(price)')
code
2034924/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
df.head()
code
2034924/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
df.groupby('category_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.brand_name.isnull())
code
2034924/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
code
2034924/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
df.groupby('category_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.brand_name.isnull())
df.brand_name.unique().shape
df.groupby('brand_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.price.isnull())
df.price.describe()
code
2034924/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
code
2034924/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
code
2034924/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
code
2034924/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
df_test = pd.read_csv('../input/test.tsv', sep='\t', index_col=0)
df_test.head()
code
2034924/cell_37
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
df.groupby('category_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.brand_name.isnull())
df.brand_name.unique().shape
df.groupby('brand_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.price.isnull())
df.price.quantile(0.9)
df.price.quantile(0.99)
code
2034924/cell_36
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.tsv', sep='\t', index_col=0)
sum(df.name.isnull())
df.item_condition_id.unique()
df.category_name.unique().shape
sum(df.category_name.isnull())
df.category_name.fillna('//').str.split('/').apply(lambda x: x[0]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[1]).unique().shape
df.category_name.fillna('//').str.split('/').apply(lambda x: x[2]).unique().shape
df.groupby('category_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.brand_name.isnull())
df.brand_name.unique().shape
df.groupby('brand_name').agg({'price': 'mean'}).sort_values('price', ascending=False).head(10)
sum(df.price.isnull())
df.price.quantile(0.9)
code
121153285/cell_21
[ "text_plain_output_1.png" ]
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
g = sns.PairGrid(data, y_vars=['booking_status'], x_vars=data.columns)
g = g.map_diag(plt.hist)
g = g.map_offdiag(sns.scatterplot)
y = data.booking_status
X = data.drop(['booking_status', 'id'], axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, test_size=0.2)
scaler = RobustScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_train = pd.DataFrame(scaler.fit_transform(X_train))
# transform (not refit) the validation split with the scaler fitted on X_train
X_val = pd.DataFrame(scaler.transform(X_val))
param_grid = {'n_estimators': [1750, 2000, 2250, 2500],
              'learning_rate': [0.05, 0.065, 0.08, 0.9],
              'max_depth': [3, 4, 5, 6, 8, 10, 12, 15],
              'min_child_weight': [3, 4, 5, 6, 7],
              'gamma': [0.2, 0.3, 0.4, 0.5, 0.6],
              'colsample_bytree': [0.4, 0.5, 0.6, 0.65, 0.7]}
# the CatBoost grid below immediately replaces the XGB-style grid above
param_grid = {'n_estimators': [2000], 'learning_rate': [0.15], 'max_depth': [8],
              'l2_leaf_reg': [5], 'subsample': [0.5], 'colsample_bylevel': [0.8],
              'bagging_temperature': [0.0],
              'grow_policy': ['SymmetricTree', 'Depthwise', 'Lossguide']}
RS_CB = RandomizedSearchCV(estimator=CatBoostClassifier(early_stopping_rounds=10), param_distributions=param_grid, n_iter=3, cv=10, n_jobs=-1, random_state=0)
RS_CB.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], verbose=0)
RS_CB.best_params_
code
121153285/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
data.info()
code
121153285/cell_23
[ "text_plain_output_1.png" ]
!pip install lightgbm
from lightgbm import LGBMClassifier
code
121153285/cell_20
[ "text_plain_output_1.png" ]
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
g = sns.PairGrid(data, y_vars=['booking_status'], x_vars=data.columns)
g = g.map_diag(plt.hist)
g = g.map_offdiag(sns.scatterplot)
y = data.booking_status
X = data.drop(['booking_status', 'id'], axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, test_size=0.2)
scaler = RobustScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_train = pd.DataFrame(scaler.fit_transform(X_train))
# transform (not refit) the validation split with the scaler fitted on X_train
X_val = pd.DataFrame(scaler.transform(X_val))
param_grid = {'n_estimators': [1750, 2000, 2250, 2500],
              'learning_rate': [0.05, 0.065, 0.08, 0.9],
              'max_depth': [3, 4, 5, 6, 8, 10, 12, 15],
              'min_child_weight': [3, 4, 5, 6, 7],
              'gamma': [0.2, 0.3, 0.4, 0.5, 0.6],
              'colsample_bytree': [0.4, 0.5, 0.6, 0.65, 0.7]}
# the CatBoost grid below immediately replaces the XGB-style grid above
param_grid = {'n_estimators': [2000], 'learning_rate': [0.15], 'max_depth': [8],
              'l2_leaf_reg': [5], 'subsample': [0.5], 'colsample_bylevel': [0.8],
              'bagging_temperature': [0.0],
              'grow_policy': ['SymmetricTree', 'Depthwise', 'Lossguide']}
RS_CB = RandomizedSearchCV(estimator=CatBoostClassifier(early_stopping_rounds=10), param_distributions=param_grid, n_iter=3, cv=10, n_jobs=-1, random_state=0)
RS_CB.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], verbose=0)
code
121153285/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
code
121153285/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
121153285/cell_7
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
g = sns.PairGrid(data, y_vars=['booking_status'], x_vars=data.columns)
g = g.map_diag(plt.hist)
g = g.map_offdiag(sns.scatterplot)
plt.show()
code
121153285/cell_18
[ "text_plain_output_1.png" ]
!pip install catboost
from catboost import CatBoostClassifier
code
121153285/cell_15
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.preprocessing import RobustScaler
from xgboost import XGBRegressor, XGBClassifier
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
g = sns.PairGrid(data, y_vars=['booking_status'], x_vars=data.columns)
g = g.map_diag(plt.hist)
g = g.map_offdiag(sns.scatterplot)
y = data.booking_status
X = data.drop(['booking_status', 'id'], axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, test_size=0.2)
scaler = RobustScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_train = pd.DataFrame(scaler.fit_transform(X_train))
# transform (not refit) the validation split with the scaler fitted on X_train
X_val = pd.DataFrame(scaler.transform(X_val))
param_grid = {'n_estimators': [1750, 2000, 2250, 2500],
              'learning_rate': [0.05, 0.065, 0.08, 0.9],
              'max_depth': [3, 4, 5, 6, 8, 10, 12, 15],
              'min_child_weight': [3, 4, 5, 6, 7],
              'gamma': [0.2, 0.3, 0.4, 0.5, 0.6],
              'colsample_bytree': [0.4, 0.5, 0.6, 0.65, 0.7]}
RS_XGB = RandomizedSearchCV(estimator=XGBClassifier(early_stopping_rounds=30), param_distributions=param_grid, n_iter=3, cv=6, n_jobs=-1, random_state=0)
RS_XGB.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], verbose=0)
code
121153285/cell_16
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.preprocessing import RobustScaler
from xgboost import XGBRegressor, XGBClassifier
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
g = sns.PairGrid(data, y_vars=['booking_status'], x_vars=data.columns)
g = g.map_diag(plt.hist)
g = g.map_offdiag(sns.scatterplot)
y = data.booking_status
X = data.drop(['booking_status', 'id'], axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, test_size=0.2)
scaler = RobustScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_train = pd.DataFrame(scaler.fit_transform(X_train))
# transform (not refit) the validation split with the scaler fitted on X_train
X_val = pd.DataFrame(scaler.transform(X_val))
param_grid = {'n_estimators': [1750, 2000, 2250, 2500],
              'learning_rate': [0.05, 0.065, 0.08, 0.9],
              'max_depth': [3, 4, 5, 6, 8, 10, 12, 15],
              'min_child_weight': [3, 4, 5, 6, 7],
              'gamma': [0.2, 0.3, 0.4, 0.5, 0.6],
              'colsample_bytree': [0.4, 0.5, 0.6, 0.65, 0.7]}
RS_XGB = RandomizedSearchCV(estimator=XGBClassifier(early_stopping_rounds=30), param_distributions=param_grid, n_iter=3, cv=6, n_jobs=-1, random_state=0)
RS_XGB.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], verbose=0)
RS_XGB.best_params_
code
121153285/cell_17
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_absolute_error, roc_auc_score
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.preprocessing import RobustScaler
from xgboost import XGBRegressor, XGBClassifier
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
g = sns.PairGrid(data, y_vars=['booking_status'], x_vars=data.columns)
g = g.map_diag(plt.hist)
g = g.map_offdiag(sns.scatterplot)
y = data.booking_status
X = data.drop(['booking_status', 'id'], axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, test_size=0.2)
scaler = RobustScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_train = pd.DataFrame(scaler.fit_transform(X_train))
# transform (not refit) the validation split with the scaler fitted on X_train
X_val = pd.DataFrame(scaler.transform(X_val))
param_grid = {'n_estimators': [1750, 2000, 2250, 2500],
              'learning_rate': [0.05, 0.065, 0.08, 0.9],
              'max_depth': [3, 4, 5, 6, 8, 10, 12, 15],
              'min_child_weight': [3, 4, 5, 6, 7],
              'gamma': [0.2, 0.3, 0.4, 0.5, 0.6],
              'colsample_bytree': [0.4, 0.5, 0.6, 0.65, 0.7]}
RS_XGB = RandomizedSearchCV(estimator=XGBClassifier(early_stopping_rounds=30), param_distributions=param_grid, n_iter=3, cv=6, n_jobs=-1, random_state=0)
RS_XGB.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], verbose=0)
RS_XGB.best_params_
xgb_models = []
scores = []
feature_importances = []
skf = StratifiedKFold(n_splits=30, shuffle=True, random_state=0)
for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
    X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx])
    X_valid, y_valid = (X.iloc[val_idx], y.iloc[val_idx])
    evaluation = [(X_train, y_train), (X_valid, y_valid)]
    model = XGBClassifier(**RS_XGB.best_params_, n_jobs=-1, early_stopping_rounds=300)
    model.fit(X_train, y_train, eval_set=evaluation, verbose=0)
    val_preds = model.predict_proba(X_valid)[:, 1]
    val_score = roc_auc_score(y_valid, val_preds)
    best_iter = model.best_iteration
    # pair each feature with its own importance; the original dict comprehension
    # crossed the two lists and gave every feature the last importance value
    feature_importances.append(dict(zip(model.feature_names_in_, model.feature_importances_)))
    print(f' auc :{val_score:.5f} best iteration :{best_iter}')
    if val_score > 0.9:
        scores.append(val_score)
        xgb_models.append(model)
mean_val_auc = np.array(scores).mean(0)
print(f'Mean AUC: {mean_val_auc}')
code
121153285/cell_24
[ "text_plain_output_1.png" ]
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
g = sns.PairGrid(data, y_vars=['booking_status'], x_vars=data.columns)
g = g.map_diag(plt.hist)
g = g.map_offdiag(sns.scatterplot)
y = data.booking_status
X = data.drop(['booking_status', 'id'], axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, test_size=0.2)
scaler = RobustScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_train = pd.DataFrame(scaler.fit_transform(X_train))
# transform (not refit) the validation split with the scaler fitted on X_train
X_val = pd.DataFrame(scaler.transform(X_val))
param_grid = {'n_estimators': [1750, 2000, 2250, 2500],
              'learning_rate': [0.05, 0.065, 0.08, 0.9],
              'max_depth': [3, 4, 5, 6, 8, 10, 12, 15],
              'min_child_weight': [3, 4, 5, 6, 7],
              'gamma': [0.2, 0.3, 0.4, 0.5, 0.6],
              'colsample_bytree': [0.4, 0.5, 0.6, 0.65, 0.7]}
# the CatBoost grid below immediately replaces the XGB-style grid above
param_grid = {'n_estimators': [2000], 'learning_rate': [0.15], 'max_depth': [8],
              'l2_leaf_reg': [5], 'subsample': [0.5], 'colsample_bylevel': [0.8],
              'bagging_temperature': [0.0],
              'grow_policy': ['SymmetricTree', 'Depthwise', 'Lossguide']}
RS_CB = RandomizedSearchCV(estimator=CatBoostClassifier(early_stopping_rounds=10), param_distributions=param_grid, n_iter=3, cv=10, n_jobs=-1, random_state=0)
RS_CB.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], verbose=0)
RS_CB.best_params_
RS_CB = RandomizedSearchCV(estimator=LGBMClassifier(early_stopping_rounds=300), param_distributions=param_grid, n_iter=3, cv=30, n_jobs=-1, random_state=0)
RS_CB.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], verbose=0)
code
121153285/cell_22
[ "text_plain_output_1.png" ]
from catboost import CatBoostClassifier
from sklearn.metrics import mean_absolute_error, roc_auc_score
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.preprocessing import RobustScaler
from xgboost import XGBRegressor, XGBClassifier
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
g = sns.PairGrid(data, y_vars=['booking_status'], x_vars=data.columns)
g = g.map_diag(plt.hist)
g = g.map_offdiag(sns.scatterplot)
y = data.booking_status
X = data.drop(['booking_status', 'id'], axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, test_size=0.2)
scaler = RobustScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_train = pd.DataFrame(scaler.fit_transform(X_train))
# transform (not refit) the validation split with the scaler fitted on X_train
X_val = pd.DataFrame(scaler.transform(X_val))
param_grid = {'n_estimators': [1750, 2000, 2250, 2500],
              'learning_rate': [0.05, 0.065, 0.08, 0.9],
              'max_depth': [3, 4, 5, 6, 8, 10, 12, 15],
              'min_child_weight': [3, 4, 5, 6, 7],
              'gamma': [0.2, 0.3, 0.4, 0.5, 0.6],
              'colsample_bytree': [0.4, 0.5, 0.6, 0.65, 0.7]}
RS_XGB = RandomizedSearchCV(estimator=XGBClassifier(early_stopping_rounds=30), param_distributions=param_grid, n_iter=3, cv=6, n_jobs=-1, random_state=0)
RS_XGB.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], verbose=0)
RS_XGB.best_params_
xgb_models = []
scores = []
feature_importances = []
skf = StratifiedKFold(n_splits=30, shuffle=True, random_state=0)
for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
    X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx])
    X_valid, y_valid = (X.iloc[val_idx], y.iloc[val_idx])
    evaluation = [(X_train, y_train), (X_valid, y_valid)]
    model = XGBClassifier(**RS_XGB.best_params_, n_jobs=-1, early_stopping_rounds=300)
    model.fit(X_train, y_train, eval_set=evaluation, verbose=0)
    val_preds = model.predict_proba(X_valid)[:, 1]
    val_score = roc_auc_score(y_valid, val_preds)
    best_iter = model.best_iteration
    # zip names with their own importances (the original comprehension crossed the lists)
    feature_importances.append(dict(zip(model.feature_names_in_, model.feature_importances_)))
    if val_score > 0.9:
        scores.append(val_score)
        xgb_models.append(model)
mean_val_auc = np.array(scores).mean(0)
catboost_models = []
scores = []
feature_importances = []
skf = StratifiedKFold(n_splits=30, shuffle=True, random_state=0)
for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
    X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx])
    X_valid, y_valid = (X.iloc[val_idx], y.iloc[val_idx])
    evaluation = [(X_train, y_train), (X_valid, y_valid)]
    model = CatBoostClassifier(subsample=0.5, n_estimators=2000, max_depth=8, learning_rate=0.15, l2_leaf_reg=5, grow_policy='Lossguide', colsample_bylevel=0.8, bagging_temperature=0.0, early_stopping_rounds=300)
    model.fit(X_train, y_train, eval_set=evaluation, verbose=0)
    val_preds = model.predict_proba(X_valid)[:, 1]
    val_score = roc_auc_score(y_valid, val_preds)
    print(f' auc :{val_score:.5f}')
    if val_score > 0.9:
        scores.append(val_score)
        catboost_models.append(model)
    break  # only the first fold is trained here
mean_val_auc = np.array(scores).mean(0)
print(f'Mean AUC: {mean_val_auc}')
code
121153285/cell_27
[ "text_plain_output_1.png" ]
from catboost import CatBoostClassifier
from sklearn.metrics import mean_absolute_error, roc_auc_score
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.preprocessing import RobustScaler
from xgboost import XGBRegressor, XGBClassifier
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
g = sns.PairGrid(data, y_vars=['booking_status'], x_vars=data.columns)
g = g.map_diag(plt.hist)
g = g.map_offdiag(sns.scatterplot)
y = data.booking_status
X = data.drop(['booking_status', 'id'], axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, test_size=0.2)
scaler = RobustScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_train = pd.DataFrame(scaler.fit_transform(X_train))
# transform (not refit) the validation split with the scaler fitted on X_train
X_val = pd.DataFrame(scaler.transform(X_val))
param_grid = {'n_estimators': [1750, 2000, 2250, 2500],
              'learning_rate': [0.05, 0.065, 0.08, 0.9],
              'max_depth': [3, 4, 5, 6, 8, 10, 12, 15],
              'min_child_weight': [3, 4, 5, 6, 7],
              'gamma': [0.2, 0.3, 0.4, 0.5, 0.6],
              'colsample_bytree': [0.4, 0.5, 0.6, 0.65, 0.7]}
RS_XGB = RandomizedSearchCV(estimator=XGBClassifier(early_stopping_rounds=30), param_distributions=param_grid, n_iter=3, cv=6, n_jobs=-1, random_state=0)
RS_XGB.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], verbose=0)
RS_XGB.best_params_
xgb_models = []
scores = []
feature_importances = []
skf = StratifiedKFold(n_splits=30, shuffle=True, random_state=0)
for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
    X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx])
    X_valid, y_valid = (X.iloc[val_idx], y.iloc[val_idx])
    evaluation = [(X_train, y_train), (X_valid, y_valid)]
    model = XGBClassifier(**RS_XGB.best_params_, n_jobs=-1, early_stopping_rounds=300)
    model.fit(X_train, y_train, eval_set=evaluation, verbose=0)
    val_preds = model.predict_proba(X_valid)[:, 1]
    val_score = roc_auc_score(y_valid, val_preds)
    best_iter = model.best_iteration
    # zip names with their own importances (the original comprehension crossed the lists)
    feature_importances.append(dict(zip(model.feature_names_in_, model.feature_importances_)))
    if val_score > 0.9:
        scores.append(val_score)
        xgb_models.append(model)
mean_val_auc = np.array(scores).mean(0)
catboost_models = []
scores = []
feature_importances = []
skf = StratifiedKFold(n_splits=30, shuffle=True, random_state=0)
for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
    X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx])
    X_valid, y_valid = (X.iloc[val_idx], y.iloc[val_idx])
    evaluation = [(X_train, y_train), (X_valid, y_valid)]
    model = CatBoostClassifier(subsample=0.5, n_estimators=2000, max_depth=8, learning_rate=0.15, l2_leaf_reg=5, grow_policy='Lossguide', colsample_bylevel=0.8, bagging_temperature=0.0, early_stopping_rounds=300)
    model.fit(X_train, y_train, eval_set=evaluation, verbose=0)
    val_preds = model.predict_proba(X_valid)[:, 1]
    val_score = roc_auc_score(y_valid, val_preds)
    if val_score > 0.9:
        scores.append(val_score)
        catboost_models.append(model)
    break  # only the first fold is trained here
mean_val_auc = np.array(scores).mean(0)
test = pd.read_csv('/kaggle/input/playground-series-s3e7/test.csv')
X_test = test.drop(['id'], axis=1)
scaler = RobustScaler()
# fit the scaler on the training features and only transform the test set,
# instead of refitting it on the test data as the original did
scaler.fit(data.drop(['booking_status', 'id'], axis=1))
X_test = pd.DataFrame(scaler.transform(X_test))
test_preds = []
for m in catboost_models:
    preds = m.predict_proba(X_test)[:, 1]
    test_preds.append(preds)
test_preds = np.array(test_preds).mean(0)
# keep 'id' as the index (index_col=0) and write into the target column; the
# original indexed by column position 1 and created a new column literally named 1
sub = pd.read_csv('/kaggle/input/playground-series-s3e7/sample_submission.csv', index_col=0)
sub['booking_status'] = test_preds
sub.to_csv('submission.csv')
sub.head()
code
121153285/cell_12
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.preprocessing import RobustScaler
from xgboost import XGBRegressor, XGBClassifier
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/playground-series-s3e7/train.csv')
corrmat = data.corr()
cols = corrmat.nlargest(20, 'booking_status')['booking_status'].index
cm = np.corrcoef(data[cols].values.T)
fig = plt.gcf()
fig.set_size_inches(10, 8)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
g = sns.PairGrid(data, y_vars=['booking_status'], x_vars=data.columns)
g = g.map_diag(plt.hist)
g = g.map_offdiag(sns.scatterplot)
y = data.booking_status
X = data.drop(['booking_status', 'id'], axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, test_size=0.2)
scaler = RobustScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_train = pd.DataFrame(scaler.fit_transform(X_train))
# transform (not refit) the validation split with the scaler fitted on X_train
X_val = pd.DataFrame(scaler.transform(X_val))
XGB_model = XGBRegressor(random_state=0)
XGB_scores = cross_val_score(XGB_model, X, y, cv=3, scoring='roc_auc')
XGB_mean_score = XGB_scores.mean()
XGB_std_score = XGB_scores.std()
# the metric is ROC AUC (scoring='roc_auc'); the original print labels said RMSE
print(f'AUC = {XGB_scores}')
print(f'Mean AUC = {XGB_mean_score:.2f}')
print(f'StDev AUC = {XGB_std_score:.2f}')
code
34146326/cell_42
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# the cell defining these short aliases was stripped from this dump; the
# imports below are assumed reconstructions, not the author's verbatim code
from sklearn.compose import ColumnTransformer as ct
from sklearn.feature_extraction.text import CountVectorizer as cv
from sklearn.preprocessing import OneHotEncoder as ohe
from sklearn.pipeline import make_pipeline as mp
from sklearn.tree import DecisionTreeClassifier as dtc
from sklearn.ensemble import RandomForestClassifier as rfc

sns.set()
data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
transformer = ct(transformers=[('review_counts', cv(), 'verified_reviews'), ('variation_dummies', ohe(), ['variation'])], remainder='passthrough')
pipe = mp(transformer, dtc(random_state=42))
pipe
# x_train/x_test/y_train/y_test come from a train/test split cell that was
# stripped from this dump
(x_train.shape, y_train.shape)
pipe.fit(x_train, y_train)
pred = pipe.predict(x_test)
pipe = mp(transformer, rfc(n_estimators=150, random_state=42))
pipe.fit(x_train, y_train)
pred = pipe.predict(x_test)
sns.heatmap(confusion_matrix(y_test, pred), annot=True, fmt='.0f')
code
34146326/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
data.head()
code
34146326/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

sns.set()
data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
sns.countplot(x='feedback', data=data)
code
34146326/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
data.describe()
code
34146326/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
(x_train.shape, y_train.shape)
code
34146326/cell_4
[ "text_plain_output_1.png" ]
import os

os.listdir('../input/amazon-alexa-reviews')
code
34146326/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
x = data[['rating', 'variation', 'verified_reviews']].copy()
y = data.feedback
x.head()
code
34146326/cell_33
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

sns.set()
data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
# ct, cv, ohe, mp and dtc are aliases whose import cell was stripped from this dump
transformer = ct(transformers=[('review_counts', cv(), 'verified_reviews'), ('variation_dummies', ohe(), ['variation'])], remainder='passthrough')
pipe = mp(transformer, dtc(random_state=42))
pipe
(x_train.shape, y_train.shape)
pipe.fit(x_train, y_train)
pred = pipe.predict(x_test)
sns.heatmap(confusion_matrix(y_test, pred), annot=True, fmt='.0f')
code
34146326/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.head()
code
34146326/cell_41
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix

# ct, cv, ohe, mp, dtc and rfc are aliases whose import cell was stripped from this dump
transformer = ct(transformers=[('review_counts', cv(), 'verified_reviews'), ('variation_dummies', ohe(), ['variation'])], remainder='passthrough')
pipe = mp(transformer, dtc(random_state=42))
pipe
(x_train.shape, y_train.shape)
pipe.fit(x_train, y_train)
pred = pipe.predict(x_test)
pipe = mp(transformer, rfc(n_estimators=150, random_state=42))
pipe.fit(x_train, y_train)
pred = pipe.predict(x_test)
accuracy_score(y_test, pred)
code
34146326/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

sns.set()
data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
sns.countplot(x='rating', data=data, hue='feedback')
code
34146326/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
data.head()
code
34146326/cell_18
[ "text_html_output_1.png" ]
# ct, cv, ohe, mp and dtc are aliases whose import cell was stripped from this dump
transformer = ct(transformers=[('review_counts', cv(), 'verified_reviews'), ('variation_dummies', ohe(), ['variation'])], remainder='passthrough')
pipe = mp(transformer, dtc(random_state=42))
pipe
code
34146326/cell_32
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix

# ct, cv, ohe, mp and dtc are aliases whose import cell was stripped from this dump
transformer = ct(transformers=[('review_counts', cv(), 'verified_reviews'), ('variation_dummies', ohe(), ['variation'])], remainder='passthrough')
pipe = mp(transformer, dtc(random_state=42))
pipe
(x_train.shape, y_train.shape)
pipe.fit(x_train, y_train)
pred = pipe.predict(x_test)
accuracy_score(y_test, pred)
code
34146326/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
data.info()
code
34146326/cell_38
[ "text_html_output_1.png" ]
# ct, cv, ohe, mp, dtc and rfc are aliases whose import cell was stripped from this dump
transformer = ct(transformers=[('review_counts', cv(), 'verified_reviews'), ('variation_dummies', ohe(), ['variation'])], remainder='passthrough')
pipe = mp(transformer, dtc(random_state=42))
pipe
(x_train.shape, y_train.shape)
pipe.fit(x_train, y_train)
pred = pipe.predict(x_test)
pipe = mp(transformer, rfc(n_estimators=150, random_state=42))
pipe.fit(x_train, y_train)
code
34146326/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

sns.set()
data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
plt.figure(figsize=(24, 12))
sns.countplot(x='variation', hue='feedback', data=data)
code
34146326/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
# ct, cv, ohe, mp and dtc are aliases whose import cell was stripped from this dump
transformer = ct(transformers=[('review_counts', cv(), 'verified_reviews'), ('variation_dummies', ohe(), ['variation'])], remainder='passthrough')
pipe = mp(transformer, dtc(random_state=42))
pipe
(x_train.shape, y_train.shape)
pipe.fit(x_train, y_train)
code
34146326/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

sns.set()
data = pd.read_csv('../input/amazon-alexa-reviews/amazon_alexa.tsv', sep='\t')
data.drop(columns=['date'], inplace=True)
# distplot is deprecated in modern seaborn; histplot(..., kde=True) is its replacement
sns.histplot(data['rating'], kde=True)
code
50224568/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
plt.scatter(dftrain['299'], dftrain['1'])
plt.title('My PCA graph')
plt.xlabel('0 -{0}%'.format(dftrain['299']))
plt.ylabel('target -{0}%'.format(dftrain['1']))
code
50224568/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y = sns.regplot(x='1', y='target', data=dftrain)
code
50224568/cell_8
[ "text_html_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns

def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr

dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y = sns.regplot(x='1', y='target', data=dftrain)
detailed_features = []
for feature in range(300):
    y = sns.regplot(x=str(feature), y='target', data=dftrain)
    detailed_features.append({'feature': feature, 'slope': getCorr(dftrain['target'], dftrain[str(feature)])})
code
50224568/cell_17
[ "text_html_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns
import tensorflow as tf

def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr

def getSlope(df):
    return abs(df['slope'])

dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y = sns.regplot(x='1', y='target', data=dftrain)
detailed_features = []
for feature in range(300):
    y = sns.regplot(x=str(feature), y='target', data=dftrain)
    detailed_features.append({'feature': feature, 'slope': getCorr(dftrain['target'], dftrain[str(feature)])})
detailed_features.sort(key=getSlope, reverse=True)
features_to_save = []
for iteration in range(6):
    features_to_save.append(detailed_features[iteration]['feature'])

def getDfProcessed(df_toProcess, features_to_save):
    df_processed = pd.DataFrame()
    for iteration in features_to_save:
        feature = df_toProcess[str(iteration)].values
        df_processed[str(iteration)] = feature
    return df_processed

dftrain_processed = getDfProcessed(dftrain, features_to_save)
y_train = dftrain.pop('target')
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(7, activation=tf.nn.relu),
    tf.keras.layers.Dense(2, activation=tf.nn.softmax)])
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.fit(dftrain_processed, y_train, epochs=10)
code
50224568/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import pearsonr
import pandas as pd
import seaborn as sns
import tensorflow as tf

def getCorr(x, y):
    corr, _ = pearsonr(x, y)
    return corr

def getSlope(df):
    return abs(df['slope'])

dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
y = sns.regplot(x='1', y='target', data=dftrain)
detailed_features = []
for feature in range(300):
    y = sns.regplot(x=str(feature), y='target', data=dftrain)
    detailed_features.append({'feature': feature, 'slope': getCorr(dftrain['target'], dftrain[str(feature)])})
detailed_features.sort(key=getSlope, reverse=True)
features_to_save = []
for iteration in range(6):
    features_to_save.append(detailed_features[iteration]['feature'])

def getDfProcessed(df_toProcess, features_to_save):
    df_processed = pd.DataFrame()
    for iteration in features_to_save:
        feature = df_toProcess[str(iteration)].values
        df_processed[str(iteration)] = feature
    return df_processed

dftrain_processed = getDfProcessed(dftrain, features_to_save)
dftest_processed = getDfProcessed(dftest, features_to_save)
y_train = dftrain.pop('target')
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(7, activation=tf.nn.relu), tf.keras.layers.Dense(2, activation=tf.nn.softmax)])
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.fit(dftrain_processed, y_train, epochs=10)
predictions_list = list(model.predict(dftest_processed))

def getPredictions(prediction_list):
    prediction_list_processced = []
    for iteration in prediction_list:
        prediction_list_processced.append(round(iteration[1]))
    return prediction_list_processced

dfsubmission = pd.DataFrame()
dfsubmission['id'] = dftest['id']
dfsubmission['target'] = getPredictions(predictions_list)
dfsubmission
code
50224568/cell_5
[ "image_output_1.png" ]
import pandas as pd
dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv')
dftest = pd.read_csv('../input/dont-overfit-ii/test.csv')
dftrain
code
105197156/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_feather('../input/amex-train-dropped-1/train_dropped.feather')
typeTable = pd.concat([train.filter(regex='D_').dtypes, train.filter(regex='S_').dtypes], axis=1)
all_cols = train.columns.to_list()
cat_cols = ['B_30', 'B_38', 'D_114', 'D_116', 'D_117', 'D_120', 'D_126', 'D_63', 'D_64', 'D_66', 'D_68']
cat_cols_save_str = ['D_63', 'D_64']
str_cols = ['customer_ID']
date_cols = ['S_2']
df_dtype = {col: 'float16' for col in all_cols if col not in cat_cols + str_cols + date_cols}
for col in str_cols + cat_cols_save_str:
    df_dtype[col] = 'str'
nulls = train.isnull().sum().to_frame()
count_row = train.shape[0]
nulls['Index'] = nulls.index
nulls['Class'] = nulls['Index'].str.rpartition('_')[0]
nulls['ClassID'] = nulls['Index'].str.rpartition('_')[2]
nulls = nulls.reindex(columns=['Index', 'Class', 'ClassID', 0])
nulls = nulls.sort_values(by=['Class', 'ClassID'])
for index, row in nulls.iterrows():
    if row[0] != 0:
        train[index] = train[index].fillna(train[index].median())
nulls = train.isnull().sum().to_frame()
count_row = train.shape[0]
nulls['Index'] = nulls.index
nulls['Class'] = nulls['Index'].str.rpartition('_')[0]
nulls['ClassID'] = nulls['Index'].str.rpartition('_')[2]
nulls = nulls.reindex(columns=['Index', 'Class', 'ClassID', 0])
nulls = nulls.sort_values(by=['Class', 'ClassID'])
print('---------------after fill-----------------')
for index, row in nulls.iterrows():
    if row[0] != 0:
        train[index] = train[index].fillna(train[index].median())
print('---------------Done fill-----------------')
code
105197156/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_feather('../input/amex-train-dropped-1/train_dropped.feather')
typeTable = pd.concat([train.filter(regex='D_').dtypes, train.filter(regex='S_').dtypes], axis=1)
with pd.option_context('display.max_rows', 1000):
    print('Type of D:')
    print(train.filter(regex='D_').dtypes)
    print('Type of S:')
    print(train.filter(regex='S_').dtypes)
    print('Type of P:')
    print(train.filter(regex='P_').dtypes)
    print('Type of B:')
    print(train.filter(regex='B_').dtypes)
    print('Type of R:')
    print(train.filter(regex='R_').dtypes)
code
105197156/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_feather('../input/amex-train-dropped-1/train_dropped.feather')
typeTable = pd.concat([train.filter(regex='D_').dtypes, train.filter(regex='S_').dtypes], axis=1)
all_cols = train.columns.to_list()
cat_cols = ['B_30', 'B_38', 'D_114', 'D_116', 'D_117', 'D_120', 'D_126', 'D_63', 'D_64', 'D_66', 'D_68']
cat_cols_save_str = ['D_63', 'D_64']
str_cols = ['customer_ID']
date_cols = ['S_2']
df_dtype = {col: 'float16' for col in all_cols if col not in cat_cols + str_cols + date_cols}
for col in str_cols + cat_cols_save_str:
    df_dtype[col] = 'str'
with pd.option_context('display.max_rows', 1000):
    print('Count of NaN of D: \n' + str(train.filter(regex='D_').isna().sum()))
    print('Count of NaN of S: \n' + str(train.filter(regex='S_').isna().sum()))
    print('Count of NaN of P: \n' + str(train.filter(regex='P_').isna().sum()))
    print('Count of NaN of B: \n' + str(train.filter(regex='B_').isna().sum()))
    print('Count of NaN of R: \n' + str(train.filter(regex='R_').isna().sum()))
code
105197156/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_feather('../input/amex-train-dropped-1/train_dropped.feather')
typeTable = pd.concat([train.filter(regex='D_').dtypes, train.filter(regex='S_').dtypes], axis=1)
all_cols = train.columns.to_list()
cat_cols = ['B_30', 'B_38', 'D_114', 'D_116', 'D_117', 'D_120', 'D_126', 'D_63', 'D_64', 'D_66', 'D_68']
cat_cols_save_str = ['D_63', 'D_64']
str_cols = ['customer_ID']
date_cols = ['S_2']
df_dtype = {col: 'float16' for col in all_cols if col not in cat_cols + str_cols + date_cols}
for col in str_cols + cat_cols_save_str:
    df_dtype[col] = 'str'
for col in cat_cols_save_str:
    print(col)
    print(train[col].unique())
code
105197156/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_feather('../input/amex-train-dropped-1/train_dropped.feather')
train.info()
code
105197156/cell_14
[ "text_plain_output_1.png" ]
""" for col in cat_cols: arr = np.array(train[col].unique()) arr.sort() print (arr) label = [] for val in range(int(arr[0]), int(arr[len(arr)-1])+1): label.append(col + ' ' + str(int(val))) print(label) """
code
105197156/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_feather('../input/amex-train-dropped-1/train_dropped.feather')
typeTable = pd.concat([train.filter(regex='D_').dtypes, train.filter(regex='S_').dtypes], axis=1)
all_cols = train.columns.to_list()
cat_cols = ['B_30', 'B_38', 'D_114', 'D_116', 'D_117', 'D_120', 'D_126', 'D_63', 'D_64', 'D_66', 'D_68']
cat_cols_save_str = ['D_63', 'D_64']
str_cols = ['customer_ID']
date_cols = ['S_2']
df_dtype = {col: 'float16' for col in all_cols if col not in cat_cols + str_cols + date_cols}
for col in str_cols + cat_cols_save_str:
    df_dtype[col] = 'str'
nulls = train.isnull().sum().to_frame()
count_row = train.shape[0]
nulls['Index'] = nulls.index
nulls['Class'] = nulls['Index'].str.rpartition('_')[0]
nulls['ClassID'] = nulls['Index'].str.rpartition('_')[2]
nulls = nulls.reindex(columns=['Index', 'Class', 'ClassID', 0])
nulls = nulls.sort_values(by=['Class', 'ClassID'])
print('---------------before fill-----------------')
for index, row in nulls.iterrows():
    if row[0] != 0:
        print(index, row[0], round(row[0] / count_row * 100, 2))
        train[index] = train[index].fillna(train[index].median())
print('---------------Done fill-----------------')
code
105197156/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_feather('../input/amex-train-dropped-1/train_dropped.feather')
typeTable = pd.concat([train.filter(regex='D_').dtypes, train.filter(regex='S_').dtypes], axis=1)
with pd.option_context('display.max_rows', 1000):
    print('Count of NaN of D: \n' + str(train.filter(regex='D_').isna().sum()))
    print('Count of NaN of S: \n' + str(train.filter(regex='S_').isna().sum()))
    print('Count of NaN of P: \n' + str(train.filter(regex='P_').isna().sum()))
    print('Count of NaN of B: \n' + str(train.filter(regex='B_').isna().sum()))
    print('Count of NaN of R: \n' + str(train.filter(regex='R_').isna().sum()))
code
104124784/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.isnull().sum().sort_values(ascending=False) * 100 / df.shape[0]
df.isnull().sum()
code
104124784/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
sns.set()
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.isnull().sum().sort_values(ascending=False) * 100 / df.shape[0]
sns.histplot(data=df['Age'], color='teal', kde=True)
plt.show()
code