path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
105190429/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.describe().T | code |
90131319/cell_4 | [
"text_html_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train/'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
print('Training data shape:', train.shape)
display(train.head()) | code |
90131319/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train/'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
plt.figure(figsize=(8, 6))
sns.histplot(data=train, x='data source', hue='class')
plt.show()
data_classes = train['class'].unique()
df_summary_count = pd.DataFrame()
for dataset in ['cohen', 'fig1', 'actmed', 'sirm', 'ricord', 'rsna', 'stonybrook', 'bimcv', 'rnsa']:
num_negative = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[0]), 'filename'].count()
if len(data_classes) == 2:
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Negative': [num_negative]})
elif len(data_classes) == 3:
num_pneumonia = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[2]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Pneumonia': [num_pneumonia], 'Negative': [num_negative]})
else:
print(f'Error! Not accounting for {len(data_classes)} no. of classes.')
df_summary_count = pd.concat([df_summary_count, df_new])
display(df_summary_count) | code |
90131319/cell_11 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train/'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
data_classes = train['class'].unique()
df_summary_count = pd.DataFrame()
for dataset in ['cohen', 'fig1', 'actmed', 'sirm', 'ricord', 'rsna', 'stonybrook', 'bimcv', 'rnsa']:
num_negative = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[0]), 'filename'].count()
if len(data_classes) == 2:
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Negative': [num_negative]})
elif len(data_classes) == 3:
num_pneumonia = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[2]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Pneumonia': [num_pneumonia], 'Negative': [num_negative]})
df_summary_count = pd.concat([df_summary_count, df_new])
patient_distribution = train.groupby(['patient id', 'data source', 'class']).count().reset_index()
patient_distribution.rename(columns={'filename': 'num_patients'}, inplace=True)
num_patients_bydata = patient_distribution[['data source', 'num_patients']].groupby(['data source']).count()
num_patients_byclass = patient_distribution[['class', 'num_patients']].groupby(['class']).count()
print('Images are saved at:', images_path)
fig, axs = plt.subplots(3, 3, figsize=(18, 14))
for i in range(3):
for j in range(3):
if j == 0:
file_name, class_label = train[train['class'] == data_classes[0]].iloc[i, [1, 2]]
elif j == 1:
file_name, class_label = train[train['class'] == data_classes[1]].iloc[i, [1, 2]]
elif j == 2 and len(data_classes) == 3:
file_name, class_label = train[train['class'] == data_classes[2]].iloc[i, [1, 2]]
else:
print('Out of bounds')
image_file = os.path.join(images_path, file_name)
img = Image.open(image_file)
print('Original:', 3 * i + j, np.asarray(img).shape)
img = img.convert('L')
axs[i, j].set_title(f'Class: {class_label} - Image Size: {np.asarray(img).shape}')
axs[i, j].axis('off')
axs[i, j].imshow(img, cmap='gray')
plt.show() | code |
90131319/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train/'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
data_classes = train['class'].unique()
df_summary_count = pd.DataFrame()
for dataset in ['cohen', 'fig1', 'actmed', 'sirm', 'ricord', 'rsna', 'stonybrook', 'bimcv', 'rnsa']:
num_negative = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[0]), 'filename'].count()
if len(data_classes) == 2:
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Negative': [num_negative]})
elif len(data_classes) == 3:
num_pneumonia = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[2]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Pneumonia': [num_pneumonia], 'Negative': [num_negative]})
df_summary_count = pd.concat([df_summary_count, df_new])
patient_distribution = train.groupby(['patient id', 'data source', 'class']).count().reset_index()
patient_distribution.rename(columns={'filename': 'num_patients'}, inplace=True)
print('No. of unique patients by data source:')
num_patients_bydata = patient_distribution[['data source', 'num_patients']].groupby(['data source']).count()
display(num_patients_bydata)
print('No. of unqiue patients by class:')
num_patients_byclass = patient_distribution[['class', 'num_patients']].groupby(['class']).count()
display(num_patients_byclass) | code |
90131319/cell_12 | [
"text_plain_output_1.png"
] | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train/'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
data_classes = train['class'].unique()
df_summary_count = pd.DataFrame()
for dataset in ['cohen', 'fig1', 'actmed', 'sirm', 'ricord', 'rsna', 'stonybrook', 'bimcv', 'rnsa']:
num_negative = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[0]), 'filename'].count()
if len(data_classes) == 2:
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Negative': [num_negative]})
elif len(data_classes) == 3:
num_pneumonia = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[2]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Pneumonia': [num_pneumonia], 'Negative': [num_negative]})
df_summary_count = pd.concat([df_summary_count, df_new])
patient_distribution = train.groupby(['patient id', 'data source', 'class']).count().reset_index()
patient_distribution.rename(columns={'filename': 'num_patients'}, inplace=True)
num_patients_bydata = patient_distribution[['data source', 'num_patients']].groupby(['data source']).count()
num_patients_byclass = patient_distribution[['class', 'num_patients']].groupby(['class']).count()
def crop_resize_image(gray_img, final_size=224):
""" Set the new dimensions so the cropped image is a square
"""
width, height = gray_img.size
diff = abs(width - height)
left, right, top, bottom = (0, 0, 0, 0)
if diff % 2 == 0:
if width > height:
bottom = height
left = diff / 2
right = width - left
elif height > width:
top = diff / 2
bottom = height - top
right = width
elif width > height:
bottom = height
left = diff / 2 + 0.5
right = width - left + 1
elif height > width:
top = diff / 2 + 0.5
bottom = height - top + 1
right = width
img_cropped = gray_img.crop((left, top, right, bottom))
img_final = img_cropped.resize((final_size, final_size))
return img_final
### Look at a few images to explore:
# a) what do the scans look like for each class?
# b) what is the image resolution?
# c) is there anything noticeable across classes / images?
# Kaggle dataset
print('Images are saved at:', images_path)
fig, axs = plt.subplots(3, 3, figsize = (18,14))
for i in range(3):
for j in range(3):
if j==0:
file_name, class_label = train[train['class']==data_classes[0]].iloc[i,[1,2]]
elif j==1:
file_name, class_label = train[train['class']==data_classes[1]].iloc[i,[1,2]]
elif j==2 and len(data_classes)==3:
file_name, class_label = train[train['class']==data_classes[2]].iloc[i,[1,2]]
else:
print('Out of bounds')
image_file = os.path.join(images_path, file_name)
img = Image.open(image_file)
print('Original:', (3*i+j), np.asarray(img).shape)
# Greyscale convert
img = img.convert('L')
axs[i,j].set_title(f'Class: {class_label} - Image Size: {np.asarray(img).shape}')
axs[i,j].axis('off')
axs[i,j].imshow(img, cmap = 'gray')
plt.show()
final_size = 224
fig, axs = plt.subplots(3, 3, figsize=(18, 14))
for i in range(3):
for j in range(3):
if j == 0:
file_name, class_label = train[train['class'] == data_classes[0]].iloc[i, [1, 2]]
elif j == 1:
file_name, class_label = train[train['class'] == data_classes[1]].iloc[i, [1, 2]]
elif j == 2 and len(data_classes) == 3:
file_name, class_label = train[train['class'] == data_classes[2]].iloc[i, [1, 2]]
else:
print('Out of bounds')
image_file = os.path.join(images_path, file_name)
img = Image.open(image_file)
img = img.convert('L')
img = crop_resize_image(img, final_size=224)
axs[i, j].set_title(f'Class: {class_label} - Image Size: {np.asarray(img_final).shape}')
axs[i, j].axis('off')
axs[i, j].imshow(img_final, cmap='gray')
plt.show() | code |
90131319/cell_5 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import os
import pandas as pd
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train/'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
print('Classes:\n', train['class'].unique())
print('Data sources:\n', train['data source'].unique())
print('---------------------------------')
print('No. of unique patients:', train['patient id'].nunique(), 'out of', train.shape[0], 'images.') | code |
16154664/cell_9 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
train['SalePrice'].hist(bins=50)
y = train['SalePrice'].reset_index(drop=True) | code |
16154664/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.describe() | code |
16154664/cell_11 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x.info() | code |
16154664/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
original_y = train['SalePrice'].reset_index(drop=True)
train['SalePrice'].hist(bins=50) | code |
16154664/cell_14 | [
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x['MSSubClass'] = x['MSSubClass'].apply(str)
x['YrSold'] = x['YrSold'].astype(str)
x['MoSold'] = x['MoSold'].astype(str)
x['Functional'] = x['Functional'].fillna('Typ')
x['Electrical'] = x['Electrical'].fillna('SBrkr')
x['KitchenQual'] = x['KitchenQual'].fillna('TA')
x['Exterior1st'] = x['Exterior1st'].fillna(x['Exterior1st'].mode()[0])
x['Exterior2nd'] = x['Exterior2nd'].fillna(x['Exterior2nd'].mode()[0])
x['SaleType'] = x['SaleType'].fillna(x['SaleType'].mode()[0])
x['MasVnrArea'] = x['MasVnrArea'].fillna(x['MasVnrArea'].mode()[0])
x['LotFrontage'] = x.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
x[col] = x[col].fillna(0)
for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']:
x[col] = x[col].fillna('None')
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
x[col] = x[col].fillna(0)
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
x[col] = x[col].fillna('None')
objects = []
for i in x.columns:
if x[i].dtype == object:
objects.append(i)
x.update(x[objects].fillna('None'))
numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numerics = []
for i in x.columns:
if x[i].dtype in numeric_dtypes:
numerics.append(i)
x.update(x[numerics].fillna(0))
x['total_sf'] = x['TotalBsmtSF'] + x['BsmtFinSF1'] + x['BsmtFinSF2'] + x['1stFlrSF'] + x['2ndFlrSF']
x['total_bathrooms'] = x['FullBath'] + 0.5 * x['HalfBath'] + x['BsmtFullBath'] + 0.5 * x['BsmtHalfBath']
x['total_porch_sf'] = x['OpenPorchSF'] + x['3SsnPorch'] + x['EnclosedPorch'] + x['ScreenPorch'] + x['WoodDeckSF']
x['hasPool'] = x['PoolArea'].apply(lambda x: 1 if x > 0 else 0)
x['has2ndFloor'] = x['2ndFlrSF'].apply(lambda x: 1 if x > 0 else 0)
x['hasGarage'] = x['GarageArea'].apply(lambda x: 1 if x > 0 else 0)
x['hasBasement'] = x['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0)
x['hasFireplace'] = x['Fireplaces'].apply(lambda x: 1 if x > 0 else 0)
x.describe() | code |
16154664/cell_10 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x.describe() | code |
16154664/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x['MSSubClass'] = x['MSSubClass'].apply(str)
x['YrSold'] = x['YrSold'].astype(str)
x['MoSold'] = x['MoSold'].astype(str)
x['Functional'] = x['Functional'].fillna('Typ')
x['Electrical'] = x['Electrical'].fillna('SBrkr')
x['KitchenQual'] = x['KitchenQual'].fillna('TA')
x['Exterior1st'] = x['Exterior1st'].fillna(x['Exterior1st'].mode()[0])
x['Exterior2nd'] = x['Exterior2nd'].fillna(x['Exterior2nd'].mode()[0])
x['SaleType'] = x['SaleType'].fillna(x['SaleType'].mode()[0])
x['MasVnrArea'] = x['MasVnrArea'].fillna(x['MasVnrArea'].mode()[0])
x['LotFrontage'] = x.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
x[col] = x[col].fillna(0)
for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']:
x[col] = x[col].fillna('None')
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
x[col] = x[col].fillna(0)
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
x[col] = x[col].fillna('None')
objects = []
for i in x.columns:
if x[i].dtype == object:
objects.append(i)
x.update(x[objects].fillna('None'))
numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numerics = []
for i in x.columns:
if x[i].dtype in numeric_dtypes:
numerics.append(i)
x.update(x[numerics].fillna(0))
x.info() | code |
16154664/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.describe() | code |
74045329/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df
slct_test_df = test_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_test_df | code |
74045329/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df | code |
74045329/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df | code |
74045329/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
train_df.dtypes | code |
74045329/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
74045329/cell_7 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
plt.figure(figsize=(12, 10))
sns.heatmap(cor) | code |
74045329/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
slct_train_df = train_df[['date', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']]
slct_train_df | code |
74045329/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
train_df['date'] = train_df['date'].str.replace('T000000', '')
train_df | code |
74045329/cell_12 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
import matplotlib.pyplot as plt
import seaborn as sns
cor = train_df.corr()
train_df.dtypes
X = train_df.drop({'price', 'yr_renovated', 'date', 'lat', 'waterfront'}, axis=1)
y = train_df['price']
X_train_valid, X_test, y_train_valid, y_test = train_test_split(X, y, test_size=0.33)
X_train_valid | code |
74045329/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/train.csv', index_col=0)
train_df
test_df = pd.read_csv('/kaggle/input/1056lab-house-price-prediction/test.csv')
test_df
test_df['date'] = test_df['date'].str.replace('T000000', '')
test_df | code |
18112246/cell_21 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
df.columns
len(df.columns)
dep_var = 'SalePrice'
cat_vars = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'Functional', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal', 'MoSold', 'YrSold', 'SaleType', 'SaleCondition', 'BsmtQual', 'KitchenQual']
cont_vars = ['1stFlrSF', '2ndFlrSF', '3SsnPorch', 'BedroomAbvGr', 'EnclosedPorch', 'Fireplaces', 'FullBath', 'GarageYrBlt', 'GrLivArea', 'HalfBath', 'KitchenAbvGr', 'LotArea', 'LotFrontage', 'LowQualFinSF', 'MasVnrArea', 'OpenPorchSF', 'PoolArea', 'ScreenPorch', 'TotRmsAbvGrd', 'WoodDeckSF']
procs = [FillMissing, Categorify, Normalize]
data = TabularList.from_df(df, cat_names=cat_vars, cont_names=cont_vars, procs=procs).split_by_rand_pct().label_from_df(cols=dep_var, label_cls=FloatList, log=True).add_test(TabularList.from_df(test_df, cat_names=cat_vars, cont_names=cont_vars)).databunch()
max_log_y = np.log(np.max(df[dep_var]) * 1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000, 500], y_range=y_range, metrics=exp_rmspe)
learn.lr_find()
learn.fit_one_cycle(10, max_lr=0.01)
learn.save('stage-1')
learn.load('stage-1')
learn.lr_find()
learn.fit_one_cycle(5, max_lr=0.005) | code |
18112246/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
df.columns
len(df.columns)
dep_var = 'SalePrice'
cat_vars = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'Functional', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal', 'MoSold', 'YrSold', 'SaleType', 'SaleCondition', 'BsmtQual', 'KitchenQual']
cont_vars = ['1stFlrSF', '2ndFlrSF', '3SsnPorch', 'BedroomAbvGr', 'EnclosedPorch', 'Fireplaces', 'FullBath', 'GarageYrBlt', 'GrLivArea', 'HalfBath', 'KitchenAbvGr', 'LotArea', 'LotFrontage', 'LowQualFinSF', 'MasVnrArea', 'OpenPorchSF', 'PoolArea', 'ScreenPorch', 'TotRmsAbvGrd', 'WoodDeckSF']
procs = [FillMissing, Categorify, Normalize]
data = TabularList.from_df(df, cat_names=cat_vars, cont_names=cont_vars, procs=procs).split_by_rand_pct().label_from_df(cols=dep_var, label_cls=FloatList, log=True).add_test(TabularList.from_df(test_df, cat_names=cat_vars, cont_names=cont_vars)).databunch()
max_log_y = np.log(np.max(df[dep_var]) * 1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000, 500], y_range=y_range, metrics=exp_rmspe)
learn.lr_find()
learn.fit_one_cycle(10, max_lr=0.01)
learn.save('stage-1')
learn.load('stage-1')
learn.lr_find()
learn.fit_one_cycle(5, max_lr=0.005)
learn.save('stage-2')
learn.lr_find()
learn.recorder.plot() | code |
18112246/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
df.columns
len(df.columns)
dep_var = 'SalePrice'
cat_vars = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'Functional', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal', 'MoSold', 'YrSold', 'SaleType', 'SaleCondition', 'BsmtQual', 'KitchenQual']
cont_vars = ['1stFlrSF', '2ndFlrSF', '3SsnPorch', 'BedroomAbvGr', 'EnclosedPorch', 'Fireplaces', 'FullBath', 'GarageYrBlt', 'GrLivArea', 'HalfBath', 'KitchenAbvGr', 'LotArea', 'LotFrontage', 'LowQualFinSF', 'MasVnrArea', 'OpenPorchSF', 'PoolArea', 'ScreenPorch', 'TotRmsAbvGrd', 'WoodDeckSF']
procs = [FillMissing, Categorify, Normalize]
data = TabularList.from_df(df, cat_names=cat_vars, cont_names=cont_vars, procs=procs).split_by_rand_pct().label_from_df(cols=dep_var, label_cls=FloatList, log=True).add_test(TabularList.from_df(test_df, cat_names=cat_vars, cont_names=cont_vars)).databunch()
max_log_y = np.log(np.max(df[dep_var]) * 1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000, 500], y_range=y_range, metrics=exp_rmspe)
learn.lr_find()
learn.fit_one_cycle(10, max_lr=0.01)
learn.save('stage-1')
learn.load('stage-1')
learn.lr_find()
learn.recorder.plot() | code |
18112246/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.columns
len(df.columns) | code |
18112246/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
df.columns
len(df.columns)
dep_var = 'SalePrice'
cat_vars = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'Functional', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal', 'MoSold', 'YrSold', 'SaleType', 'SaleCondition', 'BsmtQual', 'KitchenQual']
cont_vars = ['1stFlrSF', '2ndFlrSF', '3SsnPorch', 'BedroomAbvGr', 'EnclosedPorch', 'Fireplaces', 'FullBath', 'GarageYrBlt', 'GrLivArea', 'HalfBath', 'KitchenAbvGr', 'LotArea', 'LotFrontage', 'LowQualFinSF', 'MasVnrArea', 'OpenPorchSF', 'PoolArea', 'ScreenPorch', 'TotRmsAbvGrd', 'WoodDeckSF']
procs = [FillMissing, Categorify, Normalize]
data = TabularList.from_df(df, cat_names=cat_vars, cont_names=cont_vars, procs=procs).split_by_rand_pct().label_from_df(cols=dep_var, label_cls=FloatList, log=True).add_test(TabularList.from_df(test_df, cat_names=cat_vars, cont_names=cont_vars)).databunch()
max_log_y = np.log(np.max(df[dep_var]) * 1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000, 500], y_range=y_range, metrics=exp_rmspe)
learn.lr_find()
learn.fit_one_cycle(10, max_lr=0.01)
learn.save('stage-1')
learn.load('stage-1')
learn.lr_find()
learn.fit_one_cycle(5, max_lr=0.005)
learn.save('stage-2')
learn.lr_find()
learn.fit_one_cycle(5, 0.0003)
learn.save('stage-3')
learn.lr_find()
learn.recorder.plot() | code |
18112246/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18112246/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.columns
len(df.columns)
df['BsmtHalfBath'].unique() | code |
18112246/cell_16 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
df.columns
len(df.columns)
dep_var = 'SalePrice'
cat_vars = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'Functional', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal', 'MoSold', 'YrSold', 'SaleType', 'SaleCondition', 'BsmtQual', 'KitchenQual']
cont_vars = ['1stFlrSF', '2ndFlrSF', '3SsnPorch', 'BedroomAbvGr', 'EnclosedPorch', 'Fireplaces', 'FullBath', 'GarageYrBlt', 'GrLivArea', 'HalfBath', 'KitchenAbvGr', 'LotArea', 'LotFrontage', 'LowQualFinSF', 'MasVnrArea', 'OpenPorchSF', 'PoolArea', 'ScreenPorch', 'TotRmsAbvGrd', 'WoodDeckSF']
procs = [FillMissing, Categorify, Normalize]
data = TabularList.from_df(df, cat_names=cat_vars, cont_names=cont_vars, procs=procs).split_by_rand_pct().label_from_df(cols=dep_var, label_cls=FloatList, log=True).add_test(TabularList.from_df(test_df, cat_names=cat_vars, cont_names=cont_vars)).databunch()
max_log_y = np.log(np.max(df[dep_var]) * 1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000, 500], y_range=y_range, metrics=exp_rmspe)
learn.lr_find()
learn.recorder.plot() | code |
18112246/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.head() | code |
18112246/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
df.columns
len(df.columns)
dep_var = 'SalePrice'
cat_vars = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'Functional', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal', 'MoSold', 'YrSold', 'SaleType', 'SaleCondition', 'BsmtQual', 'KitchenQual']
cont_vars = ['1stFlrSF', '2ndFlrSF', '3SsnPorch', 'BedroomAbvGr', 'EnclosedPorch', 'Fireplaces', 'FullBath', 'GarageYrBlt', 'GrLivArea', 'HalfBath', 'KitchenAbvGr', 'LotArea', 'LotFrontage', 'LowQualFinSF', 'MasVnrArea', 'OpenPorchSF', 'PoolArea', 'ScreenPorch', 'TotRmsAbvGrd', 'WoodDeckSF']
procs = [FillMissing, Categorify, Normalize]
data = TabularList.from_df(df, cat_names=cat_vars, cont_names=cont_vars, procs=procs).split_by_rand_pct().label_from_df(cols=dep_var, label_cls=FloatList, log=True).add_test(TabularList.from_df(test_df, cat_names=cat_vars, cont_names=cont_vars)).databunch()
max_log_y = np.log(np.max(df[dep_var]) * 1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000, 500], y_range=y_range, metrics=exp_rmspe)
learn.lr_find()
learn.fit_one_cycle(10, max_lr=0.01) | code |
18112246/cell_24 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
df.columns
len(df.columns)
dep_var = 'SalePrice'
cat_vars = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'Functional', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal', 'MoSold', 'YrSold', 'SaleType', 'SaleCondition', 'BsmtQual', 'KitchenQual']
cont_vars = ['1stFlrSF', '2ndFlrSF', '3SsnPorch', 'BedroomAbvGr', 'EnclosedPorch', 'Fireplaces', 'FullBath', 'GarageYrBlt', 'GrLivArea', 'HalfBath', 'KitchenAbvGr', 'LotArea', 'LotFrontage', 'LowQualFinSF', 'MasVnrArea', 'OpenPorchSF', 'PoolArea', 'ScreenPorch', 'TotRmsAbvGrd', 'WoodDeckSF']
procs = [FillMissing, Categorify, Normalize]
data = TabularList.from_df(df, cat_names=cat_vars, cont_names=cont_vars, procs=procs).split_by_rand_pct().label_from_df(cols=dep_var, label_cls=FloatList, log=True).add_test(TabularList.from_df(test_df, cat_names=cat_vars, cont_names=cont_vars)).databunch()
max_log_y = np.log(np.max(df[dep_var]) * 1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000, 500], y_range=y_range, metrics=exp_rmspe)
learn.lr_find()
learn.fit_one_cycle(10, max_lr=0.01)
learn.save('stage-1')
learn.load('stage-1')
learn.lr_find()
learn.fit_one_cycle(5, max_lr=0.005)
learn.save('stage-2')
learn.lr_find()
learn.fit_one_cycle(5, 0.0003) | code |
18112246/cell_27 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
df.columns
len(df.columns)
dep_var = 'SalePrice'
cat_vars = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'Functional', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal', 'MoSold', 'YrSold', 'SaleType', 'SaleCondition', 'BsmtQual', 'KitchenQual']
cont_vars = ['1stFlrSF', '2ndFlrSF', '3SsnPorch', 'BedroomAbvGr', 'EnclosedPorch', 'Fireplaces', 'FullBath', 'GarageYrBlt', 'GrLivArea', 'HalfBath', 'KitchenAbvGr', 'LotArea', 'LotFrontage', 'LowQualFinSF', 'MasVnrArea', 'OpenPorchSF', 'PoolArea', 'ScreenPorch', 'TotRmsAbvGrd', 'WoodDeckSF']
procs = [FillMissing, Categorify, Normalize]
data = TabularList.from_df(df, cat_names=cat_vars, cont_names=cont_vars, procs=procs).split_by_rand_pct().label_from_df(cols=dep_var, label_cls=FloatList, log=True).add_test(TabularList.from_df(test_df, cat_names=cat_vars, cont_names=cont_vars)).databunch()
max_log_y = np.log(np.max(df[dep_var]) * 1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000, 500], y_range=y_range, metrics=exp_rmspe)
learn.lr_find()
learn.fit_one_cycle(10, max_lr=0.01)
learn.save('stage-1')
learn.load('stage-1')
learn.lr_find()
learn.fit_one_cycle(5, max_lr=0.005)
learn.save('stage-2')
learn.lr_find()
learn.fit_one_cycle(5, 0.0003)
learn.save('stage-3')
learn.lr_find()
learn.fit_one_cycle(5, 0.001) | code |
18112246/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.columns | code |
18146508/cell_21 | [
"image_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
# Distribution of target within each feature
# Phân bố target theo từng biến
def plot_feat_dist(df1, df2, label1, label2, feat):
    """Overlay per-feature density curves for two groups on a 10x10 grid.

    df1/df2 are the two row subsets to compare (e.g. target==0 vs target==1),
    label1/label2 their legend labels, and feat an iterable of column names
    (up to 100) to plot, one subplot per column.
    """
    sns.set_style('whitegrid')
    # NOTE(review): subplots() already builds a full axes grid; the
    # plt.subplot() calls below create fresh axes over it, exactly as the
    # original code did.
    fig, ax = plt.subplots(10, 10, figsize=(30, 30))
    for slot, column in enumerate(feat, start=1):
        plt.subplot(10, 10, slot)
        sns.distplot(df1[column], hist=False, label=label1)
        sns.distplot(df2[column], hist=False, label=label2)
        plt.xlabel(column)
    plt.show()
train0 = train.loc[train.target == 0]
train1 = train.loc[train.target == 1]
feat = train.columns.values[2:102]
feat = train.columns.values[102:202]
def train_test_dist(agg):
    """Compare the per-feature `agg` statistic (e.g. 'mean', 'std', 'skew',
    'kurtosis', as passed by the callers below) between train and test.

    NOTE(review): the body only selects the feature columns and sets the
    seaborn style; the plotting code that presumably consumed `agg` and
    `features` appears to have been stripped from this dump, so both are
    currently unused.
    """
    # Positions 2..201 are the 200 feature columns (id/target come first).
    features = train.columns.values[2:202]
    sns.set_style('whitegrid')
train_test_dist('kurtosis') | code |
18146508/cell_13 | [
"text_html_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
# Distribution of target within each feature
# Phân bố target theo từng biến
def plot_feat_dist(df1, df2, label1, label2, feat):
    """Overlay per-feature density curves for two groups on a 10x10 grid.

    df1/df2 are the two row subsets to compare (e.g. target==0 vs target==1),
    label1/label2 their legend labels, and feat an iterable of column names
    (up to 100), one subplot per column.
    """
    i = 0
    sns.set_style('whitegrid')
    fig, ax = plt.subplots(10, 10, figsize=(30, 30))
    # NOTE(review): the loop variable shadows the `feat` parameter; it works
    # because iteration captures the original iterable, but it is confusing.
    for feat in feat:
        i += 1
        plt.subplot(10, 10, i)
        sns.distplot(df1[feat], hist=False, label=label1)
        sns.distplot(df2[feat], hist=False, label=label2)
        plt.xlabel(feat)
    plt.show()
train0 = train.loc[train.target == 0]
train1 = train.loc[train.target == 1]
feat = train.columns.values[2:102]
plot_feat_dist(train0, train1, '0', '1', feat) | code |
18146508/cell_9 | [
"image_output_1.png"
# Plot a histogram for each of the first 100 feature columns
# (positions 2..101) on a 10x10 grid; `slot` is the 1-based subplot index.
slot = 1
plt.figure(figsize=(30, 30))
for i in range(2, 102):
    plt.subplot(10, 10, slot)
    train.iloc[:, i].hist()
    slot += 1
18146508/cell_4 | [
"image_output_1.png"
] | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv') | code |
18146508/cell_23 | [
"image_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
# Distribution of target within each feature
# Phân bố target theo từng biến
def plot_feat_dist(df1, df2, label1, label2, feat):
i = 0
sns.set_style('whitegrid')
fig, ax = plt.subplots(10, 10, figsize=(30, 30))
for feat in feat:
i += 1
plt.subplot(10, 10, i)
sns.distplot(df1[feat], hist=False, label=label1)
sns.distplot(df2[feat], hist=False, label=label2)
plt.xlabel(feat)
plt.show()
train0 = train.loc[train.target == 0]
train1 = train.loc[train.target == 1]
feat = train.columns.values[2:102]
feat = train.columns.values[102:202]
def train_test_dist(agg):
features = train.columns.values[2:202]
sns.set_style('whitegrid')
def train_dist(agg):
    """Compare the per-feature `agg` statistic (e.g. 'mean', 'std') between
    the target==0 and target==1 subsets of the training data.

    NOTE(review): the body only splits the data and sets the seaborn style;
    the plotting code that presumably consumed `agg`, `t0`, `t1` and
    `features` appears to have been stripped from this dump.
    """
    t0 = train.loc[train['target'] == 0]
    t1 = train.loc[train['target'] == 1]
    # Positions 2..201 are the 200 feature columns (id/target come first).
    features = train.columns.values[2:202]
    sns.set_style('whitegrid')
train_dist('mean') | code |
18146508/cell_20 | [
"image_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
# Distribution of target within each feature
# Phân bố target theo từng biến
def plot_feat_dist(df1, df2, label1, label2, feat):
i = 0
sns.set_style('whitegrid')
fig, ax = plt.subplots(10, 10, figsize=(30, 30))
for feat in feat:
i += 1
plt.subplot(10, 10, i)
sns.distplot(df1[feat], hist=False, label=label1)
sns.distplot(df2[feat], hist=False, label=label2)
plt.xlabel(feat)
plt.show()
train0 = train.loc[train.target == 0]
train1 = train.loc[train.target == 1]
feat = train.columns.values[2:102]
feat = train.columns.values[102:202]
def train_test_dist(agg):
features = train.columns.values[2:202]
sns.set_style('whitegrid')
train_test_dist('skew') | code |
18146508/cell_6 | [
"image_output_1.png"
] | test.head() | code |
18146508/cell_11 | [
"text_html_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
sns.countplot(train['target'])
print(train.target.value_counts(normalize=True)) | code |
18146508/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
# Distribution of target within each feature
# Phân bố target theo từng biến
def plot_feat_dist(df1, df2, label1, label2, feat):
i = 0
sns.set_style('whitegrid')
fig, ax = plt.subplots(10, 10, figsize=(30, 30))
for feat in feat:
i += 1
plt.subplot(10, 10, i)
sns.distplot(df1[feat], hist=False, label=label1)
sns.distplot(df2[feat], hist=False, label=label2)
plt.xlabel(feat)
plt.show()
train0 = train.loc[train.target == 0]
train1 = train.loc[train.target == 1]
feat = train.columns.values[2:102]
feat = train.columns.values[102:202]
def train_test_dist(agg):
features = train.columns.values[2:202]
sns.set_style('whitegrid')
train_test_dist('max') | code |
18146508/cell_7 | [
"image_output_1.png"
] | train.describe() | code |
18146508/cell_18 | [
"image_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
# Distribution of target within each feature
# Phân bố target theo từng biến
def plot_feat_dist(df1, df2, label1, label2, feat):
i = 0
sns.set_style('whitegrid')
fig, ax = plt.subplots(10, 10, figsize=(30, 30))
for feat in feat:
i += 1
plt.subplot(10, 10, i)
sns.distplot(df1[feat], hist=False, label=label1)
sns.distplot(df2[feat], hist=False, label=label2)
plt.xlabel(feat)
plt.show()
train0 = train.loc[train.target == 0]
train1 = train.loc[train.target == 1]
feat = train.columns.values[2:102]
feat = train.columns.values[102:202]
def train_test_dist(agg):
features = train.columns.values[2:202]
sns.set_style('whitegrid')
train_test_dist('min') | code |
18146508/cell_8 | [
"image_output_1.png"
] | test.describe() | code |
18146508/cell_16 | [
"text_html_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
# Distribution of target within each feature
# Phân bố target theo từng biến
def plot_feat_dist(df1, df2, label1, label2, feat):
i = 0
sns.set_style('whitegrid')
fig, ax = plt.subplots(10, 10, figsize=(30, 30))
for feat in feat:
i += 1
plt.subplot(10, 10, i)
sns.distplot(df1[feat], hist=False, label=label1)
sns.distplot(df2[feat], hist=False, label=label2)
plt.xlabel(feat)
plt.show()
train0 = train.loc[train.target == 0]
train1 = train.loc[train.target == 1]
feat = train.columns.values[2:102]
feat = train.columns.values[102:202]
def train_test_dist(agg):
features = train.columns.values[2:202]
sns.set_style('whitegrid')
train_test_dist('mean') | code |
18146508/cell_17 | [
"image_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
# Distribution of target within each feature
# Phân bố target theo từng biến
def plot_feat_dist(df1, df2, label1, label2, feat):
i = 0
sns.set_style('whitegrid')
fig, ax = plt.subplots(10, 10, figsize=(30, 30))
for feat in feat:
i += 1
plt.subplot(10, 10, i)
sns.distplot(df1[feat], hist=False, label=label1)
sns.distplot(df2[feat], hist=False, label=label2)
plt.xlabel(feat)
plt.show()
train0 = train.loc[train.target == 0]
train1 = train.loc[train.target == 1]
feat = train.columns.values[2:102]
feat = train.columns.values[102:202]
def train_test_dist(agg):
features = train.columns.values[2:202]
sns.set_style('whitegrid')
train_test_dist('std') | code |
18146508/cell_24 | [
"image_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
# Distribution of target within each feature
# Phân bố target theo từng biến
def plot_feat_dist(df1, df2, label1, label2, feat):
i = 0
sns.set_style('whitegrid')
fig, ax = plt.subplots(10, 10, figsize=(30, 30))
for feat in feat:
i += 1
plt.subplot(10, 10, i)
sns.distplot(df1[feat], hist=False, label=label1)
sns.distplot(df2[feat], hist=False, label=label2)
plt.xlabel(feat)
plt.show()
train0 = train.loc[train.target == 0]
train1 = train.loc[train.target == 1]
feat = train.columns.values[2:102]
feat = train.columns.values[102:202]
def train_test_dist(agg):
features = train.columns.values[2:202]
sns.set_style('whitegrid')
def train_dist(agg):
t0 = train.loc[train['target'] == 0]
t1 = train.loc[train['target'] == 1]
features = train.columns.values[2:202]
sns.set_style('whitegrid')
train_dist('std') | code |
18146508/cell_14 | [
"text_html_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
for i in range(102, 202):
slot += 1
# Distribution of target within each feature
# Phân bố target theo từng biến
def plot_feat_dist(df1, df2, label1, label2, feat):
i = 0
sns.set_style('whitegrid')
fig, ax = plt.subplots(10, 10, figsize=(30, 30))
for feat in feat:
i += 1
plt.subplot(10, 10, i)
sns.distplot(df1[feat], hist=False, label=label1)
sns.distplot(df2[feat], hist=False, label=label2)
plt.xlabel(feat)
plt.show()
train0 = train.loc[train.target == 0]
train1 = train.loc[train.target == 1]
feat = train.columns.values[2:102]
feat = train.columns.values[102:202]
plot_feat_dist(train0, train1, '0', '1', feat) | code |
18146508/cell_10 | [
"text_plain_output_1.png"
] | slot = 1
for i in range(2, 102):
slot += 1
slot = 1
plt.figure(figsize=(30, 30))
for i in range(102, 202):
plt.subplot(10, 10, slot)
train.iloc[:, i].hist()
slot += 1 | code |
18146508/cell_5 | [
"image_output_1.png"
] | train.head() | code |
2026028/cell_21 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
def survival_stacked_bar(variable):
    """Tabulate the distribution of `variable` within each survival group.

    Uses the module-level `train` DataFrame. For each value of `variable`,
    computes the share of that value among passengers who died and among
    those who survived, and returns the two rows as a DataFrame with index
    ['Did not survived', 'Survived'] (previously the frame was built but
    discarded; callers that ignore the return value are unaffected).

    BUG FIX: the original divided by len(train['Survived'] == 0), i.e. the
    length of the boolean mask (= total row count), so both rows were scaled
    by the full dataset size instead of each group's own size.
    """
    died_mask = train['Survived'] == 0
    survived_mask = train['Survived'] == 1
    # Normalise each group's value counts by that group's size so the rows
    # are true within-group proportions.
    Died = train[died_mask][variable].value_counts() / died_mask.sum()
    Survived = train[survived_mask][variable].value_counts() / survived_mask.sum()
    data = pd.DataFrame([Died, Survived])
    data.index = ['Did not survived', 'Survived']
    return data
survival_stacked_bar('Pclass') | code |
2026028/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
def survival_stacked_bar(variable):
Died = train[train['Survived'] == 0][variable].value_counts() / len(train['Survived'] == 0)
Survived = train[train['Survived'] == 1][variable].value_counts() / len(train['Survived'] == 1)
data = pd.DataFrame([Died, Survived])
data.index = ['Did not survived', 'Survived']
return
survival_stacked_bar('SibSp') | code |
2026028/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv') | code |
2026028/cell_56 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
test_set_1 = test.groupby(['Pclass', 'SibSp'])
test_set_1_median = test_set_1.median()
test_set_1_median
for i in test.columns:
print(i + ': ' + str(sum(test[i].isnull())) + ' missing values') | code |
2026028/cell_34 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
def survival_stacked_bar(variable):
Died = train[train['Survived'] == 0][variable].value_counts() / len(train['Survived'] == 0)
Survived = train[train['Survived'] == 1][variable].value_counts() / len(train['Survived'] == 1)
data = pd.DataFrame([Died, Survived])
data.index = ['Did not survived', 'Survived']
return
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
traintestdata = pd.concat([train, test])
traintestdata.shape | code |
2026028/cell_23 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
def survival_stacked_bar(variable):
Died = train[train['Survived'] == 0][variable].value_counts() / len(train['Survived'] == 0)
Survived = train[train['Survived'] == 1][variable].value_counts() / len(train['Survived'] == 1)
data = pd.DataFrame([Died, Survived])
data.index = ['Did not survived', 'Survived']
return
survival_stacked_bar('Embarked') | code |
2026028/cell_30 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax) | code |
2026028/cell_44 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
for i in test.columns:
print(i + ': ' + str(sum(test[i].isnull())) + ' missing values') | code |
2026028/cell_55 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
train_set_1 = train.groupby(['Pclass', 'SibSp'])
train_set_1_median = train_set_1.median()
train_set_1_median
for i in train.columns:
print(i + ': ' + str(sum(train[i].isnull())) + ' missing values') | code |
2026028/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
gender_submission.head() | code |
2026028/cell_39 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title') | code |
2026028/cell_41 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map) | code |
2026028/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns
sns.set(style='whitegrid')
import warnings
warnings.filterwarnings('ignore')
from sklearn.linear_model import LinearRegression
import statsmodels.formula.api as sm
from sklearn.cross_validation import train_test_split | code |
2026028/cell_54 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
train_set_1 = train.groupby(['Pclass', 'SibSp'])
train_set_1_median = train_set_1.median()
train_set_1_median
test_set_1 = test.groupby(['Pclass', 'SibSp'])
test_set_1_median = test_set_1.median()
test_set_1_median
train['Cabin'] = train['Cabin'].fillna('U')
test['Cabin'] = test['Cabin'].fillna('U')
train['Cabin'] = train['Cabin'].map(lambda x: x[0])
test['Cabin'] = test['Cabin'].map(lambda x: x[0]) | code |
2026028/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
train.info() | code |
2026028/cell_60 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
test_set_1 = test.groupby(['Pclass', 'SibSp'])
test_set_1_median = test_set_1.median()
test_set_1_median
test['Fare'] = test['Fare'].fillna(np.mean(test['Fare']))
for i in test.columns:
print(i + ': ' + str(sum(test[i].isnull())) + ' missing values') | code |
2026028/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
def survival_stacked_bar(variable):
Died = train[train['Survived'] == 0][variable].value_counts() / len(train['Survived'] == 0)
Survived = train[train['Survived'] == 1][variable].value_counts() / len(train['Survived'] == 1)
data = pd.DataFrame([Died, Survived])
data.index = ['Did not survived', 'Survived']
return
survival_stacked_bar('Sex') | code |
2026028/cell_50 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
train_set_1 = train.groupby(['Pclass', 'SibSp'])
train_set_1_median = train_set_1.median()
train_set_1_median
for i in train.columns:
print(i + ': ' + str(sum(train[i].isnull())) + ' missing values') | code |
2026028/cell_49 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
train_set_1 = train.groupby(['Pclass', 'SibSp'])
train_set_1_median = train_set_1.median()
train_set_1_median
test_set_1 = test.groupby(['Pclass', 'SibSp'])
test_set_1_median = test_set_1.median()
test_set_1_median
def fill_age(dataset, dataset_med):
for x in range(len(dataset)):
if dataset['Pclass'][x] == 1:
if dataset['SibSp'][x] == 0:
return dataset_med.loc[1, 0]['Age']
elif dataset['SibSp'][x] == 1:
return dataset_med.loc[1, 1]['Age']
elif dataset['SibSp'][x] == 2:
return dataset_med.loc[1, 2]['Age']
elif dataset['SibSp'][x] == 3:
return dataset_med.loc[1, 3]['Age']
elif dataset['Pclass'][x] == 2:
if dataset['SibSp'][x] == 0:
return dataset_med.loc[2, 0]['Age']
elif dataset['SibSp'][x] == 1:
return dataset_med.loc[2, 1]['Age']
elif dataset['SibSp'][x] == 2:
return dataset_med.loc[2, 2]['Age']
elif dataset['SibSp'][x] == 3:
return dataset_med.loc[2, 3]['Age']
elif dataset['Pclass'][x] == 3:
if dataset['SibSp'][x] == 0:
return dataset_med.loc[3, 0]['Age']
elif dataset['SibSp'][x] == 1:
return dataset_med.loc[3, 1]['Age']
elif dataset['SibSp'][x] == 2:
return dataset_med.loc[3, 2]['Age']
elif dataset['SibSp'][x] == 3:
return dataset_med.loc[3, 3]['Age']
elif dataset['SibSp'][x] == 4:
return dataset_med.loc[3, 4]['Age']
elif dataset['SibSp'][x] == 5:
return dataset_med.loc[3, 5]['Age']
elif dataset['SibSp'][x] == 8:
return dataset_med.loc[3]['Age'].median()
train['Age'] = train['Age'].fillna(fill_age(train, train_set_1_median))
test['Age'] = test['Age'].fillna(fill_age(test, test_set_1_median)) | code |
2026028/cell_51 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
test_set_1 = test.groupby(['Pclass', 'SibSp'])
test_set_1_median = test_set_1.median()
test_set_1_median
for i in test.columns:
print(i + ': ' + str(sum(test[i].isnull())) + ' missing values') | code |
2026028/cell_58 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
train_set_1 = train.groupby(['Pclass', 'SibSp'])
train_set_1_median = train_set_1.median()
train_set_1_median
train['Embarked'] = train['Embarked'].fillna('S')
for i in train.columns:
print(i + ': ' + str(sum(train[i].isnull())) + ' missing values') | code |
2026028/cell_15 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
train['Sex'].value_counts().plot(kind='bar') | code |
2026028/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90)
plt.axis('equal')
plt.show() | code |
2026028/cell_47 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
test_set_1 = test.groupby(['Pclass', 'SibSp'])
test_set_1_median = test_set_1.median()
test_set_1_median | code |
2026028/cell_43 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
for i in train.columns:
print(i + ': ' + str(sum(train[i].isnull())) + ' missing values') | code |
2026028/cell_46 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
train.insert(value=train.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=12, column='Title')
test.insert(value=test.Name.map(lambda name: name.split(',')[1].split('.')[0].strip()), loc=11, column='Title')
title_map = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
train['Title'] = train.Title.map(title_map)
test['Title'] = test.Title.map(title_map)
train_set_1 = train.groupby(['Pclass', 'SibSp'])
train_set_1_median = train_set_1.median()
train_set_1_median | code |
2026028/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
train['Age'].hist(width=6) | code |
2026028/cell_53 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
def survival_stacked_bar(variable):
Died = train[train['Survived'] == 0][variable].value_counts() / len(train['Survived'] == 0)
Survived = train[train['Survived'] == 1][variable].value_counts() / len(train['Survived'] == 1)
data = pd.DataFrame([Died, Survived])
data.index = ['Did not survived', 'Survived']
return
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
traintestdata = pd.concat([train, test])
traintestdata.shape
traintestdata.Cabin.unique() | code |
2026028/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape) | code |
2026028/cell_27 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
def survival_stacked_bar(variable):
Died = train[train['Survived'] == 0][variable].value_counts() / len(train['Survived'] == 0)
Survived = train[train['Survived'] == 1][variable].value_counts() / len(train['Survived'] == 1)
data = pd.DataFrame([Died, Survived])
data.index = ['Did not survived', 'Survived']
return
survival_stacked_bar('Parch') | code |
2026028/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
test.info() | code |
2026028/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
train.tail() | code |
2026028/cell_36 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/train.csv')
test = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/test.csv')
gender_submission = pd.read_csv('C:/Users/Peng/Documents/APS/Data-Analysis-Blog/Kaggle/Titanic/gender_submission.csv')
(train.shape, test.shape)
labels = ('Cherbourg', 'Queenstown', 'Southampton')
sizes = [sum(train['Embarked'] == 'C'), sum(train['Embarked'] == 'Q'), sum(train['Embarked'] == 'S')]
colors = ['yellow', 'aqua', 'lime']
plt.axis('equal')
def survival_stacked_bar(variable):
Died = train[train['Survived'] == 0][variable].value_counts() / len(train['Survived'] == 0)
Survived = train[train['Survived'] == 1][variable].value_counts() / len(train['Survived'] == 1)
data = pd.DataFrame([Died, Survived])
data.index = ['Did not survived', 'Survived']
return
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, linewidths=0.5, fmt= '.2f',ax=ax)
sex_map = {'male': 1, 'female': 0}
train['Sex'] = train['Sex'].map(sex_map)
test['Sex'] = test['Sex'].map(sex_map)
survival_stacked_bar('Sex') | code |
18112986/cell_13 | [
"text_html_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']
def detect_outlier(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col].dropna(), 25)
Q3 = np.percentile(df[col].dropna(), 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outliers_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((k for k, v in outlier_indices.items() if v > n))
return multiple_outliers
outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train.isnull().sum()
train.dtypes
train.describe() | code |
18112986/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']
def detect_outlier(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col].dropna(), 25)
Q3 = np.percentile(df[col].dropna(), 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outliers_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((k for k, v in outlier_indices.items() if v > n))
return multiple_outliers
outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train_len = len(train)
dataset = pd.concat(objs=[train, test], axis=0, sort=False).reset_index(drop=True)
dataset = dataset.fillna(np.nan)
dataset.isnull().sum() | code |
18112986/cell_20 | [
"image_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']
def detect_outlier(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col].dropna(), 25)
Q3 = np.percentile(df[col].dropna(), 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outliers_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((k for k, v in outlier_indices.items() if v > n))
return multiple_outliers
outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train_len = len(train)
dataset = pd.concat(objs=[train, test], axis=0, sort=False).reset_index(drop=True)
dataset = dataset.fillna(np.nan)
dataset.isnull().sum()
dataset['Fare'].isnull().sum() | code |
18112986/cell_11 | [
"text_plain_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']
def detect_outlier(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col].dropna(), 25)
Q3 = np.percentile(df[col].dropna(), 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outliers_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((k for k, v in outlier_indices.items() if v > n))
return multiple_outliers
outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train.isnull().sum()
train.head() | code |
18112986/cell_19 | [
"image_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']
def detect_outlier(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col].dropna(), 25)
Q3 = np.percentile(df[col].dropna(), 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outliers_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((k for k, v in outlier_indices.items() if v > n))
return multiple_outliers
outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train.isnull().sum()
train.dtypes
#correlation matrix between numerical values and Survived feature
g = sns.heatmap(train[['Survived', 'SibSp', 'Parch', 'Age', 'Fare']].corr(), annot = True, fmt = ".2f", cmap = "coolwarm")
#Explore SibSp feature vs Survived
g = sns.catplot(x = 'SibSp', y = 'Survived', data = train, kind = 'bar', height = 6, palette = 'muted')
#g.despine(left = True)
g.set_ylabels("Survival Probability")
#explore parch feature vs survived
g = sns.catplot(x = 'Parch', y = 'Survived', data = train, height = 6, kind = 'bar', palette = 'muted')
g = sns.FacetGrid(train, col='Survived')
g = g.map(sns.distplot, 'Age')
g = sns.kdeplot(train['Age'][(train['Survived'] == 0) & train['Age'].notnull()], color='r', shade=True)
g = sns.kdeplot(train['Age'][(train['Survived'] == 1) & train['Age'].notnull()], color='b', shade=True)
g.set_xlabel('Age')
g.set_ylabel('Frequency')
g.legend(['Not Survived', 'Survived']) | code |
18112986/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
sns.set(style='white', context='notebook', palette='deep')
import os
print(os.listdir('../input')) | code |
18112986/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']
def detect_outlier(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col].dropna(), 25)
Q3 = np.percentile(df[col].dropna(), 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outliers_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((k for k, v in outlier_indices.items() if v > n))
return multiple_outliers
outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train.isnull().sum()
train.dtypes
#correlation matrix between numerical values and Survived feature
g = sns.heatmap(train[['Survived', 'SibSp', 'Parch', 'Age', 'Fare']].corr(), annot = True, fmt = ".2f", cmap = "coolwarm")
#Explore SibSp feature vs Survived
g = sns.catplot(x = 'SibSp', y = 'Survived', data = train, kind = 'bar', height = 6, palette = 'muted')
#g.despine(left = True)
g.set_ylabels("Survival Probability")
#explore parch feature vs survived
g = sns.catplot(x = 'Parch', y = 'Survived', data = train, height = 6, kind = 'bar', palette = 'muted')
g = sns.FacetGrid(train, col='Survived')
g = g.map(sns.distplot, 'Age') | code |
18112986/cell_15 | [
"text_plain_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']
def detect_outlier(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col].dropna(), 25)
Q3 = np.percentile(df[col].dropna(), 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outliers_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((k for k, v in outlier_indices.items() if v > n))
return multiple_outliers
outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train.isnull().sum()
train.dtypes
g = sns.heatmap(train[['Survived', 'SibSp', 'Parch', 'Age', 'Fare']].corr(), annot=True, fmt='.2f', cmap='coolwarm') | code |
18112986/cell_16 | [
"text_html_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']
def detect_outlier(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col].dropna(), 25)
Q3 = np.percentile(df[col].dropna(), 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outliers_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((k for k, v in outlier_indices.items() if v > n))
return multiple_outliers
outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train.isnull().sum()
train.dtypes
#correlation matrix between numerical values and Survived feature
g = sns.heatmap(train[['Survived', 'SibSp', 'Parch', 'Age', 'Fare']].corr(), annot = True, fmt = ".2f", cmap = "coolwarm")
g = sns.catplot(x='SibSp', y='Survived', data=train, kind='bar', height=6, palette='muted')
g.set_ylabels('Survival Probability') | code |
18112986/cell_17 | [
"image_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']
def detect_outlier(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col].dropna(), 25)
Q3 = np.percentile(df[col].dropna(), 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outliers_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((k for k, v in outlier_indices.items() if v > n))
return multiple_outliers
outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train.isnull().sum()
train.dtypes
#correlation matrix between numerical values and Survived feature
g = sns.heatmap(train[['Survived', 'SibSp', 'Parch', 'Age', 'Fare']].corr(), annot = True, fmt = ".2f", cmap = "coolwarm")
#Explore SibSp feature vs Survived
g = sns.catplot(x = 'SibSp', y = 'Survived', data = train, kind = 'bar', height = 6, palette = 'muted')
#g.despine(left = True)
g.set_ylabels("Survival Probability")
g = sns.catplot(x='Parch', y='Survived', data=train, height=6, kind='bar', palette='muted') | code |
18112986/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']
def detect_outlier(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col].dropna(), 25)
Q3 = np.percentile(df[col].dropna(), 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outliers_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((k for k, v in outlier_indices.items() if v > n))
return multiple_outliers
outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train_len = len(train)
dataset = pd.concat(objs=[train, test], axis=0, sort=False).reset_index(drop=True)
dataset = dataset.fillna(np.nan)
dataset.isnull().sum()
train.isnull().sum()
train.dtypes
#correlation matrix between numerical values and Survived feature
g = sns.heatmap(train[['Survived', 'SibSp', 'Parch', 'Age', 'Fare']].corr(), annot = True, fmt = ".2f", cmap = "coolwarm")
#Explore SibSp feature vs Survived
g = sns.catplot(x = 'SibSp', y = 'Survived', data = train, kind = 'bar', height = 6, palette = 'muted')
#g.despine(left = True)
g.set_ylabels("Survival Probability")
#explore parch feature vs survived
g = sns.catplot(x = 'Parch', y = 'Survived', data = train, height = 6, kind = 'bar', palette = 'muted')
g = sns.FacetGrid(train, col='Survived')
g = g.map(sns.distplot, 'Age')
#explore age distribution
g = sns.kdeplot(train['Age'][(train['Survived']==0)&(train['Age'].notnull())], color = 'r', shade = True)
g = sns.kdeplot(train['Age'][(train['Survived']==1)&(train['Age'].notnull())], color = 'b', shade = True)
g.set_xlabel("Age")
g.set_ylabel("Frequency")
g.legend(["Not Survived", "Survived"])
g = sns.distplot(dataset['Fare'], color='m', label='skewness: %2f' % dataset['Fare'].skew())
g.legend(loc='best') | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.