path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value)
---|---|---|---|
32065763/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/data.csv', encoding='latin-1')
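# new_kamusalay.csv maps informal Indonesian ("alay") spellings to standard replacements (see the column rename below).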
alay_dict = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/new_kamusalay.csv', encoding='latin-1', header=None)
alay_dict = alay_dict.rename(columns={0: 'original', 1: 'replacement'})
id_stopword_dict = pd.read_csv('../input/indonesian-stoplist/stopwordbahasa.csv', header=None)
id_stopword_dict = id_stopword_dict.rename(columns={0: 'stopword'})
data.HS.value_counts()
data.Abusive.value_counts()
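# A tweet counts as toxic when it is flagged as hate speech (HS) or abusive, and non-toxic when it is neither.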
print('Toxic shape: ', data[(data['HS'] == 1) | (data['Abusive'] == 1)].shape)
print('Non-toxic shape: ', data[(data['HS'] == 0) & (data['Abusive'] == 0)].shape) | code |
32065763/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/data.csv', encoding='latin-1')
alay_dict = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/new_kamusalay.csv', encoding='latin-1', header=None)
alay_dict = alay_dict.rename(columns={0: 'original', 1: 'replacement'})
id_stopword_dict = pd.read_csv('../input/indonesian-stoplist/stopwordbahasa.csv', header=None)
id_stopword_dict = id_stopword_dict.rename(columns={0: 'stopword'})
print('Shape: ', alay_dict.shape)
alay_dict.head(15) | code |
2037113/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
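# 'class' is the edible/poisonous label; the remaining columns are categorical mushroom attributes.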
sns.countplot(x='stalk-surface-below-ring', data=dframe) | code |
2037113/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='odor', data=dframe) | code |
2037113/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='cap-shape', data=dframe) | code |
2037113/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='veil-color', data=dframe) | code |
2037113/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='stalk-color-below-ring', data=dframe) | code |
2037113/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='stalk-surface-above-ring', data=dframe) | code |
2037113/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
X.columns
X.info() | code |
2037113/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='cap-color', data=dframe) | code |
2037113/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='stalk-root', data=dframe) | code |
2037113/cell_1 | [
"text_plain_output_1.png"
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2037113/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='stalk-shape', data=dframe) | code |
2037113/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='gill-spacing', data=dframe) | code |
2037113/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='gill-size', data=dframe) | code |
2037113/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dframe = pd.read_csv('../input/mushrooms.csv')
dframe.head() | code |
2037113/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='gill-color', data=dframe) | code |
2037113/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='veil-type', data=dframe) | code |
2037113/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='gill-attachment', data=dframe) | code |
2037113/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='stalk-color-above-ring', data=dframe) | code |
2037113/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='cap-surface', data=dframe) | code |
2037113/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
sns.countplot(x='bruises', data=dframe) | code |
2037113/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dframe = pd.read_csv('../input/mushrooms.csv')
y = dframe['class']
X = dframe.drop('class', axis=1)
X.columns | code |
2043287/cell_13 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Activation, Dropout
from keras.models import Sequential
from keras.optimizers import RMSprop
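# Baseline MLP: 784 flattened pixel inputs (28x28 Fashion-MNIST images) -> two hidden layers with dropout -> 10-way softmax.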
model = Sequential()
model.add(Dense(400, input_dim=784, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(300, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 50
batch_size = 128
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model.evaluate(X_validate, y_validate, batch_size=32)
model2 = Sequential()
model2.add(Dense(400, input_dim=784, activation='relu'))
model2.add(Dropout(0.4))
model2.add(Dense(200, activation='relu'))
model2.add(Dropout(0.3))
model2.add(Dense(100, activation='relu'))
model2.add(Dropout(0.2))
model2.add(Dense(50, activation='relu'))
model2.add(Dense(10, activation='softmax'))
model2.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 20
batch_size = 128
model2.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model2.evaluate(X_validate, y_validate, batch_size=32)
model3 = Sequential()
model3.add(Dense(400, input_dim=784, activation='relu'))
model3.add(Dropout(0.4))
model3.add(Dense(200, activation='sigmoid'))
model3.add(Dropout(0.3))
model3.add(Dense(100, activation='sigmoid'))
model3.add(Dropout(0.2))
model3.add(Dense(50, activation='sigmoid'))
model3.add(Dense(10, activation='softmax'))
model3.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 20
batch_size = 128
model3.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model3.evaluate(X_validate, y_validate, batch_size=32)
print("Network's test score [loss, accuracy]: {0}".format(score)) | code |
2043287/cell_6 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Activation, Dropout
from keras.models import Sequential
from keras.optimizers import RMSprop
model = Sequential()
model.add(Dense(400, input_dim=784, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(300, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 50
batch_size = 128
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model.evaluate(X_validate, y_validate, batch_size=32)
print("Network's test score [loss, accuracy]: {0}".format(score)) | code |
2043287/cell_1 | [
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import RMSprop
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2043287/cell_16 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Activation, Dropout
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
training_data = pd.read_csv('../input/fashion-mnist_train.csv')
testing_data = pd.read_csv('../input/fashion-mnist_test.csv')
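# Column 0 of each CSV is the class label; the remaining 784 columns are 28x28 pixel values.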
X = np.array(training_data.iloc[:, 1:])
y = to_categorical(np.array(training_data.iloc[:, 0]))
X_train, X_validate, y_train, y_validate = train_test_split(X, y)
model = Sequential()
model.add(Dense(400, input_dim=784, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(300, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 50
batch_size = 128
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model.evaluate(X_validate, y_validate, batch_size=32)
model2 = Sequential()
model2.add(Dense(400, input_dim=784, activation='relu'))
model2.add(Dropout(0.4))
model2.add(Dense(200, activation='relu'))
model2.add(Dropout(0.3))
model2.add(Dense(100, activation='relu'))
model2.add(Dropout(0.2))
model2.add(Dense(50, activation='relu'))
model2.add(Dense(10, activation='softmax'))
model2.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 20
batch_size = 128
model2.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model2.evaluate(X_validate, y_validate, batch_size=32)
model3 = Sequential()
model3.add(Dense(400, input_dim=784, activation='relu'))
model3.add(Dropout(0.4))
model3.add(Dense(200, activation='sigmoid'))
model3.add(Dropout(0.3))
model3.add(Dense(100, activation='sigmoid'))
model3.add(Dropout(0.2))
model3.add(Dense(50, activation='sigmoid'))
model3.add(Dense(10, activation='softmax'))
model3.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 20
batch_size = 128
model3.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model3.evaluate(X_validate, y_validate, batch_size=32)
X_test = np.array(testing_data.iloc[:, 1:])
y_test = to_categorical(np.array(testing_data.iloc[:, 0]))
score = model.evaluate(X_test, y_test, batch_size=32)
print("Network one's test score [loss, accuracy]: {0}".format(score)) | code |
2043287/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Dense, Activation, Dropout
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
training_data = pd.read_csv('../input/fashion-mnist_train.csv')
testing_data = pd.read_csv('../input/fashion-mnist_test.csv')
X = np.array(training_data.iloc[:, 1:])
y = to_categorical(np.array(training_data.iloc[:, 0]))
X_train, X_validate, y_train, y_validate = train_test_split(X, y)
model = Sequential()
model.add(Dense(400, input_dim=784, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(300, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 50
batch_size = 128
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model.evaluate(X_validate, y_validate, batch_size=32)
model2 = Sequential()
model2.add(Dense(400, input_dim=784, activation='relu'))
model2.add(Dropout(0.4))
model2.add(Dense(200, activation='relu'))
model2.add(Dropout(0.3))
model2.add(Dense(100, activation='relu'))
model2.add(Dropout(0.2))
model2.add(Dense(50, activation='relu'))
model2.add(Dense(10, activation='softmax'))
model2.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 20
batch_size = 128
model2.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model2.evaluate(X_validate, y_validate, batch_size=32)
model3 = Sequential()
model3.add(Dense(400, input_dim=784, activation='relu'))
model3.add(Dropout(0.4))
model3.add(Dense(200, activation='sigmoid'))
model3.add(Dropout(0.3))
model3.add(Dense(100, activation='sigmoid'))
model3.add(Dropout(0.2))
model3.add(Dense(50, activation='sigmoid'))
model3.add(Dense(10, activation='softmax'))
model3.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 20
batch_size = 128
model3.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model3.evaluate(X_validate, y_validate, batch_size=32)
X_test = np.array(testing_data.iloc[:, 1:])
y_test = to_categorical(np.array(testing_data.iloc[:, 0]))
score3 = model3.evaluate(X_test, y_test, batch_size=32)
print("Network three's test score [loss, accuracy]: {0}".format(score3)) | code |
2043287/cell_10 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Activation, Dropout
from keras.models import Sequential
from keras.optimizers import RMSprop
model = Sequential()
model.add(Dense(400, input_dim=784, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(300, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 50
batch_size = 128
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model.evaluate(X_validate, y_validate, batch_size=32)
model2 = Sequential()
model2.add(Dense(400, input_dim=784, activation='relu'))
model2.add(Dropout(0.4))
model2.add(Dense(200, activation='relu'))
model2.add(Dropout(0.3))
model2.add(Dense(100, activation='relu'))
model2.add(Dropout(0.2))
model2.add(Dense(50, activation='relu'))
model2.add(Dense(10, activation='softmax'))
model2.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 20
batch_size = 128
model2.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
score = model2.evaluate(X_validate, y_validate, batch_size=32)
print("Network's test score [loss, accuracy]: {0}".format(score)) | code |
90105356/cell_13 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import shutil
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import statsmodels.api as sm
from scipy.stats import skew
from sklearn.preprocessing import OneHotEncoder
sns.set()
pd.set_option('display.max_columns', None)
pth_train = '../input/house-prices-advanced-regression-techniques/train.csv'
pth_test = '../input/house-prices-advanced-regression-techniques/test.csv'
raw_train = pd.read_csv(pth_train)
raw_test = pd.read_csv(pth_test)
categorical_nominal_cols = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation', 'Heating', 'Electrical', 'Functional', 'GarageType', 'MiscFeature', 'SaleType', 'SaleCondition']
categorical_ordinal_cols = ['OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence']
categorical_bool_cols = ['CentralAir']
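# Explicit rank orderings for the ordinal categories (e.g. Po < Fa < TA < Gd < Ex), used below to sort value counts before plotting.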
categorical_ordinal2encode = {}
categorical_ordinal2encode['ExterQual'] = {'Po': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}
categorical_ordinal2encode['ExterCond'] = categorical_ordinal2encode['ExterQual'].copy()
categorical_ordinal2encode['BsmtQual'] = {'NA': 0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}
categorical_ordinal2encode['BsmtCond'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['BsmtExposure'] = {'NA': 0, 'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4}
categorical_ordinal2encode['BsmtFinType1'] = {'NA': 0, 'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6}
categorical_ordinal2encode['BsmtFinType2'] = categorical_ordinal2encode['BsmtFinType1'].copy()
categorical_ordinal2encode['HeatingQC'] = categorical_ordinal2encode['ExterQual'].copy()
categorical_ordinal2encode['KitchenQual'] = categorical_ordinal2encode['HeatingQC'].copy()
categorical_ordinal2encode['FireplaceQu'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['GarageFinish'] = {'NA': 0, 'Unf': 1, 'RFn': 2, 'Fin': 3}
categorical_ordinal2encode['GarageQual'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['GarageCond'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['PavedDrive'] = {'N': 0, 'P': 1, 'Y': 2}
categorical_ordinal2encode['PoolQC'] = {'NA': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}
categorical_ordinal2encode['Fence'] = {'NA': 0, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4}
total_col = 4
total_row = len(categorical_ordinal_cols) // total_col
if len(categorical_ordinal_cols) % total_col > 0:
    total_row += 1
idx = 0
fig, axs = plt.subplots(total_row, total_col, figsize=(15, total_row * 4))
for i in range(total_row):
    for j in range(total_col):
        if idx < len(categorical_ordinal_cols):
            title = categorical_ordinal_cols[idx]
            if title in categorical_ordinal2encode:
                vc = raw_train[title].value_counts().reset_index()
                vc.rename(columns={'index': 'code'}, inplace=True)
                vc['index'] = vc['code'].copy()
                vc['index'] = vc['index'].map(categorical_ordinal2encode[title])
                vc.set_index('index', inplace=True)
                vc = vc.sort_index()
                sns.barplot(data=vc, x='code', y=title, color='orange', ax=axs[i][j])
            else:
                vc = raw_train[title].value_counts().sort_index()
                sns.barplot(x=vc.index, y=vc, color='orange', ax=axs[i][j])
            axs[i][j].set_ylabel('frequency')
            axs[i][j].set_xlabel('level')
            axs[i][j].set_title(title)
        idx += 1
plt.tight_layout()
plt.show()
ordinal_columns = ['OverallQual', 'OverallCond', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageFinish', 'GarageQual', 'PavedDrive', 'PoolQC', 'Fence']
ordinal_column_transforms = {}
ordinal_column_fillna = {}
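# Each transform maps the raw ordinal levels onto a coarser 0-4 (or narrower) scale, merging sparse neighbouring levels.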
ordinal_column_transforms['OverallQual'] = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 1, 2, 3, 4, 4, 4]]
ordinal_column_transforms['OverallCond'] = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 1, 2, 3, 4, 4, 4]]
ordinal_column_transforms['ExterQual'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 1]]
ordinal_column_transforms['ExterCond'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 1]]
ordinal_column_transforms['BsmtQual'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtCond'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtExposure'] = [['NA', 'No', 'Mn', 'Av', 'Gd'], [0, 0, 1, 2, 2]]
ordinal_column_transforms['BsmtFinType1'] = [['NA', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'], [0, 0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtFinType2'] = [['NA', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'], [0, 0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['HeatingQC'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 2, 2]]
ordinal_column_transforms['KitchenQual'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 2, 2]]
ordinal_column_transforms['FireplaceQu'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['GarageFinish'] = [['NA', 'Unf', 'RFn', 'Fin'], [0, 1, 2, 3]]
ordinal_column_transforms['GarageQual'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['PavedDrive'] = [['N', 'P', 'Y'], [0, 0, 1]]
ordinal_column_transforms['PoolQC'] = [['NA', 'Fa', 'TA', 'Gd', 'Ex'], [0, 1, 1, 2, 3]]
ordinal_column_transforms['Fence'] = [['NA', 'MnWw', 'GdWo', 'MnPrv', 'GdPrv'], [0, 0, 1, 1, 2]]
for title in ordinal_columns:
print('raw_train', raw_train[title].isna().sum())
print('raw_test', raw_test[title].isna().sum())
vc = raw_train[title].value_counts().sort_index()
raw_train[title].replace(ordinal_column_transforms[title][0], ordinal_column_transforms[title][1], inplace=True)
raw_test[title].replace(ordinal_column_transforms[title][0], ordinal_column_transforms[title][1], inplace=True)
    raw_test[title].fillna(0, inplace=True)
vc_changed = raw_train[title].value_counts().sort_index()
total_row, total_vc = (raw_train.shape[0], vc.sum())
gap = total_row - total_vc
print('total_row :', total_row)
print('total value count :', total_vc)
print('total null value :', gap, '\n')
fig, axs = plt.subplots(1, 2, figsize=(15, 3))
if title in categorical_ordinal2encode:
vc = vc.reset_index()
vc.rename(columns={'index': 'code'}, inplace=True)
vc['index'] = vc['code'].copy()
vc['index'] = vc['index'].map(categorical_ordinal2encode[title])
vc.set_index('index', inplace=True)
vc = vc.sort_index()
sns.barplot(data=vc, x='code', y=title, color='violet', ax=axs[0])
else:
sns.barplot(x=vc.index, y=vc, color='violet', ax=axs[0])
axs[0].set_title('BEFORE', fontsize=12)
axs[0].set_ylabel('frequency')
axs[0].set_xlabel('level')
sns.barplot(x=vc_changed.index, y=vc_changed, color='violet', ax=axs[1])
axs[1].set_title('AFTER', fontsize=12)
axs[1].set_ylabel('frequency')
axs[1].set_xlabel('level')
fig.suptitle(title + ' (BEFORE - AFTER)', fontsize=15)
plt.tight_layout()
plt.show() | code |
90105356/cell_9 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import shutil
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import statsmodels.api as sm
from scipy.stats import skew
from sklearn.preprocessing import OneHotEncoder
sns.set()
pd.set_option('display.max_columns', None)
pth_train = '../input/house-prices-advanced-regression-techniques/train.csv'
pth_test = '../input/house-prices-advanced-regression-techniques/test.csv'
raw_train = pd.read_csv(pth_train)
raw_test = pd.read_csv(pth_test)
categorical_nominal_cols = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation', 'Heating', 'Electrical', 'Functional', 'GarageType', 'MiscFeature', 'SaleType', 'SaleCondition']
categorical_ordinal_cols = ['OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence']
categorical_bool_cols = ['CentralAir']
categorical_ordinal2encode = {}
categorical_ordinal2encode['ExterQual'] = {'Po': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}
categorical_ordinal2encode['ExterCond'] = categorical_ordinal2encode['ExterQual'].copy()
categorical_ordinal2encode['BsmtQual'] = {'NA': 0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}
categorical_ordinal2encode['BsmtCond'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['BsmtExposure'] = {'NA': 0, 'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4}
categorical_ordinal2encode['BsmtFinType1'] = {'NA': 0, 'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6}
categorical_ordinal2encode['BsmtFinType2'] = categorical_ordinal2encode['BsmtFinType1'].copy()
categorical_ordinal2encode['HeatingQC'] = categorical_ordinal2encode['ExterQual'].copy()
categorical_ordinal2encode['KitchenQual'] = categorical_ordinal2encode['HeatingQC'].copy()
categorical_ordinal2encode['FireplaceQu'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['GarageFinish'] = {'NA': 0, 'Unf': 1, 'RFn': 2, 'Fin': 3}
categorical_ordinal2encode['GarageQual'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['GarageCond'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['PavedDrive'] = {'N': 0, 'P': 1, 'Y': 2}
categorical_ordinal2encode['PoolQC'] = {'NA': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}
categorical_ordinal2encode['Fence'] = {'NA': 0, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4}
total_col = 4
total_row = len(categorical_ordinal_cols) // total_col
if len(categorical_ordinal_cols) % total_col > 0:
total_row += 1
idx = 0
fig, axs = plt.subplots(total_row, total_col, figsize=(15, total_row * 4))
for i in range(total_row):
for j in range(total_col):
if idx < len(categorical_ordinal_cols):
title = categorical_ordinal_cols[idx]
if title in categorical_ordinal2encode:
vc = raw_train[title].value_counts().reset_index()
vc.rename(columns={'index': 'code'}, inplace=True)
vc['index'] = vc['code'].copy()
vc['index'] = vc['index'].map(categorical_ordinal2encode[title])
vc.set_index('index', inplace=True)
vc = vc.sort_index()
sns.barplot(data=vc, x='code', y=title, color='orange', ax=axs[i][j])
else:
vc = raw_train[title].value_counts().sort_index()
sns.barplot(x=vc.index, y=vc, color='orange', ax=axs[i][j])
axs[i][j].set_ylabel('frequency')
axs[i][j].set_xlabel('level')
axs[i][j].set_title(title)
idx += 1
plt.tight_layout()
plt.show() | code |
90105356/cell_19 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import shutil
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import statsmodels.api as sm
from scipy.stats import skew
from sklearn.preprocessing import OneHotEncoder
sns.set()
pd.set_option('display.max_columns', None)
pth_train = '../input/house-prices-advanced-regression-techniques/train.csv'
pth_test = '../input/house-prices-advanced-regression-techniques/test.csv'
raw_train = pd.read_csv(pth_train)
raw_test = pd.read_csv(pth_test)
categorical_nominal_cols = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation', 'Heating', 'Electrical', 'Functional', 'GarageType', 'MiscFeature', 'SaleType', 'SaleCondition']
categorical_ordinal_cols = ['OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence']
categorical_bool_cols = ['CentralAir']
categorical_ordinal2encode = {}
categorical_ordinal2encode['ExterQual'] = {'Po': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}
categorical_ordinal2encode['ExterCond'] = categorical_ordinal2encode['ExterQual'].copy()
categorical_ordinal2encode['BsmtQual'] = {'NA': 0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}
categorical_ordinal2encode['BsmtCond'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['BsmtExposure'] = {'NA': 0, 'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4}
categorical_ordinal2encode['BsmtFinType1'] = {'NA': 0, 'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6}
categorical_ordinal2encode['BsmtFinType2'] = categorical_ordinal2encode['BsmtFinType1'].copy()
categorical_ordinal2encode['HeatingQC'] = categorical_ordinal2encode['ExterQual'].copy()
categorical_ordinal2encode['KitchenQual'] = categorical_ordinal2encode['HeatingQC'].copy()
categorical_ordinal2encode['FireplaceQu'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['GarageFinish'] = {'NA': 0, 'Unf': 1, 'RFn': 2, 'Fin': 3}
categorical_ordinal2encode['GarageQual'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['GarageCond'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['PavedDrive'] = {'N': 0, 'P': 1, 'Y': 2}
categorical_ordinal2encode['PoolQC'] = {'NA': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}
categorical_ordinal2encode['Fence'] = {'NA': 0, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4}
total_col = 4
total_row = len(categorical_ordinal_cols) // total_col
if len(categorical_ordinal_cols) % total_col > 0:
    total_row += 1
idx = 0
fig, axs = plt.subplots(total_row, total_col, figsize=(15, total_row * 4))
for i in range(total_row):
    for j in range(total_col):
        if idx < len(categorical_ordinal_cols):
            title = categorical_ordinal_cols[idx]
            if title in categorical_ordinal2encode:
                vc = raw_train[title].value_counts().reset_index()
                vc.rename(columns={'index': 'code'}, inplace=True)
                vc['index'] = vc['code'].copy()
                vc['index'] = vc['index'].map(categorical_ordinal2encode[title])
                vc.set_index('index', inplace=True)
                vc = vc.sort_index()
                sns.barplot(data=vc, x='code', y=title, color='orange', ax=axs[i][j])
            else:
                vc = raw_train[title].value_counts().sort_index()
                sns.barplot(x=vc.index, y=vc, color='orange', ax=axs[i][j])
            axs[i][j].set_ylabel('frequency')
            axs[i][j].set_xlabel('level')
            axs[i][j].set_title(title)
        idx += 1
plt.tight_layout()
plt.show()
ordinal_columns = ['OverallQual', 'OverallCond', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageFinish', 'GarageQual', 'PavedDrive', 'PoolQC', 'Fence']
ordinal_column_transforms = {}
ordinal_column_fillna = {}
ordinal_column_transforms['OverallQual'] = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 1, 2, 3, 4, 4, 4]]
ordinal_column_transforms['OverallCond'] = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 1, 2, 3, 4, 4, 4]]
ordinal_column_transforms['ExterQual'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 1]]
ordinal_column_transforms['ExterCond'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 1]]
ordinal_column_transforms['BsmtQual'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtCond'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtExposure'] = [['NA', 'No', 'Mn', 'Av', 'Gd'], [0, 0, 1, 2, 2]]
ordinal_column_transforms['BsmtFinType1'] = [['NA', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'], [0, 0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtFinType2'] = [['NA', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'], [0, 0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['HeatingQC'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 2, 2]]
ordinal_column_transforms['KitchenQual'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 2, 2]]
ordinal_column_transforms['FireplaceQu'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['GarageFinish'] = [['NA', 'Unf', 'RFn', 'Fin'], [0, 1, 2, 3]]
ordinal_column_transforms['GarageQual'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['PavedDrive'] = [['N', 'P', 'Y'], [0, 0, 1]]
ordinal_column_transforms['PoolQC'] = [['NA', 'Fa', 'TA', 'Gd', 'Ex'], [0, 1, 1, 2, 3]]
ordinal_column_transforms['Fence'] = [['NA', 'MnWw', 'GdWo', 'MnPrv', 'GdPrv'], [0, 0, 1, 1, 2]]
for title in ordinal_columns:
    print('raw_train', raw_train[title].isna().sum())
    print('raw_test', raw_test[title].isna().sum())
    vc = raw_train[title].value_counts().sort_index()
    raw_train[title].replace(ordinal_column_transforms[title][0], ordinal_column_transforms[title][1], inplace=True)
    raw_test[title].replace(ordinal_column_transforms[title][0], ordinal_column_transforms[title][1], inplace=True)
    raw_test[title].fillna(0, inplace=True)
    vc_changed = raw_train[title].value_counts().sort_index()
    total_row, total_vc = raw_train.shape[0], vc.sum()
    gap = total_row - total_vc
    print('total_row :', total_row)
    print('total value count :', total_vc)
    print('total null value :', gap, '\n')
    fig, axs = plt.subplots(1, 2, figsize=(15, 3))
    if title in categorical_ordinal2encode:
        vc = vc.reset_index()
        vc.rename(columns={'index': 'code'}, inplace=True)
        vc['index'] = vc['code'].copy()
        vc['index'] = vc['index'].map(categorical_ordinal2encode[title])
        vc.set_index('index', inplace=True)
        vc = vc.sort_index()
        sns.barplot(data=vc, x='code', y=title, color='violet', ax=axs[0])
    else:
        sns.barplot(x=vc.index, y=vc, color='violet', ax=axs[0])
    axs[0].set_title('BEFORE', fontsize=12)
    axs[0].set_ylabel('frequency')
    axs[0].set_xlabel('level')
    sns.barplot(x=vc_changed.index, y=vc_changed, color='violet', ax=axs[1])
    axs[1].set_title('AFTER', fontsize=12)
    axs[1].set_ylabel('frequency')
    axs[1].set_xlabel('level')
    fig.suptitle(title + ' (BEFORE - AFTER)', fontsize=15)
    plt.tight_layout()
    plt.show()
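# Give the test set a dummy SalePrice so train and test can be concatenated and preprocessed together.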
raw_test['SalePrice'] = np.zeros(raw_test.shape[0], dtype=np.int64)
raw_all = pd.concat((raw_train, raw_test), axis=0)
check_null_cols = raw_all.isna().sum()
check_null_cols_ver2 = check_null_cols[check_null_cols > 0] / raw_all.shape[0] * 100
check_null_cols_ver3 = check_null_cols_ver2[check_null_cols_ver2 > 50]
raw_all_ver2 = raw_all.drop(columns=check_null_cols_ver3.index)
check_null_cols = raw_all_ver2.isna().sum()
check_null_cols_ver2 = check_null_cols[check_null_cols > 0] / raw_all_ver2.shape[0] * 100
round(check_null_cols_ver2, 2)
raw_all_ver3 = raw_all_ver2.copy()
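# Impute the remaining missing values: mode for ordinal columns, 'unknown' for nominal ones, and the column mean otherwise.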
for column in check_null_cols_ver2.index:
if column in categorical_ordinal_cols:
vc = raw_all_ver3[column].value_counts()
raw_all_ver3[column].fillna(vc.idxmax(), inplace=True)
elif column in categorical_nominal_cols:
raw_all_ver3[column].fillna('unknown', inplace=True)
else:
mean = raw_all_ver3[column].mean()
raw_all_ver3[column].fillna(mean, inplace=True)
print('Check empty cell')
check_null_cols = raw_all_ver3.isna().sum()
check_null_cols_ver2 = check_null_cols[check_null_cols > 0] / raw_all_ver3.shape[0] * 100
round(check_null_cols_ver2, 2) | code |
90105356/cell_15 | [
"image_output_11.png",
"text_plain_output_5.png",
"text_plain_output_15.png",
"image_output_17.png",
"text_plain_output_9.png",
"image_output_14.png",
"text_plain_output_4.png",
"text_plain_output_13.png",
"image_output_13.png",
"image_output_5.png",
"text_plain_output_14.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"image_output_7.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_plain_output_16.png",
"image_output_16.png",
"text_plain_output_8.png",
"image_output_6.png",
"image_output_12.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"image_output_15.png",
"image_output_9.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import shutil
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import statsmodels.api as sm
from scipy.stats import skew
from sklearn.preprocessing import OneHotEncoder
sns.set()
pd.set_option('display.max_columns', None)
pth_train = '../input/house-prices-advanced-regression-techniques/train.csv'
pth_test = '../input/house-prices-advanced-regression-techniques/test.csv'
raw_train = pd.read_csv(pth_train)
raw_test = pd.read_csv(pth_test)
categorical_nominal_cols = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation', 'Heating', 'Electrical', 'Functional', 'GarageType', 'MiscFeature', 'SaleType', 'SaleCondition']
categorical_ordinal_cols = ['OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence']
categorical_bool_cols = ['CentralAir']
categorical_ordinal2encode = {}
categorical_ordinal2encode['ExterQual'] = {'Po': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}
categorical_ordinal2encode['ExterCond'] = categorical_ordinal2encode['ExterQual'].copy()
categorical_ordinal2encode['BsmtQual'] = {'NA': 0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}
categorical_ordinal2encode['BsmtCond'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['BsmtExposure'] = {'NA': 0, 'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4}
categorical_ordinal2encode['BsmtFinType1'] = {'NA': 0, 'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6}
categorical_ordinal2encode['BsmtFinType2'] = categorical_ordinal2encode['BsmtFinType1'].copy()
categorical_ordinal2encode['HeatingQC'] = categorical_ordinal2encode['ExterQual'].copy()
categorical_ordinal2encode['KitchenQual'] = categorical_ordinal2encode['HeatingQC'].copy()
categorical_ordinal2encode['FireplaceQu'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['GarageFinish'] = {'NA': 0, 'Unf': 1, 'RFn': 2, 'Fin': 3}
categorical_ordinal2encode['GarageQual'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['GarageCond'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['PavedDrive'] = {'N': 0, 'P': 1, 'Y': 2}
categorical_ordinal2encode['PoolQC'] = {'NA': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}
categorical_ordinal2encode['Fence'] = {'NA': 0, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4}
total_col = 4
total_row = len(categorical_ordinal_cols) // total_col
if len(categorical_ordinal_cols) % total_col > 0:
    total_row += 1
idx = 0
fig, axs = plt.subplots(total_row, total_col, figsize=(15, total_row * 4))
for i in range(total_row):
    for j in range(total_col):
        if idx < len(categorical_ordinal_cols):
            title = categorical_ordinal_cols[idx]
            if title in categorical_ordinal2encode:
                vc = raw_train[title].value_counts().reset_index()
                vc.rename(columns={'index': 'code'}, inplace=True)
                vc['index'] = vc['code'].copy()
                vc['index'] = vc['index'].map(categorical_ordinal2encode[title])
                vc.set_index('index', inplace=True)
                vc = vc.sort_index()
                sns.barplot(data=vc, x='code', y=title, color='orange', ax=axs[i][j])
            else:
                vc = raw_train[title].value_counts().sort_index()
                sns.barplot(x=vc.index, y=vc, color='orange', ax=axs[i][j])
            axs[i][j].set_ylabel('frequency')
            axs[i][j].set_xlabel('level')
            axs[i][j].set_title(title)
        idx += 1
plt.tight_layout()
plt.show()
ordinal_columns = ['OverallQual', 'OverallCond', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageFinish', 'GarageQual', 'PavedDrive', 'PoolQC', 'Fence']
ordinal_column_transforms = {}
ordinal_column_fillna = {}
ordinal_column_transforms['OverallQual'] = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 1, 2, 3, 4, 4, 4]]
ordinal_column_transforms['OverallCond'] = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 1, 2, 3, 4, 4, 4]]
ordinal_column_transforms['ExterQual'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 1]]
ordinal_column_transforms['ExterCond'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 1]]
ordinal_column_transforms['BsmtQual'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtCond'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtExposure'] = [['NA', 'No', 'Mn', 'Av', 'Gd'], [0, 0, 1, 2, 2]]
ordinal_column_transforms['BsmtFinType1'] = [['NA', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'], [0, 0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtFinType2'] = [['NA', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'], [0, 0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['HeatingQC'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 2, 2]]
ordinal_column_transforms['KitchenQual'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 2, 2]]
ordinal_column_transforms['FireplaceQu'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['GarageFinish'] = [['NA', 'Unf', 'RFn', 'Fin'], [0, 1, 2, 3]]
ordinal_column_transforms['GarageQual'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['PavedDrive'] = [['N', 'P', 'Y'], [0, 0, 1]]
ordinal_column_transforms['PoolQC'] = [['NA', 'Fa', 'TA', 'Gd', 'Ex'], [0, 1, 1, 2, 3]]
ordinal_column_transforms['Fence'] = [['NA', 'MnWw', 'GdWo', 'MnPrv', 'GdPrv'], [0, 0, 1, 1, 2]]
for title in ordinal_columns:
    print('raw_train', raw_train[title].isna().sum())
    print('raw_test', raw_test[title].isna().sum())
    vc = raw_train[title].value_counts().sort_index()
    raw_train[title].replace(ordinal_column_transforms[title][0], ordinal_column_transforms[title][1], inplace=True)
    raw_test[title].replace(ordinal_column_transforms[title][0], ordinal_column_transforms[title][1], inplace=True)
    raw_test[title].fillna(0, inplace=True)
    vc_changed = raw_train[title].value_counts().sort_index()
    total_row, total_vc = raw_train.shape[0], vc.sum()
    gap = total_row - total_vc
    print('total_row :', total_row)
    print('total value count :', total_vc)
    print('total null value :', gap, '\n')
    fig, axs = plt.subplots(1, 2, figsize=(15, 3))
    if title in categorical_ordinal2encode:
        vc = vc.reset_index()
        vc.rename(columns={'index': 'code'}, inplace=True)
        vc['index'] = vc['code'].copy()
        vc['index'] = vc['index'].map(categorical_ordinal2encode[title])
        vc.set_index('index', inplace=True)
        vc = vc.sort_index()
        sns.barplot(data=vc, x='code', y=title, color='violet', ax=axs[0])
    else:
        sns.barplot(x=vc.index, y=vc, color='violet', ax=axs[0])
    axs[0].set_title('BEFORE', fontsize=12)
    axs[0].set_ylabel('frequency')
    axs[0].set_xlabel('level')
    sns.barplot(x=vc_changed.index, y=vc_changed, color='violet', ax=axs[1])
    axs[1].set_title('AFTER', fontsize=12)
    axs[1].set_ylabel('frequency')
    axs[1].set_xlabel('level')
    fig.suptitle(title + ' (BEFORE - AFTER)', fontsize=15)
    plt.tight_layout()
    plt.show()
raw_test['SalePrice'] = np.zeros(raw_test.shape[0], dtype=np.int64)
raw_all = pd.concat((raw_train, raw_test), axis=0)
check_null_cols = raw_all.isna().sum()
check_null_cols_ver2 = check_null_cols[check_null_cols > 0] / raw_all.shape[0] * 100
check_null_cols_ver3 = check_null_cols_ver2[check_null_cols_ver2 > 50]
raw_all_ver2 = raw_all.drop(columns=check_null_cols_ver3.index)
print('Remove feature unnecessary')
print('Before', raw_all.shape)
print('After', raw_all_ver2.shape) | code |
90105356/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import shutil
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import statsmodels.api as sm
from scipy.stats import skew
from sklearn.preprocessing import OneHotEncoder
sns.set()
pd.set_option('display.max_columns', None)
pth_train = '../input/house-prices-advanced-regression-techniques/train.csv'
pth_test = '../input/house-prices-advanced-regression-techniques/test.csv'
raw_train = pd.read_csv(pth_train)
raw_test = pd.read_csv(pth_test)
categorical_nominal_cols = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation', 'Heating', 'Electrical', 'Functional', 'GarageType', 'MiscFeature', 'SaleType', 'SaleCondition']
categorical_ordinal_cols = ['OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence']
categorical_bool_cols = ['CentralAir']
categorical_ordinal2encode = {}
categorical_ordinal2encode['ExterQual'] = {'Po': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}
categorical_ordinal2encode['ExterCond'] = categorical_ordinal2encode['ExterQual'].copy()
categorical_ordinal2encode['BsmtQual'] = {'NA': 0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}
categorical_ordinal2encode['BsmtCond'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['BsmtExposure'] = {'NA': 0, 'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4}
categorical_ordinal2encode['BsmtFinType1'] = {'NA': 0, 'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6}
categorical_ordinal2encode['BsmtFinType2'] = categorical_ordinal2encode['BsmtFinType1'].copy()
categorical_ordinal2encode['HeatingQC'] = categorical_ordinal2encode['ExterQual'].copy()
categorical_ordinal2encode['KitchenQual'] = categorical_ordinal2encode['HeatingQC'].copy()
categorical_ordinal2encode['FireplaceQu'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['GarageFinish'] = {'NA': 0, 'Unf': 1, 'RFn': 2, 'Fin': 3}
categorical_ordinal2encode['GarageQual'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['GarageCond'] = categorical_ordinal2encode['BsmtQual'].copy()
categorical_ordinal2encode['PavedDrive'] = {'N': 0, 'P': 1, 'Y': 2}
categorical_ordinal2encode['PoolQC'] = {'NA': 0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4}
categorical_ordinal2encode['Fence'] = {'NA': 0, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4}
total_col = 4
total_row = len(categorical_ordinal_cols) // total_col
if len(categorical_ordinal_cols) % total_col > 0:
    total_row += 1
idx = 0
fig, axs = plt.subplots(total_row, total_col, figsize=(15, total_row * 4))
for i in range(total_row):
    for j in range(total_col):
        if idx < len(categorical_ordinal_cols):
            title = categorical_ordinal_cols[idx]
            if title in categorical_ordinal2encode:
                vc = raw_train[title].value_counts().reset_index()
                vc.rename(columns={'index': 'code'}, inplace=True)
                vc['index'] = vc['code'].copy()
                vc['index'] = vc['index'].map(categorical_ordinal2encode[title])
                vc.set_index('index', inplace=True)
                vc = vc.sort_index()
                sns.barplot(data=vc, x='code', y=title, color='orange', ax=axs[i][j])
            else:
                vc = raw_train[title].value_counts().sort_index()
                sns.barplot(x=vc.index, y=vc, color='orange', ax=axs[i][j])
            axs[i][j].set_ylabel('frequency')
            axs[i][j].set_xlabel('level')
            axs[i][j].set_title(title)
        idx += 1
plt.tight_layout()
plt.show()
ordinal_columns = ['OverallQual', 'OverallCond', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual', 'FireplaceQu', 'GarageFinish', 'GarageQual', 'PavedDrive', 'PoolQC', 'Fence']
ordinal_column_transforms = {}
ordinal_column_fillna = {}
ordinal_column_transforms['OverallQual'] = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 1, 2, 3, 4, 4, 4]]
ordinal_column_transforms['OverallCond'] = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 1, 2, 3, 4, 4, 4]]
ordinal_column_transforms['ExterQual'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 1]]
ordinal_column_transforms['ExterCond'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 1]]
ordinal_column_transforms['BsmtQual'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtCond'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtExposure'] = [['NA', 'No', 'Mn', 'Av', 'Gd'], [0, 0, 1, 2, 2]]
ordinal_column_transforms['BsmtFinType1'] = [['NA', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'], [0, 0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['BsmtFinType2'] = [['NA', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'], [0, 0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['HeatingQC'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 0, 1, 2, 2]]
ordinal_column_transforms['KitchenQual'] = [['Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 2, 2]]
ordinal_column_transforms['FireplaceQu'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['GarageFinish'] = [['NA', 'Unf', 'RFn', 'Fin'], [0, 1, 2, 3]]
ordinal_column_transforms['GarageQual'] = [['NA', 'Po', 'Fa', 'TA', 'Gd', 'Ex'], [0, 0, 1, 1, 2, 2]]
ordinal_column_transforms['PavedDrive'] = [['N', 'P', 'Y'], [0, 0, 1]]
ordinal_column_transforms['PoolQC'] = [['NA', 'Fa', 'TA', 'Gd', 'Ex'], [0, 1, 1, 2, 3]]
ordinal_column_transforms['Fence'] = [['NA', 'MnWw', 'GdWo', 'MnPrv', 'GdPrv'], [0, 0, 1, 1, 2]]
for title in ordinal_columns:
    print('raw_train', raw_train[title].isna().sum())
    print('raw_test', raw_test[title].isna().sum())
    vc = raw_train[title].value_counts().sort_index()
    raw_train[title].replace(ordinal_column_transforms[title][0], ordinal_column_transforms[title][1], inplace=True)
    raw_test[title].replace(ordinal_column_transforms[title][0], ordinal_column_transforms[title][1], inplace=True)
    raw_test[title].fillna(0, inplace=True)
    vc_changed = raw_train[title].value_counts().sort_index()
    total_row, total_vc = raw_train.shape[0], vc.sum()
    gap = total_row - total_vc
    print('total_row :', total_row)
    print('total value count :', total_vc)
    print('total null value :', gap, '\n')
    fig, axs = plt.subplots(1, 2, figsize=(15, 3))
    if title in categorical_ordinal2encode:
        vc = vc.reset_index()
        vc.rename(columns={'index': 'code'}, inplace=True)
        vc['index'] = vc['code'].copy()
        vc['index'] = vc['index'].map(categorical_ordinal2encode[title])
        vc.set_index('index', inplace=True)
        vc = vc.sort_index()
        sns.barplot(data=vc, x='code', y=title, color='violet', ax=axs[0])
    else:
        sns.barplot(x=vc.index, y=vc, color='violet', ax=axs[0])
    axs[0].set_title('BEFORE', fontsize=12)
    axs[0].set_ylabel('frequency')
    axs[0].set_xlabel('level')
    sns.barplot(x=vc_changed.index, y=vc_changed, color='violet', ax=axs[1])
    axs[1].set_title('AFTER', fontsize=12)
    axs[1].set_ylabel('frequency')
    axs[1].set_xlabel('level')
    fig.suptitle(title + ' (BEFORE - AFTER)', fontsize=15)
    plt.tight_layout()
    plt.show()
raw_test['SalePrice'] = np.zeros(raw_test.shape[0], dtype=np.int64)
raw_all = pd.concat((raw_train, raw_test), axis=0)
check_null_cols = raw_all.isna().sum()
check_null_cols_ver2 = check_null_cols[check_null_cols > 0] / raw_all.shape[0] * 100
check_null_cols_ver3 = check_null_cols_ver2[check_null_cols_ver2 > 50]
raw_all_ver2 = raw_all.drop(columns=check_null_cols_ver3.index)
print('Check empty cell')
check_null_cols = raw_all_ver2.isna().sum()
check_null_cols_ver2 = check_null_cols[check_null_cols > 0] / raw_all_ver2.shape[0] * 100
round(check_null_cols_ver2, 2) | code |
128016851/cell_25 | [
"image_output_1.png"
] | from sklearn.metrics import make_scorer,mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
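# Per-column overview: dtype plus duplicate, missing, and unique counts.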
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
# Explore pairwise correlation of features; mask the upper triangle to hide mirrored cells.
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt='.3f')
fig, axes = plt.subplots(6, 3, figsize=(13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
    ax = axes[i // 3, i % 3]
    sns.histplot(x=col, data=train_df, kde=True, ax=ax)
    ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)
X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)
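# Adjusted R^2 penalizes R^2 for model size: 1 - (1 - R^2) * (n - 1) / (n - k - 1) with n samples and k predictors.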
def adj_r2_score(predictors, targets, predictions):
r2 = r2_score(targets, predictions)
n = predictors.shape[0]
k = predictors.shape[1]
return 1 - (1 - r2) * (n - 1) / (n - k - 1)
def mape_score(targets, predictions):
return np.mean(np.abs(targets - predictions) / targets) * 100
def model_performance_regression(model, predictors, target):
"""
Function to compute different metrics to check regression model performance
model: regressor
predictors: independent variables
target: dependent variable
"""
pred = model.predict(predictors)
r2 = r2_score(target, pred)
adjr2 = adj_r2_score(predictors, target, pred)
rmse = np.sqrt(mean_squared_error(target, pred))
mae = mean_absolute_error(target, pred)
mape = mape_score(target, pred)
df_perf = pd.DataFrame({'RMSE': rmse, 'MAE': mae, 'R-squared': r2, 'Adj. R-squared': adjr2, 'MAPE': mape}, index=[0])
return df_perf
dt_regressor = DecisionTreeRegressor(random_state=1)
dt_regressor.fit(X_train, y_train)
dt_regressor_perf_test = model_performance_regression(dt_regressor, X_val, y_val)
dt_regressor_perf_test | code |
128016851/cell_30 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor, BaggingRegressor
from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt=".3f", ax=ax)
fig, axes = plt.subplots(6, 3, figsize = (13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
ax = axes[i//3, i%3]
sns.histplot(x = col, data = train_df, kde = True, ax = ax)
ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)
X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)
def adj_r2_score(predictors, targets, predictions):
r2 = r2_score(targets, predictions)
n = predictors.shape[0]
k = predictors.shape[1]
return 1 - (1 - r2) * (n - 1) / (n - k - 1)
def mape_score(targets, predictions):
return np.mean(np.abs(targets - predictions) / targets) * 100
def model_performance_regression(model, predictors, target):
"""
Function to compute different metrics to check regression model performance
model: regressor
predictors: independent variables
target: dependent variable
"""
pred = model.predict(predictors)
r2 = r2_score(target, pred)
adjr2 = adj_r2_score(predictors, target, pred)
rmse = np.sqrt(mean_squared_error(target, pred))
mae = mean_absolute_error(target, pred)
mape = mape_score(target, pred)
df_perf = pd.DataFrame({'RMSE': rmse, 'MAE': mae, 'R-squared': r2, 'Adj. R-squared': adjr2, 'MAPE': mape}, index=[0])
return df_perf
dtree_tuned = DecisionTreeRegressor(random_state=1)
parameters = {'max_depth': np.arange(2, 9), 'criterion': ['squared_error', 'friedman_mse'], 'min_samples_leaf': [1, 3, 5, 7], 'max_leaf_nodes': [2, 5, 7] + [None]}
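# greater_is_better=False makes GridSearchCV maximize the negated MAE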
scorer = make_scorer(mean_absolute_error, greater_is_better=False)
grid_obj = GridSearchCV(dtree_tuned, parameters, scoring=scorer, cv=5)
grid_obj = grid_obj.fit(X_train, y_train)
dtree_tuned_regressor = grid_obj.best_estimator_
dtree_tuned_regressor.fit(X_train, y_train)
features = list(X_train.columns)
importances = dtree_tuned_regressor.feature_importances_
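# argsort orders features from least to most important for the horizontal bar plot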
indices = np.argsort(importances)
plt.barh(range(len(indices)), importances[indices], color='violet', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
bagging_estimator = BaggingRegressor(random_state=1)
bagging_estimator.fit(X_train, y_train)
bagging_estimator_perf_test = model_performance_regression(bagging_estimator, X_val, y_val)
bagging_estimator_perf_test | code |
128016851/cell_20 | [
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt=".3f", ax=ax)
fig, axes = plt.subplots(6, 3, figsize = (13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
ax = axes[i//3, i%3]
sns.histplot(x = col, data = train_df, kde = True, ax = ax)
ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)
X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)
print(X_train.shape, X_val.shape, y_train.shape, y_val.shape) | code |
128016851/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
summary(train_df) | code |
128016851/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt=".3f", ax=ax)
fig, axes = plt.subplots(6, 3, figsize = (13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
ax = axes[i//3, i%3]
sns.histplot(x = col, data = train_df, kde = True, ax = ax)
ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)
X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)
def adj_r2_score(predictors, targets, predictions):
r2 = r2_score(targets, predictions)
n = predictors.shape[0]
k = predictors.shape[1]
return 1 - (1 - r2) * (n - 1) / (n - k - 1)
def mape_score(targets, predictions):
return np.mean(np.abs(targets - predictions) / targets) * 100
def model_performance_regression(model, predictors, target):
"""
Function to compute different metrics to check regression model performance
model: regressor
predictors: independent variables
target: dependent variable
"""
pred = model.predict(predictors)
r2 = r2_score(target, pred)
adjr2 = adj_r2_score(predictors, target, pred)
rmse = np.sqrt(mean_squared_error(target, pred))
mae = mean_absolute_error(target, pred)
mape = mape_score(target, pred)
df_perf = pd.DataFrame({'RMSE': rmse, 'MAE': mae, 'R-squared': r2, 'Adj. R-squared': adjr2, 'MAPE': mape}, index=[0])
return df_perf
dtree_tuned = DecisionTreeRegressor(random_state=1)
parameters = {'max_depth': np.arange(2, 9), 'criterion': ['squared_error', 'friedman_mse'], 'min_samples_leaf': [1, 3, 5, 7], 'max_leaf_nodes': [2, 5, 7] + [None]}
scorer = make_scorer(mean_absolute_error, greater_is_better=False)
grid_obj = GridSearchCV(dtree_tuned, parameters, scoring=scorer, cv=5)
grid_obj = grid_obj.fit(X_train, y_train)
dtree_tuned_regressor = grid_obj.best_estimator_
dtree_tuned_regressor.fit(X_train, y_train)
print('Best parameters are {} with CV score={}:'.format(grid_obj.best_params_, grid_obj.best_score_)) | code |
128016851/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns | code |
128016851/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T | code |
128016851/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt=".3f", ax=ax)
fig, axes = plt.subplots(6, 3, figsize = (13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
ax = axes[i//3, i%3]
sns.histplot(x = col, data = train_df, kde = True, ax = ax)
ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)
X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)
def adj_r2_score(predictors, targets, predictions):
r2 = r2_score(targets, predictions)
n = predictors.shape[0]
k = predictors.shape[1]
return 1 - (1 - r2) * (n - 1) / (n - k - 1)
def mape_score(targets, predictions):
return np.mean(np.abs(targets - predictions) / targets) * 100
def model_performance_regression(model, predictors, target):
"""
Function to compute different metrics to check regression model performance
model: regressor
predictors: independent variables
target: dependent variable
"""
pred = model.predict(predictors)
r2 = r2_score(target, pred)
adjr2 = adj_r2_score(predictors, target, pred)
rmse = np.sqrt(mean_squared_error(target, pred))
mae = mean_absolute_error(target, pred)
mape = mape_score(target, pred)
df_perf = pd.DataFrame({'RMSE': rmse, 'MAE': mae, 'R-squared': r2, 'Adj. R-squared': adjr2, 'MAPE': mape}, index=[0])
return df_perf
dtree_tuned = DecisionTreeRegressor(random_state=1)
parameters = {'max_depth': np.arange(2, 9), 'criterion': ['squared_error', 'friedman_mse'], 'min_samples_leaf': [1, 3, 5, 7], 'max_leaf_nodes': [2, 5, 7] + [None]}
scorer = make_scorer(mean_absolute_error, greater_is_better=False)
grid_obj = GridSearchCV(dtree_tuned, parameters, scoring=scorer, cv=5)
grid_obj = grid_obj.fit(X_train, y_train)
dtree_tuned_regressor = grid_obj.best_estimator_
dtree_tuned_regressor.fit(X_train, y_train)
features = list(X_train.columns)
importances = dtree_tuned_regressor.feature_importances_
indices = np.argsort(importances)
plt.figure(figsize=(5, 5))
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='violet', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show() | code |
128016851/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
summary(test_df) | code |
128016851/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt=".3f", ax=ax)
fig, axes = plt.subplots(6, 3, figsize=(13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
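# one histogram per numeric column; the red vertical line marks that column's mean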
for i, col in enumerate(train_df.columns):
ax = axes[i // 3, i % 3]
sns.histplot(x=col, data=train_df, kde=True, ax=ax)
ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1) | code |
128016851/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt=".3f", ax=ax)
fig, axes = plt.subplots(6, 3, figsize = (13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
ax = axes[i//3, i%3]
sns.histplot(x = col, data = train_df, kde = True, ax = ax)
ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
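# melt the features into long format so every column shares a single boxplot axis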
plt.figure(figsize=(15, 8))
sns.boxplot(x='variable', y='value', data=pd.melt(train_df.drop(['yield'], axis=1))).set_title('Boxplot of each feature', size=15)
plt.xticks(rotation=90)
plt.show() | code |
128016851/cell_31 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor, BaggingRegressor
from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt=".3f", ax=ax)
fig, axes = plt.subplots(6, 3, figsize = (13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
ax = axes[i//3, i%3]
sns.histplot(x = col, data = train_df, kde = True, ax = ax)
ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)
X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)
def adj_r2_score(predictors, targets, predictions):
r2 = r2_score(targets, predictions)
n = predictors.shape[0]
k = predictors.shape[1]
return 1 - (1 - r2) * (n - 1) / (n - k - 1)
def mape_score(targets, predictions):
return np.mean(np.abs(targets - predictions) / targets) * 100
def model_performance_regression(model, predictors, target):
"""
Function to compute different metrics to check regression model performance
model: regressor
predictors: independent variables
target: dependent variable
"""
pred = model.predict(predictors)
r2 = r2_score(target, pred)
adjr2 = adj_r2_score(predictors, target, pred)
rmse = np.sqrt(mean_squared_error(target, pred))
mae = mean_absolute_error(target, pred)
mape = mape_score(target, pred)
df_perf = pd.DataFrame({'RMSE': rmse, 'MAE': mae, 'R-squared': r2, 'Adj. R-squared': adjr2, 'MAPE': mape}, index=[0])
return df_perf
dtree_tuned = DecisionTreeRegressor(random_state=1)
parameters = {'max_depth': np.arange(2, 9), 'criterion': ['squared_error', 'friedman_mse'], 'min_samples_leaf': [1, 3, 5, 7], 'max_leaf_nodes': [2, 5, 7] + [None]}
scorer = make_scorer(mean_absolute_error, greater_is_better=False)
grid_obj = GridSearchCV(dtree_tuned, parameters, scoring=scorer, cv=5)
grid_obj = grid_obj.fit(X_train, y_train)
dtree_tuned_regressor = grid_obj.best_estimator_
dtree_tuned_regressor.fit(X_train, y_train)
features = list(X_train.columns)
importances = dtree_tuned_regressor.feature_importances_
indices = np.argsort(importances)
plt.barh(range(len(indices)), importances[indices], color='violet', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
bagging_tuned = BaggingRegressor(random_state=1)
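# tune the ensemble size and the per-estimator row/feature sampling fractions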
parameters = {'n_estimators': [10, 15, 20, 50, 100, 200], 'max_samples': [0.8, 1], 'max_features': [0.8, 1]}
scorer = make_scorer(mean_absolute_error, greater_is_better=False)
grid_obj = GridSearchCV(bagging_tuned, parameters, scoring=scorer, cv=5)
grid_obj = grid_obj.fit(X_train, y_train)
bagging_tuned_regressor = grid_obj.best_estimator_
bagging_tuned_regressor.fit(X_train, y_train)
print('Best parameters are {} with CV score={}:'.format(grid_obj.best_params_, grid_obj.best_score_)) | code |
128016851/cell_14 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt='.3f', ax=ax)
128016851/cell_27 | [
"text_html_output_1.png"
] | from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt=".3f", ax=ax)
fig, axes = plt.subplots(6, 3, figsize = (13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
ax = axes[i//3, i%3]
sns.histplot(x = col, data = train_df, kde = True, ax = ax)
ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)
X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)
def adj_r2_score(predictors, targets, predictions):
r2 = r2_score(targets, predictions)
n = predictors.shape[0]
k = predictors.shape[1]
return 1 - (1 - r2) * (n - 1) / (n - k - 1)
def mape_score(targets, predictions):
return np.mean(np.abs(targets - predictions) / targets) * 100
def model_performance_regression(model, predictors, target):
"""
Function to compute different metrics to check regression model performance
model: regressor
predictors: independent variables
target: dependent variable
"""
pred = model.predict(predictors)
r2 = r2_score(target, pred)
adjr2 = adj_r2_score(predictors, target, pred)
rmse = np.sqrt(mean_squared_error(target, pred))
mae = mean_absolute_error(target, pred)
mape = mape_score(target, pred)
df_perf = pd.DataFrame({'RMSE': rmse, 'MAE': mae, 'R-squared': r2, 'Adj. R-squared': adjr2, 'MAPE': mape}, index=[0])
return df_perf
dtree_tuned = DecisionTreeRegressor(random_state=1)
parameters = {'max_depth': np.arange(2, 9), 'criterion': ['squared_error', 'friedman_mse'], 'min_samples_leaf': [1, 3, 5, 7], 'max_leaf_nodes': [2, 5, 7] + [None]}
scorer = make_scorer(mean_absolute_error, greater_is_better=False)
grid_obj = GridSearchCV(dtree_tuned, parameters, scoring=scorer, cv=5)
grid_obj = grid_obj.fit(X_train, y_train)
dtree_tuned_regressor = grid_obj.best_estimator_
dtree_tuned_regressor.fit(X_train, y_train)
dtree_tuned_regressor_perf_test = model_performance_regression(dtree_tuned_regressor, X_val, y_val)
dtree_tuned_regressor_perf_test | code |
128016851/cell_12 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
def summary(df):
result = pd.DataFrame(df.dtypes, columns=['data type'])
result['#duplicate'] = df.duplicated().sum()
result['#missing'] = df.isnull().sum().values
result['#unique'] = df.nunique().values
return result
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
test_df.describe().T | code |
74050861/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
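# note: mode()[1] picks the second most frequent Cabin value; mode()[0] is the usual choice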
train_data['Cabin'] = train_data.Cabin.fillna(train_data['Cabin'].mode()[1])
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
train_data['Embarked'].value_counts() | code |
74050861/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any() | code |
74050861/cell_30 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
test_data.columns | code |
74050861/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
train_data['Cabin'] = train_data.Cabin.fillna(train_data['Cabin'].mode()[1])
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
train_data.columns
train_data.groupby('Cabin')['Survived'].sum().sort_values(ascending=False)
train_data.groupby('Survived').mean()
train_data.dtypes | code |
74050861/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape | code |
74050861/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
train_data['Cabin'] = train_data.Cabin.fillna(train_data['Cabin'].mode()[1])
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
train_data.columns
train_data.groupby('Cabin')['Survived'].sum().sort_values(ascending=False)
train_data.groupby('Survived').mean()
train_data.dtypes
x = train_data.drop(['Name', 'Cabin', 'Ticket', 'Survived'], axis=1)
y = train_data[['Survived']]
x = pd.get_dummies(x)
rm_model = RandomForestClassifier(n_estimators=10)
rm_model.fit(x, y)
y_pred = rm_model.predict(x)
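# note: the report below is computed on the training data, so the scores are optimistic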
print(classification_report(y, y_pred)) | code |
74050861/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
train_data['Cabin'].value_counts() | code |
74050861/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
train_data['Cabin'] = train_data.Cabin.fillna(train_data['Cabin'].mode()[1])
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
train_data.columns
train_data.groupby('Cabin')['Survived'].sum().sort_values(ascending=False)
train_data.groupby('Survived').mean() | code |
74050861/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
74050861/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.info() | code |
74050861/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
train_data['Cabin'] = train_data.Cabin.fillna(train_data['Cabin'].mode()[1])
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
train_data.columns
train_data.groupby('Cabin')['Survived'].sum().sort_values(ascending=False) | code |
74050861/cell_32 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
test_data.columns
test_data = test_data.drop(['Cabin', 'Name', 'Ticket'], axis=1)
test_data.isnull().sum() | code |
74050861/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.describe() | code |
74050861/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
train_data['Cabin'] = train_data.Cabin.fillna(train_data['Cabin'].mode()[1])
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
train_data.columns | code |
74050861/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
train_data['Cabin'] = train_data.Cabin.fillna(train_data['Cabin'].mode()[1])
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
train_data.columns
plt.figure(figsize=(20, 16))
sns.heatmap(train_data.corr(), annot=True) | code |
74050861/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
train_data['Cabin'] = train_data.Cabin.fillna(train_data['Cabin'].mode()[1])
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
train_data.columns
sns.countplot(x='Survived', hue='Sex', data=train_data) | code |
74050861/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
train_data['Cabin'] = train_data.Cabin.fillna(train_data['Cabin'].mode()[1])
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
train_data.columns
train_data.groupby('Cabin')['Survived'].sum().sort_values(ascending=False)
train_data.groupby('Survived').mean()
train_data.dtypes
x = train_data.drop(['Name', 'Cabin', 'Ticket', 'Survived'], axis=1)
y = train_data[['Survived']]
x.head() | code |
74050861/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100) | code |
74050861/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.shape
train_data.isnull().any()
train_data.apply(lambda x: x.isnull().sum() / len(x) * 100)
train_data['Cabin'] = train_data.Cabin.fillna(train_data['Cabin'].mode()[1])
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
train_data.columns
train_data.groupby('Cabin')['Survived'].sum().sort_values(ascending=False)
train_data.groupby('Survived').mean()
train_data.dtypes
x = train_data.drop(['Name', 'Cabin', 'Ticket', 'Survived'], axis=1)
y = train_data[['Survived']]
x = pd.get_dummies(x)
rm_model = RandomForestClassifier(n_estimators=10)
rm_model.fit(x, y) | code |
74050861/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.head(10) | code |
73067436/cell_20 | [
"text_plain_output_1.png"
] | from circlify import circlify, Circle
from warnings import filterwarnings
import matplotlib.lines as lines
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.gridspec as gridspec
import seaborn as sns
from warnings import filterwarnings
filterwarnings('ignore')
plt.rcParams['font.family'] = 'monospace'
df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
cmap0 = ['#68595b', '#7098af', '#6f636c', '#907c7b']
cmap1 = ['#484146', '#8da0b3', '#796d72', '#9fa9ba']
cmap2 = ['#545457', '#a79698', '#5284a2', '#bbbcc4']
bg_color = '#fbfbfb'
txt_color = '#5c5c5c'
# check for missing values
fig, ax = plt.subplots(tight_layout=True, figsize=(12,6))
fig.patch.set_facecolor(bg_color)
ax.set_facecolor(bg_color)
mv = df.isna()
ax = sns.heatmap(data=mv, cmap=sns.color_palette(cmap0), cbar=False, ax=ax, )
ax.set_ylabel('')
ax.set_yticks([])
ax.set_xticklabels(labels=mv.columns,rotation=45)
ax.tick_params(length=0)
fig.text(
s=':Missing Values',
x=0, y=1.1,
fontsize=17, fontweight='bold',
color=txt_color,
va='top', ha='left'
)
fig.text(
s='''
we can't see any ...
''',
x=0, y=1.075,
fontsize=11, fontstyle='italic',
color=txt_color,
va='top', ha='left'
)
plt.show()
def despine_ax(ax, spines=['top', 'left', 'right', 'bottom']):
for spine in spines:
ax.spines[spine].set_visible(False)
def get_line(x=[0, 0], y=[0, 0], alpha=0.5, lw=1):
return lines.Line2D(xdata=x, ydata=y, lw=lw, alpha=alpha, color='#aeaeae', transform=fig.transFigure, figure=fig)
fig, (ax0, ax1) = plt.subplots(2, 1, tight_layout=True, sharex=True, figsize=(12,6))
fig.patch.set_facecolor(bg_color)
mean = df['posttest'].mean()
median = df['posttest'].median()
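# boxplot above a shared-axis KDE; dotted vertical line marks the mean, dashed marks the median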
ax0.boxplot(
data=df, x='posttest',
vert=False, patch_artist=True,
boxprops=dict(facecolor=cmap0[1], lw=0, alpha=0.75),
whiskerprops=dict(color='gray', lw=1, ls='--'),
capprops=dict(color='gray', lw=1, ls='--'),
medianprops=dict(color='#fff', lw=0),
flierprops=dict(markerfacecolor=cmap0[0],alpha=0.75),
zorder=0
)
ax1 = sns.kdeplot(
data=df, x='posttest', shade=True,
color=cmap0[0], edgecolor='#000', lw=1,
zorder=0, alpha=0.8, ax=ax1
)
ax0.axvline(x=mean, ymin=0.4, ymax=0.6, color=bg_color, ls=':', zorder=1, label='mean')
ax1.axvline(x=mean, ymin=0, ymax=0.9, color=bg_color, ls=':', zorder=1)
ax0.axvline(x=median, ymin=0.4, ymax=0.6, color=bg_color, ls='--', zorder=1)
ax1.axvline(x=median, ymin=0, ymax=0.9, color=bg_color, ls='--', zorder=1)
ax0.axis('off')
ax0.set_facecolor(bg_color)
ax1.set_ylabel('')
ax1.set_xlabel('')
ax1.set_yticks([])
ax1.tick_params(length=0)
ax1.set_facecolor(bg_color)
despine_ax(ax1, ['top','left','right'])
fig.text(
s=':Posttest - Distribution',
x=0, y=1.05,
fontsize=17, fontweight='bold',
color=txt_color,
va='top', ha='left'
)
fig.text(
s='''
in the plot below we can see signs
    of a bimodal distribution, with
one peak at around 57-62 and the other
at approx. 72-79 points.
''',
x=0, y=1.02,
fontsize=11, fontstyle='italic',
color=txt_color,
va='top', ha='left'
)
fig.text(
s=f"Mean: {np.round(mean,1)}\nMedian: {np.round(median,1)}",
x=0.56, y=0.925,
fontsize=9, fontstyle='italic',
color=txt_color,
va='top', ha='left'
)
l1 = get_line(x=[0.55,0.55], y=[0.85,0.95])
fig.lines.extend([l1])
plt.show()
fig, ax = plt.subplots(tight_layout=True, figsize=(12,2.5))
fig.patch.set_facecolor(bg_color)
uniq_scores = df['posttest'].nunique()
ax.barh(
y=1, width=uniq_scores,
color=cmap0[1], alpha=0.75,lw=1, edgecolor='white'
)
ax.barh(
y=1, width=100-uniq_scores, left=uniq_scores,
color=cmap1[1], alpha=0.25, lw=1, edgecolor='white'
)
ax.axis('off')
ax.annotate(
s=f"{uniq_scores}",
xy=(35,1.05),
va='center', ha='center',
fontsize=36, fontweight='bold', fontfamily='serif',
color='#fff'
)
ax.annotate(
s='unqiue scores',
xy=(35,0.85),
va='center', ha='center',
fontsize=16, fontstyle='italic', fontfamily='serif',
color='#fff'
)
fig.text(
s=':Unique Number of Scores',
x=0, y=1.25,
fontsize=17, fontweight='bold',
color=txt_color,
va='top', ha='left'
)
fig.text(
s='''
    68 distinct scores were achieved
    out of a total of 100 possible outcomes.
''',
x=0, y=1.2,
fontsize=11, fontstyle='italic',
color=txt_color,
va='top', ha='left'
)
l1 = get_line(x=[0.645,0.645], y=[0,1], lw=3, alpha=1)
fig.lines.extend([l1])
plt.show()
from circlify import circlify, Circle
schools_by_num_students = df.groupby('school').count()[['posttest']].reset_index().sort_values(by='posttest', ascending=False).rename(columns={'posttest': 'count'})
schools_by_num_students['ratio'] = df['school'].value_counts().values / len(df['school'])
schools_by_num_students = schools_by_num_students[:10]
fig, ax = plt.subplots(tight_layout=True, figsize=(8, 8))
fig.patch.set_facecolor(bg_color)
ax.patch.set_facecolor(bg_color)
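# circle packing via circlify: one circle per school, area proportional to its student count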
circles = circlify(data=schools_by_num_students['count'].tolist(), show_enclosure=False, target_enclosure=Circle(x=0, y=0, r=1))
lim = max((max(abs(circle.x) + circle.r, abs(circle.y) + circle.r) for circle in circles))
ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
labels = schools_by_num_students['school'][::-1]
counts = schools_by_num_students['count'][::-1]
ratios = schools_by_num_students['ratio'][::-1]
for circle, label, count, ratio in zip(circles, labels, counts, ratios):
x, y, r = circle
ax.add_patch(plt.Circle((x, y), r, lw=1, fill=True, alpha=1 * (ratio * 10), facecolor=cmap0[1]))
ax.annotate(s=f'{label}', xy=(x, y), fontweight='bold', va='center', ha='center', color='#fff')
ax.annotate(s=f'#{count} ({int(ratio * 100)}%)', xy=(x, y - 0.04), fontstyle='italic', fontsize=9, va='center', ha='center', color='#fff')
ax.axis('off')
fig.text(s=':TOP 10 - Schools', x=0, y=1, fontsize=17, fontweight='bold', color=txt_color, va='top', ha='left')
fig.text(s='\n by number of students\n ', x=0, y=0.985, fontsize=11, fontstyle='italic', color=txt_color, va='top', ha='left')
fig.text(s=f"{df['school'].nunique()}", x=1.04, y=0.8, fontsize=52, fontfamily='serif', color=txt_color, va='top', ha='left')
fig.text(s='\n unique \n schools', x=1.13, y=0.82, fontsize=11, fontfamily='serif', color=txt_color, va='top', ha='left')
fig.text(s='\n The students are nearly \n equally distributed\n among the total of 23 \n different schools\n ', x=1, y=0.65, fontsize=11, fontstyle='italic', color=txt_color, va='top', ha='left')
l1 = get_line(x=[1, 1], y=[0.45, 0.8])
fig.lines.extend([l1])
plt.show() | code |
73067436/cell_11 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
df.info() | code |
73067436/cell_19 | [
"image_output_1.png"
] | !pip install circlify | code |
73067436/cell_8 | [
"image_output_1.png"
] | import seaborn as sns
cmap0 = ['#68595b', '#7098af', '#6f636c', '#907c7b']
cmap1 = ['#484146', '#8da0b3', '#796d72', '#9fa9ba']
cmap2 = ['#545457', '#a79698', '#5284a2', '#bbbcc4']
bg_color = '#fbfbfb'
txt_color = '#5c5c5c'
sns.palplot(cmap0)
sns.palplot(cmap1)
sns.palplot(cmap2) | code |
73067436/cell_15 | [
"image_output_1.png"
] | from warnings import filterwarnings
import matplotlib.lines as lines
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.gridspec as gridspec
import seaborn as sns
from warnings import filterwarnings
filterwarnings('ignore')
plt.rcParams['font.family'] = 'monospace'
df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
cmap0 = ['#68595b', '#7098af', '#6f636c', '#907c7b']
cmap1 = ['#484146', '#8da0b3', '#796d72', '#9fa9ba']
cmap2 = ['#545457', '#a79698', '#5284a2', '#bbbcc4']
bg_color = '#fbfbfb'
txt_color = '#5c5c5c'
# check for missing values
fig, ax = plt.subplots(tight_layout=True, figsize=(12,6))
fig.patch.set_facecolor(bg_color)
ax.set_facecolor(bg_color)
mv = df.isna()
ax = sns.heatmap(data=mv, cmap=sns.color_palette(cmap0), cbar=False, ax=ax, )
ax.set_ylabel('')
ax.set_yticks([])
ax.set_xticklabels(labels=mv.columns,rotation=45)
ax.tick_params(length=0)
fig.text(
s=':Missing Values',
x=0, y=1.1,
fontsize=17, fontweight='bold',
color=txt_color,
va='top', ha='left'
)
fig.text(
s='''
we can't see any ...
''',
x=0, y=1.075,
fontsize=11, fontstyle='italic',
color=txt_color,
va='top', ha='left'
)
plt.show()
def despine_ax(ax, spines=['top', 'left', 'right', 'bottom']):
for spine in spines:
ax.spines[spine].set_visible(False)
def get_line(x=[0, 0], y=[0, 0], alpha=0.5, lw=1):
return lines.Line2D(xdata=x, ydata=y, lw=lw, alpha=alpha, color='#aeaeae', transform=fig.transFigure, figure=fig)
fig, (ax0, ax1) = plt.subplots(2, 1, tight_layout=True, sharex=True, figsize=(12, 6))
fig.patch.set_facecolor(bg_color)
mean = df['posttest'].mean()
median = df['posttest'].median()
ax0.boxplot(data=df, x='posttest', vert=False, patch_artist=True, boxprops=dict(facecolor=cmap0[1], lw=0, alpha=0.75), whiskerprops=dict(color='gray', lw=1, ls='--'), capprops=dict(color='gray', lw=1, ls='--'), medianprops=dict(color='#fff', lw=0), flierprops=dict(markerfacecolor=cmap0[0], alpha=0.75), zorder=0)
ax1 = sns.kdeplot(data=df, x='posttest', shade=True, color=cmap0[0], edgecolor='#000', lw=1, zorder=0, alpha=0.8, ax=ax1)
ax0.axvline(x=mean, ymin=0.4, ymax=0.6, color=bg_color, ls=':', zorder=1, label='mean')
ax1.axvline(x=mean, ymin=0, ymax=0.9, color=bg_color, ls=':', zorder=1)
ax0.axvline(x=median, ymin=0.4, ymax=0.6, color=bg_color, ls='--', zorder=1)
ax1.axvline(x=median, ymin=0, ymax=0.9, color=bg_color, ls='--', zorder=1)
ax0.axis('off')
ax0.set_facecolor(bg_color)
ax1.set_ylabel('')
ax1.set_xlabel('')
ax1.set_yticks([])
ax1.tick_params(length=0)
ax1.set_facecolor(bg_color)
despine_ax(ax1, ['top', 'left', 'right'])
fig.text(s=':Posttest - Distribution', x=0, y=1.05, fontsize=17, fontweight='bold', color=txt_color, va='top', ha='left')
fig.text(s='\n in the plot below we can see signs\n of a bimodal distribution, with \n one peak at around 57-62 and the other \n at approx. 72-79 points.\n ', x=0, y=1.02, fontsize=11, fontstyle='italic', color=txt_color, va='top', ha='left')
fig.text(s=f'Mean: {np.round(mean, 1)}\nMedian: {np.round(median, 1)}', x=0.56, y=0.925, fontsize=9, fontstyle='italic', color=txt_color, va='top', ha='left')
l1 = get_line(x=[0.55, 0.55], y=[0.85, 0.95])
fig.lines.extend([l1])
plt.show() | code |
73067436/cell_16 | [
"image_output_1.png"
] | from warnings import filterwarnings
import matplotlib.lines as lines
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.gridspec as gridspec
import seaborn as sns
from warnings import filterwarnings
filterwarnings('ignore')
plt.rcParams['font.family'] = 'monospace'
df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
cmap0 = ['#68595b', '#7098af', '#6f636c', '#907c7b']
cmap1 = ['#484146', '#8da0b3', '#796d72', '#9fa9ba']
cmap2 = ['#545457', '#a79698', '#5284a2', '#bbbcc4']
bg_color = '#fbfbfb'
txt_color = '#5c5c5c'
# check for missing values
fig, ax = plt.subplots(tight_layout=True, figsize=(12,6))
fig.patch.set_facecolor(bg_color)
ax.set_facecolor(bg_color)
mv = df.isna()
ax = sns.heatmap(data=mv, cmap=sns.color_palette(cmap0), cbar=False, ax=ax, )
ax.set_ylabel('')
ax.set_yticks([])
ax.set_xticklabels(labels=mv.columns,rotation=45)
ax.tick_params(length=0)
fig.text(
s=':Missing Values',
x=0, y=1.1,
fontsize=17, fontweight='bold',
color=txt_color,
va='top', ha='left'
)
fig.text(
s='''
we can't see any ...
''',
x=0, y=1.075,
fontsize=11, fontstyle='italic',
color=txt_color,
va='top', ha='left'
)
plt.show()
def despine_ax(ax, spines=['top', 'left', 'right', 'bottom']):
for spine in spines:
ax.spines[spine].set_visible(False)
def get_line(x=[0, 0], y=[0, 0], alpha=0.5, lw=1):
return lines.Line2D(xdata=x, ydata=y, lw=lw, alpha=alpha, color='#aeaeae', transform=fig.transFigure, figure=fig)
fig, (ax0, ax1) = plt.subplots(2, 1, tight_layout=True, sharex=True, figsize=(12,6))
fig.patch.set_facecolor(bg_color)
mean = df['posttest'].mean()
median = df['posttest'].median()
ax0.boxplot(
data=df, x='posttest',
vert=False, patch_artist=True,
boxprops=dict(facecolor=cmap0[1], lw=0, alpha=0.75),
whiskerprops=dict(color='gray', lw=1, ls='--'),
capprops=dict(color='gray', lw=1, ls='--'),
medianprops=dict(color='#fff', lw=0),
flierprops=dict(markerfacecolor=cmap0[0],alpha=0.75),
zorder=0
)
ax1 = sns.kdeplot(
data=df, x='posttest', shade=True,
color=cmap0[0], edgecolor='#000', lw=1,
zorder=0, alpha=0.8, ax=ax1
)
ax0.axvline(x=mean, ymin=0.4, ymax=0.6, color=bg_color, ls=':', zorder=1, label='mean')
ax1.axvline(x=mean, ymin=0, ymax=0.9, color=bg_color, ls=':', zorder=1)
ax0.axvline(x=median, ymin=0.4, ymax=0.6, color=bg_color, ls='--', zorder=1)
ax1.axvline(x=median, ymin=0, ymax=0.9, color=bg_color, ls='--', zorder=1)
ax0.axis('off')
ax0.set_facecolor(bg_color)
ax1.set_ylabel('')
ax1.set_xlabel('')
ax1.set_yticks([])
ax1.tick_params(length=0)
ax1.set_facecolor(bg_color)
despine_ax(ax1, ['top','left','right'])
fig.text(
s=':Posttest - Distribution',
x=0, y=1.05,
fontsize=17, fontweight='bold',
color=txt_color,
va='top', ha='left'
)
fig.text(
s='''
in the plot below we can see signs
    of a bimodal distribution, with
one peak at around 57-62 and the other
at approx. 72-79 points.
''',
x=0, y=1.02,
fontsize=11, fontstyle='italic',
color=txt_color,
va='top', ha='left'
)
fig.text(
s=f"Mean: {np.round(mean,1)}\nMedian: {np.round(median,1)}",
x=0.56, y=0.925,
fontsize=9, fontstyle='italic',
color=txt_color,
va='top', ha='left'
)
l1 = get_line(x=[0.55,0.55], y=[0.85,0.95])
fig.lines.extend([l1])
plt.show()
fig, ax = plt.subplots(tight_layout=True, figsize=(12, 2.5))
fig.patch.set_facecolor(bg_color)
uniq_scores = df['posttest'].nunique()
ax.barh(y=1, width=uniq_scores, color=cmap0[1], alpha=0.75, lw=1, edgecolor='white')
ax.barh(y=1, width=100 - uniq_scores, left=uniq_scores, color=cmap1[1], alpha=0.25, lw=1, edgecolor='white')
ax.axis('off')
ax.annotate(s=f'{uniq_scores}', xy=(35, 1.05), va='center', ha='center', fontsize=36, fontweight='bold', fontfamily='serif', color='#fff')
ax.annotate(s='unique scores', xy=(35, 0.85), va='center', ha='center', fontsize=16, fontstyle='italic', fontfamily='serif', color='#fff')
fig.text(s=':Unique Number of Scores', x=0, y=1.25, fontsize=17, fontweight='bold', color=txt_color, va='top', ha='left')
fig.text(s='\n 68 distinct scores were achieved \n out of a total of 100 possible outcomes.\n ', x=0, y=1.2, fontsize=11, fontstyle='italic', color=txt_color, va='top', ha='left')
l1 = get_line(x=[0.645, 0.645], y=[0, 1], lw=3, alpha=1)
fig.lines.extend([l1])
plt.show() | code |
73067436/cell_10 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
print(f'Shape: {df.shape}')
print('--' * 20)
df.head(3) | code |
73067436/cell_12 | [
"text_plain_output_1.png"
] | from warnings import filterwarnings
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.gridspec as gridspec
import seaborn as sns
from warnings import filterwarnings
filterwarnings('ignore')
plt.rcParams['font.family'] = 'monospace'
df = pd.read_csv('../input/predict-test-scores-of-students/test_scores.csv')
cmap0 = ['#68595b', '#7098af', '#6f636c', '#907c7b']
cmap1 = ['#484146', '#8da0b3', '#796d72', '#9fa9ba']
cmap2 = ['#545457', '#a79698', '#5284a2', '#bbbcc4']
bg_color = '#fbfbfb'
txt_color = '#5c5c5c'
fig, ax = plt.subplots(tight_layout=True, figsize=(12, 6))
fig.patch.set_facecolor(bg_color)
ax.set_facecolor(bg_color)
mv = df.isna()
ax = sns.heatmap(data=mv, cmap=sns.color_palette(cmap0), cbar=False, ax=ax)
ax.set_ylabel('')
ax.set_yticks([])
ax.set_xticklabels(labels=mv.columns, rotation=45)
ax.tick_params(length=0)
fig.text(s=':Missing Values', x=0, y=1.1, fontsize=17, fontweight='bold', color=txt_color, va='top', ha='left')
fig.text(s="\n we can't see any ...\n ", x=0, y=1.075, fontsize=11, fontstyle='italic', color=txt_color, va='top', ha='left')
plt.show() | code |
33115163/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
train.info() | code |
33115163/cell_57 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.models import Sequential
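# MLP: 20 input features -> two 25-unit ReLU hidden layers -> 4-class softmax output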
model_1 = Sequential()
model_1.add(Dense(25, input_dim=20, activation='relu'))
model_1.add(Dense(25, activation='relu'))
model_1.add(Dense(4, activation='softmax'))
model_1.summary()
model_1.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
hist_1 = model_1.fit(X_train, y_train, epochs=20, batch_size=25, validation_data=(X_val, y_val)) | code |
33115163/cell_56 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.models import Sequential
model_1 = Sequential()
model_1.add(Dense(25, input_dim=20, activation='relu'))
model_1.add(Dense(25, activation='relu'))
model_1.add(Dense(4, activation='softmax'))
model_1.summary() | code |
33115163/cell_30 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
#variables with symmetrical distributions
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_no_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_no_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
group_skewed = train.groupby('price_range')[skewed].median().reset_index()
fig = plt.figure(figsize=(15, 20))
for i, col in enumerate(group_skewed.iloc[:, 1:].columns):
ax = plt.subplot(5, 3, i + 1)
group_skewed.iloc[:, 1:][col].plot.bar(ax=ax).tick_params(axis='x', labelrotation=360)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1))
plt.show() | code |
33115163/cell_33 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
#variables with symmetrical distributions
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_no_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_no_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
#variables with skewed distributions
group_skewed = train.groupby('price_range')[skewed].median().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
sns.catplot(x='price_range', col='blue', hue='wifi', data=train, kind='count', col_wrap=2)
33115163/cell_44 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
#variables with symmetrical distributions
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_no_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_no_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
#variables with skewed distributions
group_skewed = train.groupby('price_range')[skewed].median().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
from sklearn.preprocessing import MinMaxScaler
scaler_train = MinMaxScaler()
train_num_scaled = scaler_train.fit_transform(train[numerical])
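# fit_transform learns each feature's min/max from the training data and rescales it to [0, 1];
# data_max_ / data_min_ expose those learned per-feature extremes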
scaler_train.data_max_
scaler_train.data_min_
train_num_scaled = pd.DataFrame(train_num_scaled, columns=train[numerical].columns)
train_num_scaled | code |
33115163/cell_55 | [
"text_html_output_1.png"
] | import tensorflow.keras
from keras.models import Sequential
from keras.layers import Dense | code |
33115163/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
train.head() | code |
33115163/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15, 20))
for i, col in enumerate(group_no_skewed.iloc[:, 1:].columns):
ax = plt.subplot(5, 3, i + 1)
group_no_skewed.iloc[:, 1:][col].plot.bar(ax=ax).tick_params(axis='x', labelrotation=360)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1))
plt.show() | code |
33115163/cell_39 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
#variables with symmetrical distributions
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_no_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_no_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
#variables with skewed distributions
group_skewed = train.groupby('price_range')[skewed].median().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
sns.catplot('price_range', col='touch_screen', data=train, kind='count') | code |
33115163/cell_26 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
train.groupby('price_range').mean()['ram'].plot(kind='bar', legend=True).tick_params(axis='x', labelrotation=360) | code |
33115163/cell_61 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from keras.layers import Dense
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
#variables with symmetrical distributions
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_no_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_no_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
#variables with skewed distributions
group_skewed = train.groupby('price_range')[skewed].median().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
from sklearn.preprocessing import MinMaxScaler
scaler_train = MinMaxScaler()
train_num_scaled = scaler_train.fit_transform(train[numerical])
scaler_train.data_max_
scaler_train.data_min_
train_num_scaled = pd.DataFrame(train_num_scaled, columns=train[numerical].columns)
train_num_scaled
from sklearn.preprocessing import MinMaxScaler
scaler_test = MinMaxScaler()
test_num_scaled = scaler_test.fit_transform(test[numerical])
scaler_test.data_max_
scaler_test.data_min_
test_num_scaled = pd.DataFrame(test_num_scaled, columns=test[numerical].columns)
test_final = pd.concat([test[categorical], test_num_scaled], axis=1)
import tensorflow as tf
X = pd.concat([train[categorical], train_num_scaled], axis=1)
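# one-hot encode the 4 price classes to match the softmax output layer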
y = tf.keras.utils.to_categorical(train['price_range'], 4)
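# assumed split (the notebook's own train/validation split cell is not part of
# this record): hold out 20% of the rows for validation
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)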
model_1 = Sequential()
model_1.add(Dense(25, input_dim=20, activation='relu'))
model_1.add(Dense(25, activation='relu'))
model_1.add(Dense(4, activation='softmax'))
model_1.summary()
# y is one-hot encoded above, so the non-sparse categorical loss matches its shape
model_1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
hist_1 = model_1.fit(X_train, y_train, epochs=20, batch_size=25, validation_data=(X_val, y_val))
score = model_1.evaluate(X_val, y_val, verbose=0)
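# argmax maps each row of softmax probabilities back to a class label 0-3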
prediction_test = np.argmax(model_1.predict(test_final), axis=1)
pd.DataFrame({'id': test['id'], 'price_range': prediction_test}) | code |
33115163/cell_60 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.models import Sequential
model_1 = Sequential()
model_1.add(Dense(25, input_dim=20, activation='relu'))
model_1.add(Dense(25, activation='relu'))
model_1.add(Dense(4, activation='softmax'))
model_1.summary()
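# assumed: y was one-hot encoded with to_categorical elsewhere in the notebook
# (hence the non-sparse loss below), and X_train / y_train / X_val / y_val come
# from a split cell that is not part of this record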
model_1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
hist_1 = model_1.fit(X_train, y_train, epochs=20, batch_size=25, validation_data=(X_val, y_val))
score = model_1.evaluate(X_val, y_val, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1]) | code |
33115163/cell_50 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
#variables with symmetrical distributions
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_no_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_no_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
#variables with skewed distributions
group_skewed = train.groupby('price_range')[skewed].median().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
from sklearn.preprocessing import MinMaxScaler
scaler_train = MinMaxScaler()
train_num_scaled = scaler_train.fit_transform(train[numerical])
scaler_train.data_max_
scaler_train.data_min_
train_num_scaled = pd.DataFrame(train_num_scaled, columns=train[numerical].columns)
train_num_scaled
from sklearn.preprocessing import MinMaxScaler
scaler_test = MinMaxScaler()
test_num_scaled = scaler_test.fit_transform(test[numerical])
scaler_test.data_max_
scaler_test.data_min_
test_num_scaled = pd.DataFrame(test_num_scaled, columns=test[numerical].columns)
test_final = pd.concat([test[categorical], test_num_scaled], axis=1)
import tensorflow as tf
X = pd.concat([train[categorical], train_num_scaled], axis=1)
y = tf.keras.utils.to_categorical(train['price_range'], 4)
X.head() | code |
33115163/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
test.head() | code |
33115163/cell_45 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
from sklearn.preprocessing import MinMaxScaler
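# caveat: fitting a fresh scaler on the test set maps its features with different
# min/max than the training scaler; reusing the train-fitted scaler via
# scaler_train.transform(test[numerical]) would keep both sets on the same scale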
scaler_test = MinMaxScaler()
test_num_scaled = scaler_test.fit_transform(test[numerical])
scaler_test.data_max_
scaler_test.data_min_ | code |
33115163/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
fig = plt.figure(figsize=(15, 20))
for i, col in enumerate(numerical):
ax = plt.subplot(5, 3, i + 1)
train[col].plot.hist(ax=ax).tick_params(axis='x', labelrotation=360)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1))
plt.show() | code |
33115163/cell_51 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
#variables with symmetrical distributions
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_no_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_no_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
#variables with skewed distributions
group_skewed = train.groupby('price_range')[skewed].median().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
from sklearn.preprocessing import MinMaxScaler
scaler_train = MinMaxScaler()
train_num_scaled = scaler_train.fit_transform(train[numerical])
scaler_train.data_max_
scaler_train.data_min_
train_num_scaled = pd.DataFrame(train_num_scaled, columns=train[numerical].columns)
train_num_scaled
from sklearn.preprocessing import MinMaxScaler
scaler_test = MinMaxScaler()
test_num_scaled = scaler_test.fit_transform(test[numerical])
scaler_test.data_max_
scaler_test.data_min_
test_num_scaled = pd.DataFrame(test_num_scaled, columns=test[numerical].columns)
test_final = pd.concat([test[categorical], test_num_scaled], axis=1)
import tensorflow as tf
X = pd.concat([train[categorical], train_num_scaled], axis=1)
y = tf.keras.utils.to_categorical(train['price_range'], 4)
y | code |
33115163/cell_59 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Dense
from keras.models import Sequential
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
#variables with symmetrical distributions
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_no_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_no_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
#variables with skewed distributions
group_skewed = train.groupby('price_range')[skewed].median().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
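# assumed reconstruction of the feature matrix and split from the notebook's other
# cells (not recorded here): scale the numericals, one-hot the target, split 80/20
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
train_num_scaled = pd.DataFrame(MinMaxScaler().fit_transform(train[numerical]), columns=numerical)
X = pd.concat([train[categorical], train_num_scaled], axis=1)
y = tf.keras.utils.to_categorical(train['price_range'], 4)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)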
model_1 = Sequential()
model_1.add(Dense(25, input_dim=20, activation='relu'))
model_1.add(Dense(25, activation='relu'))
model_1.add(Dense(4, activation='softmax'))
model_1.summary()
# y is one-hot encoded above, so use the non-sparse categorical loss
model_1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
hist_1 = model_1.fit(X_train, y_train, epochs=20, batch_size=25, validation_data=(X_val, y_val))
plt.plot(hist_1.history['loss'])
plt.plot(hist_1.history['val_loss'])
plt.title('Model Loss Progression During Training/Validation')
plt.ylabel('Training and Validation Losses')
plt.xlabel('Epoch Number')
plt.legend(['Training Loss', 'Validation Loss']) | code |
33115163/cell_15 | [
"text_html_output_1.png"
] | numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
print(len(numerical))
print(len(categorical)) | code |
33115163/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
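# melt stacks the binary columns into (variable, value) pairs so one countplot can compare them side by side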
sns.countplot(data=df, x='variable', hue='value') | code |
33115163/cell_47 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
#variables with symmetrical distributions
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_no_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_no_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
#variables with skewed distributions
group_skewed = train.groupby('price_range')[skewed].median().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
from sklearn.preprocessing import MinMaxScaler
scaler_train = MinMaxScaler()
train_num_scaled = scaler_train.fit_transform(train[numerical])
scaler_train.data_max_
scaler_train.data_min_
train_num_scaled = pd.DataFrame(train_num_scaled, columns=train[numerical].columns)
train_num_scaled
from sklearn.preprocessing import MinMaxScaler
scaler_test = MinMaxScaler()
test_num_scaled = scaler_test.fit_transform(test[numerical])
scaler_test.data_max_
scaler_test.data_min_
test_num_scaled = pd.DataFrame(test_num_scaled, columns=test[numerical].columns)
test_final = pd.concat([test[categorical], test_num_scaled], axis=1)
test_final.head() | code |
33115163/cell_35 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
test = pd.read_csv('../input/mobile-price-classification/test.csv')
train = pd.read_csv('../input/mobile-price-classification/train.csv')
numerical = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']
categorical = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']
df = pd.melt(train[categorical])
#numerical attributes
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(numerical):
ax=plt.subplot(5,3,i+1)
train[col].plot.hist(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
skewed = ['clock_speed', 'fc', 'm_dep', 'px_height', 'sc_w']
no_skewed = ['battery_power', 'int_memory', 'mobile_wt', 'n_cores', 'pc', 'px_width', 'ram', 'sc_h', 'talk_time']
#correlation between attributes
corr = train.corr()
fig, (ax) = plt.subplots(1,1,sharey = True, figsize = (20,10))
sns.heatmap(corr, cmap = 'Blues')
#variables with symmetrical distributions
group_no_skewed = train.groupby('price_range')[no_skewed].mean().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_no_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_no_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
#variables with skewed distributions
group_skewed = train.groupby('price_range')[skewed].median().reset_index()
fig = plt.figure(figsize=(15,20))
for i,col in enumerate(group_skewed.iloc[:,1:].columns):
ax=plt.subplot(5,3,i+1)
group_skewed.iloc[:,1:][col].plot.bar(ax = ax).tick_params(axis = 'x',labelrotation = 360)
ax.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.1))
plt.show()
sns.catplot('price_range', col='three_g', hue='four_g', data=train, kind='count', col_wrap=2) | code |