path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
1008563/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20, labels=group_name)
"\n{0: '(149.5, 160.2]', 1: '(256.5, 267.2]', 2: '(267.2, 277.9]', 3: '(213.7, 224.4]', 4: '(245.8, 256.5]', 5: '(138.8, 149.5]',\n 6: '(128.1, 138.8]', 7: '(299.3, 310]', 8: '(224.4, 235.1]', 9: '(277.9, 288.6]', 10: '(235.1, 245.8]'\n , 11: '(117.4, 128.1]', 12: '(288.6, 299.3]', 13: '(181.6, 192.3]', 14: '(160.2, 170.9]',\n 15: '(170.9, 181.6]', 16: '(192.3, 203]', 17: '(203, 213.7]', 18: '(106.7, 117.4]',\n 19: '(95.786, 106.7]'}\n "
sns.barplot(df['salary'], df['satisfaction_level']) | code |
1008563/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any() | code |
1008563/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20, labels=group_name)
"\n{0: '(149.5, 160.2]', 1: '(256.5, 267.2]', 2: '(267.2, 277.9]', 3: '(213.7, 224.4]', 4: '(245.8, 256.5]', 5: '(138.8, 149.5]',\n 6: '(128.1, 138.8]', 7: '(299.3, 310]', 8: '(224.4, 235.1]', 9: '(277.9, 288.6]', 10: '(235.1, 245.8]'\n , 11: '(117.4, 128.1]', 12: '(288.6, 299.3]', 13: '(181.6, 192.3]', 14: '(160.2, 170.9]',\n 15: '(170.9, 181.6]', 16: '(192.3, 203]', 17: '(203, 213.7]', 18: '(106.7, 117.4]',\n 19: '(95.786, 106.7]'}\n "
sns.pointplot(df1['average_montly_hours'], df1['satisfaction_level']) | code |
1008563/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
sns.heatmap(df.corr(), vmax=0.8, square=True, annot=True, fmt='.2f') | code |
1008563/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20, labels=group_name)
"\n{0: '(149.5, 160.2]', 1: '(256.5, 267.2]', 2: '(267.2, 277.9]', 3: '(213.7, 224.4]', 4: '(245.8, 256.5]', 5: '(138.8, 149.5]',\n 6: '(128.1, 138.8]', 7: '(299.3, 310]', 8: '(224.4, 235.1]', 9: '(277.9, 288.6]', 10: '(235.1, 245.8]'\n , 11: '(117.4, 128.1]', 12: '(288.6, 299.3]', 13: '(181.6, 192.3]', 14: '(160.2, 170.9]',\n 15: '(170.9, 181.6]', 16: '(192.3, 203]', 17: '(203, 213.7]', 18: '(106.7, 117.4]',\n 19: '(95.786, 106.7]'}\n "
test_dict = {'last_evaluation': [0.2, 0.6, 0.7, 0.8], 'number_project': [1, 3, 4, 6], 'average_montly_hours': [110, 180, 190, 250], 'time_spend_company': [3, 4, 5, 6], 'Work_accident': [0, 1, 1, 0], 'promotion_last_5years': [0, 0, 1, 1], 'job': [0, 1, 2, 3], 'salary': [0, 1, 1, 0]}
df_test = pd.DataFrame(test_dict)
test_X = np.array(df_test)
model = RandomForestRegressor()
model.fit(X_train, y_train)
model.predict(test_X) | code |
1008563/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20, labels=group_name)
"\n{0: '(149.5, 160.2]', 1: '(256.5, 267.2]', 2: '(267.2, 277.9]', 3: '(213.7, 224.4]', 4: '(245.8, 256.5]', 5: '(138.8, 149.5]',\n 6: '(128.1, 138.8]', 7: '(299.3, 310]', 8: '(224.4, 235.1]', 9: '(277.9, 288.6]', 10: '(235.1, 245.8]'\n , 11: '(117.4, 128.1]', 12: '(288.6, 299.3]', 13: '(181.6, 192.3]', 14: '(160.2, 170.9]',\n 15: '(170.9, 181.6]', 16: '(192.3, 203]', 17: '(203, 213.7]', 18: '(106.7, 117.4]',\n 19: '(95.786, 106.7]'}\n "
sns.barplot(df['job'], df['satisfaction_level']) | code |
1008563/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
print(sorted(feature_importance_dict.items(), key=lambda x: x[1], reverse=True)) | code |
1008563/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20, labels=group_name)
"\n{0: '(149.5, 160.2]', 1: '(256.5, 267.2]', 2: '(267.2, 277.9]', 3: '(213.7, 224.4]', 4: '(245.8, 256.5]', 5: '(138.8, 149.5]',\n 6: '(128.1, 138.8]', 7: '(299.3, 310]', 8: '(224.4, 235.1]', 9: '(277.9, 288.6]', 10: '(235.1, 245.8]'\n , 11: '(117.4, 128.1]', 12: '(288.6, 299.3]', 13: '(181.6, 192.3]', 14: '(160.2, 170.9]',\n 15: '(170.9, 181.6]', 16: '(192.3, 203]', 17: '(203, 213.7]', 18: '(106.7, 117.4]',\n 19: '(95.786, 106.7]'}\n "
sns.pointplot(df1['last_evaluation'], df1['satisfaction_level']) | code |
1008563/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score,train_test_split
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20, labels=group_name)
"\n{0: '(149.5, 160.2]', 1: '(256.5, 267.2]', 2: '(267.2, 277.9]', 3: '(213.7, 224.4]', 4: '(245.8, 256.5]', 5: '(138.8, 149.5]',\n 6: '(128.1, 138.8]', 7: '(299.3, 310]', 8: '(224.4, 235.1]', 9: '(277.9, 288.6]', 10: '(235.1, 245.8]'\n , 11: '(117.4, 128.1]', 12: '(288.6, 299.3]', 13: '(181.6, 192.3]', 14: '(160.2, 170.9]',\n 15: '(170.9, 181.6]', 16: '(192.3, 203]', 17: '(203, 213.7]', 18: '(106.7, 117.4]',\n 19: '(95.786, 106.7]'}\n "
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=7)
models = [['LR', LinearRegression()], ['CART', DecisionTreeRegressor()], ['RF', RandomForestRegressor()]]
scoring = 'neg_mean_squared_error'
result_list = []
for names, model in models:
results = cross_val_score(model, X, y, cv=kfold, scoring=scoring)
print(names, results.mean()) | code |
1008563/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
print(sorted(feature_importance_dict.items(), key=lambda x: x[1], reverse=True)) | code |
1008563/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
sns.pointplot(df['number_project'], df['satisfaction_level']) | code |
1008563/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
projects = df['number_project'].unique()
projects = sorted(projects)
for i in projects:
mean_satisfaction_level = df['satisfaction_level'][df['number_project'] == i].mean()
print('project_total', i, ':', mean_satisfaction_level) | code |
1008563/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/HR_comma_sep.csv')
df.describe() | code |
1008563/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20, labels=group_name)
"\n{0: '(149.5, 160.2]', 1: '(256.5, 267.2]', 2: '(267.2, 277.9]', 3: '(213.7, 224.4]', 4: '(245.8, 256.5]', 5: '(138.8, 149.5]',\n 6: '(128.1, 138.8]', 7: '(299.3, 310]', 8: '(224.4, 235.1]', 9: '(277.9, 288.6]', 10: '(235.1, 245.8]'\n , 11: '(117.4, 128.1]', 12: '(288.6, 299.3]', 13: '(181.6, 192.3]', 14: '(160.2, 170.9]',\n 15: '(170.9, 181.6]', 16: '(192.3, 203]', 17: '(203, 213.7]', 18: '(106.7, 117.4]',\n 19: '(95.786, 106.7]'}\n " | code |
1008563/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20, labels=group_name)
"\n{0: '(149.5, 160.2]', 1: '(256.5, 267.2]', 2: '(267.2, 277.9]', 3: '(213.7, 224.4]', 4: '(245.8, 256.5]', 5: '(138.8, 149.5]',\n 6: '(128.1, 138.8]', 7: '(299.3, 310]', 8: '(224.4, 235.1]', 9: '(277.9, 288.6]', 10: '(235.1, 245.8]'\n , 11: '(117.4, 128.1]', 12: '(288.6, 299.3]', 13: '(181.6, 192.3]', 14: '(160.2, 170.9]',\n 15: '(170.9, 181.6]', 16: '(192.3, 203]', 17: '(203, 213.7]', 18: '(106.7, 117.4]',\n 19: '(95.786, 106.7]'}\n "
sns.barplot(df['Work_accident'], df['satisfaction_level']) | code |
1008563/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
plt.scatter(df['satisfaction_level'], df['last_evaluation'])
plt.xlabel('satisfaction_level')
plt.ylabel('last_evaluation') | code |
1008563/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
df1 = df.copy()
group_name = list(range(20))
df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name)
df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20, labels=group_name)
"\n{0: '(149.5, 160.2]', 1: '(256.5, 267.2]', 2: '(267.2, 277.9]', 3: '(213.7, 224.4]', 4: '(245.8, 256.5]', 5: '(138.8, 149.5]',\n 6: '(128.1, 138.8]', 7: '(299.3, 310]', 8: '(224.4, 235.1]', 9: '(277.9, 288.6]', 10: '(235.1, 245.8]'\n , 11: '(117.4, 128.1]', 12: '(288.6, 299.3]', 13: '(181.6, 192.3]', 14: '(160.2, 170.9]',\n 15: '(170.9, 181.6]', 16: '(192.3, 203]', 17: '(203, 213.7]', 18: '(106.7, 117.4]',\n 19: '(95.786, 106.7]'}\n "
sns.pointplot(df1['last_evaluation'], df['average_montly_hours']) | code |
1008563/cell_10 | [
"text_html_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
sns.barplot(df['left'], df['satisfaction_level']) | code |
74070881/cell_9 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_error, accuracy_score
from xgboost import XGBClassifier
import numpy as np
import pandas as pd
train_data = pd.read_csv('../input/sept-tps-5fold-stratified/sept_TPS_train_5_folds.csv')
test_data = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_solution = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X_test = test_data.copy()
X_test = X_test.drop(['id'], axis=1)
exclude_cols = ['id', 'kfold', 'claim']
useful_cols = [i for i in train_data.columns if i not in exclude_cols]
feature_cols = [col for col in train_data.columns if col.startswith('f')]
num_cols_with_missing = sum(train_data.isnull().sum() > 0)
num_cols_with_missing
final_predictions = []
for fold in range(5):
X_train = train_data[train_data.kfold != fold].reset_index(drop=True)
X_valid = train_data[train_data.kfold == fold].reset_index(drop=True)
my_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
X_train[feature_cols] = pd.DataFrame(my_imputer.fit_transform(X_train[feature_cols]))
X_valid[feature_cols] = pd.DataFrame(my_imputer.transform(X_valid[feature_cols]))
y_train = X_train['claim']
X_train = X_train.drop(exclude_cols, axis=1)
y_valid = X_valid['claim']
X_valid = X_valid.drop(exclude_cols, axis=1)
X_test[feature_cols] = pd.DataFrame(my_imputer.transform(X_test[feature_cols]))
model = XGBClassifier(objective='binary:logistic', random_state=fold, tree_method='gpu_hist', gpu_id=0, n_jobs=4)
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
test_preds = model.predict(X_test)
final_predictions.append(test_preds)
len(final_predictions[0]) | code |
74070881/cell_6 | [
"application_vnd.jupyter.stderr_output_9.png",
"application_vnd.jupyter.stderr_output_7.png",
"text_plain_output_4.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_plain_output_8.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/sept-tps-5fold-stratified/sept_TPS_train_5_folds.csv')
test_data = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_solution = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X_test = test_data.copy()
X_test = X_test.drop(['id'], axis=1)
test_data.shape
test_data.head() | code |
74070881/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/sept-tps-5fold-stratified/sept_TPS_train_5_folds.csv')
test_data = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_solution = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X_test = test_data.copy()
X_test = X_test.drop(['id'], axis=1)
sample_solution.head() | code |
74070881/cell_7 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_error, accuracy_score
from xgboost import XGBClassifier
import numpy as np
import pandas as pd
train_data = pd.read_csv('../input/sept-tps-5fold-stratified/sept_TPS_train_5_folds.csv')
test_data = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_solution = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X_test = test_data.copy()
X_test = X_test.drop(['id'], axis=1)
exclude_cols = ['id', 'kfold', 'claim']
useful_cols = [i for i in train_data.columns if i not in exclude_cols]
feature_cols = [col for col in train_data.columns if col.startswith('f')]
num_cols_with_missing = sum(train_data.isnull().sum() > 0)
num_cols_with_missing
final_predictions = []
for fold in range(5):
X_train = train_data[train_data.kfold != fold].reset_index(drop=True)
X_valid = train_data[train_data.kfold == fold].reset_index(drop=True)
my_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
X_train[feature_cols] = pd.DataFrame(my_imputer.fit_transform(X_train[feature_cols]))
X_valid[feature_cols] = pd.DataFrame(my_imputer.transform(X_valid[feature_cols]))
y_train = X_train['claim']
X_train = X_train.drop(exclude_cols, axis=1)
y_valid = X_valid['claim']
X_valid = X_valid.drop(exclude_cols, axis=1)
X_test[feature_cols] = pd.DataFrame(my_imputer.transform(X_test[feature_cols]))
model = XGBClassifier(objective='binary:logistic', random_state=fold, tree_method='gpu_hist', gpu_id=0, n_jobs=4)
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
test_preds = model.predict(X_test)
final_predictions.append(test_preds)
print(fold, accuracy_score(y_valid, preds_valid)) | code |
74070881/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/sept-tps-5fold-stratified/sept_TPS_train_5_folds.csv')
test_data = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_solution = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X_test = test_data.copy()
X_test = X_test.drop(['id'], axis=1)
test_data.shape
test_data | code |
74070881/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/sept-tps-5fold-stratified/sept_TPS_train_5_folds.csv')
test_data = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_solution = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X_test = test_data.copy()
X_test = X_test.drop(['id'], axis=1)
test_data.shape | code |
74070881/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/sept-tps-5fold-stratified/sept_TPS_train_5_folds.csv')
test_data = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_solution = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X_test = test_data.copy()
X_test = X_test.drop(['id'], axis=1)
exclude_cols = ['id', 'kfold', 'claim']
useful_cols = [i for i in train_data.columns if i not in exclude_cols]
feature_cols = [col for col in train_data.columns if col.startswith('f')]
num_cols_with_missing = sum(train_data.isnull().sum() > 0)
num_cols_with_missing | code |
33097350/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/uncover/regional_sources/the_belgian_institute_for_health/dataset-of-confirmed-cases-by-date-and-municipality.csv', encoding='ISO-8859-2')
df.dtypes | code |
33097350/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/uncover/regional_sources/the_belgian_institute_for_health/dataset-of-confirmed-cases-by-date-and-municipality.csv', encoding='ISO-8859-2')
plt.xticks(rotation=90)
sns.countplot(df['tx_adm_dstr_descr_fr'])
plt.xticks(rotation=90)
plt.show() | code |
33097350/cell_4 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/uncover/regional_sources/the_belgian_institute_for_health/dataset-of-confirmed-cases-by-date-and-municipality.csv', encoding='ISO-8859-2')
df.head() | code |
33097350/cell_2 | [
"text_html_output_2.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import feature_extraction, linear_model, model_selection, preprocessing
import plotly.graph_objs as go
import plotly.offline as py
import plotly.express as px
from plotly.offline import iplot
import seaborn
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
33097350/cell_11 | [
"text_plain_output_1.png"
] | df_grp_rl20 = df_grp_rl20.sort_values(by=['yearstart'], ascending=False) | code |
33097350/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/uncover/regional_sources/the_belgian_institute_for_health/dataset-of-confirmed-cases-by-date-and-municipality.csv', encoding='ISO-8859-2')
sns.countplot(df['tx_rgn_descr_nl'])
plt.xticks(rotation=90)
plt.show() | code |
33097350/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
df = pd.read_csv('../input/uncover/regional_sources/the_belgian_institute_for_health/dataset-of-confirmed-cases-by-date-and-municipality.csv', encoding='ISO-8859-2')
fig = px.bar(df[['cases', 'nis5']].sort_values('nis5', ascending=False), y='nis5', x='cases', color='cases', log_y=True, template='ggplot2', title='Covid-19 Belgian Institute for Health ')
fig.show() | code |
2009832/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
classifier = RandomForestClassifier(n_estimators=100, criterion='entropy', random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
labels = [1, 0]
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix of the classifier')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show() | code |
2009832/cell_4 | [
"text_plain_output_1.png"
] | Weather.head() | code |
2009832/cell_6 | [
"text_plain_output_1.png"
] | Weather['RAIN'].value_counts() | code |
2009832/cell_2 | [
"text_html_output_1.png"
] | from subprocess import check_output
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
sns.set(style='white', context='notebook', palette='deep')
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
Weather = pd.read_csv('../input/seattleWeather_1948-2017.csv') | code |
2009832/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | Weather['RAIN'] = Weather['RAIN'].map(lambda i: 1 if i == True else 0)
Weather['RAIN'].value_counts() | code |
128011561/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from Bio import SeqIO
from tqdm import tqdm
import pandas as pd
def read_fasta(fastaPath):
fasta_sequences = SeqIO.parse(open(fastaPath), 'fasta')
ids = []
sequences = []
for fasta in fasta_sequences:
ids.append(fasta.id)
sequences.append(str(fasta.seq))
return pd.DataFrame({'Id': ids, 'Sequence': sequences})
def get_top_go_terms(data, num_terms):
term_counts = data['term'].value_counts()
freq_counts = term_counts / len(data)
freq_top = freq_counts.nlargest(num_terms)
return freq_top
train_terms = pd.read_csv('/kaggle/input/cafa-5-protein-function-prediction/Train/train_terms.tsv', sep='\t')
top_terms = get_top_go_terms(train_terms, 10)
test_data = read_fasta('/kaggle/input/cafa-5-protein-function-prediction/Test (Targets)/testsuperset.fasta')
results = []
for index, row in tqdm(test_data.iterrows(), total=test_data.shape[0], position=0):
for term, freq in top_terms.items():
results.append((row['Id'], term, freq))
final_results = pd.DataFrame(results, columns=['Id', 'GO term', 'Confidence'])
final_results.to_csv('submission.tsv', sep='\t', index=False) | code |
329725/cell_4 | [
"text_plain_output_1.png"
] | from scipy.stats import chisquare
import pandas as pd
import pandas as pd
df = pd.read_csv('../input/people.csv')
from scipy.stats import chisquare
chars = [i for i in df.columns.values if 'char_' in i]
flags = []
for feat in df[chars]:
group = df[chars].groupby(feat)
for otherfeat in df[chars].drop(feat, axis=1):
summary = group[otherfeat].count()
if chisquare(summary)[1] < 0.05:
flags.append(feat)
flags.append(otherfeat)
flags = set(flags)
print('It looks like {}% of the characteristics might be related to one another.'.format(len(flags) / len(chars) * 100)) | code |
329725/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
df = pd.read_csv('../input/people.csv')
print(df.head()) | code |
74041129/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow as tf
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
n_train = 700
X_train_class = train_df['Pclass'].values.reshape(-1, 1)
X_train_sex = train_df['Sex'].values.reshape(-1, 1)
X_train_age = train_df['Age'].values.reshape(-1, 1)
X_train_sib = train_df['SibSp'].values.reshape(-1, 1)
X_train_par = train_df['Parch'].values.reshape(-1, 1)
y = train_df['Survived'].values.T
X_train = np.hstack((X_train_sex[:n_train, :], X_train_class[:n_train, :], X_train_sib[:n_train, :], X_train_age[:n_train, :], X_train_par[:n_train, :]))
X_test = np.hstack((X_train_sex[n_train:, :], X_train_class[n_train:, :], X_train_sib[n_train:, :], X_train_age[n_train:, :], X_train_par[n_train:, :]))
X_train, X_test = (tf.convert_to_tensor(X_train.astype(np.float64)), tf.convert_to_tensor(X_test.astype(np.float64)))
y_train, y_test = (y[:n_train], y[n_train:])
X_test_class = test_df['Pclass'].values.reshape(-1, 1)
X_test_sex = test_df['Sex'].values.reshape(-1, 1)
X_test_age = test_df['Age'].values.reshape(-1, 1)
X_test_sib = test_df['SibSp'].values.reshape(-1, 1)
X_test_par = test_df['Parch'].values.reshape(-1, 1)
x_test = np.hstack((X_test_sex, X_test_class, X_test_sib, X_test_age, X_test_par)).astype(np.float64)
print(x_test) | code |
74041129/cell_9 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
n_train = 700
X_train_class = train_df['Pclass'].values.reshape(-1, 1)
X_train_sex = train_df['Sex'].values.reshape(-1, 1)
X_train_age = train_df['Age'].values.reshape(-1, 1)
X_train_sib = train_df['SibSp'].values.reshape(-1, 1)
X_train_par = train_df['Parch'].values.reshape(-1, 1)
y = train_df['Survived'].values.T
X_train = np.hstack((X_train_sex[:n_train, :], X_train_class[:n_train, :], X_train_sib[:n_train, :], X_train_age[:n_train, :], X_train_par[:n_train, :]))
X_test = np.hstack((X_train_sex[n_train:, :], X_train_class[n_train:, :], X_train_sib[n_train:, :], X_train_age[n_train:, :], X_train_par[n_train:, :]))
X_train, X_test = (tf.convert_to_tensor(X_train.astype(np.float64)), tf.convert_to_tensor(X_test.astype(np.float64)))
y_train, y_test = (y[:n_train], y[n_train:])
model = Sequential()
model.add(Dense(300, input_dim=5, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(150, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(25, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999), metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, verbose=0)
_, train_acc = model.evaluate(X_train, y_train, verbose=2)
_, test_acc = model.evaluate(X_test, y_test, verbose=2)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.legend()
plt.show() | code |
74041129/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
train_df.describe() | code |
74041129/cell_11 | [
"text_html_output_1.png"
] | from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from keras.models import load_model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
n_train = 700
X_train_class = train_df['Pclass'].values.reshape(-1, 1)
X_train_sex = train_df['Sex'].values.reshape(-1, 1)
X_train_age = train_df['Age'].values.reshape(-1, 1)
X_train_sib = train_df['SibSp'].values.reshape(-1, 1)
X_train_par = train_df['Parch'].values.reshape(-1, 1)
y = train_df['Survived'].values.T
X_train = np.hstack((X_train_sex[:n_train, :], X_train_class[:n_train, :], X_train_sib[:n_train, :], X_train_age[:n_train, :], X_train_par[:n_train, :]))
X_test = np.hstack((X_train_sex[n_train:, :], X_train_class[n_train:, :], X_train_sib[n_train:, :], X_train_age[n_train:, :], X_train_par[n_train:, :]))
X_train, X_test = (tf.convert_to_tensor(X_train.astype(np.float64)), tf.convert_to_tensor(X_test.astype(np.float64)))
y_train, y_test = (y[:n_train], y[n_train:])
model = Sequential()
model.add(Dense(300, input_dim=5, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(150, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(25, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999), metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, verbose=0)
_, train_acc = model.evaluate(X_train, y_train, verbose=2)
_, test_acc = model.evaluate(X_test, y_test, verbose=2)
model.save('model_' + str(1) + '.h5')
model = load_model('./model_1.h5')
model.summary() | code |
74041129/cell_18 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from keras.models import load_model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
n_train = 700
X_train_class = train_df['Pclass'].values.reshape(-1, 1)
X_train_sex = train_df['Sex'].values.reshape(-1, 1)
X_train_age = train_df['Age'].values.reshape(-1, 1)
X_train_sib = train_df['SibSp'].values.reshape(-1, 1)
X_train_par = train_df['Parch'].values.reshape(-1, 1)
y = train_df['Survived'].values.T
X_train = np.hstack((X_train_sex[:n_train, :], X_train_class[:n_train, :], X_train_sib[:n_train, :], X_train_age[:n_train, :], X_train_par[:n_train, :]))
X_test = np.hstack((X_train_sex[n_train:, :], X_train_class[n_train:, :], X_train_sib[n_train:, :], X_train_age[n_train:, :], X_train_par[n_train:, :]))
X_train, X_test = (tf.convert_to_tensor(X_train.astype(np.float64)), tf.convert_to_tensor(X_test.astype(np.float64)))
y_train, y_test = (y[:n_train], y[n_train:])
model = Sequential()
model.add(Dense(300, input_dim=5, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(150, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(25, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999), metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, verbose=0)
_, train_acc = model.evaluate(X_train, y_train, verbose=2)
_, test_acc = model.evaluate(X_test, y_test, verbose=2)
model.save('model_' + str(1) + '.h5')
model = load_model('./model_1.h5')
model.summary()
X_test_class = test_df['Pclass'].values.reshape(-1, 1)
X_test_sex = test_df['Sex'].values.reshape(-1, 1)
X_test_age = test_df['Age'].values.reshape(-1, 1)
X_test_sib = test_df['SibSp'].values.reshape(-1, 1)
X_test_par = test_df['Parch'].values.reshape(-1, 1)
x_test = np.hstack((X_test_sex, X_test_class, X_test_sib, X_test_age, X_test_par)).astype(np.float64)
y_pred = []
prediction = model.predict(x_test).ravel().tolist()
y_pred += prediction
for i in range(0, len(y_pred)):
if y_pred[i] > 0.8:
y_pred[i] = 1
else:
y_pred[i] = 0
print(y_pred) | code |
74041129/cell_15 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from keras.models import load_model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
n_train = 700
X_train_class = train_df['Pclass'].values.reshape(-1, 1)
X_train_sex = train_df['Sex'].values.reshape(-1, 1)
X_train_age = train_df['Age'].values.reshape(-1, 1)
X_train_sib = train_df['SibSp'].values.reshape(-1, 1)
X_train_par = train_df['Parch'].values.reshape(-1, 1)
y = train_df['Survived'].values.T
X_train = np.hstack((X_train_sex[:n_train, :], X_train_class[:n_train, :], X_train_sib[:n_train, :], X_train_age[:n_train, :], X_train_par[:n_train, :]))
X_test = np.hstack((X_train_sex[n_train:, :], X_train_class[n_train:, :], X_train_sib[n_train:, :], X_train_age[n_train:, :], X_train_par[n_train:, :]))
X_train, X_test = (tf.convert_to_tensor(X_train.astype(np.float64)), tf.convert_to_tensor(X_test.astype(np.float64)))
y_train, y_test = (y[:n_train], y[n_train:])
model = Sequential()
model.add(Dense(300, input_dim=5, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(150, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(25, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999), metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, verbose=0)
_, train_acc = model.evaluate(X_train, y_train, verbose=2)
_, test_acc = model.evaluate(X_test, y_test, verbose=2)
model.save('model_' + str(1) + '.h5')
model = load_model('./model_1.h5')
model.summary()
X_test_class = test_df['Pclass'].values.reshape(-1, 1)
X_test_sex = test_df['Sex'].values.reshape(-1, 1)
X_test_age = test_df['Age'].values.reshape(-1, 1)
X_test_sib = test_df['SibSp'].values.reshape(-1, 1)
X_test_par = test_df['Parch'].values.reshape(-1, 1)
x_test = np.hstack((X_test_sex, X_test_class, X_test_sib, X_test_age, X_test_par)).astype(np.float64)
y_pred = []
prediction = model.predict(x_test).ravel().tolist()
y_pred += prediction
print(y_pred) | code |
74041129/cell_16 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from keras.models import load_model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
n_train = 700
X_train_class = train_df['Pclass'].values.reshape(-1, 1)
X_train_sex = train_df['Sex'].values.reshape(-1, 1)
X_train_age = train_df['Age'].values.reshape(-1, 1)
X_train_sib = train_df['SibSp'].values.reshape(-1, 1)
X_train_par = train_df['Parch'].values.reshape(-1, 1)
y = train_df['Survived'].values.T
X_train = np.hstack((X_train_sex[:n_train, :], X_train_class[:n_train, :], X_train_sib[:n_train, :], X_train_age[:n_train, :], X_train_par[:n_train, :]))
X_test = np.hstack((X_train_sex[n_train:, :], X_train_class[n_train:, :], X_train_sib[n_train:, :], X_train_age[n_train:, :], X_train_par[n_train:, :]))
X_train, X_test = (tf.convert_to_tensor(X_train.astype(np.float64)), tf.convert_to_tensor(X_test.astype(np.float64)))
y_train, y_test = (y[:n_train], y[n_train:])
model = Sequential()
model.add(Dense(300, input_dim=5, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(150, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(25, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999), metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, verbose=0)
_, train_acc = model.evaluate(X_train, y_train, verbose=2)
_, test_acc = model.evaluate(X_test, y_test, verbose=2)
model.save('model_' + str(1) + '.h5')
model = load_model('./model_1.h5')
model.summary()
X_test_class = test_df['Pclass'].values.reshape(-1, 1)
X_test_sex = test_df['Sex'].values.reshape(-1, 1)
X_test_age = test_df['Age'].values.reshape(-1, 1)
X_test_sib = test_df['SibSp'].values.reshape(-1, 1)
X_test_par = test_df['Parch'].values.reshape(-1, 1)
x_test = np.hstack((X_test_sex, X_test_class, X_test_sib, X_test_age, X_test_par)).astype(np.float64)
y_pred = []
prediction = model.predict(x_test).ravel().tolist()
y_pred += prediction
for i in range(0, len(y_pred)):
if y_pred[i] > 0.5:
y_pred[i] = 1
else:
y_pred[i] = 0
print(y_pred) | code |
74041129/cell_17 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from keras.models import load_model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
n_train = 700
X_train_class = train_df['Pclass'].values.reshape(-1, 1)
X_train_sex = train_df['Sex'].values.reshape(-1, 1)
X_train_age = train_df['Age'].values.reshape(-1, 1)
X_train_sib = train_df['SibSp'].values.reshape(-1, 1)
X_train_par = train_df['Parch'].values.reshape(-1, 1)
y = train_df['Survived'].values.T
X_train = np.hstack((X_train_sex[:n_train, :], X_train_class[:n_train, :], X_train_sib[:n_train, :], X_train_age[:n_train, :], X_train_par[:n_train, :]))
X_test = np.hstack((X_train_sex[n_train:, :], X_train_class[n_train:, :], X_train_sib[n_train:, :], X_train_age[n_train:, :], X_train_par[n_train:, :]))
X_train, X_test = (tf.convert_to_tensor(X_train.astype(np.float64)), tf.convert_to_tensor(X_test.astype(np.float64)))
y_train, y_test = (y[:n_train], y[n_train:])
model = Sequential()
model.add(Dense(300, input_dim=5, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(150, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(25, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999), metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, verbose=0)
_, train_acc = model.evaluate(X_train, y_train, verbose=2)
_, test_acc = model.evaluate(X_test, y_test, verbose=2)
model.save('model_' + str(1) + '.h5')
model = load_model('./model_1.h5')
model.summary()
X_test_class = test_df['Pclass'].values.reshape(-1, 1)
X_test_sex = test_df['Sex'].values.reshape(-1, 1)
X_test_age = test_df['Age'].values.reshape(-1, 1)
X_test_sib = test_df['SibSp'].values.reshape(-1, 1)
X_test_par = test_df['Parch'].values.reshape(-1, 1)
x_test = np.hstack((X_test_sex, X_test_class, X_test_sib, X_test_age, X_test_par)).astype(np.float64)
y_pred = []
prediction = model.predict(x_test).ravel().tolist()
y_pred += prediction
for i in range(0, len(y_pred)):
if y_pred[i] > 0.5:
y_pred[i] = 1
else:
y_pred[i] = 0
print(y_pred) | code |
50213265/cell_13 | [
"text_html_output_2.png"
] | col = 'Q4'
v2 = df[col].value_counts().reset_index()
v2 = v2.rename(columns={col: 'count', 'index': col})
v2 = v2.sort_values(by='count', ascending=False)
plt.figure(figsize=(20, 8))
barplot = plt.bar(v2.Q4, v2['count'], color='red')
for bar in barplot:
yval = bar.get_height()
plt.text(bar.get_x() + bar.get_width() / 2.0, yval, int(yval), va='bottom')
plt.title(' Distribution of Eduction')
plt.xticks(rotation=90)
plt.show() | code |
50213265/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
df = data.iloc[1:, :]
print(df.shape)
df.head(3) | code |
50213265/cell_11 | [
"text_plain_output_1.png"
] | from plotly.offline import init_notebook_mode, iplot
from plotly.offline import init_notebook_mode, iplot
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objs as go
import plotly.graph_objs as go
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
df = data.iloc[1:, :]
init_notebook_mode(connected=True)
col = 'Q1'
v1 = df[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
v1 = v1.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], marker=dict())
layout = {'title': 'Age Distribution', 'xaxis': {'title': 'Age Group'}, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=[trace1], layout=layout)
import plotly
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
col = 'Q2'
grouped = df[col].value_counts().reset_index()
grouped = grouped.rename(columns={col: 'count', 'index': col})
trace = go.Pie(labels=grouped[col], values=grouped['count'], pull=[0.05, 0])
layout = {'title': 'Gender Distribution'}
fig = go.Figure(data=[trace], layout=layout)
d1 = df[df['Q2'] == 'Man']
d2 = df[df['Q2'] == 'Woman']
col = 'Q1'
v1 = d1[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
v1['percent'] = v1['count'].apply(lambda x: 100 * x / sum(v1['count']))
v1 = v1.sort_values(col)
v2 = d2[col].value_counts().reset_index()
v2 = v2.rename(columns={col: 'count', 'index': col})
v2['percent'] = v2['count'].apply(lambda x: 100 * x / sum(v2['count']))
v2 = v2.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], name='Man', marker=dict(color='rgb(26, 118, 255)'))
trace2 = go.Bar(x=v2[col], y=v2['count'], name='Woman', marker=dict(color='rgb(55, 83, 109)'))
y = [trace1, trace2]
layout = {'title': 'Age Distribution over the Gender', 'bargap': 0.2, 'bargroupgap': 0.1, 'xaxis': {'title': 'Age Distribution'}, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=y, layout=layout)
fig.layout.template = 'presentation'
plt.style.use('fivethirtyeight')
col='Q3'
v2=df[col].value_counts().reset_index()
v2=v2.rename(columns={col:'count','index':col})
#v2['percent']=v2['count'].apply(lambda x : 100*x/sum(v2['count']))
v2=v2.sort_values(by='count',ascending=False)
plt.figure(figsize=(30,12))
barplot = plt.bar(v2.Q3,v2['count'],color='black')
for bar in barplot:
yval = bar.get_height()
plt.text(bar.get_x() + bar.get_width()/2.0, yval, int(yval), va='bottom') #va: vertical alignment y positional argument
plt.title(" Number of Respondents per Country")
plt.xticks(rotation=90)
plt.show()
col = 'Q3'
v1 = d1[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
v1['percent'] = v1['count'].apply(lambda x: 100 * x / sum(v1['count']))
v1 = v1.sort_values(by='count')
v2 = d2[col].value_counts().reset_index()
v2 = v2.rename(columns={col: 'count', 'index': col})
v2['percent'] = v2['count'].apply(lambda x: 100 * x / sum(v2['count']))
v2 = v2.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], name='Man', marker=dict(color='black'))
trace2 = go.Bar(x=v2[col], y=v2['count'], name='Woman', marker=dict(color='orange'))
y = [trace1, trace2]
layout = {'title': 'Gender Distribution over the Country', 'barmode': 'relative', 'xaxis_tickangle': -45, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=y, layout=layout)
fig.layout.template = 'presentation'
iplot(fig) | code |
50213265/cell_7 | [
"image_output_1.png"
] | from plotly.offline import init_notebook_mode, iplot
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go
import plotly.graph_objs as go
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
df = data.iloc[1:, :]
init_notebook_mode(connected=True)
col = 'Q1'
v1 = df[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
v1 = v1.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], marker=dict())
layout = {'title': 'Age Distribution', 'xaxis': {'title': 'Age Group'}, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=[trace1], layout=layout)
import plotly
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
col = 'Q2'
grouped = df[col].value_counts().reset_index()
grouped = grouped.rename(columns={col: 'count', 'index': col})
trace = go.Pie(labels=grouped[col], values=grouped['count'], pull=[0.05, 0])
layout = {'title': 'Gender Distribution'}
fig = go.Figure(data=[trace], layout=layout)
iplot(fig) | code |
50213265/cell_8 | [
"text_html_output_1.png"
] | from plotly.offline import init_notebook_mode, iplot
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go
import plotly.graph_objs as go
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
df = data.iloc[1:, :]
init_notebook_mode(connected=True)
col = 'Q1'
v1 = df[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
v1 = v1.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], marker=dict())
layout = {'title': 'Age Distribution', 'xaxis': {'title': 'Age Group'}, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=[trace1], layout=layout)
import plotly
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
col = 'Q2'
grouped = df[col].value_counts().reset_index()
grouped = grouped.rename(columns={col: 'count', 'index': col})
trace = go.Pie(labels=grouped[col], values=grouped['count'], pull=[0.05, 0])
layout = {'title': 'Gender Distribution'}
fig = go.Figure(data=[trace], layout=layout)
d1 = df[df['Q2'] == 'Man']
d2 = df[df['Q2'] == 'Woman']
col = 'Q1'
v1 = d1[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
v1['percent'] = v1['count'].apply(lambda x: 100 * x / sum(v1['count']))
v1 = v1.sort_values(col)
v2 = d2[col].value_counts().reset_index()
v2 = v2.rename(columns={col: 'count', 'index': col})
v2['percent'] = v2['count'].apply(lambda x: 100 * x / sum(v2['count']))
v2 = v2.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], name='Man', marker=dict(color='rgb(26, 118, 255)'))
trace2 = go.Bar(x=v2[col], y=v2['count'], name='Woman', marker=dict(color='rgb(55, 83, 109)'))
y = [trace1, trace2]
layout = {'title': 'Age Distribution over the Gender', 'bargap': 0.2, 'bargroupgap': 0.1, 'xaxis': {'title': 'Age Distribution'}, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=y, layout=layout)
fig.layout.template = 'presentation'
iplot(fig) | code |
50213265/cell_16 | [
"text_html_output_1.png"
] | col = 'Q5'
v2 = df[col].value_counts().reset_index()
v2 = v2.rename(columns={col: 'count', 'index': col})
v2 = v2.sort_values(by='count', ascending=False)
plt.figure(figsize=(20, 8))
barplot = plt.bar(v2.Q5, v2['count'], color='green')
for bar in barplot:
yval = bar.get_height()
plt.text(bar.get_x() + bar.get_width() / 2.0, yval, int(yval), va='bottom')
plt.title(' Title most similar to your current role ')
plt.ylabel('Number of Data Science Enthusiasts')
plt.xticks(rotation=90)
plt.show() | code |
50213265/cell_3 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
df = data.iloc[1:, :]
data.iloc[0, :].transpose() | code |
50213265/cell_14 | [
"text_html_output_2.png"
] | from plotly.offline import init_notebook_mode, iplot
from plotly.offline import init_notebook_mode, iplot
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objs as go
import plotly.graph_objs as go
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
df = data.iloc[1:, :]
init_notebook_mode(connected=True)
col = 'Q1'
v1 = df[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
v1 = v1.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], marker=dict())
layout = {'title': 'Age Distribution', 'xaxis': {'title': 'Age Group'}, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=[trace1], layout=layout)
import plotly
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
col = 'Q2'
grouped = df[col].value_counts().reset_index()
grouped = grouped.rename(columns={col: 'count', 'index': col})
trace = go.Pie(labels=grouped[col], values=grouped['count'], pull=[0.05, 0])
layout = {'title': 'Gender Distribution'}
fig = go.Figure(data=[trace], layout=layout)
d1 = df[df['Q2'] == 'Man']
d2 = df[df['Q2'] == 'Woman']
col = 'Q1'
v1 = d1[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
v1['percent'] = v1['count'].apply(lambda x: 100 * x / sum(v1['count']))
v1 = v1.sort_values(col)
v2 = d2[col].value_counts().reset_index()
v2 = v2.rename(columns={col: 'count', 'index': col})
v2['percent'] = v2['count'].apply(lambda x: 100 * x / sum(v2['count']))
v2 = v2.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], name='Man', marker=dict(color='rgb(26, 118, 255)'))
trace2 = go.Bar(x=v2[col], y=v2['count'], name='Woman', marker=dict(color='rgb(55, 83, 109)'))
y = [trace1, trace2]
layout = {'title': 'Age Distribution over the Gender', 'bargap': 0.2, 'bargroupgap': 0.1, 'xaxis': {'title': 'Age Distribution'}, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=y, layout=layout)
fig.layout.template = 'presentation'
plt.style.use('fivethirtyeight')
col='Q3'
v2=df[col].value_counts().reset_index()
v2=v2.rename(columns={col:'count','index':col})
#v2['percent']=v2['count'].apply(lambda x : 100*x/sum(v2['count']))
v2=v2.sort_values(by='count',ascending=False)
plt.figure(figsize=(30,12))
barplot = plt.bar(v2.Q3,v2['count'],color='black')
for bar in barplot:
yval = bar.get_height()
plt.text(bar.get_x() + bar.get_width()/2.0, yval, int(yval), va='bottom') #va: vertical alignment y positional argument
plt.title(" Number of Respondents per Country")
plt.xticks(rotation=90)
plt.show()
key1 = "Master's degree"
key2 = "Bachelor's degree"
df1 = df[df['Q4'] == 'Master’s degree']
df2 = df[df['Q4'] == 'Bachelor’s degree']
nations = ['United States of America', 'Canada', 'Brazil', 'Mexico', 'Germany', 'Spain', 'France', 'Italy', 'India', 'Japan', 'China', 'South Korea']
nation_map = {'United States of America': 'USA', 'United Kingdom of Great Britain and Northern Ireland': 'UK'}
plt.figure(figsize=(15, 15))
vals = []
for j in range(len(nations)):
country = nations[j]
country_df = df[df['Q3'] == country]
ddf1 = country_df[country_df['Q4'] == 'Master’s degree']
ddf2 = country_df[country_df['Q4'] == 'Bachelor’s degree']
plt.subplot(4, 4, j + 1)
if j < 4:
colors = ['orange', 'yellow']
elif j < 8:
colors = ['red', '#ff8ce0']
else:
colors = ['green', '#827ec4']
vals.append(len(ddf1) / (len(ddf1) + len(ddf2)))
plt.pie([len(ddf1), len(ddf2)], labels=['Mastor Degree', "Bachelor's Degree"], autopct='%1.0f%%', colors=colors, wedgeprops={'linewidth': 5, 'edgecolor': 'white'})
if country in nation_map:
country = nation_map[country]
plt.title('$\\bf{' + country + '}$') | code |
50213265/cell_10 | [
"text_html_output_1.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from plotly.offline import init_notebook_mode, iplot
from plotly.offline import init_notebook_mode, iplot
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objs as go
import plotly.graph_objs as go
# Kaggle 2020 survey responses; row 0 contains the question text, so skip it.
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
df = data.iloc[1:, :]
init_notebook_mode(connected=True)
# --- Bar chart: respondent count per age bracket (Q1) ---
col = 'Q1'
v1 = df[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
v1 = v1.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], marker=dict())
layout = {'title': 'Age Distribution', 'xaxis': {'title': 'Age Group'}, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=[trace1], layout=layout)
import plotly
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
# --- Pie chart: gender distribution (Q2) ---
col = 'Q2'
grouped = df[col].value_counts().reset_index()
grouped = grouped.rename(columns={col: 'count', 'index': col})
trace = go.Pie(labels=grouped[col], values=grouped['count'], pull=[0.05, 0])
layout = {'title': 'Gender Distribution'}
fig = go.Figure(data=[trace], layout=layout)
# --- Grouped bars: age distribution split by gender ---
d1 = df[df['Q2'] == 'Man']
d2 = df[df['Q2'] == 'Woman']
col = 'Q1'
v1 = d1[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
# percent columns are computed but only the raw counts are plotted below
v1['percent'] = v1['count'].apply(lambda x: 100 * x / sum(v1['count']))
v1 = v1.sort_values(col)
v2 = d2[col].value_counts().reset_index()
v2 = v2.rename(columns={col: 'count', 'index': col})
v2['percent'] = v2['count'].apply(lambda x: 100 * x / sum(v2['count']))
v2 = v2.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], name='Man', marker=dict(color='rgb(26, 118, 255)'))
trace2 = go.Bar(x=v2[col], y=v2['count'], name='Woman', marker=dict(color='rgb(55, 83, 109)'))
y = [trace1, trace2]
layout = {'title': 'Age Distribution over the Gender', 'bargap': 0.2, 'bargroupgap': 0.1, 'xaxis': {'title': 'Age Distribution'}, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=y, layout=layout)
fig.layout.template = 'presentation'
# --- Matplotlib bar chart: respondents per country (Q3), annotated counts ---
plt.style.use('fivethirtyeight')
col = 'Q3'
v2 = df[col].value_counts().reset_index()
v2 = v2.rename(columns={col: 'count', 'index': col})
v2 = v2.sort_values(by='count', ascending=False)
plt.figure(figsize=(30, 12))
barplot = plt.bar(v2.Q3, v2['count'], color='black')
for bar in barplot:
    # write each bar's height just above it
    yval = bar.get_height()
    plt.text(bar.get_x() + bar.get_width() / 2.0, yval, int(yval), va='bottom')
plt.title(' Number of Respondents per Country')
plt.xticks(rotation=90)
plt.show()
50213265/cell_5 | [
"image_output_1.png"
] | from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.graph_objs as go
# Kaggle 2020 survey responses; row 0 contains the question text, so skip it.
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
df = data.iloc[1:, :]
# init_notebook_mode comes from plotly.offline, imported earlier in the notebook.
init_notebook_mode(connected=True)
# Bar chart of respondent counts per age bracket (Q1), sorted by bracket label.
col = 'Q1'
v1 = df[col].value_counts().reset_index()
v1 = v1.rename(columns={col: 'count', 'index': col})
v1 = v1.sort_values(col)
trace1 = go.Bar(x=v1[col], y=v1['count'], marker=dict())
layout = {'title': 'Age Distribution', 'xaxis': {'title': 'Age Group'}, 'yaxis': {'title': 'Count'}}
fig = go.Figure(data=[trace1], layout=layout)
iplot(fig) | code |
90138657/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import numpy as np
import pandas as pd
# NOTE(review): `dataset` and `LinearRegression` are defined in earlier cells
# of the original notebook (dataset is loaded from kc_house_data.csv).
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()  # df1 is unused in this cell
df.columns
# Fit a linear model on the first 10 rows only; `price` is the target.
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
# "Test" rows are the same 10 rows the model was fit on.
X_test = df.drop(columns=['price'])[:10]
X_test
y_hat = model.predict(X_test)
dc = pd.concat([df[:10].reset_index(), pd.Series(y_hat, name='predicted')], axis='columns')
dc
# Reload the raw data and build a 200-row design matrix (bias column of ones
# prepended) for the manual gradient-descent cells that follow.
data = pd.read_csv('../input/kc-house-data/kc_house_data.csv')
X1 = data.drop(columns=['price', 'id', 'date', 'sqft_above'])[:200]
y1 = data['price'][:200]
X_b = np.c_[np.ones((200, 1)), X1]
X1.info() | code |
90138657/cell_13 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y) | code |
90138657/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd
# NOTE(review): `dataset` and `LinearRegression` are defined in earlier cells
# of the original notebook.
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()  # df1 is unused in this cell
df.columns
# Fit a linear model on the first 10 rows only; `price` is the target.
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
# Predictions are made on the same 10 training rows.
X_test = df.drop(columns=['price'])[:10]
X_test
y_hat = model.predict(X_test)
# Side-by-side view of the original rows and the model's predicted price.
dc = pd.concat([df[:10].reset_index(), pd.Series(y_hat, name='predicted')], axis='columns')
dc | code |
90138657/cell_6 | [
"text_html_output_1.png"
] | import seaborn as sns
# NOTE(review): `dataset` and `sns` (seaborn) come from earlier notebook cells.
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()  # df1 is unused in this cell
# Pairwise scatter matrix of selected features, coloured by bedroom count;
# the enlarged plotting context makes labels readable at this figure size.
with sns.plotting_context('notebook', font_scale=2.5):
    g = sns.pairplot(dataset[['sqft_lot', 'sqft_above', 'price', 'sqft_living', 'bedrooms']], hue='bedrooms', palette='tab20', height=6)
g.set(xticklabels=[]) | code |
90138657/cell_2 | [
"text_plain_output_1.png"
] | dataset.columns | code |
90138657/cell_11 | [
"text_plain_output_1.png"
] | dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
y.head() | code |
90138657/cell_19 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
X_test = df.drop(columns=['price'])[:10]
X_test
y_hat = model.predict(X_test)
y_hat | code |
90138657/cell_1 | [
"text_html_output_1.png"
] | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LinearRegression
dataset = pd.read_csv('../input/kc-house-data/kc_house_data.csv')
dataset.head() | code |
90138657/cell_7 | [
"text_plain_output_1.png"
] | dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
len(df) | code |
90138657/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns | code |
90138657/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_ | code |
90138657/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_ | code |
90138657/cell_3 | [
"text_html_output_1.png"
] | dataset.columns
print(dataset.dtypes) | code |
90138657/cell_17 | [
"text_plain_output_1.png"
] | dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X_test = df.drop(columns=['price'])[:10]
X_test | code |
90138657/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y) | code |
90138657/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import numpy as np
import pandas as pd
# NOTE(review): `dataset` and `LinearRegression` are defined in earlier cells
# of the original notebook.
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
# Toy sklearn fit on the first 10 rows (`price` is the regression target).
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
X_test = df.drop(columns=['price'])[:10]
X_test
y_hat = model.predict(X_test)
dc = pd.concat([df[:10].reset_index(), pd.Series(y_hat, name='predicted')], axis='columns')
dc
# Manual batch gradient descent on the first 200 rows of the raw data.
data = pd.read_csv('../input/kc-house-data/kc_house_data.csv')
X1 = data.drop(columns=['price', 'id', 'date', 'sqft_above'])[:200]
y1 = data['price'][:200]
X_b = np.c_[np.ones((200, 1)), X1]  # prepend a bias column of ones
alpha = 0.1
n_iterations = 1000
m = X_b.shape[0]  # FIX: batch size is the 200 rows of X_b (was hard-coded 100)
# FIX: the gradient must use y1 (the 200 targets matching X_b), not the
# 10-row `y` Series from the toy example above; reshape it to a column
# vector so the matrix algebra lines up with theta.
y_target = y1.to_numpy().reshape(-1, 1)
theta = np.random.randn(X_b.shape[1], 1)  # one weight per column incl. bias
for iteration in range(n_iterations):
    gradients = 2 / m * X_b.T.dot(X_b.dot(theta) - y_target)
    theta = theta - alpha * gradients
theta | code |
90138657/cell_10 | [
"text_plain_output_1.png"
] | dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
len(X)
len(y) | code |
90138657/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
df.columns
X = df.drop(columns=['price'])[:10]
y = df['price'][:10]
X.head() | code |
90138657/cell_5 | [
"text_plain_output_1.png"
] | import seaborn as sns
dataset.columns
df = dataset.drop(columns=['date', 'id'])
df1 = df.dropna()
sns.lmplot(x='price', y='sqft_living', data=df, ci=None) | code |
72087593/cell_6 | [
"text_plain_output_1.png"
] | from catboost import CatBoostRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import pandas as pd
df = pd.read_csv('../input/train-folds/train_folds.csv')
test_df = pd.read_csv('../input/30-days-of-ml/test.csv')
test_df.head().T
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
useful_features = [c for c in df.columns if c not in ('id', 'target', 'kfold')]
object_cols = [col for col in useful_features if 'cat' in col]
test_df = test_df[useful_features]
final_predictions = []
for fold in range(5):
xtrain = df[df.kfold != fold].reset_index(drop=True)
xvalid = df[df.kfold == fold].reset_index(drop=True)
xtest = test_df.copy()
ytrain = xtrain.target
yvalid = xvalid.target
xtrain = xtrain[useful_features]
xvalid = xvalid[useful_features]
ordinal_encoder = OrdinalEncoder()
xtrain[object_cols] = ordinal_encoder.fit_transform(xtrain[object_cols])
xvalid[object_cols] = ordinal_encoder.transform(xvalid[object_cols])
xtest[object_cols] = ordinal_encoder.transform(xtest[object_cols])
model = XGBRegressor(random_state=fold, n_jobs=4)
model.fit(xtrain, ytrain)
preds_valid = model.predict(xvalid)
test_preds = model.predict(xtest)
catboost_model = CatBoostRegressor(random_state=fold, verbose=100)
catboost_model.fit(xtrain, ytrain)
cat_preds_valid = catboost_model.predict(xvalid)
cat_preds_test = model.predict(xtest)
final_predictions.append(test_preds)
final_predictions.append(cat_preds_test)
print('XGB:', fold, mean_squared_error(yvalid, preds_valid, squared=False))
print('Catboost:', fold, mean_squared_error(yvalid, cat_preds_valid, squared=False)) | code |
72087593/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72087593/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train-folds/train_folds.csv')
test_df = pd.read_csv('../input/30-days-of-ml/test.csv')
test_df.head().T | code |
2029019/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
sales = pd.read_csv('../input/nyc-rolling-sales.csv', index_col=0)
sales.head(3) | code |
2029019/cell_5 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.metrics import log_loss
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
sales = pd.read_csv('../input/nyc-rolling-sales.csv', index_col=0)
df = sales[['SALE PRICE', 'TOTAL UNITS']].dropna()
df['SALE PRICE'] = df['SALE PRICE'].str.strip().replace('-', np.nan)
df = df.dropna()
X = df.loc[:, 'TOTAL UNITS'].values[:, np.newaxis].astype(float)
y = df.loc[:, 'SALE PRICE'].astype(int) > 1000000
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(X[:1000], y[:1000])
y_hat = clf.predict(X)
from sklearn.metrics import log_loss
log_loss(y, y_hat) | code |
90105911/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
# NOTE(review): imports are duplicated repeatedly in the original cell; kept as-is.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
# Customer-personality survey data; the file is tab-separated.
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.columns
# Column groups reused across this notebook's EDA cells.
people_num = ['Year_Birth', 'Income', 'Kidhome', 'Teenhome', 'Recency']
people_cat = ['Education', 'Marital_Status', 'Dt_Customer', 'Complain']
product = ['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts', 'MntGoldProds']
# These tight_layout() calls precede the figure and have no visible effect;
# the meaningful call is the one after the subplots below.
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
# One histogram per product-spend column, laid out in a 2x3 grid.
plt.figure(figsize=(12, 8))
for i, colName in enumerate(product):
    plt.subplot(2, 3, i + 1)
    sns.histplot(data=df, x=colName)
plt.tight_layout()
plt.show() | code |
90105911/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.head() | code |
90105911/cell_30 | [
"image_output_1.png"
] | place = ['NumWebPurchases', 'NumCatalogPurchases', 'NumStorePurchases', 'NumWebVisitsMonth']
place | code |
90105911/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.columns | code |
90105911/cell_26 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.columns
people_num = ['Year_Birth', 'Income', 'Kidhome', 'Teenhome', 'Recency']
people_cat = ['Education', 'Marital_Status', 'Dt_Customer', 'Complain']
product = ['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts', 'MntGoldProds']
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
promotion_cat = ['AcceptedCmp1', 'AcceptedCmp2', 'AcceptedCmp3', 'AcceptedCmp4', 'AcceptedCmp5', 'Response']
plt.figure(figsize=(12, 8))
for i, colName in enumerate(promotion_cat):
plt.subplot(3, 3, i + 1)
sns.countplot(data=df, x=colName)
plt.tight_layout()
plt.show() | code |
90105911/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
90105911/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.columns
people_num = ['Year_Birth', 'Income', 'Kidhome', 'Teenhome', 'Recency']
people_cat = ['Education', 'Marital_Status', 'Dt_Customer', 'Complain']
plt.tight_layout()
plt.tight_layout()
plt.figure(figsize=(18, 8))
for i, colName in enumerate(people_cat):
plt.subplot(2, 3, i + 1)
sns.countplot(data=df, x=colName)
plt.tight_layout()
plt.show() | code |
90105911/cell_28 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.columns
df[['AcceptedCmp1', 'AcceptedCmp2', 'AcceptedCmp3', 'AcceptedCmp4', 'AcceptedCmp5', 'Response']] | code |
90105911/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.columns
people_num = ['Year_Birth', 'Income', 'Kidhome', 'Teenhome', 'Recency']
plt.figure(figsize=(12, 8))
for i, colName in enumerate(people_num):
plt.subplot(2, 3, i + 1)
sns.histplot(data=df, x=colName)
plt.tight_layout()
plt.show() | code |
90105911/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.columns
people_num = ['Year_Birth', 'Income', 'Kidhome', 'Teenhome', 'Recency']
plt.tight_layout()
plt.figure(figsize=(12, 8))
for i, colName in enumerate(people_num):
plt.subplot(2, 3, i + 1)
sns.boxplot(data=df, x=colName)
plt.tight_layout()
plt.show() | code |
90105911/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.columns
people_num = ['Year_Birth', 'Income', 'Kidhome', 'Teenhome', 'Recency']
people_cat = ['Education', 'Marital_Status', 'Dt_Customer', 'Complain']
product = ['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts', 'MntGoldProds']
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
promotion_num = 'NumDealsPurchases'
promotion_cat = ['AcceptedCmp1', 'AcceptedCmp2', 'AcceptedCmp3', 'AcceptedCmp4', 'AcceptedCmp5', 'Response']
plt.tight_layout()
sns.countplot(data=df, x='NumWebPurchases') | code |
90105911/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.columns
people_num = ['Year_Birth', 'Income', 'Kidhome', 'Teenhome', 'Recency']
people_cat = ['Education', 'Marital_Status', 'Dt_Customer', 'Complain']
product = ['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts', 'MntGoldProds']
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
plt.figure(figsize=(12, 8))
for i, colName in enumerate(product):
plt.subplot(2, 3, i + 1)
sns.boxplot(data=df, x=colName)
plt.tight_layout()
plt.show() | code |
90105911/cell_27 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.columns
people_num = ['Year_Birth', 'Income', 'Kidhome', 'Teenhome', 'Recency']
people_cat = ['Education', 'Marital_Status', 'Dt_Customer', 'Complain']
product = ['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts', 'MntGoldProds']
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
plt.tight_layout()
promotion_num = 'NumDealsPurchases'
promotion_cat = ['AcceptedCmp1', 'AcceptedCmp2', 'AcceptedCmp3', 'AcceptedCmp4', 'AcceptedCmp5', 'Response']
plt.tight_layout()
sns.countplot(data=df, x=promotion_num) | code |
90105911/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t')
df.info() | code |
104126253/cell_4 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
df = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')

def trainModelAndAssessMAE(df):
    """Train a decision tree to predict Price and return its hold-out MAE."""
    df = df.select_dtypes(exclude=['object'])  # numeric columns only
    df, df_test = train_test_split(df, random_state=0)
    mdl = DecisionTreeRegressor(random_state=0)
    # FIX: the target must not be part of the feature matrix; the original
    # trained on `df` itself, which still contained Price (target leakage).
    data = df.drop(columns=['Price'])
    answers = df[['Price']]
    mdl = mdl.fit(data, answers)
    predictions = mdl.predict(df_test.drop(columns=['Price']))
    return mean_absolute_error(predictions, df_test[['Price']])
trainModelAndAssessMAE(df.dropna(axis=0)) | code |
104126253/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
df = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')

def trainModelAndAssessMAE(df):
    """Train a decision tree to predict Price and return its hold-out MAE."""
    df = df.select_dtypes(exclude=['object'])  # numeric columns only
    df, df_test = train_test_split(df, random_state=0)
    mdl = DecisionTreeRegressor(random_state=0)
    # FIX: the target must not be part of the feature matrix; the original
    # trained on `df` itself, which still contained Price (target leakage).
    data = df.drop(columns=['Price'])
    answers = df[['Price']]
    mdl = mdl.fit(data, answers)
    predictions = mdl.predict(df_test.drop(columns=['Price']))
    return mean_absolute_error(predictions, df_test[['Price']])
# Baseline: drop every row that has any missing value.
trainModelAndAssessMAE(df.dropna(axis=0))
trainModelAndAssessMAE(df.dropna(axis=1)) | code |
104126253/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
df = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')

def trainModelAndAssessMAE(df):
    """Train a decision tree to predict Price and return its hold-out MAE."""
    df = df.select_dtypes(exclude=['object'])  # numeric columns only
    df, df_test = train_test_split(df, random_state=0)
    mdl = DecisionTreeRegressor(random_state=0)
    # FIX: the target must not be part of the feature matrix; the original
    # trained on `df` itself, which still contained Price (target leakage).
    data = df.drop(columns=['Price'])
    answers = df[['Price']]
    mdl = mdl.fit(data, answers)
    predictions = mdl.predict(df_test.drop(columns=['Price']))
    return mean_absolute_error(predictions, df_test[['Price']])
# Baselines: drop rows / columns containing missing values.
trainModelAndAssessMAE(df.dropna(axis=0))
trainModelAndAssessMAE(df.dropna(axis=1))
# Mean-impute the numeric columns; fit_transform returns a bare ndarray,
# so the column names must be restored afterwards.
imputer = SimpleImputer()
df_only_numerical = df.select_dtypes(exclude=['object'])
df_imputed = pd.DataFrame(imputer.fit_transform(df_only_numerical))
df_imputed.columns = df_only_numerical.columns
trainModelAndAssessMAE(df_imputed) | code |
104126253/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
df = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')

def trainModelAndAssessMAE(df):
    """Train a decision tree to predict Price and return its hold-out MAE."""
    df = df.select_dtypes(exclude=['object'])  # numeric columns only
    df, df_test = train_test_split(df, random_state=0)
    mdl = DecisionTreeRegressor(random_state=0)
    # FIX: the target must not be part of the feature matrix; the original
    # trained on `df` itself, which still contained Price (target leakage).
    data = df.drop(columns=['Price'])
    answers = df[['Price']]
    mdl = mdl.fit(data, answers)
    predictions = mdl.predict(df_test.drop(columns=['Price']))
    return mean_absolute_error(predictions, df_test[['Price']])
# Baselines: drop rows / columns containing missing values.
trainModelAndAssessMAE(df.dropna(axis=0))
trainModelAndAssessMAE(df.dropna(axis=1))
# Plain mean imputation of the numeric columns.
imputer = SimpleImputer()
df_only_numerical = df.select_dtypes(exclude=['object'])
df_imputed = pd.DataFrame(imputer.fit_transform(df_only_numerical))
df_imputed.columns = df_only_numerical.columns
trainModelAndAssessMAE(df_imputed)
# Extended imputation: imputation plus was-missing indicator columns.
df_ei = df.copy()
cols_with_missing = [c for c in df_ei.columns if df_ei[c].isnull().any()]
# FIX: add an indicator only for columns that actually contain NaNs; the
# original looped over every column (ignoring cols_with_missing entirely)
# and mutated the frame while iterating over it.
for column in cols_with_missing:
    df_ei[column + '_was_missing'] = df_ei[column].isnull()
imputer = SimpleImputer()
df_ei_only_numerical = df_ei.select_dtypes(exclude=['object'])
df_ei_imputed = pd.DataFrame(imputer.fit_transform(df_ei_only_numerical))
df_ei_imputed.columns = df_ei_only_numerical.columns
trainModelAndAssessMAE(df_ei_imputed) | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.