path: string (lengths 13 to 17)
screenshot_names: sequence (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string class (1 value)
16136181/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.info()
code
16136181/cell_29
[ "text_plain_output_5.png", "text_plain_output_9.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "text_plain_output_8.png", "image_output_6.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from scipy.stats import skew
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
data.month.unique()
cat = ['admin_pages', 'info_pages', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend']
cont = ['admin_duration', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value']
mask = np.array(data1[cont].corr())
mask[np.tril_indices_from(data1[cont].corr())] = False

def cat_data(i):
    pass

for i in cat:
    cat_data(i)

sns.set()

def continous_data(i):
    pass

for i in cont:
    continous_data(i)

def cat_bivar(i):
    sns.barplot(data[i], data1.revenue)
    print('--' * 60)
    plt.title('Bar-plot of Revenue against ' + str(i))
    plt.show()

for i in cat:
    cat_bivar(i)
code
16136181/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
cat = ['admin_pages', 'info_pages', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend']
cont = ['admin_duration', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value']
mask = np.array(data1[cont].corr())
mask[np.tril_indices_from(data1[cont].corr())] = False
for i in cont:
    Q1 = data1[i].quantile(0.25)
    Q3 = data1[i].quantile(0.75)
    IQR = Q3 - Q1
    upper = Q3 + 1.5 * IQR
    lower = Q1 - 1.5 * IQR
    outlier_count = data1[i][(data1[i] < lower) | (data1[i] > upper)].count()
    total = data1[i].count()
    percent = outlier_count / total * 100
    print('Percentage of Outliers in {} column :: {}%'.format(i, np.round(percent, 2)))
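# Note (added): the loop above applies Tukey's fences -- values below
# Q1 - 1.5 * IQR or above Q3 + 1.5 * IQR are counted as outliers; the 1.5
# multiplier is a convention, not a quantity estimated from the data.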
code
16136181/cell_11
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
data.month.unique()
code
16136181/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import os
print(os.listdir('../input'))
code
16136181/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
print('Descriptive statistics of Data')
data.describe().T
code
16136181/cell_15
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
data1.head(10)
code
16136181/cell_16
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
data1.info()
code
16136181/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
code
16136181/cell_17
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
pd.isnull(data1).sum()
code
16136181/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
data.month.unique()
cat = ['admin_pages', 'info_pages', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend']
cont = ['admin_duration', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value']
mask = np.array(data1[cont].corr())
mask[np.tril_indices_from(data1[cont].corr())] = False

def cat_data(i):
    sns.countplot(data[i])
    print('--' * 60)
    plt.title('Count plot of ' + str(i))
    plt.show()

for i in cat:
    cat_data(i)
code
49126907/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from kaggle_environments import make
from kaggle_environments.envs.rps.agents import agents
from tqdm.auto import tqdm
import numpy as np
import pandas as pd

all_agents = ['submission.py', 'submission1.py', 'submission2.py', 'submission3.py', 'nash.py', 'nash2.py', 'nash3.py'] + list(agents.values())
all_agent_names = ['submit', 'submit1', 'submit2', 'submit3', 'nash', 'nash2', 'nash3'] + list((k[:8] for k in agents.keys()))
env = make('rps', configuration={'episodeSteps': 1000}, debug=True)
L = len(all_agents)
results = np.zeros((L, L), dtype=int)
N = 100
for it in tqdm(range(N), total=N):
    for a1 in range(L):
        a2 = 0
        env.run([all_agents[a1], all_agents[a2]])
        rewards = env.toJSON()['rewards']
        results[a1, a2] += -1 if rewards[0] is None else int(rewards[0] > 0) - int(rewards[0] < 0)
        results[a2, a1] += -1 if rewards[1] is None else int(rewards[1] > 0) - int(rewards[1] < 0)
print(pd.DataFrame(results, columns=all_agent_names, index=all_agent_names))
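# Note (added): scoring as read from the code above -- a2 is pinned to 0, so
# every agent is benchmarked against all_agents[0] rather than playing a full
# round-robin; per episode the matrix adds +1 for a win, -1 for a loss or a
# None reward (errored episode), and 0 for a draw, over N = 100 repeats.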
code
49126907/cell_4
[ "text_plain_output_1.png" ]
import secrets

Jmin = 0
Jmax = 5
J = Jmin + secrets.randbelow(Jmax - Jmin + 1)
Dmin = 2
Dmax = 5
Hash = []
Map = []
MyMap = []
for D in range(Dmin, Dmax + 1):
    Hash.append([0, 0, 0])
    Map.append([{}, {}, {}])
    MyMap.append([{}, {}, {}])
G = 2
R = 0.4
V = 0.7
VM = 0.7
B = 0

def add(map1, hash1, A):
    if hash1 not in map1:
        map1[hash1] = {'S': 0}
    d = map1[hash1]
    if A not in d:
        d[A] = 1
    else:
        d[A] += 1
    d['S'] += 1

def match(map1, hash1, S):
    global B
    global J
    if hash1 not in map1:
        return
    d = map1[hash1]
    if d['S'] >= G:
        for A in range(S):
            if A in d and d[A] >= d['S'] * R + (1 - R) * G and (secrets.randbelow(101) < 100 * V):
                if secrets.randbelow(101) < 100 * VM:
                    B = (A + 1) % S
                else:
                    B = A % S
                J = Jmin + secrets.randbelow(Jmax - Jmin + 1)

def agent(observation, configuration):
    global B
    global J
    T = observation.step
    S = configuration.signs
    if T > 0:
        A = observation.lastOpponentAction
    BA = (B + 1) % S
    B = secrets.randbelow(S)
    for D in range(Dmin, Dmax + 1):
        if T > D:
            add(Map[D - Dmin][0], Hash[D - Dmin][0], A)
            add(Map[D - Dmin][1], Hash[D - Dmin][1], A)
            add(Map[D - Dmin][2], Hash[D - Dmin][2], A)
            add(MyMap[D - Dmin][0], Hash[D - Dmin][0], BA)
            add(MyMap[D - Dmin][1], Hash[D - Dmin][1], BA)
            add(MyMap[D - Dmin][2], Hash[D - Dmin][2], BA)
        if T > 0:
            Hash[D - Dmin][0] = Hash[D - Dmin][0] // S ** 2 + (A + S * B) * S ** (2 * D - 1)
            Hash[D - Dmin][1] = Hash[D - Dmin][1] // S + A * S ** (D - 1)
            Hash[D - Dmin][2] = Hash[D - Dmin][2] // S + B * S ** (D - 1)
        if J == 0:
            match(Map[D - Dmin][0], Hash[D - Dmin][0], S)
            match(Map[D - Dmin][1], Hash[D - Dmin][1], S)
            match(Map[D - Dmin][2], Hash[D - Dmin][2], S)
        if J == 0:
            match(MyMap[D - Dmin][0], Hash[D - Dmin][0], S)
            match(MyMap[D - Dmin][1], Hash[D - Dmin][1], S)
            match(MyMap[D - Dmin][2], Hash[D - Dmin][2], S)
    if J > 0:
        J -= 1
    return B
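# Note (added): a reading of the agent above, not part of the original --
# each Hash entry is a rolling base-S encoding of the last D moves (opponent
# moves, own moves, and the two paired), Map/MyMap count which action
# followed each encoded context, match() plays the counter to any
# sufficiently dominant follow-up, and J is a randomized cooldown during
# which the agent keeps answering uniformly at random.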
code
49126907/cell_6
[ "text_plain_output_1.png" ]
import random

rng = random.SystemRandom()

def agent(observation, configuration):
    S = configuration.signs
    return rng.randrange(0, S)
code
49126907/cell_2
[ "text_plain_output_1.png" ]
import random

rng = random.SystemRandom()
hash1 = 0
hash2 = 0
hash3 = 0
map1 = {}
map2 = {}
map3 = {}
Jmin = 10
Jmax = 20
J = rng.randrange(Jmin, Jmax + 1)
D = 2
G = 3
R = 0.6
B = 0

def add(map1, hash1, A):
    if hash1 not in map1:
        map1[hash1] = {'S': 0}
    d = map1[hash1]
    if A not in d:
        d[A] = 1
    else:
        d[A] += 1
    d['S'] += 1

def match(map1, hash1, S):
    global B
    global J
    if hash1 not in map1:
        return
    d = map1[hash1]
    if d['S'] >= G:
        for A in range(S):
            if A in d and d[A] >= d['S'] * R:
                B = (A + 1) % S
                J = rng.randrange(Jmin, Jmax)

def agent(observation, configuration):
    global B
    global J
    global hash1
    global hash2
    global hash3
    T = observation.step
    S = configuration.signs
    if T > 0:
        A = observation.lastOpponentAction
        if T > D:
            add(map1, hash1, A)
            add(map2, hash2, A)
            add(map3, hash3, A)
    if T > 0:
        hash1 = hash1 // S + A * S ** (D - 1)
        hash2 = hash2 // S + B * S ** (D - 1)
        hash3 = hash3 // S ** 2 + (A + S * B) * S ** (2 * D - 1)
    B = rng.randrange(0, S)
    if J == 0:
        match(map1, hash1, S)
        match(map2, hash2, S)
        match(map3, hash3, S)
    else:
        J -= 1
    return B
code
49126907/cell_1
[ "text_plain_output_1.png" ]
import secrets
import math

Jmax = 2
J = Jmax - int(math.sqrt(secrets.randbelow((Jmax + 1) ** 2)))
Dmin = 2
Dmax = 5
Hash = []
Map = []
MyMap = []
for D in range(Dmin, Dmax + 1):
    Hash.append([0, 0, 0])
    Map.append([{}, {}, {}])
    MyMap.append([{}, {}, {}])
G = 2
R = 0.4
V = 0.8
VM = 0.95
B = 0
DT = 200

def add(map1, hash1, A, T):
    if hash1 not in map1:
        map1[hash1] = {'S': []}
    d = map1[hash1]
    if A not in d:
        d[A] = [T]
    else:
        d[A].append(T)
    d['S'].append(T)

def rank(A, T):
    return len([a for a in A if a > T - DT])

def match(map1, hash1, S, T):
    global B
    global J
    if hash1 not in map1:
        return
    d = map1[hash1]
    if rank(d['S'], T) >= G:
        for A in range(S):
            if A in d and rank(d[A], T) >= rank(d['S'], T) * R + (1 - R) * G and (secrets.randbelow(1001) < 1000 * V):
                if secrets.randbelow(1001) < 1000 * VM:
                    B = (A + 1) % S
                else:
                    B = A % S
                J = Jmax - int(math.sqrt(secrets.randbelow((Jmax + 1) ** 2)))

def agent(observation, configuration):
    global B
    global J
    T = observation.step
    S = configuration.signs
    if T > 0:
        A = observation.lastOpponentAction
    BA = (B + 1) % S
    B = secrets.randbelow(S)
    for D in range(Dmin, Dmax + 1):
        if T > D:
            add(Map[D - Dmin][0], Hash[D - Dmin][0], A, T)
            add(Map[D - Dmin][1], Hash[D - Dmin][1], A, T)
            add(Map[D - Dmin][2], Hash[D - Dmin][2], A, T)
            add(MyMap[D - Dmin][0], Hash[D - Dmin][0], BA, T)
            add(MyMap[D - Dmin][1], Hash[D - Dmin][1], BA, T)
            add(MyMap[D - Dmin][2], Hash[D - Dmin][2], BA, T)
        if T > 0:
            Hash[D - Dmin][0] = Hash[D - Dmin][0] // S ** 2 + (A + S * B) * S ** (2 * D - 1)
            Hash[D - Dmin][1] = Hash[D - Dmin][1] // S + A * S ** (D - 1)
            Hash[D - Dmin][2] = Hash[D - Dmin][2] // S + B * S ** (D - 1)
        if J == 0:
            match(Map[D - Dmin][0], Hash[D - Dmin][0], S, T)
            match(Map[D - Dmin][1], Hash[D - Dmin][1], S, T)
            match(Map[D - Dmin][2], Hash[D - Dmin][2], S, T)
        if J == 0:
            match(MyMap[D - Dmin][0], Hash[D - Dmin][0], S, T)
            match(MyMap[D - Dmin][1], Hash[D - Dmin][1], S, T)
            match(MyMap[D - Dmin][2], Hash[D - Dmin][2], S, T)
    if J > 0:
        J -= 1
    return B
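# Note (added): this variant differs from the count-based agents in this
# notebook in one way -- add() stores the step index of every hit and rank()
# counts only hits from the last DT = 200 steps, so stale patterns age out
# of the tables instead of accumulating forever.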
code
49126907/cell_7
[ "text_plain_output_1.png" ]
import secrets

def agent(observation, configuration):
    S = configuration.signs
    return secrets.randbelow(S)
code
49126907/cell_3
[ "text_plain_output_1.png" ]
import secrets

hash1 = 0
hash2 = 0
hash3 = 0
map1 = {}
map2 = {}
map3 = {}
Jmin = 10
Jmax = 30
J = Jmin + secrets.randbelow(Jmax - Jmin + 1)
D = 3
G = 2
R = 0.7
B = 0

def add(map1, hash1, A):
    if hash1 not in map1:
        map1[hash1] = {'S': 0}
    d = map1[hash1]
    if A not in d:
        d[A] = 1
    else:
        d[A] += 1
    d['S'] += 1

def match(map1, hash1, S):
    global B
    global J
    if hash1 not in map1:
        return
    d = map1[hash1]
    if d['S'] >= G:
        for A in range(S):
            if A in d and d[A] >= d['S'] * R:
                B = (A + 1) % S
                J = Jmin + secrets.randbelow(Jmax - Jmin + 1)

def agent(observation, configuration):
    global B
    global J
    global hash1
    global hash2
    global hash3
    T = observation.step
    S = configuration.signs
    if T > 0:
        A = observation.lastOpponentAction
        if T > D:
            add(map1, hash1, A)
            add(map2, hash2, A)
            add(map3, hash3, A)
    if T > 0:
        hash1 = hash1 // S + A * S ** (D - 1)
        hash2 = hash2 // S + B * S ** (D - 1)
        hash3 = hash3 // S ** 2 + (A + S * B) * S ** (2 * D - 1)
    B = secrets.randbelow(S)
    if J == 0:
        match(map1, hash1, S)
        match(map2, hash2, S)
        match(map3, hash3, S)
    else:
        J -= 1
    return B
code
49126907/cell_5
[ "text_plain_output_1.png" ]
import random

def agent(observation, configuration):
    S = configuration.signs
    return random.randrange(0, S)
code
16117706/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

lb_data = pd.read_csv('../input/leaderboard-4-days-out/jigsaw-unintended-bias-in-toxicity-classification-publicleaderboard.csv')
lb_data = lb_data.set_index('SubmissionDate')
top_15_teams = lb_data.groupby('TeamId').max().sort_values('Score')[-15:]['TeamName'].values
top_15_subs = lb_data.loc[lb_data['TeamName'].isin(top_15_teams)]
top_15_subs = top_15_subs.drop('TeamId', axis=1)
top_15_subs.pivot(columns='TeamName', values='Score')
for i in top_15_subs.pivot(columns='TeamName', values='Score').interpolate():
    top_15_subs.pivot(columns='TeamName', values='Score')[i].dropna().plot(legend=True, ylim=(0.93, 0.95), figsize=(12, 12), title=str(i))
top_15_subs.index = pd.to_datetime(top_15_subs.index)
top_15_subs_last_7 = top_15_subs.loc[top_15_subs.index > '2019-6-15']
for i in top_15_subs_last_7.pivot(columns='TeamName', values='Score').interpolate():
    top_15_subs_last_7.pivot(columns='TeamName', values='Score')[i].dropna().plot(legend=True, ylim=(0.93, 0.95), figsize=(12, 12), title=str(i))
plt.show()
code
16117706/cell_9
[ "image_output_11.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import pandas as pd

lb_data = pd.read_csv('../input/leaderboard-4-days-out/jigsaw-unintended-bias-in-toxicity-classification-publicleaderboard.csv')
lb_data = lb_data.set_index('SubmissionDate')
top_15_teams = lb_data.groupby('TeamId').max().sort_values('Score')[-15:]['TeamName'].values
top_15_subs = lb_data.loc[lb_data['TeamName'].isin(top_15_teams)]
top_15_subs = top_15_subs.drop('TeamId', axis=1)
top_15_subs.pivot(columns='TeamName', values='Score')
for i in top_15_subs.pivot(columns='TeamName', values='Score').interpolate():
    print(top_15_subs.pivot(columns='TeamName', values='Score')[i].dropna())
code
16117706/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

lb_data = pd.read_csv('../input/leaderboard-4-days-out/jigsaw-unintended-bias-in-toxicity-classification-publicleaderboard.csv')
lb_data = lb_data.set_index('SubmissionDate')
top_15_teams = lb_data.groupby('TeamId').max().sort_values('Score')[-15:]['TeamName'].values
top_15_subs = lb_data.loc[lb_data['TeamName'].isin(top_15_teams)]
top_15_subs = top_15_subs.drop('TeamId', axis=1)
top_15_subs.pivot(columns='TeamName', values='Score')
code
16117706/cell_8
[ "image_output_11.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png" ]
import pandas as pd

lb_data = pd.read_csv('../input/leaderboard-4-days-out/jigsaw-unintended-bias-in-toxicity-classification-publicleaderboard.csv')
lb_data = lb_data.set_index('SubmissionDate')
top_15_teams = lb_data.groupby('TeamId').max().sort_values('Score')[-15:]['TeamName'].values
top_15_subs = lb_data.loc[lb_data['TeamName'].isin(top_15_teams)]
top_15_subs = top_15_subs.drop('TeamId', axis=1)
top_15_subs.pivot(columns='TeamName', values='Score')
top_15_subs.pivot(columns='TeamName', values='Score').interpolate().plot(legend=True, ylim=(0.93, 0.95), figsize=(12, 12))
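# Note (added): the pivot leaves NaN wherever a team had no submission at a
# given timestamp; interpolate() fills those gaps (linear by default) so each
# of the 15 teams plots as a continuous line.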
code
16117706/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

lb_data = pd.read_csv('../input/leaderboard-4-days-out/jigsaw-unintended-bias-in-toxicity-classification-publicleaderboard.csv')
lb_data = lb_data.set_index('SubmissionDate')
top_15_teams = lb_data.groupby('TeamId').max().sort_values('Score')[-15:]['TeamName'].values
top_15_subs = lb_data.loc[lb_data['TeamName'].isin(top_15_teams)]
top_15_subs = top_15_subs.drop('TeamId', axis=1)
top_15_subs.pivot(columns='TeamName', values='Score')
for i in top_15_subs.pivot(columns='TeamName', values='Score').interpolate():
    top_15_subs.pivot(columns='TeamName', values='Score')[i].dropna().plot(legend=True, ylim=(0.93, 0.95), figsize=(12, 12), title=str(i))
plt.show()
code
17110052/cell_13
[ "text_html_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()

# subplot pattern that appears to be pasted from the matplotlib docs; df1 and
# df2 are undefined placeholders, so the lines are commented out to keep the
# cell runnable:
# fig, axes = plt.subplots(nrows=2, ncols=2)
# df1.plot(ax=axes[0, 0])
# df2.plot(ax=axes[0, 1])

# try to set index to dataframe
fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank', 'Standard Error': 'Standard_Error'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Dystopia Residual': 'Dystopia_Residual', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
plt.legend(loc='upper right')
data_2015U = data_2015U.set_index('Happiness_Rank')
data_2015U.Happiness_Score.plot(ax=axes[0, 0], kind='line', color='red', title='Happiness Score', linewidth=1, grid=True, linestyle=':')
data_2015U.Family.plot(ax=axes[0, 1], kind='line', color='green', title='Family', linewidth=1, grid=True, linestyle=':')
data_2015U.Economy.plot(ax=axes[1, 0], kind='line', color='yellow', title='Economy', linewidth=1, grid=True, linestyle=':')
data_2015U.Health.plot(ax=axes[1, 1], kind='line', color='blue', title='Health', linewidth=1, grid=True, linestyle=':')
# legend = puts label into plot
# label = name of label
# title = title of plot

rng = np.random.RandomState(0)
x = rng.randn(100)
y = rng.randn(100)
colors = rng.rand(100)
sizes = 1000 * rng.rand(50)
# plt.colorbar()  # commented out: there is no mappable (e.g. a scatter) in this cell for it to draw from

print(os.listdir('../input'))
print(check_output(['ls', '../input']).decode('utf8'))

data2015 = pd.read_csv('../input/2015.csv')
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
f, ax = plt.subplots(figsize=(20, 12))
Western_Europe = data_2015U[data_2015U.Region == 'Western Europe']
North_America = data_2015U[data_2015U.Region == 'North America']
Australian_New_Zealand = data_2015U[data_2015U.Region == 'Australia and New Zealand']
Middle_East_and_Northern_Africa = data_2015U[data_2015U.Region == 'Middle East and Northern Africa']
Latin_America_and_Caribbean = data_2015U[data_2015U.Region == 'Latin America and Caribbean']
Southeastern_Asia = data_2015U[data_2015U.Region == 'Southeastern Asia']
Central_and_Eastern_Europe = data_2015U[data_2015U.Region == 'Central and Eastern Europe']
Eastern_Asia = data_2015U[data_2015U.Region == 'Eastern_Asia']  # note: the data uses 'Eastern Asia'; the underscore makes this an empty frame
Southern_Asia = data_2015U[data_2015U.Region == 'Southern Asia']
for each in range(0, len(Western_Europe.Country)):
    x = Western_Europe.Happiness_Score[each]
    y = Western_Europe.Freedom[each]
    plt.scatter(Western_Europe.Happiness_Score, Western_Europe.Freedom, color='red', linewidth=1)
    plt.text(x, y, Western_Europe.Country[each], fontsize=12)
for each in range(0, len(North_America.Country)):
    x = North_America.Happiness_Score[each]
    y = North_America.Freedom[each]
    plt.scatter(North_America.Happiness_Score, North_America.Freedom, color='blue', linewidth=1)
    plt.text(x, y, North_America.Country[each], fontsize=12)
for each in range(0, len(Middle_East_and_Northern_Africa.Country)):
    x = Middle_East_and_Northern_Africa.Happiness_Score[each]
    y = Middle_East_and_Northern_Africa.Freedom[each]
    plt.scatter(Middle_East_and_Northern_Africa.Happiness_Score, Middle_East_and_Northern_Africa.Freedom, color='purple', linewidth=1)
    plt.text(x, y, Middle_East_and_Northern_Africa.Country[each], fontsize=12)
plt.title('Happiness Score-Freedom Scatter Plot')
plt.xlabel('Happiness Score')
plt.ylabel('Freedom')
code
17110052/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()
for each in data2015.columns:
    print(each)
code
17110052/cell_4
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.head()
code
17110052/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()
code
17110052/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()

# subplot pattern that appears to be pasted from the matplotlib docs; df1 and
# df2 are undefined placeholders, so the lines are commented out to keep the
# cell runnable:
# fig, axes = plt.subplots(nrows=2, ncols=2)
# df1.plot(ax=axes[0, 0])
# df2.plot(ax=axes[0, 1])

fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank', 'Standard Error': 'Standard_Error'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Dystopia Residual': 'Dystopia_Residual', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
plt.legend(loc='upper right')
data_2015U = data_2015U.set_index('Happiness_Rank')
data_2015U.Happiness_Score.plot(ax=axes[0, 0], kind='line', color='red', title='Happiness Score', linewidth=1, grid=True, linestyle=':')
data_2015U.Family.plot(ax=axes[0, 1], kind='line', color='green', title='Family', linewidth=1, grid=True, linestyle=':')
data_2015U.Economy.plot(ax=axes[1, 0], kind='line', color='yellow', title='Economy', linewidth=1, grid=True, linestyle=':')
data_2015U.Health.plot(ax=axes[1, 1], kind='line', color='blue', title='Health', linewidth=1, grid=True, linestyle=':')
code
17110052/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

print(os.listdir('../input'))
print(check_output(['ls', '../input']).decode('utf8'))
code
17110052/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()
data2015.head()
code
17110052/cell_18
[ "text_plain_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()

# subplot pattern that appears to be pasted from the matplotlib docs; df1 and
# df2 are undefined placeholders, so the lines are commented out to keep the
# cell runnable:
# fig, axes = plt.subplots(nrows=2, ncols=2)
# df1.plot(ax=axes[0, 0])
# df2.plot(ax=axes[0, 1])

# try to set index to dataframe
fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank', 'Standard Error': 'Standard_Error'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Dystopia Residual': 'Dystopia_Residual', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
plt.legend(loc='upper right')
data_2015U = data_2015U.set_index('Happiness_Rank')
data_2015U.Happiness_Score.plot(ax=axes[0, 0], kind='line', color='red', title='Happiness Score', linewidth=1, grid=True, linestyle=':')
data_2015U.Family.plot(ax=axes[0, 1], kind='line', color='green', title='Family', linewidth=1, grid=True, linestyle=':')
data_2015U.Economy.plot(ax=axes[1, 0], kind='line', color='yellow', title='Economy', linewidth=1, grid=True, linestyle=':')
data_2015U.Health.plot(ax=axes[1, 1], kind='line', color='blue', title='Health', linewidth=1, grid=True, linestyle=':')
# legend = puts label into plot
# label = name of label
# title = title of plot

rng = np.random.RandomState(0)
x = rng.randn(100)
y = rng.randn(100)
colors = rng.rand(100)
sizes = 1000 * rng.rand(50)
# plt.colorbar()  # commented out: there is no mappable (e.g. a scatter) in this cell for it to draw from

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
print(os.listdir('../input'))
print(check_output(['ls', '../input']).decode('utf8'))
# Any results you write to the current directory are saved as output.

data2015 = pd.read_csv('../input/2015.csv')
# fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
f, ax = plt.subplots(figsize=(20, 12))
Western_Europe = data_2015U[data_2015U.Region == 'Western Europe']
North_America = data_2015U[data_2015U.Region == 'North America']
Australian_New_Zealand = data_2015U[data_2015U.Region == 'Australia and New Zealand']
Middle_East_and_Northern_Africa = data_2015U[data_2015U.Region == 'Middle East and Northern Africa']
Latin_America_and_Caribbean = data_2015U[data_2015U.Region == 'Latin America and Caribbean']
Southeastern_Asia = data_2015U[data_2015U.Region == 'Southeastern Asia']
Central_and_Eastern_Europe = data_2015U[data_2015U.Region == 'Central and Eastern Europe']
Eastern_Asia = data_2015U[data_2015U.Region == 'Eastern_Asia']  # note: the data uses 'Eastern Asia'; the underscore makes this an empty frame
# Sub_Saharan_Africa = data_2015U[data_2015U.Region == 'Sub Saharan Africa']
Southern_Asia = data_2015U[data_2015U.Region == 'Southern Asia']
for each in range(0, len(Western_Europe.Country)):
    x = Western_Europe.Happiness_Score[each]
    y = Western_Europe.Freedom[each]
    plt.scatter(Western_Europe.Happiness_Score, Western_Europe.Freedom, color='red', linewidth=1)
    plt.text(x, y, Western_Europe.Country[each], fontsize=12)
for each in range(0, len(North_America.Country)):
    x = North_America.Happiness_Score[each]
    y = North_America.Freedom[each]
    plt.scatter(North_America.Happiness_Score, North_America.Freedom, color='blue', linewidth=1)
    plt.text(x, y, North_America.Country[each], fontsize=12)
for each in range(0, len(Middle_East_and_Northern_Africa.Country)):
    x = Middle_East_and_Northern_Africa.Happiness_Score[each]
    y = Middle_East_and_Northern_Africa.Freedom[each]
    plt.scatter(Middle_East_and_Northern_Africa.Happiness_Score, Middle_East_and_Northern_Africa.Freedom, color='purple', linewidth=1)
    plt.text(x, y, Middle_East_and_Northern_Africa.Country[each], fontsize=12)
plt.title('Happiness Score-Freedom Scatter Plot')
plt.xlabel('Happiness Score')
plt.ylabel('Freedom')
melted = pd.melt(frame=data_2015U, id_vars='Country', value_vars=['Generosity', 'Dystopia_Residual'])  # note: 'Dystopia Residual' was not renamed in this rebuilt frame, so this value_var may raise a KeyError
melted.loc[:10]
data_2015U1 = data_2015U.head()
data_2015U2 = data_2015U.tail()
concat_data_row = pd.concat([data_2015U1, data_2015U2], axis=0, ignore_index=True)
concat_data_row
data1 = data_2015U.loc[:, ['Health', 'Trust', 'Freedom']]
fig, axes = plt.subplots(nrows=2, ncols=2)
data_2015U.plot(ax=axes[0, 0], kind='scatter', x='Happiness_Score', y='Freedom', color='blue')
data_2015U.plot(ax=axes[0, 1], kind='scatter', x='Happiness_Score', y='Family', color='red')
data_2015U.plot(ax=axes[1, 0], kind='scatter', x='Happiness_Score', y='Economy', color='yellow')
data_2015U.plot(ax=axes[1, 1], kind='scatter', x='Happiness_Score', y='Generosity', color='pink')
code
17110052/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()
data2015.tail()
code
17110052/cell_15
[ "image_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()

# subplot pattern that appears to be pasted from the matplotlib docs; df1 and
# df2 are undefined placeholders, so the lines are commented out to keep the
# cell runnable:
# fig, axes = plt.subplots(nrows=2, ncols=2)
# df1.plot(ax=axes[0, 0])
# df2.plot(ax=axes[0, 1])

# try to set index to dataframe
fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank', 'Standard Error': 'Standard_Error'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Dystopia Residual': 'Dystopia_Residual', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
plt.legend(loc='upper right')
data_2015U = data_2015U.set_index('Happiness_Rank')
data_2015U.Happiness_Score.plot(ax=axes[0, 0], kind='line', color='red', title='Happiness Score', linewidth=1, grid=True, linestyle=':')
data_2015U.Family.plot(ax=axes[0, 1], kind='line', color='green', title='Family', linewidth=1, grid=True, linestyle=':')
data_2015U.Economy.plot(ax=axes[1, 0], kind='line', color='yellow', title='Economy', linewidth=1, grid=True, linestyle=':')
data_2015U.Health.plot(ax=axes[1, 1], kind='line', color='blue', title='Health', linewidth=1, grid=True, linestyle=':')
# legend = puts label into plot
# label = name of label
# title = title of plot

rng = np.random.RandomState(0)
x = rng.randn(100)
y = rng.randn(100)
colors = rng.rand(100)
sizes = 1000 * rng.rand(50)
# plt.colorbar()  # commented out: there is no mappable (e.g. a scatter) in this cell for it to draw from

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
print(os.listdir('../input'))
print(check_output(['ls', '../input']).decode('utf8'))
# Any results you write to the current directory are saved as output.

data2015 = pd.read_csv('../input/2015.csv')
# fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
f, ax = plt.subplots(figsize=(20, 12))
Western_Europe = data_2015U[data_2015U.Region == 'Western Europe']
North_America = data_2015U[data_2015U.Region == 'North America']
Australian_New_Zealand = data_2015U[data_2015U.Region == 'Australia and New Zealand']
Middle_East_and_Northern_Africa = data_2015U[data_2015U.Region == 'Middle East and Northern Africa']
Latin_America_and_Caribbean = data_2015U[data_2015U.Region == 'Latin America and Caribbean']
Southeastern_Asia = data_2015U[data_2015U.Region == 'Southeastern Asia']
Central_and_Eastern_Europe = data_2015U[data_2015U.Region == 'Central and Eastern Europe']
Eastern_Asia = data_2015U[data_2015U.Region == 'Eastern_Asia']  # note: the data uses 'Eastern Asia'; the underscore makes this an empty frame
# Sub_Saharan_Africa = data_2015U[data_2015U.Region == 'Sub Saharan Africa']
Southern_Asia = data_2015U[data_2015U.Region == 'Southern Asia']
for each in range(0, len(Western_Europe.Country)):
    x = Western_Europe.Happiness_Score[each]
    y = Western_Europe.Freedom[each]
    plt.scatter(Western_Europe.Happiness_Score, Western_Europe.Freedom, color='red', linewidth=1)
    plt.text(x, y, Western_Europe.Country[each], fontsize=12)
for each in range(0, len(North_America.Country)):
    x = North_America.Happiness_Score[each]
    y = North_America.Freedom[each]
    plt.scatter(North_America.Happiness_Score, North_America.Freedom, color='blue', linewidth=1)
    plt.text(x, y, North_America.Country[each], fontsize=12)
for each in range(0, len(Middle_East_and_Northern_Africa.Country)):
    x = Middle_East_and_Northern_Africa.Happiness_Score[each]
    y = Middle_East_and_Northern_Africa.Freedom[each]
    plt.scatter(Middle_East_and_Northern_Africa.Happiness_Score, Middle_East_and_Northern_Africa.Freedom, color='purple', linewidth=1)
    plt.text(x, y, Middle_East_and_Northern_Africa.Country[each], fontsize=12)
plt.title('Happiness Score-Freedom Scatter Plot')
plt.xlabel('Happiness Score')
plt.ylabel('Freedom')
melted = pd.melt(frame=data_2015U, id_vars='Country', value_vars=['Generosity', 'Dystopia_Residual'])  # note: 'Dystopia Residual' was not renamed in this rebuilt frame, so this value_var may raise a KeyError
melted.loc[:10]
data_2015U1 = data_2015U.head()
data_2015U2 = data_2015U.tail()
concat_data_row = pd.concat([data_2015U1, data_2015U2], axis=0, ignore_index=True)
concat_data_row
code
17110052/cell_16
[ "text_html_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()

# subplot pattern that appears to be pasted from the matplotlib docs; df1 and
# df2 are undefined placeholders, so the lines are commented out to keep the
# cell runnable:
# fig, axes = plt.subplots(nrows=2, ncols=2)
# df1.plot(ax=axes[0, 0])
# df2.plot(ax=axes[0, 1])

# try to set index to dataframe
fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank', 'Standard Error': 'Standard_Error'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Dystopia Residual': 'Dystopia_Residual', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
plt.legend(loc='upper right')
data_2015U = data_2015U.set_index('Happiness_Rank')
data_2015U.Happiness_Score.plot(ax=axes[0, 0], kind='line', color='red', title='Happiness Score', linewidth=1, grid=True, linestyle=':')
data_2015U.Family.plot(ax=axes[0, 1], kind='line', color='green', title='Family', linewidth=1, grid=True, linestyle=':')
data_2015U.Economy.plot(ax=axes[1, 0], kind='line', color='yellow', title='Economy', linewidth=1, grid=True, linestyle=':')
data_2015U.Health.plot(ax=axes[1, 1], kind='line', color='blue', title='Health', linewidth=1, grid=True, linestyle=':')
# legend = puts label into plot
# label = name of label
# title = title of plot

rng = np.random.RandomState(0)
x = rng.randn(100)
y = rng.randn(100)
colors = rng.rand(100)
sizes = 1000 * rng.rand(50)
# plt.colorbar()  # commented out: there is no mappable (e.g. a scatter) in this cell for it to draw from

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
print(os.listdir('../input'))
print(check_output(['ls', '../input']).decode('utf8'))
# Any results you write to the current directory are saved as output.

data2015 = pd.read_csv('../input/2015.csv')
# fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
f, ax = plt.subplots(figsize=(20, 12))
Western_Europe = data_2015U[data_2015U.Region == 'Western Europe']
North_America = data_2015U[data_2015U.Region == 'North America']
Australian_New_Zealand = data_2015U[data_2015U.Region == 'Australia and New Zealand']
Middle_East_and_Northern_Africa = data_2015U[data_2015U.Region == 'Middle East and Northern Africa']
Latin_America_and_Caribbean = data_2015U[data_2015U.Region == 'Latin America and Caribbean']
Southeastern_Asia = data_2015U[data_2015U.Region == 'Southeastern Asia']
Central_and_Eastern_Europe = data_2015U[data_2015U.Region == 'Central and Eastern Europe']
Eastern_Asia = data_2015U[data_2015U.Region == 'Eastern_Asia']  # note: the data uses 'Eastern Asia'; the underscore makes this an empty frame
# Sub_Saharan_Africa = data_2015U[data_2015U.Region == 'Sub Saharan Africa']
Southern_Asia = data_2015U[data_2015U.Region == 'Southern Asia']
for each in range(0, len(Western_Europe.Country)):
    x = Western_Europe.Happiness_Score[each]
    y = Western_Europe.Freedom[each]
    plt.scatter(Western_Europe.Happiness_Score, Western_Europe.Freedom, color='red', linewidth=1)
    plt.text(x, y, Western_Europe.Country[each], fontsize=12)
for each in range(0, len(North_America.Country)):
    x = North_America.Happiness_Score[each]
    y = North_America.Freedom[each]
    plt.scatter(North_America.Happiness_Score, North_America.Freedom, color='blue', linewidth=1)
    plt.text(x, y, North_America.Country[each], fontsize=12)
for each in range(0, len(Middle_East_and_Northern_Africa.Country)):
    x = Middle_East_and_Northern_Africa.Happiness_Score[each]
    y = Middle_East_and_Northern_Africa.Freedom[each]
    plt.scatter(Middle_East_and_Northern_Africa.Happiness_Score, Middle_East_and_Northern_Africa.Freedom, color='purple', linewidth=1)
    plt.text(x, y, Middle_East_and_Northern_Africa.Country[each], fontsize=12)
plt.title('Happiness Score-Freedom Scatter Plot')
plt.xlabel('Happiness Score')
plt.ylabel('Freedom')
melted = pd.melt(frame=data_2015U, id_vars='Country', value_vars=['Generosity', 'Dystopia_Residual'])  # note: 'Dystopia Residual' was not renamed in this rebuilt frame, so this value_var may raise a KeyError
melted.loc[:10]
data_2015U1 = data_2015U.head()
data_2015U2 = data_2015U.tail()
concat_data_row = pd.concat([data_2015U1, data_2015U2], axis=0, ignore_index=True)
concat_data_row
data1 = data_2015U.loc[:, ['Health', 'Trust', 'Freedom']]
data1.plot()
code
17110052/cell_3
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
print(data2017.info())
code
17110052/cell_17
[ "text_html_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()

# subplot pattern that appears to be pasted from the matplotlib docs; df1 and
# df2 are undefined placeholders, so the lines are commented out to keep the
# cell runnable:
# fig, axes = plt.subplots(nrows=2, ncols=2)
# df1.plot(ax=axes[0, 0])
# df2.plot(ax=axes[0, 1])

# try to set index to dataframe
fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank', 'Standard Error': 'Standard_Error'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Dystopia Residual': 'Dystopia_Residual', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
plt.legend(loc='upper right')
data_2015U = data_2015U.set_index('Happiness_Rank')
data_2015U.Happiness_Score.plot(ax=axes[0, 0], kind='line', color='red', title='Happiness Score', linewidth=1, grid=True, linestyle=':')
data_2015U.Family.plot(ax=axes[0, 1], kind='line', color='green', title='Family', linewidth=1, grid=True, linestyle=':')
data_2015U.Economy.plot(ax=axes[1, 0], kind='line', color='yellow', title='Economy', linewidth=1, grid=True, linestyle=':')
data_2015U.Health.plot(ax=axes[1, 1], kind='line', color='blue', title='Health', linewidth=1, grid=True, linestyle=':')
# legend = puts label into plot
# label = name of label
# title = title of plot

rng = np.random.RandomState(0)
x = rng.randn(100)
y = rng.randn(100)
colors = rng.rand(100)
sizes = 1000 * rng.rand(50)
# plt.colorbar()  # commented out: there is no mappable (e.g. a scatter) in this cell for it to draw from

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
print(os.listdir('../input'))
print(check_output(['ls', '../input']).decode('utf8'))
# Any results you write to the current directory are saved as output.

data2015 = pd.read_csv('../input/2015.csv')
# fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
f, ax = plt.subplots(figsize=(20, 12))
Western_Europe = data_2015U[data_2015U.Region == 'Western Europe']
North_America = data_2015U[data_2015U.Region == 'North America']
Australian_New_Zealand = data_2015U[data_2015U.Region == 'Australia and New Zealand']
Middle_East_and_Northern_Africa = data_2015U[data_2015U.Region == 'Middle East and Northern Africa']
Latin_America_and_Caribbean = data_2015U[data_2015U.Region == 'Latin America and Caribbean']
Southeastern_Asia = data_2015U[data_2015U.Region == 'Southeastern Asia']
Central_and_Eastern_Europe = data_2015U[data_2015U.Region == 'Central and Eastern Europe']
Eastern_Asia = data_2015U[data_2015U.Region == 'Eastern_Asia']  # note: the data uses 'Eastern Asia'; the underscore makes this an empty frame
# Sub_Saharan_Africa = data_2015U[data_2015U.Region == 'Sub Saharan Africa']
Southern_Asia = data_2015U[data_2015U.Region == 'Southern Asia']
for each in range(0, len(Western_Europe.Country)):
    x = Western_Europe.Happiness_Score[each]
    y = Western_Europe.Freedom[each]
    plt.scatter(Western_Europe.Happiness_Score, Western_Europe.Freedom, color='red', linewidth=1)
    plt.text(x, y, Western_Europe.Country[each], fontsize=12)
for each in range(0, len(North_America.Country)):
    x = North_America.Happiness_Score[each]
    y = North_America.Freedom[each]
    plt.scatter(North_America.Happiness_Score, North_America.Freedom, color='blue', linewidth=1)
    plt.text(x, y, North_America.Country[each], fontsize=12)
for each in range(0, len(Middle_East_and_Northern_Africa.Country)):
    x = Middle_East_and_Northern_Africa.Happiness_Score[each]
    y = Middle_East_and_Northern_Africa.Freedom[each]
    plt.scatter(Middle_East_and_Northern_Africa.Happiness_Score, Middle_East_and_Northern_Africa.Freedom, color='purple', linewidth=1)
    plt.text(x, y, Middle_East_and_Northern_Africa.Country[each], fontsize=12)
plt.title('Happiness Score-Freedom Scatter Plot')
plt.xlabel('Happiness Score')
plt.ylabel('Freedom')
melted = pd.melt(frame=data_2015U, id_vars='Country', value_vars=['Generosity', 'Dystopia_Residual'])  # note: 'Dystopia Residual' was not renamed in this rebuilt frame, so this value_var may raise a KeyError
melted.loc[:10]
data_2015U1 = data_2015U.head()
data_2015U2 = data_2015U.tail()
concat_data_row = pd.concat([data_2015U1, data_2015U2], axis=0, ignore_index=True)
concat_data_row
data1 = data_2015U.loc[:, ['Health', 'Trust', 'Freedom']]
data1.plot(subplots=True)
plt.show()
code
17110052/cell_14
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
data2015.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data2015.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.show()

# subplot pattern that appears to be pasted from the matplotlib docs; df1 and
# df2 are undefined placeholders, so the lines are commented out to keep the
# cell runnable:
# fig, axes = plt.subplots(nrows=2, ncols=2)
# df1.plot(ax=axes[0, 0])
# df2.plot(ax=axes[0, 1])

# try to set index to dataframe
fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank', 'Standard Error': 'Standard_Error'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Dystopia Residual': 'Dystopia_Residual', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
plt.legend(loc='upper right')
data_2015U = data_2015U.set_index('Happiness_Rank')
data_2015U.Happiness_Score.plot(ax=axes[0, 0], kind='line', color='red', title='Happiness Score', linewidth=1, grid=True, linestyle=':')
data_2015U.Family.plot(ax=axes[0, 1], kind='line', color='green', title='Family', linewidth=1, grid=True, linestyle=':')
data_2015U.Economy.plot(ax=axes[1, 0], kind='line', color='yellow', title='Economy', linewidth=1, grid=True, linestyle=':')
data_2015U.Health.plot(ax=axes[1, 1], kind='line', color='blue', title='Health', linewidth=1, grid=True, linestyle=':')
# legend = puts label into plot
# label = name of label
# title = title of plot

rng = np.random.RandomState(0)
x = rng.randn(100)
y = rng.randn(100)
colors = rng.rand(100)
sizes = 1000 * rng.rand(50)
# plt.colorbar()  # commented out: there is no mappable (e.g. a scatter) in this cell for it to draw from

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
print(os.listdir('../input'))
print(check_output(['ls', '../input']).decode('utf8'))
# Any results you write to the current directory are saved as output.

data2015 = pd.read_csv('../input/2015.csv')
# fig, axes = plt.subplots(figsize=(10, 10), nrows=2, ncols=2)
data_updated = data2015.rename(index=str, columns={'Happiness Rank': 'Happiness_Rank'})
data_2015U = data_updated.rename(index=str, columns={'Happiness Score': 'Happiness_Score'})
data_2015U = data_2015U.rename(index=str, columns={'Economy (GDP per Capita)': 'Economy', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust'})
f, ax = plt.subplots(figsize=(20, 12))
Western_Europe = data_2015U[data_2015U.Region == 'Western Europe']
North_America = data_2015U[data_2015U.Region == 'North America']
Australian_New_Zealand = data_2015U[data_2015U.Region == 'Australia and New Zealand']
Middle_East_and_Northern_Africa = data_2015U[data_2015U.Region == 'Middle East and Northern Africa']
Latin_America_and_Caribbean = data_2015U[data_2015U.Region == 'Latin America and Caribbean']
Southeastern_Asia = data_2015U[data_2015U.Region == 'Southeastern Asia']
Central_and_Eastern_Europe = data_2015U[data_2015U.Region == 'Central and Eastern Europe']
Eastern_Asia = data_2015U[data_2015U.Region == 'Eastern_Asia']  # note: the data uses 'Eastern Asia'; the underscore makes this an empty frame
# Sub_Saharan_Africa = data_2015U[data_2015U.Region == 'Sub Saharan Africa']
Southern_Asia = data_2015U[data_2015U.Region == 'Southern Asia']
for each in range(0, len(Western_Europe.Country)):
    x = Western_Europe.Happiness_Score[each]
    y = Western_Europe.Freedom[each]
    plt.scatter(Western_Europe.Happiness_Score, Western_Europe.Freedom, color='red', linewidth=1)
    plt.text(x, y, Western_Europe.Country[each], fontsize=12)
for each in range(0, len(North_America.Country)):
    x = North_America.Happiness_Score[each]
    y = North_America.Freedom[each]
    plt.scatter(North_America.Happiness_Score, North_America.Freedom, color='blue', linewidth=1)
    plt.text(x, y, North_America.Country[each], fontsize=12)
for each in range(0, len(Middle_East_and_Northern_Africa.Country)):
    x = Middle_East_and_Northern_Africa.Happiness_Score[each]
    y = Middle_East_and_Northern_Africa.Freedom[each]
    plt.scatter(Middle_East_and_Northern_Africa.Happiness_Score, Middle_East_and_Northern_Africa.Freedom, color='purple', linewidth=1)
    plt.text(x, y, Middle_East_and_Northern_Africa.Country[each], fontsize=12)
plt.title('Happiness Score-Freedom Scatter Plot')
plt.xlabel('Happiness Score')
plt.ylabel('Freedom')
melted = pd.melt(frame=data_2015U, id_vars='Country', value_vars=['Generosity', 'Dystopia_Residual'])  # note: 'Dystopia Residual' was not renamed in this rebuilt frame, so this value_var may raise a KeyError
melted.loc[:10]
code
17110052/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data2015 = pd.read_csv('../input/2015.csv')
data2016 = pd.read_csv('../input/2016.csv')
data2017 = pd.read_csv('../input/2017.csv')
print(' 2015 Correlation of data ')
data2015.corr()
code
130020662/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
hr.isna().sum()
duplicates_mask = hr.duplicated()
duplicates = hr[duplicates_mask]
duplicates.shape
hr.duplicated().sum()
hr_no_duplicates = hr.drop_duplicates()
hr_no_duplicates
hr_no_duplicates.boxplot(column='time_spend_company')
plt.show()
code
130020662/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
hr.isna().sum()
code
130020662/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.info()
code
130020662/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.head(10)
code
130020662/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
hr.isna().sum()
duplicates_mask = hr.duplicated()
duplicates = hr[duplicates_mask]
duplicates.shape
hr.duplicated().sum()
hr_no_duplicates = hr.drop_duplicates()
hr_no_duplicates
_, flier_dict = hr_no_duplicates.boxplot(column='time_spend_company', return_type='both')
outliers = [flier.get_ydata()[0] for flier in flier_dict['fliers']]
outlier_rows = hr_no_duplicates['time_spend_company'].isin(outliers)
num_outliers = outlier_rows.sum()
code
130020662/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
code
130020662/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
from xgboost import XGBClassifier
from xgboost import XGBRegressor
from xgboost import plot_importance
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, ConfusionMatrixDisplay, classification_report
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.tree import plot_tree
code
130020662/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
hr.isna().sum()
duplicates_mask = hr.duplicated()
duplicates = hr[duplicates_mask]
duplicates.shape
hr.duplicated().sum()
hr_no_duplicates = hr.drop_duplicates()
hr_no_duplicates
hr_no_duplicates.describe()
code
130020662/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
hr.isna().sum()
duplicates_mask = hr.duplicated()
duplicates = hr[duplicates_mask]
duplicates.shape
hr.duplicated().sum()
code
130020662/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
hr.isna().sum()
duplicates_mask = hr.duplicated()
duplicates = hr[duplicates_mask]
duplicates.shape
hr.duplicated().sum()
hr_no_duplicates = hr.drop_duplicates()
hr_no_duplicates
code
130020662/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
hr.isna().sum()
duplicates_mask = hr.duplicated()
duplicates = hr[duplicates_mask]
duplicates.shape
hr.duplicated().sum()
hr_no_duplicates = hr.drop_duplicates()
hr_no_duplicates
# Determine the number of rows containing outliers
# create a boxplot of the 'time_spend_company' column
_, flier_dict = hr_no_duplicates.boxplot(column='time_spend_company', return_type='both')
# get the values of the outliers
outliers = [flier.get_ydata()[0] for flier in flier_dict['fliers']]
# select the rows that contain the outliers
outlier_rows = hr_no_duplicates['time_spend_company'].isin(outliers)
# count the number of rows that contain the outliers
num_outliers = outlier_rows.sum()
percentile25 = hr_no_duplicates['time_spend_company'].quantile(0.25)
percentile75 = hr_no_duplicates['time_spend_company'].quantile(0.75)
iqr = percentile75 - percentile25
print('IQR:', iqr)
upper_limit = percentile75 + 1.5 * iqr
lower_limit = percentile25 - 1.5 * iqr
print('Lower limit:', lower_limit)
print('Upper limit:', upper_limit)
outliers = hr_no_duplicates[(hr_no_duplicates['time_spend_company'] > upper_limit) | (hr_no_duplicates['time_spend_company'] < lower_limit)]
print('Number of rows in the data containing outliers in `time_spend_company`:', len(outliers))
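# Added toy illustration (not from the original notebook): the same 1.5*IQR rule on a
# small hand-made series; 10 falls above Q3 + 1.5*IQR and is flagged.
_toy = pd.Series([1, 2, 2, 3, 3, 3, 4, 10])
_q1, _q3 = (_toy.quantile(0.25), _toy.quantile(0.75))
_iqr = _q3 - _q1
print(_toy[(_toy < _q1 - 1.5 * _iqr) | (_toy > _q3 + 1.5 * _iqr)])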
code
130020662/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
hr.isna().sum()
duplicates_mask = hr.duplicated()
duplicates = hr[duplicates_mask]
duplicates.shape
code
130020662/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
hr.isna().sum()
duplicates_mask = hr.duplicated()
duplicates = hr[duplicates_mask]
duplicates.shape
hr.duplicated().sum()
hr_no_duplicates = hr.drop_duplicates()
hr_no_duplicates
fig, ax = plt.subplots()
ax.boxplot(hr_no_duplicates['time_spend_company'])
whiskers = ax.lines[2:4]
whisker_values = [whisk.get_ydata()[1] for whisk in whiskers]
print('Whisker values:', whisker_values)
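# Added hedged alternative (not from the original cell): ax.lines[2:4] assumes a fixed
# artist order; the dict returned by boxplot names the whisker artists directly.
bp = ax.boxplot(hr_no_duplicates['time_spend_company'])
print([whisk.get_ydata()[1] for whisk in bp['whiskers']])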
code
130020662/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.describe()
code
130020662/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
hr.isna().sum()
duplicates_mask = hr.duplicated()
duplicates = hr[duplicates_mask]
duplicates.shape
hr.duplicated().sum()
hr_no_duplicates = hr.drop_duplicates()
hr_no_duplicates
# Determine the number of rows containing outliers
# create a boxplot of the 'time_spend_company' column
_, flier_dict = hr_no_duplicates.boxplot(column='time_spend_company', return_type='both')
# get the values of the outliers
outliers = [flier.get_ydata()[0] for flier in flier_dict['fliers']]
# select the rows that contain the outliers
outlier_rows = hr_no_duplicates['time_spend_company'].isin(outliers)
# count the number of rows that contain the outliers
num_outliers = outlier_rows.sum()
sns.pairplot(hr_no_duplicates)
code
130020662/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
dataset_path = '/kaggle/input/hr-analytics-and-job-prediction/HR_comma_sep.csv'
hr = pd.read_csv(dataset_path)
hr.columns
hr.columns = hr.columns.str.lower()
hr.columns = hr.columns.str.replace(' ', '_')
hr.columns = hr.columns.str.replace('\\W', '_', regex=True)
hr.columns
code
90135235/cell_19
[ "text_plain_output_1.png" ]
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
torch.manual_seed(42)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_transforms = transforms.Compose([transforms.Resize((224, 224)), transforms.Grayscale(), transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor()])
test_transforms = transforms.Compose([transforms.Resize((224, 224)), transforms.Grayscale(), transforms.ToTensor()])
dataset = datasets.ImageFolder('../input/brain-mri-images-for-brain-tumor-detection/brain_tumor_dataset', train_transforms)
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
dataiter = iter(dataloader)
X, Y = dataiter.next()
classes = {0: 'Negative', 1: 'Positive'}
fig, axes = plt.subplots(3, 3, figsize=(14, 14))
for i in range(3):
    for j in range(3):
        plt.sca(axes[i, j])
        idx = np.random.randint(0, 31)
        image = np.moveaxis(X[idx].numpy(), 0, 2)
        plt.title(classes[Y[idx].item()])
        plt.imshow(image, cmap='gray')
        plt.axis('off')

def convBlock(ni, no):
    return nn.Sequential(nn.Dropout(0.2), nn.Conv2d(ni, no, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.BatchNorm2d(no), nn.MaxPool2d(2))

class BrainCancerClassifier(nn.Module):

    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(convBlock(1, 64), convBlock(64, 64), convBlock(64, 128), convBlock(128, 256), convBlock(256, 512), convBlock(512, 64), nn.Flatten(), nn.Linear(576, 256), nn.Dropout(0.3), nn.ReLU(inplace=True), nn.Linear(256, 128), nn.Dropout(0.2), nn.ReLU(inplace=True), nn.Linear(128, 64), nn.Dropout(0.2), nn.ReLU(inplace=True), nn.Linear(64, 2))

    def forward(self, x):
        return self.model(x)

def compute_metrics(preds, targets):
    loss = nn.CrossEntropyLoss()
    acc = (torch.max(preds, 1)[1] == targets).float().mean()
    return (loss(preds, targets), acc)

def train_batch(model, data, optimizer):
    model.train()
    images, labels = data
    images = images.to(device)
    labels = labels.to(device)
    optimizer.zero_grad()
    loss, acc = compute_metrics(model(images), labels)
    loss.backward()
    optimizer.step()
    return (loss.item(), acc.item())

model = BrainCancerClassifier().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
im2fmap = nn.Sequential(*list(model.model[:5].children()) + list(model.model[5][:2].children()))

def load_image(path):
    image = Image.open(path)
    image = test_transforms(image)
    image = image.unsqueeze(0)
    return image.to(device)

def get_heatmap(image_path):
    model.eval()
    for module in model.modules():
        if isinstance(module, nn.BatchNorm2d):
            module.track_running_stats = False
    image = load_image(image_path)
    logits = model(image)
    activations = im2fmap(image)
    prediction = logits.argmax(axis=1)
    model.zero_grad()
    logits[0, prediction].backward(retain_graph=True)
    pooled_grads = model.model[5][1].weight.grad.data.mean((0, 2, 3))
    for i in range(activations.shape[1]):
        activations[:, i, :, :] *= -pooled_grads[i]
    heatmap = torch.mean(activations, dim=1)[0].detach().to('cpu')
    return (heatmap, prediction.item())

size = 224

def upsampleHeatmap(map, img):
    m, M = (map.min(), map.max())
    map = 255 * ((map - m) / (M - m))
    map = np.uint8(map)
    map = cv2.resize(map, (size, size))
    map = cv2.applyColorMap(255 - map, cv2.COLORMAP_JET)
    map = np.uint8(map)
    map = np.uint8(map * 0.5 + img * 0.5)
    return map

image_paths = ['../input/brain-mri-images-for-brain-tumor-detection/no/19 no.jpg', '../input/brain-mri-images-for-brain-tumor-detection/yes/Y1.jpg']
cols = 2
fig, axes = plt.subplots(1, cols, figsize=(8, 10))
for i in range(cols):
    image = cv2.imread(image_paths[i])
    image = cv2.resize(image, (224, 224))
    heatmap, pred = get_heatmap(image_paths[i])
    result = upsampleHeatmap(heatmap, image)
    plt.sca(axes[i])
    plt.imshow(result, cmap='gray')
    plt.title(classes[pred])
    plt.axis('off')
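# Added illustration (not from the original notebook): the per-channel weighting inside
# get_heatmap, shown on random tensors of matching shapes (there each channel is scaled
# by -pooled_grads[i] before averaging).
with torch.no_grad():
    _acts = torch.randn(1, 64, 3, 3)          # stand-in for the im2fmap feature maps
    _w = torch.randn(64)                      # stand-in for the pooled gradients
    _weighted = _acts * _w.view(1, -1, 1, 1)  # broadcasting replaces the explicit loop
    _heat = _weighted.mean(dim=1)[0]          # channel average -> one 2D heatmap
print(_heat.shape)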
code
90135235/cell_14
[ "image_output_1.png" ]
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets
import torch
import torch.nn as nn
torch.manual_seed(42)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_transforms = transforms.Compose([transforms.Resize((224, 224)), transforms.Grayscale(), transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor()])
test_transforms = transforms.Compose([transforms.Resize((224, 224)), transforms.Grayscale(), transforms.ToTensor()])
dataset = datasets.ImageFolder('../input/brain-mri-images-for-brain-tumor-detection/brain_tumor_dataset', train_transforms)
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)

def convBlock(ni, no):
    return nn.Sequential(nn.Dropout(0.2), nn.Conv2d(ni, no, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.BatchNorm2d(no), nn.MaxPool2d(2))

class BrainCancerClassifier(nn.Module):

    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(convBlock(1, 64), convBlock(64, 64), convBlock(64, 128), convBlock(128, 256), convBlock(256, 512), convBlock(512, 64), nn.Flatten(), nn.Linear(576, 256), nn.Dropout(0.3), nn.ReLU(inplace=True), nn.Linear(256, 128), nn.Dropout(0.2), nn.ReLU(inplace=True), nn.Linear(128, 64), nn.Dropout(0.2), nn.ReLU(inplace=True), nn.Linear(64, 2))

    def forward(self, x):
        return self.model(x)

def compute_metrics(preds, targets):
    loss = nn.CrossEntropyLoss()
    acc = (torch.max(preds, 1)[1] == targets).float().mean()
    return (loss(preds, targets), acc)

def train_batch(model, data, optimizer):
    model.train()
    images, labels = data
    images = images.to(device)
    labels = labels.to(device)
    optimizer.zero_grad()
    loss, acc = compute_metrics(model(images), labels)
    loss.backward()
    optimizer.step()
    return (loss.item(), acc.item())

model = BrainCancerClassifier().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
im2fmap = nn.Sequential(*list(model.model[:5].children()) + list(model.model[5][:2].children()))
epochs = 20
for epoch in range(epochs):
    epoch_loss = []
    epoch_acc = []
    for data in dataloader:
        loss, acc = train_batch(model, data, optimizer)
        epoch_loss.append(loss)
        epoch_acc.append(acc)
    print(f'Epoch: {epoch + 1}..Loss: {sum(epoch_loss) / len(epoch_loss):.3f}..Accuracy: {sum(epoch_acc) / len(epoch_acc):.3f}..')
code
90135235/cell_5
[ "image_output_1.png" ]
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets
import matplotlib.pyplot as plt
import numpy as np
import torch
torch.manual_seed(42)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_transforms = transforms.Compose([transforms.Resize((224, 224)), transforms.Grayscale(), transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor()])
test_transforms = transforms.Compose([transforms.Resize((224, 224)), transforms.Grayscale(), transforms.ToTensor()])
dataset = datasets.ImageFolder('../input/brain-mri-images-for-brain-tumor-detection/brain_tumor_dataset', train_transforms)
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
dataiter = iter(dataloader)
X, Y = dataiter.next()
classes = {0: 'Negative', 1: 'Positive'}
fig, axes = plt.subplots(3, 3, figsize=(14, 14))
for i in range(3):
    for j in range(3):
        plt.sca(axes[i, j])
        idx = np.random.randint(0, 31)
        image = np.moveaxis(X[idx].numpy(), 0, 2)
        plt.title(classes[Y[idx].item()])
        plt.imshow(image, cmap='gray')
        plt.axis('off')
code
105194300/cell_21
[ "text_plain_output_1.png" ]
import numpy as np
import seaborn as sns
NUM_FEATURES = 10
NUM_CLASSES = 3
NUM_POINTS = 100
MAX_DEPTH = 5
METRIC = 'gini'

def plotDists(y):
    return

def acc(true, pred):
    assert len(true) == len(pred), 'Truth and Pred Lengths not same'
    true = np.array(true)
    pred = np.array(pred).astype(np.int32)
    return np.sum(true == pred) / len(true)

d = DecisionTree(x, y, MAX_DEPTH, METRIC)
d.fit()
acc(y, d.predict(x))
code
105194300/cell_9
[ "image_output_1.png" ]
import numpy as np
import seaborn as sns

def plotDists(y):
    return

def acc(true, pred):
    assert len(true) == len(pred), 'Truth and Pred Lengths not same'
    true = np.array(true)
    pred = np.array(pred).astype(np.int32)
    return np.sum(true == pred) / len(true)

plotDists(y)
code
105194300/cell_23
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import seaborn as sns
NUM_FEATURES = 10
NUM_CLASSES = 3
NUM_POINTS = 100
MAX_DEPTH = 5
METRIC = 'gini'

def plotDists(y):
    return

def acc(true, pred):
    assert len(true) == len(pred), 'Truth and Pred Lengths not same'
    true = np.array(true)
    pred = np.array(pred).astype(np.int32)
    return np.sum(true == pred) / len(true)

class Node:

    def __init__(self):
        self.left = None
        self.right = None
        self.feature_id = None
        self.feature_thresh = None
        self.is_leaf = False
        self.result = None
        self.metric = 0
        self.level = -1

    def set_leaf(self, result, level):
        self.is_leaf = True
        self.result = result
        self.level = level

    def set_branch(self, best_feature, best_feature_thresh, best_metric, left, right, level):
        self.feature_id = best_feature
        self.feature_thresh = best_feature_thresh
        self.metric = best_metric
        self.left = left
        self.right = right
        self.level = level

    def traverse(self, inp):
        if self.is_leaf:
            return (self.result, True)
        if inp[self.feature_id] <= self.feature_thresh:
            return (self.left, False)
        else:
            return (self.right, False)

    def __repr__(self):
        s = ''
        s += f'Level : {self.level}\n'
        s += f'Leaf : {self.is_leaf}\n'
        if self.is_leaf:
            s += f'Result : {self.result}\n'
        else:
            s += f'Feature : {self.feature_id}\n'
            s += f'Thresh : {self.feature_thresh}\n'
        return s

class DecisionTree:

    def __init__(self, x, y, MAX_LEVELS=5, metric='gini'):
        self.x = np.array(x)
        self.y = np.array(y)
        self.data = np.concatenate((self.x, self.y.reshape(-1, 1)), axis=1)
        self.n_features = self.x.shape[1]
        self.classes = set(y)
        self.n_classes = len(self.classes)
        self.MAX_LEVELS = MAX_LEVELS
        self.root = None
        assert metric in ['gini'], 'Invalid metric'
        self.metric = metric

    def fit(self):
        self.root = self.buildTree(0, self.data)

    def get_metric(self, y):
        _, counts = np.unique(y, return_counts=True)
        total = len(y)
        if self.metric == 'gini':
            return 1 - np.sum((counts / total) ** 2)
        elif self.metric == 'entropy':
            return 0

    def split_data(self, data, feature_idx, thresh):
        return (data[data[:, feature_idx] <= thresh], data[data[:, feature_idx] > thresh])

    def buildTree(self, level, data):
        n = Node()
        if level == self.MAX_LEVELS or len(set(data[:, -1])) == 1:
            n.set_leaf(self.max_reps_class(data[:, -1]), level)
        elif self.metric == 'gini':
            best_metric = float('inf')
            left_partition = None
            right_partition = None
            best_feature = None
            best_feature_thresh = None
            for feature_idx in range(self.n_features):
                for thresh in set(self.data[:, feature_idx]):
                    left, right = self.split_data(data, feature_idx, thresh)
                    m = len(left) * self.get_metric(left[:, -1]) + len(right) * self.get_metric(right[:, -1])
                    if m < best_metric:
                        best_metric = m
                        left_partition = left
                        right_partition = right
                        best_feature = feature_idx
                        best_feature_thresh = thresh
            n.set_branch(best_feature, best_feature_thresh, best_metric, self.buildTree(level + 1, left_partition), self.buildTree(level + 1, right_partition), level)
        return n

    def max_reps_class(self, y):
        classes, counts = np.unique(y, return_counts=True)
        return classes[np.argmax(counts)]

    def predict(self, inputs, log_track=False):
        if not self.root:
            raise Exception('Tree Not Fit Yet!')
        results = []
        for inp in inputs:
            res = self.root
            fin = False
            while not fin:
                res, fin = res.traverse(inp)
            results.append(int(res))
        return results

d = DecisionTree(x, y, MAX_DEPTH, METRIC)
d.fit()
m = DecisionTreeClassifier(max_depth=MAX_DEPTH, criterion=METRIC)
m.fit(x, y)
acc(y, d.predict(x))
acc(y, m.predict(x))
print(acc(y, m.predict(x)) - acc(y, d.predict(x)))
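# Added numeric check (not from the original notebook): gini impurity as computed by
# get_metric above, on a toy label vector with two balanced classes.
_labels = np.array([0, 0, 1, 1])
_, _counts = np.unique(_labels, return_counts=True)
print(1 - np.sum((_counts / len(_labels)) ** 2))  # 0.5, the two-class maximum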
code
105194300/cell_26
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import seaborn as sns
NUM_FEATURES = 10
NUM_CLASSES = 3
NUM_POINTS = 100
MAX_DEPTH = 5
METRIC = 'gini'

def plotDists(y):
    return

def acc(true, pred):
    assert len(true) == len(pred), 'Truth and Pred Lengths not same'
    true = np.array(true)
    pred = np.array(pred).astype(np.int32)
    return np.sum(true == pred) / len(true)

class Node:

    def __init__(self):
        self.left = None
        self.right = None
        self.feature_id = None
        self.feature_thresh = None
        self.is_leaf = False
        self.result = None
        self.metric = 0
        self.level = -1

    def set_leaf(self, result, level):
        self.is_leaf = True
        self.result = result
        self.level = level

    def set_branch(self, best_feature, best_feature_thresh, best_metric, left, right, level):
        self.feature_id = best_feature
        self.feature_thresh = best_feature_thresh
        self.metric = best_metric
        self.left = left
        self.right = right
        self.level = level

    def traverse(self, inp):
        if self.is_leaf:
            return (self.result, True)
        if inp[self.feature_id] <= self.feature_thresh:
            return (self.left, False)
        else:
            return (self.right, False)

    def __repr__(self):
        s = ''
        s += f'Level : {self.level}\n'
        s += f'Leaf : {self.is_leaf}\n'
        if self.is_leaf:
            s += f'Result : {self.result}\n'
        else:
            s += f'Feature : {self.feature_id}\n'
            s += f'Thresh : {self.feature_thresh}\n'
        return s

class DecisionTree:

    def __init__(self, x, y, MAX_LEVELS=5, metric='gini'):
        self.x = np.array(x)
        self.y = np.array(y)
        self.data = np.concatenate((self.x, self.y.reshape(-1, 1)), axis=1)
        self.n_features = self.x.shape[1]
        self.classes = set(y)
        self.n_classes = len(self.classes)
        self.MAX_LEVELS = MAX_LEVELS
        self.root = None
        assert metric in ['gini'], 'Invalid metric'
        self.metric = metric

    def fit(self):
        self.root = self.buildTree(0, self.data)

    def get_metric(self, y):
        _, counts = np.unique(y, return_counts=True)
        total = len(y)
        if self.metric == 'gini':
            return 1 - np.sum((counts / total) ** 2)
        elif self.metric == 'entropy':
            return 0

    def split_data(self, data, feature_idx, thresh):
        return (data[data[:, feature_idx] <= thresh], data[data[:, feature_idx] > thresh])

    def buildTree(self, level, data):
        n = Node()
        if level == self.MAX_LEVELS or len(set(data[:, -1])) == 1:
            n.set_leaf(self.max_reps_class(data[:, -1]), level)
        elif self.metric == 'gini':
            best_metric = float('inf')
            left_partition = None
            right_partition = None
            best_feature = None
            best_feature_thresh = None
            for feature_idx in range(self.n_features):
                for thresh in set(self.data[:, feature_idx]):
                    left, right = self.split_data(data, feature_idx, thresh)
                    m = len(left) * self.get_metric(left[:, -1]) + len(right) * self.get_metric(right[:, -1])
                    if m < best_metric:
                        best_metric = m
                        left_partition = left
                        right_partition = right
                        best_feature = feature_idx
                        best_feature_thresh = thresh
            n.set_branch(best_feature, best_feature_thresh, best_metric, self.buildTree(level + 1, left_partition), self.buildTree(level + 1, right_partition), level)
        return n

    def max_reps_class(self, y):
        classes, counts = np.unique(y, return_counts=True)
        return classes[np.argmax(counts)]

    def predict(self, inputs, log_track=False):
        if not self.root:
            raise Exception('Tree Not Fit Yet!')
        results = []
        for inp in inputs:
            res = self.root
            fin = False
            while not fin:
                res, fin = res.traverse(inp)
            results.append(int(res))
        return results

d = DecisionTree(x, y, MAX_DEPTH, METRIC)
d.fit()
m = DecisionTreeClassifier(max_depth=MAX_DEPTH, criterion=METRIC)
m.fit(x, y)
acc(y, d.predict(x))
acc(y, m.predict(x))
d.predict([x[1]], True)
code
105194300/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import seaborn as sns
NUM_FEATURES = 10
NUM_CLASSES = 3
NUM_POINTS = 100
MAX_DEPTH = 5
METRIC = 'gini'

def plotDists(y):
    return

def acc(true, pred):
    assert len(true) == len(pred), 'Truth and Pred Lengths not same'
    true = np.array(true)
    pred = np.array(pred).astype(np.int32)
    return np.sum(true == pred) / len(true)

class Node:

    def __init__(self):
        self.left = None
        self.right = None
        self.feature_id = None
        self.feature_thresh = None
        self.is_leaf = False
        self.result = None
        self.metric = 0
        self.level = -1

    def set_leaf(self, result, level):
        self.is_leaf = True
        self.result = result
        self.level = level

    def set_branch(self, best_feature, best_feature_thresh, best_metric, left, right, level):
        self.feature_id = best_feature
        self.feature_thresh = best_feature_thresh
        self.metric = best_metric
        self.left = left
        self.right = right
        self.level = level

    def traverse(self, inp):
        if self.is_leaf:
            return (self.result, True)
        if inp[self.feature_id] <= self.feature_thresh:
            return (self.left, False)
        else:
            return (self.right, False)

    def __repr__(self):
        s = ''
        s += f'Level : {self.level}\n'
        s += f'Leaf : {self.is_leaf}\n'
        if self.is_leaf:
            s += f'Result : {self.result}\n'
        else:
            s += f'Feature : {self.feature_id}\n'
            s += f'Thresh : {self.feature_thresh}\n'
        return s

class DecisionTree:

    def __init__(self, x, y, MAX_LEVELS=5, metric='gini'):
        self.x = np.array(x)
        self.y = np.array(y)
        self.data = np.concatenate((self.x, self.y.reshape(-1, 1)), axis=1)
        self.n_features = self.x.shape[1]
        self.classes = set(y)
        self.n_classes = len(self.classes)
        self.MAX_LEVELS = MAX_LEVELS
        self.root = None
        assert metric in ['gini'], 'Invalid metric'
        self.metric = metric

    def fit(self):
        self.root = self.buildTree(0, self.data)

    def get_metric(self, y):
        _, counts = np.unique(y, return_counts=True)
        total = len(y)
        if self.metric == 'gini':
            return 1 - np.sum((counts / total) ** 2)
        elif self.metric == 'entropy':
            return 0

    def split_data(self, data, feature_idx, thresh):
        return (data[data[:, feature_idx] <= thresh], data[data[:, feature_idx] > thresh])

    def buildTree(self, level, data):
        n = Node()
        if level == self.MAX_LEVELS or len(set(data[:, -1])) == 1:
            n.set_leaf(self.max_reps_class(data[:, -1]), level)
        elif self.metric == 'gini':
            best_metric = float('inf')
            left_partition = None
            right_partition = None
            best_feature = None
            best_feature_thresh = None
            for feature_idx in range(self.n_features):
                for thresh in set(self.data[:, feature_idx]):
                    left, right = self.split_data(data, feature_idx, thresh)
                    m = len(left) * self.get_metric(left[:, -1]) + len(right) * self.get_metric(right[:, -1])
                    if m < best_metric:
                        best_metric = m
                        left_partition = left
                        right_partition = right
                        best_feature = feature_idx
                        best_feature_thresh = thresh
            n.set_branch(best_feature, best_feature_thresh, best_metric, self.buildTree(level + 1, left_partition), self.buildTree(level + 1, right_partition), level)
        return n

    def max_reps_class(self, y):
        classes, counts = np.unique(y, return_counts=True)
        return classes[np.argmax(counts)]

    def predict(self, inputs, log_track=False):
        if not self.root:
            raise Exception('Tree Not Fit Yet!')
        results = []
        for inp in inputs:
            res = self.root
            fin = False
            while not fin:
                res, fin = res.traverse(inp)
            results.append(int(res))
        return results

m = DecisionTreeClassifier(max_depth=MAX_DEPTH, criterion=METRIC)
m.fit(x, y)
plotDists(m.predict(x))
code
105194300/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import seaborn as sns
NUM_FEATURES = 10
NUM_CLASSES = 3
NUM_POINTS = 100
MAX_DEPTH = 5
METRIC = 'gini'

def plotDists(y):
    return

def acc(true, pred):
    assert len(true) == len(pred), 'Truth and Pred Lengths not same'
    true = np.array(true)
    pred = np.array(pred).astype(np.int32)
    return np.sum(true == pred) / len(true)

class Node:

    def __init__(self):
        self.left = None
        self.right = None
        self.feature_id = None
        self.feature_thresh = None
        self.is_leaf = False
        self.result = None
        self.metric = 0
        self.level = -1

    def set_leaf(self, result, level):
        self.is_leaf = True
        self.result = result
        self.level = level

    def set_branch(self, best_feature, best_feature_thresh, best_metric, left, right, level):
        self.feature_id = best_feature
        self.feature_thresh = best_feature_thresh
        self.metric = best_metric
        self.left = left
        self.right = right
        self.level = level

    def traverse(self, inp):
        if self.is_leaf:
            return (self.result, True)
        if inp[self.feature_id] <= self.feature_thresh:
            return (self.left, False)
        else:
            return (self.right, False)

    def __repr__(self):
        s = ''
        s += f'Level : {self.level}\n'
        s += f'Leaf : {self.is_leaf}\n'
        if self.is_leaf:
            s += f'Result : {self.result}\n'
        else:
            s += f'Feature : {self.feature_id}\n'
            s += f'Thresh : {self.feature_thresh}\n'
        return s

class DecisionTree:

    def __init__(self, x, y, MAX_LEVELS=5, metric='gini'):
        self.x = np.array(x)
        self.y = np.array(y)
        self.data = np.concatenate((self.x, self.y.reshape(-1, 1)), axis=1)
        self.n_features = self.x.shape[1]
        self.classes = set(y)
        self.n_classes = len(self.classes)
        self.MAX_LEVELS = MAX_LEVELS
        self.root = None
        assert metric in ['gini'], 'Invalid metric'
        self.metric = metric

    def fit(self):
        self.root = self.buildTree(0, self.data)

    def get_metric(self, y):
        _, counts = np.unique(y, return_counts=True)
        total = len(y)
        if self.metric == 'gini':
            return 1 - np.sum((counts / total) ** 2)
        elif self.metric == 'entropy':
            return 0

    def split_data(self, data, feature_idx, thresh):
        return (data[data[:, feature_idx] <= thresh], data[data[:, feature_idx] > thresh])

    def buildTree(self, level, data):
        n = Node()
        if level == self.MAX_LEVELS or len(set(data[:, -1])) == 1:
            n.set_leaf(self.max_reps_class(data[:, -1]), level)
        elif self.metric == 'gini':
            best_metric = float('inf')
            left_partition = None
            right_partition = None
            best_feature = None
            best_feature_thresh = None
            for feature_idx in range(self.n_features):
                for thresh in set(self.data[:, feature_idx]):
                    left, right = self.split_data(data, feature_idx, thresh)
                    m = len(left) * self.get_metric(left[:, -1]) + len(right) * self.get_metric(right[:, -1])
                    if m < best_metric:
                        best_metric = m
                        left_partition = left
                        right_partition = right
                        best_feature = feature_idx
                        best_feature_thresh = thresh
            n.set_branch(best_feature, best_feature_thresh, best_metric, self.buildTree(level + 1, left_partition), self.buildTree(level + 1, right_partition), level)
        return n

    def max_reps_class(self, y):
        classes, counts = np.unique(y, return_counts=True)
        return classes[np.argmax(counts)]

    def predict(self, inputs, log_track=False):
        if not self.root:
            raise Exception('Tree Not Fit Yet!')
        results = []
        for inp in inputs:
            res = self.root
            fin = False
            while not fin:
                res, fin = res.traverse(inp)
            results.append(int(res))
        return results

m = DecisionTreeClassifier(max_depth=MAX_DEPTH, criterion=METRIC)
m.fit(x, y)
code
105194300/cell_28
[ "text_plain_output_1.png" ]
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
NUM_FEATURES = 10
NUM_CLASSES = 3
NUM_POINTS = 100
MAX_DEPTH = 5
METRIC = 'gini'

def plotDists(y):
    return

def acc(true, pred):
    assert len(true) == len(pred), 'Truth and Pred Lengths not same'
    true = np.array(true)
    pred = np.array(pred).astype(np.int32)
    return np.sum(true == pred) / len(true)

class Node:

    def __init__(self):
        self.left = None
        self.right = None
        self.feature_id = None
        self.feature_thresh = None
        self.is_leaf = False
        self.result = None
        self.metric = 0
        self.level = -1

    def set_leaf(self, result, level):
        self.is_leaf = True
        self.result = result
        self.level = level

    def set_branch(self, best_feature, best_feature_thresh, best_metric, left, right, level):
        self.feature_id = best_feature
        self.feature_thresh = best_feature_thresh
        self.metric = best_metric
        self.left = left
        self.right = right
        self.level = level

    def traverse(self, inp):
        if self.is_leaf:
            return (self.result, True)
        if inp[self.feature_id] <= self.feature_thresh:
            return (self.left, False)
        else:
            return (self.right, False)

    def __repr__(self):
        s = ''
        s += f'Level : {self.level}\n'
        s += f'Leaf : {self.is_leaf}\n'
        if self.is_leaf:
            s += f'Result : {self.result}\n'
        else:
            s += f'Feature : {self.feature_id}\n'
            s += f'Thresh : {self.feature_thresh}\n'
        return s

class DecisionTree:

    def __init__(self, x, y, MAX_LEVELS=5, metric='gini'):
        self.x = np.array(x)
        self.y = np.array(y)
        self.data = np.concatenate((self.x, self.y.reshape(-1, 1)), axis=1)
        self.n_features = self.x.shape[1]
        self.classes = set(y)
        self.n_classes = len(self.classes)
        self.MAX_LEVELS = MAX_LEVELS
        self.root = None
        assert metric in ['gini'], 'Invalid metric'
        self.metric = metric

    def fit(self):
        self.root = self.buildTree(0, self.data)

    def get_metric(self, y):
        _, counts = np.unique(y, return_counts=True)
        total = len(y)
        if self.metric == 'gini':
            return 1 - np.sum((counts / total) ** 2)
        elif self.metric == 'entropy':
            return 0

    def split_data(self, data, feature_idx, thresh):
        return (data[data[:, feature_idx] <= thresh], data[data[:, feature_idx] > thresh])

    def buildTree(self, level, data):
        n = Node()
        if level == self.MAX_LEVELS or len(set(data[:, -1])) == 1:
            n.set_leaf(self.max_reps_class(data[:, -1]), level)
        elif self.metric == 'gini':
            best_metric = float('inf')
            left_partition = None
            right_partition = None
            best_feature = None
            best_feature_thresh = None
            for feature_idx in range(self.n_features):
                for thresh in set(self.data[:, feature_idx]):
                    left, right = self.split_data(data, feature_idx, thresh)
                    m = len(left) * self.get_metric(left[:, -1]) + len(right) * self.get_metric(right[:, -1])
                    if m < best_metric:
                        best_metric = m
                        left_partition = left
                        right_partition = right
                        best_feature = feature_idx
                        best_feature_thresh = thresh
            n.set_branch(best_feature, best_feature_thresh, best_metric, self.buildTree(level + 1, left_partition), self.buildTree(level + 1, right_partition), level)
        return n

    def max_reps_class(self, y):
        classes, counts = np.unique(y, return_counts=True)
        return classes[np.argmax(counts)]

    def predict(self, inputs, log_track=False):
        if not self.root:
            raise Exception('Tree Not Fit Yet!')
        results = []
        for inp in inputs:
            res = self.root
            fin = False
            while not fin:
                res, fin = res.traverse(inp)
            results.append(int(res))
        return results

d = DecisionTree(x, y, MAX_DEPTH, METRIC)
d.fit()
m = DecisionTreeClassifier(max_depth=MAX_DEPTH, criterion=METRIC)
m.fit(x, y)
acc(y, d.predict(x))
acc(y, m.predict(x))
fig, ax = plt.subplots(figsize=(20, 16))
_ = tree.plot_tree(m, ax=ax)
code
105194300/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.datasets import make_classification
import pandas as pd
NUM_FEATURES = 10
NUM_CLASSES = 3
NUM_POINTS = 100
MAX_DEPTH = 5
METRIC = 'gini'
x, y = make_classification(NUM_POINTS, NUM_FEATURES, n_informative=NUM_FEATURES // 2, n_classes=NUM_CLASSES)
df = pd.DataFrame(x)
df['y'] = y
df.head()
code
105194300/cell_17
[ "text_html_output_1.png" ]
import numpy as np
import seaborn as sns
NUM_FEATURES = 10
NUM_CLASSES = 3
NUM_POINTS = 100
MAX_DEPTH = 5
METRIC = 'gini'

def plotDists(y):
    return

def acc(true, pred):
    assert len(true) == len(pred), 'Truth and Pred Lengths not same'
    true = np.array(true)
    pred = np.array(pred).astype(np.int32)
    return np.sum(true == pred) / len(true)

d = DecisionTree(x, y, MAX_DEPTH, METRIC)
d.fit()
plotDists(d.predict(x))
code
105194300/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import seaborn as sns
NUM_FEATURES = 10
NUM_CLASSES = 3
NUM_POINTS = 100
MAX_DEPTH = 5
METRIC = 'gini'

def plotDists(y):
    return

def acc(true, pred):
    assert len(true) == len(pred), 'Truth and Pred Lengths not same'
    true = np.array(true)
    pred = np.array(pred).astype(np.int32)
    return np.sum(true == pred) / len(true)

class Node:

    def __init__(self):
        self.left = None
        self.right = None
        self.feature_id = None
        self.feature_thresh = None
        self.is_leaf = False
        self.result = None
        self.metric = 0
        self.level = -1

    def set_leaf(self, result, level):
        self.is_leaf = True
        self.result = result
        self.level = level

    def set_branch(self, best_feature, best_feature_thresh, best_metric, left, right, level):
        self.feature_id = best_feature
        self.feature_thresh = best_feature_thresh
        self.metric = best_metric
        self.left = left
        self.right = right
        self.level = level

    def traverse(self, inp):
        if self.is_leaf:
            return (self.result, True)
        if inp[self.feature_id] <= self.feature_thresh:
            return (self.left, False)
        else:
            return (self.right, False)

    def __repr__(self):
        s = ''
        s += f'Level : {self.level}\n'
        s += f'Leaf : {self.is_leaf}\n'
        if self.is_leaf:
            s += f'Result : {self.result}\n'
        else:
            s += f'Feature : {self.feature_id}\n'
            s += f'Thresh : {self.feature_thresh}\n'
        return s

class DecisionTree:

    def __init__(self, x, y, MAX_LEVELS=5, metric='gini'):
        self.x = np.array(x)
        self.y = np.array(y)
        self.data = np.concatenate((self.x, self.y.reshape(-1, 1)), axis=1)
        self.n_features = self.x.shape[1]
        self.classes = set(y)
        self.n_classes = len(self.classes)
        self.MAX_LEVELS = MAX_LEVELS
        self.root = None
        assert metric in ['gini'], 'Invalid metric'
        self.metric = metric

    def fit(self):
        self.root = self.buildTree(0, self.data)

    def get_metric(self, y):
        _, counts = np.unique(y, return_counts=True)
        total = len(y)
        if self.metric == 'gini':
            return 1 - np.sum((counts / total) ** 2)
        elif self.metric == 'entropy':
            return 0

    def split_data(self, data, feature_idx, thresh):
        return (data[data[:, feature_idx] <= thresh], data[data[:, feature_idx] > thresh])

    def buildTree(self, level, data):
        n = Node()
        if level == self.MAX_LEVELS or len(set(data[:, -1])) == 1:
            n.set_leaf(self.max_reps_class(data[:, -1]), level)
        elif self.metric == 'gini':
            best_metric = float('inf')
            left_partition = None
            right_partition = None
            best_feature = None
            best_feature_thresh = None
            for feature_idx in range(self.n_features):
                for thresh in set(self.data[:, feature_idx]):
                    left, right = self.split_data(data, feature_idx, thresh)
                    m = len(left) * self.get_metric(left[:, -1]) + len(right) * self.get_metric(right[:, -1])
                    if m < best_metric:
                        best_metric = m
                        left_partition = left
                        right_partition = right
                        best_feature = feature_idx
                        best_feature_thresh = thresh
            n.set_branch(best_feature, best_feature_thresh, best_metric, self.buildTree(level + 1, left_partition), self.buildTree(level + 1, right_partition), level)
        return n

    def max_reps_class(self, y):
        classes, counts = np.unique(y, return_counts=True)
        return classes[np.argmax(counts)]

    def predict(self, inputs, log_track=False):
        if not self.root:
            raise Exception('Tree Not Fit Yet!')
        results = []
        for inp in inputs:
            res = self.root
            fin = False
            while not fin:
                res, fin = res.traverse(inp)
            results.append(int(res))
        return results

m = DecisionTreeClassifier(max_depth=MAX_DEPTH, criterion=METRIC)
m.fit(x, y)
acc(y, m.predict(x))
code
16115331/cell_4
[ "text_plain_output_1.png" ]
from collections import Counter
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
import os
import nltk
from nltk import pos_tag, RegexpParser
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
from collections import Counter

def word_sentence_tokenize(text):
    sentence_tokenizer = PunktSentenceTokenizer(text)
    sentence_tokenized = sentence_tokenizer.tokenize(text)
    word_tokenized = list()
    for tokenized_sentence in sentence_tokenized:
        word_tokenized.append(word_tokenize(tokenized_sentence))
    return word_tokenized

def np_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'NP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

def vp_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'VP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

text = open('../input/the_wizard_of_oz.txt', encoding='utf-8').read().lower()
sentence_tokenizer = PunktSentenceTokenizer(text)
sentence_tokenized = sentence_tokenizer.tokenize(text)
word_tokenized = list()
for sentence in sentence_tokenized:
    word_tokenized.append(word_tokenize(sentence))
print(word_tokenized[10])
print(len(word_tokenized))
code
16115331/cell_6
[ "text_plain_output_1.png" ]
from collections import Counter
from nltk import pos_tag, RegexpParser
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
import os
import nltk
from nltk import pos_tag, RegexpParser
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
from collections import Counter

def word_sentence_tokenize(text):
    sentence_tokenizer = PunktSentenceTokenizer(text)
    sentence_tokenized = sentence_tokenizer.tokenize(text)
    word_tokenized = list()
    for tokenized_sentence in sentence_tokenized:
        word_tokenized.append(word_tokenize(tokenized_sentence))
    return word_tokenized

def np_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'NP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

def vp_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'VP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

text = open('../input/the_wizard_of_oz.txt', encoding='utf-8').read().lower()
sentence_tokenizer = PunktSentenceTokenizer(text)
sentence_tokenized = sentence_tokenizer.tokenize(text)
word_tokenized = list()
for sentence in sentence_tokenized:
    word_tokenized.append(word_tokenize(sentence))
pos_tagged_text = list()
for sentence in word_tokenized:
    pos_tagged_text.append(pos_tag(sentence))
chunk_grammar = 'NP: {<DT>?<JJ>*<NN>}'
vp_chunk_grammar = 'VP: {<VB.*><DT>?<JJ>*<NN><RB.?>?}'
chunk_parser = RegexpParser(chunk_grammar)
vp_chunk_parser = RegexpParser(vp_chunk_grammar)
chunked_sentence = chunk_parser.parse(pos_tagged_text[10])
print(chunked_sentence)
vp_chunked_sentence = vp_chunk_parser.parse(pos_tagged_text[10])
print(vp_chunked_sentence)
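# Added toy example (not from the original notebook): what the NP grammar above matches
# on a hand-tagged sentence -- an optional determiner, any adjectives, then a noun.
_toy_tagged = [('the', 'DT'), ('little', 'JJ'), ('dog', 'NN'), ('barked', 'VBD')]
print(chunk_parser.parse(_toy_tagged))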
code
16115331/cell_7
[ "text_plain_output_1.png" ]
from collections import Counter
from nltk import pos_tag, RegexpParser
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
import os
import nltk
from nltk import pos_tag, RegexpParser
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
from collections import Counter

def word_sentence_tokenize(text):
    sentence_tokenizer = PunktSentenceTokenizer(text)
    sentence_tokenized = sentence_tokenizer.tokenize(text)
    word_tokenized = list()
    for tokenized_sentence in sentence_tokenized:
        word_tokenized.append(word_tokenize(tokenized_sentence))
    return word_tokenized

def np_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'NP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

def vp_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'VP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

text = open('../input/the_wizard_of_oz.txt', encoding='utf-8').read().lower()
sentence_tokenizer = PunktSentenceTokenizer(text)
sentence_tokenized = sentence_tokenizer.tokenize(text)
word_tokenized = list()
for sentence in sentence_tokenized:
    word_tokenized.append(word_tokenize(sentence))
pos_tagged_text = list()
for sentence in word_tokenized:
    pos_tagged_text.append(pos_tag(sentence))
chunk_grammar = 'NP: {<DT>?<JJ>*<NN>}'
vp_chunk_grammar = 'VP: {<VB.*><DT>?<JJ>*<NN><RB.?>?}'
chunk_parser = RegexpParser(chunk_grammar)
vp_chunk_parser = RegexpParser(vp_chunk_grammar)
chunked_sentence = chunk_parser.parse(pos_tagged_text[10])
vp_chunked_sentence = vp_chunk_parser.parse(pos_tagged_text[10])
np_chunked_sentences = list()
vp_chunked_sentences = list()
for sentence in pos_tagged_text:
    np_chunked_sentences.append(chunk_parser.parse(sentence))
    vp_chunked_sentences.append(vp_chunk_parser.parse(sentence))
print(np_chunked_sentences[222])
print(vp_chunked_sentences[222])
code
16115331/cell_8
[ "text_plain_output_1.png" ]
from collections import Counter
from nltk import pos_tag, RegexpParser
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
import os
import nltk
from nltk import pos_tag, RegexpParser
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
from collections import Counter

def word_sentence_tokenize(text):
    sentence_tokenizer = PunktSentenceTokenizer(text)
    sentence_tokenized = sentence_tokenizer.tokenize(text)
    word_tokenized = list()
    for tokenized_sentence in sentence_tokenized:
        word_tokenized.append(word_tokenize(tokenized_sentence))
    return word_tokenized

def np_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'NP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

def vp_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'VP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

text = open('../input/the_wizard_of_oz.txt', encoding='utf-8').read().lower()
sentence_tokenizer = PunktSentenceTokenizer(text)
sentence_tokenized = sentence_tokenizer.tokenize(text)
word_tokenized = list()
for sentence in sentence_tokenized:
    word_tokenized.append(word_tokenize(sentence))
pos_tagged_text = list()
for sentence in word_tokenized:
    pos_tagged_text.append(pos_tag(sentence))
chunk_grammar = 'NP: {<DT>?<JJ>*<NN>}'
vp_chunk_grammar = 'VP: {<VB.*><DT>?<JJ>*<NN><RB.?>?}'
chunk_parser = RegexpParser(chunk_grammar)
vp_chunk_parser = RegexpParser(vp_chunk_grammar)
chunked_sentence = chunk_parser.parse(pos_tagged_text[10])
vp_chunked_sentence = vp_chunk_parser.parse(pos_tagged_text[10])
np_chunked_sentences = list()
vp_chunked_sentences = list()
for sentence in pos_tagged_text:
    np_chunked_sentences.append(chunk_parser.parse(sentence))
    vp_chunked_sentences.append(vp_chunk_parser.parse(sentence))
most_common_np_chunks = np_chunk_counter(np_chunked_sentences)
print('NP chunks')
print(most_common_np_chunks)
most_common_vp_chunks = vp_chunk_counter(vp_chunked_sentences)
print('VP chunks')
print(most_common_vp_chunks)
code
16115331/cell_3
[ "text_plain_output_1.png" ]
from collections import Counter
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
import os
import nltk
from nltk import pos_tag, RegexpParser
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
from collections import Counter

def word_sentence_tokenize(text):
    sentence_tokenizer = PunktSentenceTokenizer(text)
    sentence_tokenized = sentence_tokenizer.tokenize(text)
    word_tokenized = list()
    for tokenized_sentence in sentence_tokenized:
        word_tokenized.append(word_tokenize(tokenized_sentence))
    return word_tokenized

def np_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'NP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

def vp_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'VP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

text = open('../input/the_wizard_of_oz.txt', encoding='utf-8').read().lower()
sentence_tokenizer = PunktSentenceTokenizer(text)
sentence_tokenized = sentence_tokenizer.tokenize(text)
print(sentence_tokenized[10])
print(len(sentence_tokenized))
code
16115331/cell_5
[ "text_plain_output_1.png" ]
from collections import Counter
from nltk import pos_tag, RegexpParser
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
import os
import nltk
from nltk import pos_tag, RegexpParser
from nltk.tokenize import PunktSentenceTokenizer, word_tokenize
from collections import Counter

def word_sentence_tokenize(text):
    sentence_tokenizer = PunktSentenceTokenizer(text)
    sentence_tokenized = sentence_tokenizer.tokenize(text)
    word_tokenized = list()
    for tokenized_sentence in sentence_tokenized:
        word_tokenized.append(word_tokenize(tokenized_sentence))
    return word_tokenized

def np_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'NP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

def vp_chunk_counter(chunked_sentences):
    chunks = list()
    for chunked_sentence in chunked_sentences:
        for subtree in chunked_sentence.subtrees(filter=lambda t: t.label() == 'VP'):
            chunks.append(tuple(subtree))
    chunk_counter = Counter()
    for chunk in chunks:
        chunk_counter[chunk] += 1
    return chunk_counter.most_common(30)

text = open('../input/the_wizard_of_oz.txt', encoding='utf-8').read().lower()
sentence_tokenizer = PunktSentenceTokenizer(text)
sentence_tokenized = sentence_tokenizer.tokenize(text)
word_tokenized = list()
for sentence in sentence_tokenized:
    word_tokenized.append(word_tokenize(sentence))
pos_tagged_text = list()
for sentence in word_tokenized:
    pos_tagged_text.append(pos_tag(sentence))
print(pos_tagged_text[10])
code
2002501/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
features = ['Pclass', 'Age', 'Sex', 'Cabin', 'Embarked']
x = df[features].copy()
del x['Cabin']

def replace(x):
    Sex = x['Sex']
    if Sex in ['female']:
        return 0
    else:
        return 1

x['Sex'] = x.apply(replace, axis=1)

def replace_1(x):
    Embarked = x['Embarked']
    if Embarked in ['E']:
        return 0
    elif Embarked in ['C']:
        return 1
    else:
        return 2

x['Embarked'] = x.apply(replace_1, axis=1)
test = pd.read_csv('../input/test.csv')

def replace(x):
    Sex = x['Sex']
    if Sex in ['female']:
        return 0
    else:
        return 1

test['Sex'] = test.apply(replace, axis=1)

def replace_1(x):
    Embarked = x['Embarked']
    if Embarked in ['E']:
        return 0
    elif Embarked in ['C']:
        return 1
    else:
        return 2

test['Embarked'] = test.apply(replace_1, axis=1)
test.dropna()
features = ['Pclass', 'Sex', 'Age', 'Embarked']
test = test[features]
test.isnull().any()
test = test.dropna()
test = test.dropna()
test.head()
code
2002501/cell_13
[ "text_html_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
outcome = df[['Survived']].copy()
features = ['Pclass', 'Age', 'Sex', 'Cabin', 'Embarked']
x = df[features].copy()
del x['Cabin']

def replace(x):
    Sex = x['Sex']
    if Sex in ['female']:
        return 0
    else:
        return 1

x['Sex'] = x.apply(replace, axis=1)

def replace_1(x):
    Embarked = x['Embarked']
    if Embarked in ['E']:
        return 0
    elif Embarked in ['C']:
        return 1
    else:
        return 2

x['Embarked'] = x.apply(replace_1, axis=1)
result = DecisionTreeClassifier(max_leaf_nodes=10, random_state=0)
result.fit(x, outcome)
code
2002501/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
code
2002501/cell_23
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
outcome = df[['Survived']].copy()
outcome.shape
code
2002501/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
code
2002501/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
df.shape
d = df[['PassengerId']].copy()
d.shape
code
2002501/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
features = ['Pclass', 'Age', 'Sex', 'Cabin', 'Embarked']
x = df[features].copy()
del x['Cabin']

def replace(x):
    Sex = x['Sex']
    if Sex in ['female']:
        return 0
    else:
        return 1

x['Sex'] = x.apply(replace, axis=1)

def replace_1(x):
    Embarked = x['Embarked']
    if Embarked in ['E']:
        return 0
    elif Embarked in ['C']:
        return 1
    else:
        return 2

x['Embarked'] = x.apply(replace_1, axis=1)
test = pd.read_csv('../input/test.csv')

def replace(x):
    Sex = x['Sex']
    if Sex in ['female']:
        return 0
    else:
        return 1

test['Sex'] = test.apply(replace, axis=1)

def replace_1(x):
    Embarked = x['Embarked']
    if Embarked in ['E']:
        return 0
    elif Embarked in ['C']:
        return 1
    else:
        return 2

test['Embarked'] = test.apply(replace_1, axis=1)
test.dropna()
features = ['Pclass', 'Sex', 'Age', 'Embarked']
test = test[features]
test.isnull().any()
code
2002501/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
features = ['Pclass', 'Age', 'Sex', 'Cabin', 'Embarked']
x = df[features].copy()
del x['Cabin']

def replace(x):
    Sex = x['Sex']
    if Sex in ['female']:
        return 0
    else:
        return 1

x['Sex'] = x.apply(replace, axis=1)

def replace_1(x):
    Embarked = x['Embarked']
    if Embarked in ['E']:
        return 0
    elif Embarked in ['C']:
        return 1
    else:
        return 2

x['Embarked'] = x.apply(replace_1, axis=1)
test = pd.read_csv('../input/test.csv')

def replace(x):
    Sex = x['Sex']
    if Sex in ['female']:
        return 0
    else:
        return 1

test['Sex'] = test.apply(replace, axis=1)

def replace_1(x):
    Embarked = x['Embarked']
    if Embarked in ['E']:
        return 0
    elif Embarked in ['C']:
        return 1
    else:
        return 2

test['Embarked'] = test.apply(replace_1, axis=1)
test.dropna()
test.head()
code
2002501/cell_28
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
df.shape
d = df[['PassengerId']].copy()
d.shape
d.head()
code
2002501/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
outcome = df[['Survived']].copy()
outcome
code
2002501/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.head()
code
2002501/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.head()
code
2002501/cell_24
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
df.shape
code
2002501/cell_22
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.shape
outcome = df[['Survived']].copy()
features = ['Pclass', 'Age', 'Sex', 'Cabin', 'Embarked']
x = df[features].copy()
del x['Cabin']

def replace(x):
    Sex = x['Sex']
    if Sex in ['female']:
        return 0
    else:
        return 1

x['Sex'] = x.apply(replace, axis=1)

def replace_1(x):
    Embarked = x['Embarked']
    if Embarked in ['E']:
        return 0
    elif Embarked in ['C']:
        return 1
    else:
        return 2

x['Embarked'] = x.apply(replace_1, axis=1)
result = DecisionTreeClassifier(max_leaf_nodes=10, random_state=0)
result.fit(x, outcome)
test = pd.read_csv('../input/test.csv')

def replace(x):
    Sex = x['Sex']
    if Sex in ['female']:
        return 0
    else:
        return 1

test['Sex'] = test.apply(replace, axis=1)

def replace_1(x):
    Embarked = x['Embarked']
    if Embarked in ['E']:
        return 0
    elif Embarked in ['C']:
        return 1
    else:
        return 2

test['Embarked'] = test.apply(replace_1, axis=1)
test.dropna()
features = ['Pclass', 'Sex', 'Age', 'Embarked']
test = test[features]
test.isnull().any()
test = test.dropna()
test = test.dropna()
outcomes = result.predict(test)
outcomes
code
2002501/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/train.csv')
df.isnull().any()
df = df.dropna()
df.head()
code
72100024/cell_21
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
train.drop(columns=['Id'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
train.drop(columns=['PoolQC', 'MiscFeature', 'Alley', 'Fence'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
newList = list(missing_cols.index)
newList.append('SalePrice')
train[newList].corr()
cols_to_be_removed = ['LotFrontage', 'GarageYrBlt', 'MasVnrArea']
train.drop(columns=cols_to_be_removed, inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
print(f'Columns with missing values: {len(missing_cols)}')
missing_cols.sort_values(ascending=False)
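# Added hedged aside (not from the original cell): the same counts as a share of rows,
# which can make drop-versus-impute decisions easier to read.
print((missing_cols / len(train) * 100).round(1).sort_values(ascending=False))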
code
72100024/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
train.info()
code
72100024/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
train.drop(columns=['Id'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
print('No. of columns with missing values:', len(missing_cols))
missing_cols.sort_values(ascending=False)
code
72100024/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
train.drop(columns=['Id'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
train.drop(columns=['PoolQC', 'MiscFeature', 'Alley', 'Fence'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
train.info()
code
72100024/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
train.drop(columns=['Id'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
train.drop(columns=['PoolQC', 'MiscFeature', 'Alley', 'Fence'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
newList = list(missing_cols.index)
newList.append('SalePrice')
train[newList].corr()
cols_to_be_removed = ['LotFrontage', 'GarageYrBlt', 'MasVnrArea']
train.drop(columns=cols_to_be_removed, inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
print(f'No. of columns with missing values: {len(missing_cols)}')
missing_cols.sort_values(ascending=False)
code
72100024/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

# List every file available under the Kaggle input directory
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72100024/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
train.drop(columns=['Id'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
train.drop(columns=['PoolQC', 'MiscFeature', 'Alley', 'Fence'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
newList = list(missing_cols.index)
newList.append('SalePrice')
train[newList].corr()
cols_to_be_removed = ['LotFrontage', 'GarageYrBlt', 'MasVnrArea']
train.drop(columns=cols_to_be_removed, inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
ordinal_cols = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC', 'KitchenQual', 'GarageQual', 'GarageCond']
# Draw one bar plot of SalePrice against each ordinal quality column
for item in range(len(ordinal_cols)):
    sns.barplot(x=ordinal_cols[item], y='SalePrice', data=train)
    plt.show()
code
72100024/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
train.drop(columns=['Id'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
train.drop(columns=['PoolQC', 'MiscFeature', 'Alley', 'Fence'], inplace=True)
sns.barplot(x='FireplaceQu', y='SalePrice', data=train)
code
72100024/cell_16
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
train.drop(columns=['Id'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
train.drop(columns=['PoolQC', 'MiscFeature', 'Alley', 'Fence'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
newList = list(missing_cols.index)
newList.append('SalePrice')
train[newList].corr()
cols_to_be_removed = ['LotFrontage', 'GarageYrBlt', 'MasVnrArea']
train.drop(columns=cols_to_be_removed, inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
print('No. of columns with missing values:', len(missing_cols))
missing_cols.sort_values(ascending=False)
code
72100024/cell_3
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
code
72100024/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
train.drop(columns=['Id'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
train.drop(columns=['PoolQC', 'MiscFeature', 'Alley', 'Fence'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
newList = list(missing_cols.index)
newList.append('SalePrice')
train[newList].corr()
cols_to_be_removed = ['LotFrontage', 'GarageYrBlt', 'MasVnrArea']
train.drop(columns=cols_to_be_removed, inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
print('No. of columns with missing values:', len(missing_cols))
missing_cols.sort_values(ascending=False)
code