Dataset columns:
  path              string    (length 13-17)
  screenshot_names  sequence  (length 1-873)
  code              string    (length 0-40.4k)
  cell_type         class     (1 value: "code")
18161218/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import pandas_profiling as pp

print(os.listdir('../input'))
code
18161218/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas_profiling as pp

data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
profile = pp.ProfileReport(data)
profile.to_file('HousingSales.html')
pp.ProfileReport(data)
code
18161218/cell_18
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # needed for the boxplot below

data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
missing_list = data.columns[data.isna().any()].tolist()
data.columns[data.isna().any()].tolist()
data.shape
data_org = data
data_org.shape
data.drop(['Alley', 'MasVnrArea', 'PoolQC', 'Fence', 'MiscFeature'], inplace=True, axis=1)
data.shape
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
categorical = ['object']
# Impute numeric NaNs with the column median and categorical NaNs with a sentinel value.
for cols in list(data.select_dtypes(include=numerics).columns.values):
    data[cols] = data[cols].replace(np.nan, data[cols].median())
for cols in list(data.select_dtypes(include=categorical).columns.values):
    data[cols] = data[cols].replace(np.nan, 'Not_Available')
data.columns[data.isna().any()].tolist()
a = data.select_dtypes(include=numerics)
a.drop(['Id'], inplace=True, axis=1)
df = a.iloc[:, 2:3]
df.shape
a = data.select_dtypes(include=numerics)
df = pd.DataFrame(data=a.iloc[:, 1:2])
sns.boxplot(pd.melt(df))
code
18161218/cell_16
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
missing_list = data.columns[data.isna().any()].tolist()
data.columns[data.isna().any()].tolist()
data.shape
data_org = data
data_org.shape
data.drop(['Alley', 'MasVnrArea', 'PoolQC', 'Fence', 'MiscFeature'], inplace=True, axis=1)
data.shape
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
categorical = ['object']
for cols in list(data.select_dtypes(include=numerics).columns.values):
    data[cols] = data[cols].replace(np.nan, data[cols].median())
for cols in list(data.select_dtypes(include=categorical).columns.values):
    data[cols] = data[cols].replace(np.nan, 'Not_Available')
data.columns[data.isna().any()].tolist()
a = data.select_dtypes(include=numerics)
a.drop(['Id'], inplace=True, axis=1)
df = a.iloc[:, 2:3]
df.shape
code
18161218/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
code
18161218/cell_17
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # needed for the boxplot below
import matplotlib.pyplot as plt  # needed for plt.show()

data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
missing_list = data.columns[data.isna().any()].tolist()
data.columns[data.isna().any()].tolist()
data.shape
data_org = data
data_org.shape
data.drop(['Alley', 'MasVnrArea', 'PoolQC', 'Fence', 'MiscFeature'], inplace=True, axis=1)
data.shape
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
categorical = ['object']
for cols in list(data.select_dtypes(include=numerics).columns.values):
    data[cols] = data[cols].replace(np.nan, data[cols].median())
for cols in list(data.select_dtypes(include=categorical).columns.values):
    data[cols] = data[cols].replace(np.nan, 'Not_Available')
data.columns[data.isna().any()].tolist()
a = data.select_dtypes(include=numerics)
a.drop(['Id'], inplace=True, axis=1)
df = a.iloc[:, 2:3]
df.shape
sns.boxplot(df)
plt.show()
code
18161218/cell_14
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
missing_list = data.columns[data.isna().any()].tolist()
data.columns[data.isna().any()].tolist()
data.shape
data_org = data
data_org.shape
data.drop(['Alley', 'MasVnrArea', 'PoolQC', 'Fence', 'MiscFeature'], inplace=True, axis=1)
data.shape
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
categorical = ['object']
for cols in list(data.select_dtypes(include=numerics).columns.values):
    data[cols] = data[cols].replace(np.nan, data[cols].median())
for cols in list(data.select_dtypes(include=categorical).columns.values):
    data[cols] = data[cols].replace(np.nan, 'Not_Available')
data.columns[data.isna().any()].tolist()
code
18161218/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
missing_list = data.columns[data.isna().any()].tolist()
data.columns[data.isna().any()].tolist()
data.shape
data_org = data
data_org.shape
data.drop(['Alley', 'MasVnrArea', 'PoolQC', 'Fence', 'MiscFeature'], inplace=True, axis=1)
data.shape
code
2021553/cell_13
[ "text_html_output_1.png" ]
from pandas.io.json import json_normalize
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
Left_Troops = list(np.hstack([[x[0] for x in left_deck] for left_deck in df_cr['players.left.deck']]))
Right_Troops = list(np.hstack([[x[0] for x in right_deck] for right_deck in df_cr['players.right.deck']]))
distinct_troops = set(np.hstack([Left_Troops, Right_Troops]))
len(distinct_troops)
RightArmy_colNames = np.hstack([['right_troop_' + str(i + 1) for i in range(8)], ['right_troop_count_' + str(i + 1) for i in range(8)]])
LeftArmy_colNames = np.hstack([['left_troop_' + str(i + 1) for i in range(8)], ['left_troop_count_' + str(i + 1) for i in range(8)]])
RightArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]]) for x in df_cr['players.right.deck']], columns=RightArmy_colNames)
LeftArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]]) for x in df_cr['players.left.deck']], columns=LeftArmy_colNames)
finalCR_data = pd.concat([df_cr, LeftArmy, RightArmy], axis=1, join='inner')
finalCR_data.head()
code
2021553/cell_9
[ "text_html_output_1.png" ]
from pandas.io.json import json_normalize
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
LD = [len(left_deck) for left_deck in df_cr['players.left.deck']]
RD = [len(right_deck) for right_deck in df_cr['players.right.deck']]
(set(LD), set(RD))
code
2021553/cell_11
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
Left_Troops = list(np.hstack([[x[0] for x in left_deck] for left_deck in df_cr['players.left.deck']]))
Right_Troops = list(np.hstack([[x[0] for x in right_deck] for right_deck in df_cr['players.right.deck']]))
distinct_troops = set(np.hstack([Left_Troops, Right_Troops]))
len(distinct_troops)
code
2021553/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
2021553/cell_7
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
df_cr.head()
code
2021553/cell_16
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
Left_Troops = list(np.hstack([[x[0] for x in left_deck] for left_deck in df_cr['players.left.deck']]))
Right_Troops = list(np.hstack([[x[0] for x in right_deck] for right_deck in df_cr['players.right.deck']]))
distinct_troops = set(np.hstack([Left_Troops, Right_Troops]))
len(distinct_troops)
RightArmy_colNames = np.hstack([['right_troop_' + str(i + 1) for i in range(8)], ['right_troop_count_' + str(i + 1) for i in range(8)]])
LeftArmy_colNames = np.hstack([['left_troop_' + str(i + 1) for i in range(8)], ['left_troop_count_' + str(i + 1) for i in range(8)]])
RightArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]]) for x in df_cr['players.right.deck']], columns=RightArmy_colNames)
LeftArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]]) for x in df_cr['players.left.deck']], columns=LeftArmy_colNames)
finalCR_data = pd.concat([df_cr, LeftArmy, RightArmy], axis=1, join='inner')
finalCR_data.info()
code
2021553/cell_17
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
deserialize_cr = [json_normalize(eval(r1)) for r1 in CR[0:10000]]
df_cr = pd.concat(deserialize_cr, ignore_index=True)
Left_Troops = list(np.hstack([[x[0] for x in left_deck] for left_deck in df_cr['players.left.deck']]))
Right_Troops = list(np.hstack([[x[0] for x in right_deck] for right_deck in df_cr['players.right.deck']]))
distinct_troops = set(np.hstack([Left_Troops, Right_Troops]))
len(distinct_troops)
RightArmy_colNames = np.hstack([['right_troop_' + str(i + 1) for i in range(8)], ['right_troop_count_' + str(i + 1) for i in range(8)]])
LeftArmy_colNames = np.hstack([['left_troop_' + str(i + 1) for i in range(8)], ['left_troop_count_' + str(i + 1) for i in range(8)]])
RightArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]]) for x in df_cr['players.right.deck']], columns=RightArmy_colNames)
LeftArmy = pd.DataFrame(data=[np.hstack([[army[0] for army in x], [army[1] for army in x]]) for x in df_cr['players.left.deck']], columns=LeftArmy_colNames)
finalCR_data = pd.concat([df_cr, LeftArmy, RightArmy], axis=1, join='inner')
finalCR_data.head()
code
2021553/cell_5
[ "text_html_output_1.png" ]
with open('../input/matches.txt') as file:
    CR = [x.strip() for x in file.readlines()]
len(CR)
code
50235721/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
data = data.drop_duplicates(subset=['name', 'category', 'deadline', 'launched'], keep='last')
data = data.drop(columns=['usd pledged', 'pledged'])

def top20(column):
    return data.nlargest(20, column)[['name', column]]

print('Project with the biggest number of supporters (top 20):')
top20('backers')
code
50235721/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
data = data.drop_duplicates(subset=['name', 'category', 'deadline', 'launched'], keep='last')
data = data.drop(columns=['usd pledged', 'pledged'])
data['category'].value_counts().plot.bar(figsize=(32, 9), ylabel='Amount').grid()
code
50235721/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
code
50235721/cell_4
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data.info()
code
50235721/cell_23
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
data = data.drop_duplicates(subset=['name', 'category', 'deadline', 'launched'], keep='last')
data = data.drop(columns=['usd pledged', 'pledged'])

def top20(column):
    return data.nlargest(20, column)[['name', column]]

duration = data['deadline'] - data['launched']
duration.describe()
code
50235721/cell_20
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
data = data.drop_duplicates(subset=['name', 'category', 'deadline', 'launched'], keep='last')
data = data.drop(columns=['usd pledged', 'pledged'])

def top20(column):
    return data.nlargest(20, column)[['name', column]]

print('Project with the biggest goal in $ (top 20):')
top20('usd_goal_real')
code
50235721/cell_6
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data.isnull().sum()
code
50235721/cell_19
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
data = data.drop_duplicates(subset=['name', 'category', 'deadline', 'launched'], keep='last')
data = data.drop(columns=['usd pledged', 'pledged'])

def top20(column):
    return data.nlargest(20, column)[['name', column]]

print('Project with the most pledged money in $ (top 20):')
top20('usd_pledged_real')
code
50235721/cell_7
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data.isnull().sum()
data[data['usd pledged'].isnull()].sample(10)
code
50235721/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
data = data.drop_duplicates(subset=['name', 'category', 'deadline', 'launched'], keep='last')
data = data.drop(columns=['usd pledged', 'pledged'])
data['state'].value_counts().plot.pie(autopct='%1.1f%%', figsize=(16, 9), ylabel='')
code
50235721/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import squarify

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
data = data.drop_duplicates(subset=['name', 'category', 'deadline', 'launched'], keep='last')
data = data.drop(columns=['usd pledged', 'pledged'])
x = data['currency'].value_counts().keys()
y = list(data['currency'].value_counts())
plt.gcf().set_size_inches(16, 9)
for i in range(len(y)):
    plt.annotate(str(y[i]), xy=(x[i], y[i]), ha='center', va='bottom')
sizes = list(data['country'].value_counts())
label = zip(data['country'].value_counts().keys()[:14], list(data['country'].value_counts())[:14])
squarify.plot(sizes=sizes, label=label, alpha=0.6, text_kwargs={'fontsize': 8})
plt.axis('off')
plt.gcf().set_size_inches(16, 9)
plt.show()
code
50235721/cell_3
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
code
50235721/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
data = data.drop_duplicates(subset=['name', 'category', 'deadline', 'launched'], keep='last')
data = data.drop(columns=['usd pledged', 'pledged'])
print('Days with the most launched projects (top 20):')
data['launched'].value_counts().nlargest(20)
code
50235721/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import squarify

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
data = data.drop_duplicates(subset=['name', 'category', 'deadline', 'launched'], keep='last')
data = data.drop(columns=['usd pledged', 'pledged'])
x = data['currency'].value_counts().keys()
y = list(data['currency'].value_counts())
plt.bar(x, y)
plt.gcf().set_size_inches(16, 9)
plt.xlabel('Currency')
plt.ylabel('Amount')
for i in range(len(y)):
    plt.annotate(str(y[i]), xy=(x[i], y[i]), ha='center', va='bottom')
plt.show()
code
50235721/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import squarify
import matplotlib.pyplot as plt

pd.options.display.float_format = '{:,}'.format
data = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', index_col='ID')
data.sample(5)
data['deadline'] = pd.to_datetime(data['deadline'])
data['launched'] = pd.to_datetime(data['launched'])
data['launched'] = data['launched'].dt.date
data['launched'] = pd.to_datetime(data['launched'])
data.isnull().sum()
data = data[pd.notna(data['usd pledged'])]
data[data.duplicated(keep=False)]
data = data.drop_duplicates(subset=['name', 'category', 'deadline', 'launched'], keep='last')
data = data.drop(columns=['usd pledged', 'pledged'])
data['main_category'].value_counts().plot.pie(autopct='%1.1f%%', figsize=(16, 9), ylabel='')
code
17111721/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
print('Training Features shape: ', app_train.shape)
print('Testing Features shape: ', app_test.shape)
code
17111721/cell_23
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
le = LabelEncoder()
le_count = 0
for col in app_train:
    if app_train[col].dtype == 'object':
        if len(list(app_train[col].unique())) <= 2:
            le.fit(app_train[col])
            app_train[col] = le.transform(app_train[col])
            app_test[col] = le.transform(app_test[col])
            le_count += 1
            print(col + ' column was label encoded.')
print('%d columns were label encoded.' % le_count)
code
17111721/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
(app_train['DAYS_EMPLOYED'] / -365.25).describe()
code
17111721/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
print('Training data shape: ', app_train.shape)
app_train.head()
code
17111721/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
app_train['DAYS_BIRTH'].describe()
code
17111721/cell_39
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
app_train['DAYS_EMPLOYED_ANOM'] = app_train['DAYS_EMPLOYED'] == 365243
app_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True)
app_train['DAYS_EMPLOYED'].plot.hist(title='Days Employment Histogram')
plt.xlabel('Days Employment')
code
17111721/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
print('Training Features shape: ', app_train.shape)
print('Testing Features shape: ', app_test.shape)
code
17111721/cell_48
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
app_train['DAYS_EMPLOYED_ANOM'] = app_train['DAYS_EMPLOYED'] == 365243
app_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True)
correlations = app_train.corr()['TARGET'].sort_values()
plt.style.use('fivethirtyeight')
plt.figure(figsize=(10, 8))
sns.kdeplot(app_train.loc[app_train['TARGET'] == 0, 'DAYS_BIRTH'] / 365, label='target = 0')
sns.kdeplot(app_train.loc[app_train['TARGET'] == 1, 'DAYS_BIRTH'] / 365, label='target = 1')
plt.xlabel('Age (years)')
plt.ylabel('Density')
plt.title('Distribution of Ages')
code
17111721/cell_41
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
app_train['DAYS_EMPLOYED_ANOM'] = app_train['DAYS_EMPLOYED'] == 365243
app_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True)
app_test['DAYS_EMPLOYED_ANOM'] = app_test['DAYS_EMPLOYED'] == 365243
app_test['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True)
print('There are %d anomalies in the test data out of %d entries' % (app_test['DAYS_EMPLOYED_ANOM'].sum(), len(app_test)))
code
17111721/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_train['TARGET'].value_counts()
code
17111721/cell_19
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
code
17111721/cell_50
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
app_train['DAYS_EMPLOYED_ANOM'] = app_train['DAYS_EMPLOYED'] == 365243
app_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True)
app_test['DAYS_EMPLOYED_ANOM'] = app_test['DAYS_EMPLOYED'] == 365243
app_test['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True)
correlations = app_train.corr()['TARGET'].sort_values()
plt.style.use('fivethirtyeight')
plt.ylabel('Density')
plt.title('Distribution of Ages')
age_data = app_train[['TARGET', 'DAYS_BIRTH']]
age_data['YEARS_BIRTH'] = age_data['DAYS_BIRTH'] / 365
age_data['YEARS_BINNED'] = pd.cut(age_data['YEARS_BIRTH'], bins=np.linspace(20, 70, num=11))
age_groups = age_data.groupby('YEARS_BINNED').mean()
age_groups
code
17111721/cell_49
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
app_train['DAYS_EMPLOYED_ANOM'] = app_train['DAYS_EMPLOYED'] == 365243
app_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True)
app_test['DAYS_EMPLOYED_ANOM'] = app_test['DAYS_EMPLOYED'] == 365243
app_test['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True)
correlations = app_train.corr()['TARGET'].sort_values()
plt.style.use('fivethirtyeight')
plt.ylabel('Density')
plt.title('Distribution of Ages')
age_data = app_train[['TARGET', 'DAYS_BIRTH']]
age_data['YEARS_BIRTH'] = age_data['DAYS_BIRTH'] / 365
age_data['YEARS_BINNED'] = pd.cut(age_data['YEARS_BIRTH'], bins=np.linspace(20, 70, num=11))
age_data.head()
code
17111721/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_train.dtypes.value_counts()
code
17111721/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
app_train['DAYS_EMPLOYED'].describe()
code
17111721/cell_8
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')
print('Testing data shape:', app_test.shape)
app_test.head()
code
17111721/cell_16
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

missing_values = missing_values_table(app_train)
missing_values.head(20)
code
17111721/cell_35
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
app_train['DAYS_EMPLOYED'].plot.hist(title='Days Employment Histogram')
plt.xlabel('Days Employment')
code
17111721/cell_43
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
correlations = app_train.corr()['TARGET'].sort_values()
print('Most Positive Correlations:\n', correlations.tail(15))
print('\nMost Negative Correlations:\n', correlations.head(15))
code
17111721/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
(app_train['DAYS_BIRTH'] / -365.25).describe()
code
17111721/cell_46
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
app_train['DAYS_EMPLOYED_ANOM'] = app_train['DAYS_EMPLOYED'] == 365243
app_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True)
correlations = app_train.corr()['TARGET'].sort_values()
plt.style.use('fivethirtyeight')
plt.hist(app_train['DAYS_BIRTH'] / 365, edgecolor='k', bins=25)
plt.title('Age of Client')
plt.xlabel('Age (years)')
plt.ylabel('Count')
code
17111721/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_test = pd.read_csv('../input/application_test.csv')

def missing_values_table(df):
    """
    Function to calculate missing values by column
    Arguments:
        df: dataframe
    Output:
        miss_val_table_renamed_cols: dataframe with missing values
    """
    miss_value = df.isnull().sum()
    miss_value_pct = 100 * df.isnull().sum() / len(df)
    miss_val_table = pd.concat([miss_value, miss_value_pct], axis=1)
    miss_val_table_renamed_cols = miss_val_table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})
    miss_val_table_renamed_cols = miss_val_table_renamed_cols[miss_val_table_renamed_cols.iloc[:, 1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    return miss_val_table_renamed_cols

app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0)
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
anom = app_train[app_train['DAYS_EMPLOYED'] == 365243]
non_anom = app_train[app_train['DAYS_EMPLOYED'] != 365243]
print('The non-anomalies default on %0.2f%% of loans' % (100 * non_anom['TARGET'].mean()))
print('The anomalies default on %0.2f%% of loans' % (100 * anom['TARGET'].mean()))
print('There are %d anomalous days of employment' % len(anom))
code
17111721/cell_12
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

app_train = pd.read_csv('../input/application_train.csv')
app_train['TARGET'].astype(int).plot.hist()
code
17111721/cell_5
[ "image_output_1.png" ]
import os

print(os.listdir('../input/'))
code
90108087/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
train_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/test.csv')
sample_sub = pd.read_csv('/kaggle/input/football-match-probability-prediction/sample_submission.csv')

def number_of_previous_games(row, home):
    # Count how many of the 10 match-history slots are populated for this side.
    for i in range(1, 11):
        col = f'{home}_team_history_match_date_{i}'
        if pd.isna(row[col]):
            return i - 1
    return i  # loop completed: all 10 history slots are filled

train_df['number_home_previous_games'] = train_df.apply(lambda row: number_of_previous_games(row, 'home'), axis=1)
train_df['number_away_previous_games'] = train_df.apply(lambda row: number_of_previous_games(row, 'away'), axis=1)
test_df['number_home_previous_games'] = test_df.apply(lambda row: number_of_previous_games(row, 'home'), axis=1)
test_df['number_away_previous_games'] = test_df.apply(lambda row: number_of_previous_games(row, 'away'), axis=1)
train_df['number_home_previous_games'].describe()
code
90108087/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
train_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/test.csv')
sample_sub = pd.read_csv('/kaggle/input/football-match-probability-prediction/sample_submission.csv')
code
90108087/cell_23
[ "text_plain_output_1.png" ]
""" train_df[train_df["number_na10_away_previous_games"]>0] list_na_columns = [] for index, row in train_df[train_df["number_na10_away_previous_games"]>0].iterrows(): historical_columns_template_home = [x for x in historical_columns_template if x.startswith("away")] for column in historical_columns_template_home: if pd.isna(row[column+"_10"]): list_na_columns.append(column) """
code
90108087/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
train_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/test.csv')
sample_sub = pd.read_csv('/kaggle/input/football-match-probability-prediction/sample_submission.csv')
train_df.describe()
code
90108087/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
train_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/test.csv')
sample_sub = pd.read_csv('/kaggle/input/football-match-probability-prediction/sample_submission.csv')

def number_of_previous_games(row, home):
    for i in range(1, 11):
        col = f'{home}_team_history_match_date_{i}'
        if pd.isna(row[col]):
            return i - 1
    return i

train_df['number_home_previous_games'] = train_df.apply(lambda row: number_of_previous_games(row, 'home'), axis=1)
train_df['number_away_previous_games'] = train_df.apply(lambda row: number_of_previous_games(row, 'away'), axis=1)
test_df['number_home_previous_games'] = test_df.apply(lambda row: number_of_previous_games(row, 'home'), axis=1)
test_df['number_away_previous_games'] = test_df.apply(lambda row: number_of_previous_games(row, 'away'), axis=1)
historical_columns_template = list(set(['_'.join(x.split('_')[:-1]) for x in train_df.columns.values.tolist() if x[-1].isnumeric()]))
is_cup_columns = [x for x in historical_columns_template if 'is_cup' in x]
print(is_cup_columns)
for col in is_cup_columns:
    for i in range(1, 11):
        train_df[col + f'_{i}'].fillna(0, inplace=True)
        test_df[col + f'_{i}'].fillna(0, inplace=True)
code
90108087/cell_19
[ "text_html_output_1.png" ]
""" train_df[train_df["number_na10_home_previous_games"]>0] list_na_columns = [] for index, row in train_df[train_df["number_na10_home_previous_games"]>0].iterrows(): historical_columns_template_home = [x for x in historical_columns_template if x.startswith("home")] for column in historical_columns_template_home: if pd.isna(row[column+"_10"]): list_na_columns.append(column) """
code
90108087/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90108087/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
train_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/test.csv')
sample_sub = pd.read_csv('/kaggle/input/football-match-probability-prediction/sample_submission.csv')
print(train_df.columns.values.tolist())
code
90108087/cell_15
[ "text_plain_output_1.png" ]
""" def count_not_na(row, home): number_previous_games = row[f"number_{home}_previous_games"] if number_previous_games!=10: number_previous_games+=1 else: return 0 count_notna = 0 historical_columns_template_home = [x for x in historical_columns_template if x.startswith(home)] for column in historical_columns_template_home: if pd.notna(row[column+f"_{number_previous_games}"]): count_notna+=1 return count_notna def count_na_10(row, home): number_previous_games = row[f"number_{home}_previous_games"] if number_previous_games!=10: return 0 else: count_na = 0 historical_columns_template_home = [x for x in historical_columns_template if x.startswith(home)] for column in historical_columns_template_home: if pd.isna(row[column+f"_{number_previous_games}"]): count_na+=1 return count_na train_df["number_notna_home_previous_games"] = train_df.apply(lambda row: count_not_na(row, "home"), axis=1) train_df["number_notna_away_previous_games"] = train_df.apply(lambda row: count_not_na(row, "away"), axis=1) train_df["number_na10_home_previous_games"] = train_df.apply(lambda row: count_na_10(row, "home"), axis=1) train_df["number_na10_away_previous_games"] = train_df.apply(lambda row: count_na_10(row, "away"), axis=1) """
code
90108087/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
train_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/football-match-probability-prediction/test.csv')
sample_sub = pd.read_csv('/kaggle/input/football-match-probability-prediction/sample_submission.csv')
print(train_df.shape)
print(test_df.shape)
print(sample_sub.shape)
code
32068788/cell_6
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/digit-recognizer/train.csv')
X = data.drop('label', axis=1).values
s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
one_hot = pd.get_dummies(s)
one_hot['label'] = one_hot.index
data = pd.merge(data, one_hot)
data = data.sample(frac=1)
data = data.drop('label', axis=1)
y = np.array(data[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]].values)
X = np.array(data.drop([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], axis=1).values)
X = X.T
y = y.T
y
code
32068788/cell_2
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/digit-recognizer/train.csv')
X = data.drop('label', axis=1).values
s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
one_hot = pd.get_dummies(s)
one_hot['label'] = one_hot.index
data = pd.merge(data, one_hot)
data = data.sample(frac=1)
data = data.drop('label', axis=1)
y = np.array(data[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]].values)
X = np.array(data.drop([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], axis=1).values)
X = X.T
y = y.T
print(X.shape)
print(y.shape)
code
32068788/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068788/cell_7
[ "text_plain_output_1.png" ]
# relative difference between the two values (~5.76%)
(106862.31418188661 - 101041.82401718189) / 101041.82401718189
code
32068788/cell_3
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import time

data = pd.read_csv('../input/digit-recognizer/train.csv')
X = data.drop('label', axis=1).values
s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
one_hot = pd.get_dummies(s)
one_hot['label'] = one_hot.index
data = pd.merge(data, one_hot)
data = data.sample(frac=1)
data = data.drop('label', axis=1)
y = np.array(data[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]].values)
X = np.array(data.drop([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], axis=1).values)
X = X.T
y = y.T

np.random.seed(42)
w1 = np.random.rand(32, 784)
b0 = np.ones((32, 42000))
w2 = np.random.rand(10, 32)
b1 = np.ones((10, 42000))

def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)

alpha = 0.005
loss = []
for i in range(10000):
    start = time.time()
    alpha = 0.5
    x1 = np.tanh(w1 @ X + b0)
    x2 = softmax(w2 @ x1 + b1)
    if i % 10 == 0:
        print('Mean Cross Entropy Loss')
        print('Iteration Number :', i)
        loss_ = -np.mean(np.sum(np.multiply(y, np.log(x2))))
        if i > 0:
            if loss_ > loss[len(loss) - 1]:
                print('Loss has increased')
                alpha = 0.5
            elif (loss[len(loss) - 1] - loss_) / loss_ < 0.01:
                alpha = 10
        print(loss_)
        print(alpha)
        loss.append(loss_)
    delta_2 = np.multiply((x2 - y) / 42000, 1)
    delta_1 = np.multiply(w2.T @ delta_2, 1 - np.power(x1, 2))
    w1 = w1 - alpha * (delta_1 @ X.T)
    b0 = b0 - alpha * delta_1
    w2 = w2 - alpha * (delta_2 @ x1.T)
    b1 = b1 - alpha * delta_2
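# Note: np.log(x2) can hit log(0) if the softmax underflows; a small epsilon keeps
# the cross-entropy finite, e.g.:
# loss_ = -np.mean(np.sum(np.multiply(y, np.log(x2 + 1e-12))))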
code
32068788/cell_5
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import time

data = pd.read_csv('../input/digit-recognizer/train.csv')
X = data.drop('label', axis=1).values
s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
one_hot = pd.get_dummies(s)
one_hot['label'] = one_hot.index
data = pd.merge(data, one_hot)
data = data.sample(frac=1)
data = data.drop('label', axis=1)
y = np.array(data[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]].values)
X = np.array(data.drop([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], axis=1).values)
X = X.T
y = y.T

np.random.seed(42)
w1 = np.random.rand(32, 784)
b0 = np.ones((32, 42000))
w2 = np.random.rand(10, 32)
b1 = np.ones((10, 42000))

def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)

alpha = 0.005
loss = []
for i in range(10000):
    start = time.time()
    alpha = 0.5
    x1 = np.tanh(w1 @ X + b0)
    x2 = softmax(w2 @ x1 + b1)
    if i % 10 == 0:
        loss_ = -np.mean(np.sum(np.multiply(y, np.log(x2))))
        if i > 0:
            if loss_ > loss[len(loss) - 1]:
                alpha = 0.5
            elif (loss[len(loss) - 1] - loss_) / loss_ < 0.01:
                alpha = 10
        loss.append(loss_)
    delta_2 = np.multiply((x2 - y) / 42000, 1)
    delta_1 = np.multiply(w2.T @ delta_2, 1 - np.power(x1, 2))
    w1 = w1 - alpha * (delta_1 @ X.T)
    b0 = b0 - alpha * delta_1
    w2 = w2 - alpha * (delta_2 @ x1.T)
    b1 = b1 - alpha * delta_2
x2
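# A quick accuracy sanity check on the final forward pass (columns are samples):
# preds = np.argmax(x2, axis=0)
# labels = np.argmax(y, axis=0)
# (preds == labels).mean()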
code
88101762/cell_13
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
import os
import tensorflow as tf

stored_data_path = '../input/magnetictiledefect'
CLASS_NAME = ['Blowhole', 'Free', 'Break', 'Fray', 'Uneven', 'Crack']
CLASS_NAME_NUM = [0, 1, 2, 3, 4, 5]
file_path = '../input/magneticTileDefect/'
X_all_raw = []
y_all = []
for ani_class in CLASS_NAME:
    for filename in os.listdir(file_path + ani_class):
        for subfilename in os.listdir(file_path + ani_class + '/' + filename + '/Imgs'):
            img = load_img(file_path + ani_class + '/' + filename + '/Imgs/' + subfilename, target_size=(224, 224))
            img_array = img_to_array(img)
            X_all_raw.append(img_array)
            y_all.append(CLASS_NAME.index(ani_class))
X_all_raw = np.array(X_all_raw)
X_all_raw.shape
# X_train_raw / X_test_raw and y_train / y_test presumably come from a
# train/test split in an earlier cell of the notebook
X_train_raw = X_train_raw.astype('float32')
X_test_raw = X_test_raw.astype('float32')
X_train_raw /= 255
X_test_raw /= 255
y_train = tf.keras.utils.to_categorical(y_train, len(CLASS_NAME))
y_test = tf.keras.utils.to_categorical(y_test, len(CLASS_NAME))
type(X_train_raw[0][0])
code
88101762/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
import os

stored_data_path = '../input/magnetictiledefect'
CLASS_NAME = ['Blowhole', 'Free', 'Break', 'Fray', 'Uneven', 'Crack']
CLASS_NAME_NUM = [0, 1, 2, 3, 4, 5]
file_path = '../input/magneticTileDefect/'
X_all_raw = []
y_all = []
for ani_class in CLASS_NAME:
    for filename in os.listdir(file_path + ani_class):
        for subfilename in os.listdir(file_path + ani_class + '/' + filename + '/Imgs'):
            img = load_img(file_path + ani_class + '/' + filename + '/Imgs/' + subfilename, target_size=(224, 224))
            img_array = img_to_array(img)
            X_all_raw.append(img_array)
            y_all.append(CLASS_NAME.index(ani_class))
X_all_raw = np.array(X_all_raw)
X_all_raw.shape
y_all = np.array(y_all)
y_all
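# Class balance at a glance (indices follow the CLASS_NAME order used above):
classes, counts = np.unique(y_all, return_counts=True)
dict(zip([CLASS_NAME[c] for c in classes], counts))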
code
88101762/cell_4
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
import os

stored_data_path = '../input/magnetictiledefect'
CLASS_NAME = ['Blowhole', 'Free', 'Break', 'Fray', 'Uneven', 'Crack']
CLASS_NAME_NUM = [0, 1, 2, 3, 4, 5]
file_path = '../input/magneticTileDefect/'
X_all_raw = []
y_all = []
for ani_class in CLASS_NAME:
    for filename in os.listdir(file_path + ani_class):
        for subfilename in os.listdir(file_path + ani_class + '/' + filename + '/Imgs'):
            img = load_img(file_path + ani_class + '/' + filename + '/Imgs/' + subfilename, target_size=(224, 224))
            img_array = img_to_array(img)
            X_all_raw.append(img_array)
            y_all.append(CLASS_NAME.index(ani_class))
X_all_raw = np.array(X_all_raw)
X_all_raw.shape
code
88101762/cell_20
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from sklearn.metrics import classification_report
from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import keras_tuner as kt
import numpy as np
import os
import tensorflow as tf

stored_data_path = '../input/magnetictiledefect'
CLASS_NAME = ['Blowhole', 'Free', 'Break', 'Fray', 'Uneven', 'Crack']
CLASS_NAME_NUM = [0, 1, 2, 3, 4, 5]
file_path = '../input/magneticTileDefect/'
X_all_raw = []
y_all = []
for ani_class in CLASS_NAME:
    for filename in os.listdir(file_path + ani_class):
        for subfilename in os.listdir(file_path + ani_class + '/' + filename + '/Imgs'):
            img = load_img(file_path + ani_class + '/' + filename + '/Imgs/' + subfilename, target_size=(224, 224))
            img_array = img_to_array(img)
            X_all_raw.append(img_array)
            y_all.append(CLASS_NAME.index(ani_class))
X_all_raw = np.array(X_all_raw)
X_all_raw.shape
y_all = np.array(y_all)
y_all
X_train_raw = X_train_raw.astype('float32')
X_test_raw = X_test_raw.astype('float32')
X_train_raw /= 255
X_test_raw /= 255
y_train = tf.keras.utils.to_categorical(y_train, len(CLASS_NAME))
y_test = tf.keras.utils.to_categorical(y_test, len(CLASS_NAME))

BATCH_SIZE = 64
EPOCH = 50
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(224, 224, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(len(CLASS_NAME)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
history = model.fit(X_train_raw, y_train, batch_size=BATCH_SIZE, epochs=EPOCH,
                    validation_data=(X_test_raw, y_test), shuffle=True)
y_pred = model.predict(X_test_raw)

from tensorflow import keras
from tensorflow.keras import layers

class MyHyperModel(kt.HyperModel):

    def build(self, hp):
        model = Sequential()
        filter_num = hp.Int('num_filter', min_value=3, max_value=100, step=2)
        kernel_num = hp.Int('size_kernal', min_value=3, max_value=10, step=2)
        model.add(Conv2D(filter_num, (kernel_num, kernel_num), input_shape=(224, 224, 3)))
        model.add(Activation('relu'))
        pool_num = hp.Int('size_pool', min_value=2, max_value=10, step=2)
        model.add(MaxPooling2D(pool_size=(pool_num, pool_num)))
        model.add(Flatten())
        model.add(Dense(len(CLASS_NAME)))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
        return model

    def fit(self, hp, model, *args, **kwargs):
        return model.fit(*args, shuffle=True,
                         batch_size=hp.Int('size_batch', min_value=10, max_value=100, step=10),
                         epochs=hp.Int('num_epoch', min_value=10, max_value=100, step=10),
                         **kwargs)

tuner = kt.RandomSearch(MyHyperModel(), objective='val_accuracy', max_trials=10)
tuner.search(X_train_raw, y_train, validation_data=(X_test_raw, y_test))
tuner.results_summary()
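# After the search, the tuned settings can be pulled back out through the standard
# keras-tuner API; a minimal sketch:
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hp.values)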
code
88101762/cell_2
[ "text_plain_output_1.png" ]
import os

print(os.listdir('../input/magneticTileDefect'))
code
88101762/cell_11
[ "text_plain_output_1.png" ]
print('Shape X_train_raw', X_train_raw.shape)
print('Shape y_train', y_train.shape)
print('Shape X_test_raw', X_test_raw.shape)
print('Shape y_test', y_test.shape)
code
88101762/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
import os

stored_data_path = '../input/magnetictiledefect'
CLASS_NAME = ['Blowhole', 'Free', 'Break', 'Fray', 'Uneven', 'Crack']
CLASS_NAME_NUM = [0, 1, 2, 3, 4, 5]
file_path = '../input/magneticTileDefect/'
X_all_raw = []
y_all = []
for ani_class in CLASS_NAME:
    for filename in os.listdir(file_path + ani_class):
        for subfilename in os.listdir(file_path + ani_class + '/' + filename + '/Imgs'):
            img = load_img(file_path + ani_class + '/' + filename + '/Imgs/' + subfilename, target_size=(224, 224))
            img_array = img_to_array(img)
            X_all_raw.append(img_array)
            y_all.append(CLASS_NAME.index(ani_class))
X_all_raw = np.array(X_all_raw)
X_all_raw.shape
type(X_all_raw)
code
88101762/cell_18
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array, array_to_img
from sklearn.metrics import classification_report
from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf

stored_data_path = '../input/magnetictiledefect'
CLASS_NAME = ['Blowhole', 'Free', 'Break', 'Fray', 'Uneven', 'Crack']
CLASS_NAME_NUM = [0, 1, 2, 3, 4, 5]
file_path = '../input/magneticTileDefect/'
X_all_raw = []
y_all = []
for ani_class in CLASS_NAME:
    for filename in os.listdir(file_path + ani_class):
        for subfilename in os.listdir(file_path + ani_class + '/' + filename + '/Imgs'):
            img = load_img(file_path + ani_class + '/' + filename + '/Imgs/' + subfilename, target_size=(224, 224))
            img_array = img_to_array(img)
            X_all_raw.append(img_array)
            y_all.append(CLASS_NAME.index(ani_class))
X_all_raw = np.array(X_all_raw)
X_all_raw.shape
y_all = np.array(y_all)
y_all
X_train_raw = X_train_raw.astype('float32')
X_test_raw = X_test_raw.astype('float32')
X_train_raw /= 255
X_test_raw /= 255
y_train = tf.keras.utils.to_categorical(y_train, len(CLASS_NAME))
y_test = tf.keras.utils.to_categorical(y_test, len(CLASS_NAME))

BATCH_SIZE = 64
EPOCH = 50
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(224, 224, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(len(CLASS_NAME)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
history = model.fit(X_train_raw, y_train, batch_size=BATCH_SIZE, epochs=EPOCH,
                    validation_data=(X_test_raw, y_test), shuffle=True)
y_pred = model.predict(X_test_raw)

plt.figure(figsize=[20, 8])
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy', size=25, pad=20)
plt.ylabel('Accuracy', size=15)
plt.xlabel('Epoch', size=15)
plt.legend(['train', 'test'], loc='upper left')
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss', size=25, pad=20)
plt.ylabel('Loss', size=15)
plt.xlabel('Epoch', size=15)
plt.legend(['train', 'test'], loc='upper left')
plt.show()
code
88101762/cell_8
[ "image_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array, array_to_img
import matplotlib.pyplot as plt
import numpy as np
import os

stored_data_path = '../input/magnetictiledefect'
CLASS_NAME = ['Blowhole', 'Free', 'Break', 'Fray', 'Uneven', 'Crack']
CLASS_NAME_NUM = [0, 1, 2, 3, 4, 5]
file_path = '../input/magneticTileDefect/'
X_all_raw = []
y_all = []
for ani_class in CLASS_NAME:
    for filename in os.listdir(file_path + ani_class):
        for subfilename in os.listdir(file_path + ani_class + '/' + filename + '/Imgs'):
            img = load_img(file_path + ani_class + '/' + filename + '/Imgs/' + subfilename, target_size=(224, 224))
            img_array = img_to_array(img)
            X_all_raw.append(img_array)
            y_all.append(CLASS_NAME.index(ani_class))
X_all_raw = np.array(X_all_raw)
X_all_raw.shape
plt.imshow(array_to_img(X_all_raw[99, :]))
code
88101762/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from sklearn.metrics import classification_report
from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
import os
import tensorflow as tf

stored_data_path = '../input/magnetictiledefect'
CLASS_NAME = ['Blowhole', 'Free', 'Break', 'Fray', 'Uneven', 'Crack']
CLASS_NAME_NUM = [0, 1, 2, 3, 4, 5]
file_path = '../input/magneticTileDefect/'
X_all_raw = []
y_all = []
for ani_class in CLASS_NAME:
    for filename in os.listdir(file_path + ani_class):
        for subfilename in os.listdir(file_path + ani_class + '/' + filename + '/Imgs'):
            img = load_img(file_path + ani_class + '/' + filename + '/Imgs/' + subfilename, target_size=(224, 224))
            img_array = img_to_array(img)
            X_all_raw.append(img_array)
            y_all.append(CLASS_NAME.index(ani_class))
X_all_raw = np.array(X_all_raw)
X_all_raw.shape
y_all = np.array(y_all)
y_all
X_train_raw = X_train_raw.astype('float32')
X_test_raw = X_test_raw.astype('float32')
X_train_raw /= 255
X_test_raw /= 255
y_train = tf.keras.utils.to_categorical(y_train, len(CLASS_NAME))
y_test = tf.keras.utils.to_categorical(y_test, len(CLASS_NAME))

BATCH_SIZE = 64
EPOCH = 50
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(224, 224, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(len(CLASS_NAME)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
history = model.fit(X_train_raw, y_train, batch_size=BATCH_SIZE, epochs=EPOCH,
                    validation_data=(X_test_raw, y_test), shuffle=True)
y_pred = model.predict(X_test_raw)
print(classification_report(np.argmax(y_test, axis=-1), np.argmax(y_pred, axis=-1), target_names=CLASS_NAME))
code
88101762/cell_14
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
import os
import tensorflow as tf

stored_data_path = '../input/magnetictiledefect'
CLASS_NAME = ['Blowhole', 'Free', 'Break', 'Fray', 'Uneven', 'Crack']
CLASS_NAME_NUM = [0, 1, 2, 3, 4, 5]
file_path = '../input/magneticTileDefect/'
X_all_raw = []
y_all = []
for ani_class in CLASS_NAME:
    for filename in os.listdir(file_path + ani_class):
        for subfilename in os.listdir(file_path + ani_class + '/' + filename + '/Imgs'):
            img = load_img(file_path + ani_class + '/' + filename + '/Imgs/' + subfilename, target_size=(224, 224))
            img_array = img_to_array(img)
            X_all_raw.append(img_array)
            y_all.append(CLASS_NAME.index(ani_class))
X_all_raw = np.array(X_all_raw)
X_all_raw.shape
X_train_raw = X_train_raw.astype('float32')
X_test_raw = X_test_raw.astype('float32')
X_train_raw /= 255
X_test_raw /= 255
y_train = tf.keras.utils.to_categorical(y_train, len(CLASS_NAME))
y_test = tf.keras.utils.to_categorical(y_test, len(CLASS_NAME))
y_train[:8]
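# np.argmax undoes the one-hot encoding, recovering the integer class ids:
np.argmax(y_train[:8], axis=1)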
code
2003917/cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

test_corpus = np.load('../input/preprocessing/test_corp.npy')
train_corpus = np.load('../input/preprocessing/train_corp.npy')
glove_table = pd.read_csv('../input/preprocessing/filled_glove_table.csv', index_col=0)
glove_table.describe()
code
2003917/cell_7
[ "text_plain_output_1.png" ]
from hmmlearn import hmm
import numpy as np
import pandas as pd
import warnings

test_corpus = np.load('../input/preprocessing/test_corp.npy')
train_corpus = np.load('../input/preprocessing/train_corp.npy')
glove_table = pd.read_csv('../input/preprocessing/filled_glove_table.csv', index_col=0)
glove_table.loc[['man', 'woman', 'man']].as_matrix().shape
train_sequence = []
train_sequence_lengths = []
for sentence_chunk in train_corpus:
    words = [word for word, tag in sentence_chunk]
    train_sequence.append(glove_table.loc[words].as_matrix())
    train_sequence_lengths.append(len(words))

def hmm_train_model(train_sequence, train_sequence_lengths):
    num_hidden_states = 20
    num_dims = train_sequence[0].shape[1]
    model = hmm.GaussianHMM(n_components=num_hidden_states, covariance_type='spherical', n_iter=10)
    start_prob = np.random.rand(num_hidden_states)
    start_prob = start_prob / np.sum(start_prob)
    model.startprob_ = start_prob
    transmat = np.random.rand(num_hidden_states, num_hidden_states)
    # each row of a transition matrix must sum to 1, so normalize along axis 1
    transmat = transmat / np.sum(transmat, axis=1, keepdims=True)
    model.transmat_ = transmat
    model.means_ = np.random.rand(num_hidden_states, num_dims)
    model.covars_ = np.tile(np.identity(num_dims), (num_hidden_states, 1, 1))
    return model.fit(np.concatenate(train_sequence), train_sequence_lengths)

with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    model = hmm_train_model(train_sequence[0:10], train_sequence_lengths[0:10])
model
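# Note: DataFrame.as_matrix() was removed in pandas 1.0; on current pandas the
# equivalent call is .to_numpy(), e.g. glove_table.loc[words].to_numpy().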
code
2003917/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

test_corpus = np.load('../input/preprocessing/test_corp.npy')
train_corpus = np.load('../input/preprocessing/train_corp.npy')
glove_table = pd.read_csv('../input/preprocessing/filled_glove_table.csv', index_col=0)
glove_table.loc[['man', 'woman', 'man']].as_matrix().shape
code
74069086/cell_6
[ "text_plain_output_1.png" ]
# main is the async scraping entry point, presumably defined in an earlier cell
start_page = 'https://www.checkthepolice.org/database'
trio.run(main, start_page)
code
74069086/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
pip uninstall -y typing  # trouble for gazpacho
pip install asks trio gazpacho
code
74069086/cell_7
[ "text_plain_output_1.png" ]
! zip -r dept_contracts.zip /home/contracts/*.pdf
code
74069086/cell_8
[ "text_plain_output_1.png" ]
! ls -U /home/contracts | head -10
code
73083545/cell_20
[ "text_plain_output_1.png" ]
from functools import partial
from scipy.stats.mstats import winsorize
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold
from tabular import TabularTransformer, DataGenerator
from tabular import gelu, Mish, mish
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten, concatenate, Concatenate, Lambda, Dropout, SpatialDropout1D
from tensorflow.keras.layers import Input, Embedding, Reshape, GlobalAveragePooling1D
from tensorflow.keras.layers import MaxPooling1D, BatchNormalization, AveragePooling1D, Conv1D
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import SGD, Adam, Nadam
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf

X_train = pd.read_csv('../input/30-days-of-ml/train.csv')
X_test = pd.read_csv('../input/30-days-of-ml/test.csv')
y_train = X_train.target
X_train = X_train.set_index('id').drop('target', axis='columns')
X_test = X_test.set_index('id')
y_stratified = pd.cut(y_train.rank(method='first'), bins=10, labels=False)
categoricals = [item for item in X_train.columns if 'cat' in item]
dummies = pd.get_dummies(X_train.append(X_test)[categoricals])
X_train[dummies.columns] = dummies.iloc[:len(X_train), :]
X_test[dummies.columns] = dummies.iloc[len(X_train):, :]
del dummies
important_features = ['cat8_E', 'cont0', 'cont5', 'cont7', 'cont8', 'cat1_A', 'cont2',
                      'cont13', 'cont3', 'cont10', 'cont1', 'cont9', 'cont11', 'cat1',
                      'cat8_C', 'cont6', 'cont12', 'cat5', 'cat3_C', 'cont4', 'cat8']
X_train = X_train[important_features]
X_test = X_test[important_features]
numeric_variables = [item for item in X_train.columns if 'cont' in item]
low_card_categoricals = ['cat1', 'cat8_E', 'cat1_A', 'cat8_C', 'cat3_C']
high_card_categoricals = ['cat5', 'cat8']
ordinals = ['cat5', 'cat8']

def tabular_dnn(numeric_variables, categorical_variables=None, categorical_counts=None,
                feature_selection_dropout=0.2, categorical_dropout=0.1,
                first_dense=256, second_dense=256, dense_dropout=0.2,
                activation_type=gelu):
    numerical_inputs = Input(shape=(numeric_variables,))
    numerical_normalization = BatchNormalization()(numerical_inputs)
    numerical_feature_selection = Dropout(feature_selection_dropout)(numerical_normalization)
    if categorical_variables is not None:
        categorical_inputs = []
        categorical_embeddings = []
        for category in categorical_variables:
            categorical_inputs.append(Input(shape=[1], name=category))
            category_counts = categorical_counts[category]
            categorical_embeddings.append(
                Embedding(category_counts + 1,
                          int(np.log1p(category_counts) + 1),
                          name=category + '_embed')(categorical_inputs[-1]))
        if len(categorical_embeddings) == 1:
            categorical_logits = Flatten()(SpatialDropout1D(categorical_dropout)(categorical_embeddings[0]))
        else:
            categorical_logits = Concatenate(name='categorical_conc')(
                [Flatten()(SpatialDropout1D(categorical_dropout)(cat_emb))
                 for cat_emb in categorical_embeddings])
        xs = concatenate([numerical_feature_selection, categorical_logits])
    else:
        xs = numerical_feature_selection
    x = Dense(first_dense, activation=activation_type)(xs)
    x = Dropout(dense_dropout)(x)
    x = Dense(second_dense, activation=activation_type)(x)
    x = concatenate([x, xs])
    x = Dropout(dense_dropout)(x)
    output = Dense(1)(x)
    if categorical_variables is not None:
        model = Model([numerical_inputs] + categorical_inputs, output)
    else:
        model = Model([numerical_inputs], output)
    return model

# Useful functions
def RMSE(y_true, y_pred):
    return tf.py_function(partial(mean_squared_error, squared=False), (y_true, y_pred), tf.double)

def MSE(y_true, y_pred):
    return tf.py_function(partial(mean_squared_error, squared=True), (y_true, y_pred), tf.double)

def compile_model(model, loss, metrics, optimizer):
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)
    return model

def plot_keras_history(history, measures):
    """
    history: Keras training history
    measures = list of names of measures
    """
    rows = len(measures) // 2 + len(measures) % 2
    fig, panels = plt.subplots(rows, 2, figsize=(15, 5))
    plt.subplots_adjust(top=0.99, bottom=0.01, hspace=0.4, wspace=0.2)
    try:
        panels = [item for sublist in panels for item in sublist]
    except:
        pass
    for k, measure in enumerate(measures):
        panel = panels[k]
        panel.set_title(measure + ' history')
        panel.plot(history.epoch, history.history[measure], label="Train " + measure)
        panel.plot(history.epoch, history.history["val_" + measure], label="Validation " + measure)
        panel.set(xlabel='epochs', ylabel=measure)
        panel.legend()
    plt.show(fig)

measure_to_monitor = 'val_RMSE'
modality = 'min'
early_stopping = EarlyStopping(monitor=measure_to_monitor, mode=modality, patience=5, verbose=0)
reduce_lr_on_plateau = ReduceLROnPlateau(monitor=measure_to_monitor, mode=modality, patience=2, factor=0.5, verbose=0)
model_checkpoint = ModelCheckpoint('best.model', monitor=measure_to_monitor, mode=modality, save_best_only=True, verbose=0)

# Config (folds, seed, batch_size, epochs) is presumably defined in an earlier cell
skf = StratifiedKFold(n_splits=Config.folds, shuffle=True, random_state=Config.seed)
score = list()
oof = np.zeros(len(X_train))
best_iteration = list()
test_preds = np.zeros(len(X_test))

for fold, (train_idx, test_idx) in enumerate(skf.split(X_train, y_stratified)):
    tb = TabularTransformer(numeric=numeric_variables, ordinal=ordinals,
                            lowcat=low_card_categoricals, highcat=high_card_categoricals)
    tb.fit(X_train.iloc[train_idx])
    sizes = tb.shape(X_train.iloc[train_idx])
    categorical_levels = dict(zip(high_card_categoricals, sizes[1:]))
    print(f'Input array sizes: {sizes}')
    print(f'Categorical levels: {categorical_levels}\n')
    model = tabular_dnn(numeric_variables=sizes[0],
                        categorical_variables=high_card_categoricals,
                        categorical_counts=categorical_levels,
                        feature_selection_dropout=0.0, categorical_dropout=0.0,
                        first_dense=128, second_dense=64, dense_dropout=0.1,
                        activation_type='relu')
    model = compile_model(model, loss='mean_squared_error', metrics=[MSE, RMSE],
                          optimizer=Adam(learning_rate=0.001))
    train_batch = DataGenerator(X_train.iloc[train_idx],
                                np.array(winsorize(y_train[train_idx], [0.002, 0.0])),
                                tabular_transformer=tb,
                                batch_size=Config.batch_size,
                                shuffle=True)
    history = model.fit(train_batch,
                        validation_data=(tb.transform(X_train.iloc[test_idx]), y_train[test_idx]),
                        epochs=Config.epochs,
                        callbacks=[model_checkpoint, reduce_lr_on_plateau, early_stopping],
                        verbose=1)
    print('\nFOLD %i' % fold)
    plot_keras_history(history, measures=['RMSE', 'MSE'])
    best_iteration.append(np.argmin(history.history['val_RMSE']) + 1)
    model = load_model('best.model', custom_objects={'gelu': gelu, 'mish': mish, 'MSE': MSE, 'RMSE': RMSE})
    preds = model.predict(tb.transform(X_train.iloc[test_idx]), verbose=1, batch_size=1024).flatten()
    oof[test_idx] = preds
    score.append(mean_squared_error(y_true=y_train[test_idx], y_pred=preds, squared=False))
    test_preds += model.predict(tb.transform(X_test[X_train.columns]), verbose=1, batch_size=1024).flatten()

test_preds /= Config.folds
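# A typical last step is writing the fold-averaged predictions out for scoring;
# a sketch assuming the standard competition submission layout:
submission = pd.DataFrame({'id': X_test.index, 'target': test_preds})
submission.to_csv('submission.csv', index=False)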
code
73083545/cell_6
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
from functools import partial
import matplotlib.pyplot as plt
from scipy.stats.mstats import winsorize
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import OrdinalEncoder
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input, Embedding, Reshape, GlobalAveragePooling1D
from tensorflow.keras.layers import Flatten, concatenate, Concatenate, Lambda, Dropout, SpatialDropout1D
from tensorflow.keras.layers import MaxPooling1D, BatchNormalization, AveragePooling1D, Conv1D
from tensorflow.keras.layers import Activation, LeakyReLU
from tensorflow.keras.optimizers import SGD, Adam, Nadam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.regularizers import l2, l1_l2
from keras.losses import MeanSquaredError
from tensorflow.keras.utils import get_custom_objects
from tabular import gelu, Mish, mish
from tabular import TabularTransformer, DataGenerator
code
130003860/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
data.dtypes[data.dtypes == 'object']
data.dtypes[data.dtypes == 'object'].isnull()
data.drop('Id', axis=1, inplace=True)
data.columns
code
130003860/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
data.dtypes[data.dtypes == 'object']
data.dtypes[data.dtypes == 'object'].isnull()
data.drop('Id', axis=1, inplace=True)
data['EJ'].nunique()
code
130003860/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings

warnings.filterwarnings('ignore')
code
130003860/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
data.dtypes[data.dtypes == 'object']
data.dtypes[data.dtypes == 'object'].isnull()
data.drop('Id', axis=1, inplace=True)
data.columns
data[['BQ', 'DU', 'EL', 'FC', 'FL', 'FS', 'GL', 'CB', 'CC']].isnull().sum()
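# These sparse columns are all numeric, so a simple median fill is a common baseline;
# a sketch on the same frame:
na_cols = ['BQ', 'DU', 'EL', 'FC', 'FL', 'FS', 'GL', 'CB', 'CC']
data[na_cols] = data[na_cols].fillna(data[na_cols].median())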
code
130003860/cell_33
[ "text_plain_output_1.png" ]
(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
code
130003860/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.head()
code
130003860/cell_39
[ "text_plain_output_1.png" ]
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import log_loss

(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
gb = GradientBoostingClassifier(n_estimators=1000, max_depth=9, subsample=0.8,
                                max_features='log2', min_samples_leaf=9, random_state=42)
gb.fit(x_train, y_train)
y_pred = gb.predict(x_test)
log_loss(y_test, y_pred)
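# log_loss over hard predict() labels saturates at the clipping bound; scoring the
# predicted probabilities matches the competition's probabilistic log-loss metric:
y_proba = gb.predict_proba(x_test)
log_loss(y_test, y_proba)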
code
130003860/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
data.dtypes[data.dtypes == 'object']
data.dtypes[data.dtypes == 'object'].isnull()
code
130003860/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
code