Columns in this dump (value-length summaries as reported by the dataset viewer):

  path              string       lengths 13 to 17
  screenshot_names  sequence     lengths 1 to 873
  code              string       lengths 0 to 40.4k
  cell_type         string class 1 value ("code")
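The rows that follow are (path, screenshot_names, code, cell_type) records. As an editorial sketch (not part of this dump), such rows could be grouped by notebook once loaded into pandas; the file name cells.jsonl and the JSON-lines layout are assumptions for illustration only.

import pandas as pd

# Hypothetical load of the dumped rows; adjust the path/format to the real export.
rows = pd.read_json('cells.jsonl', lines=True)

# Each path has the form '<notebook_id>/cell_<n>'; split it to group cells per notebook.
rows[['notebook_id', 'cell_id']] = rows['path'].str.split('/', n=1, expand=True)
print(rows.groupby('notebook_id')['cell_id'].count())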
34150026/cell_37
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
train.rename(columns={'question_text': 'text'}, inplace=True)
train.isnull().sum()
train.drop('qid', inplace=True, axis=1)
train.head()
code
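The cell above only loads and lightly cleans the Quora data. As a hedged next step (not part of the original notebook), a minimal TF-IDF plus logistic-regression baseline on the renamed text column might look like this; all parameter values are illustrative.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score

# Assumes `train` with columns 'text' and 'target' as prepared in the cell above.
X_tr, X_val, y_tr, y_val = train_test_split(
    train['text'], train['target'], test_size=0.2, random_state=0, stratify=train['target'])

vec = TfidfVectorizer(min_df=3, max_features=50000, ngram_range=(1, 2))
clf = LogisticRegression(max_iter=1000, class_weight='balanced')  # the classes are heavily imbalanced
clf.fit(vec.fit_transform(X_tr), y_tr)
print('validation F1:', f1_score(y_val, clf.predict(vec.transform(X_val))))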
73070475/cell_4
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt

plat1 = cv2.imread('/kaggle/input/plat-nomer/4.jpg', 50)
edges = cv2.Canny(plat1, 100, 200)
(plt.subplot(121), plt.imshow(plat1, cmap='gray'))
(plt.title('Gambar Asli'), plt.xticks([]), plt.yticks([]))
(plt.subplot(122), plt.imshow(edges, cmap='gray'))
(plt.title('Gambar Edge'), plt.xticks([]), plt.yticks([]))
plt.show()
code
73070475/cell_6
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt

plat1 = cv2.imread('/kaggle/input/plat-nomer/4.jpg', 50)
edges = cv2.Canny(plat1, 100, 200)
(plt.subplot(121), plt.imshow(plat1, cmap='gray'))
(plt.title('Gambar Asli'), plt.xticks([]), plt.yticks([]))
(plt.subplot(122), plt.imshow(edges, cmap='gray'))
(plt.title('Gambar Edge'), plt.xticks([]), plt.yticks([]))

plat2 = cv2.imread('/kaggle/input/plat-nomer/3.png', 50)
edges = cv2.Canny(plat2, 100, 200)
(plt.subplot(121), plt.imshow(plat2, cmap='gray'))
(plt.title('Gambar Asli'), plt.xticks([]), plt.yticks([]))
(plt.subplot(122), plt.imshow(edges, cmap='gray'))
(plt.title('Gambar Edge'), plt.xticks([]), plt.yticks([]))

plat1 = cv2.imread('/kaggle/input/plat-nomer/4.jpg')
edges = cv2.Canny(plat1, 50, 255, L2gradient=False)
plt.imshow(edges, cmap='gray')
plt.show()
code
73070475/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73070475/cell_7
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt

plat1 = cv2.imread('/kaggle/input/plat-nomer/4.jpg', 50)
edges = cv2.Canny(plat1, 100, 200)
(plt.subplot(121), plt.imshow(plat1, cmap='gray'))
(plt.title('Gambar Asli'), plt.xticks([]), plt.yticks([]))
(plt.subplot(122), plt.imshow(edges, cmap='gray'))
(plt.title('Gambar Edge'), plt.xticks([]), plt.yticks([]))

plat2 = cv2.imread('/kaggle/input/plat-nomer/3.png', 50)
edges = cv2.Canny(plat2, 100, 200)
(plt.subplot(121), plt.imshow(plat2, cmap='gray'))
(plt.title('Gambar Asli'), plt.xticks([]), plt.yticks([]))
(plt.subplot(122), plt.imshow(edges, cmap='gray'))
(plt.title('Gambar Edge'), plt.xticks([]), plt.yticks([]))

plat1 = cv2.imread('/kaggle/input/plat-nomer/4.jpg')
edges = cv2.Canny(plat1, 50, 255, L2gradient=False)
plat2 = cv2.imread('/kaggle/input/plat-nomer/3.png')
edges = cv2.Canny(plat2, 50, 255, L2gradient=False)
plt.imshow(edges, cmap='gray')
plt.show()
code
73070475/cell_5
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt

plat1 = cv2.imread('/kaggle/input/plat-nomer/4.jpg', 50)
edges = cv2.Canny(plat1, 100, 200)
(plt.subplot(121), plt.imshow(plat1, cmap='gray'))
(plt.title('Gambar Asli'), plt.xticks([]), plt.yticks([]))
(plt.subplot(122), plt.imshow(edges, cmap='gray'))
(plt.title('Gambar Edge'), plt.xticks([]), plt.yticks([]))

plat2 = cv2.imread('/kaggle/input/plat-nomer/3.png', 50)
edges = cv2.Canny(plat2, 100, 200)
(plt.subplot(121), plt.imshow(plat2, cmap='gray'))
(plt.title('Gambar Asli'), plt.xticks([]), plt.yticks([]))
(plt.subplot(122), plt.imshow(edges, cmap='gray'))
(plt.title('Gambar Edge'), plt.xticks([]), plt.yticks([]))
plt.show()
code
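The 73070475 cells run cv2.Canny directly on the loaded images. A common refinement (an editorial sketch, not in the original cells) is to load in grayscale explicitly and blur before edge detection, which usually suppresses spurious edges on noisy licence-plate photos; the threshold values are illustrative.

import cv2
import matplotlib.pyplot as plt

img = cv2.imread('/kaggle/input/plat-nomer/4.jpg', cv2.IMREAD_GRAYSCALE)  # same input image as above
blurred = cv2.GaussianBlur(img, (5, 5), 0)             # smooth before differentiation
edges = cv2.Canny(blurred, 50, 150, L2gradient=True)   # L2 norm gives a more accurate gradient magnitude

plt.subplot(121), plt.imshow(blurred, cmap='gray'), plt.title('Blurred'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(edges, cmap='gray'), plt.title('Canny edges'), plt.xticks([]), plt.yticks([])
plt.show()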
325101/cell_4
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
"""
Boiler-Plate/Feature-Engineering to get frame into a testable format
"""
# df: NFL play-by-play frame loaded in cell_2 of this notebook
# (pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False))
used_downs = [1, 2, 3]
df = df[df['down'].isin(used_downs)]
valid_plays = ['Pass', 'Run', 'Sack']
df = df[df['PlayType'].isin(valid_plays)]
pass_plays = ['Pass', 'Sack']
df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int')
df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']]
X, test = train_test_split(df, test_size=0.2)
y = X.pop('is_pass')
test_y = test.pop('is_pass')
rf = RandomForestClassifier(n_estimators=10)
gb = GradientBoostingClassifier(n_estimators=10)
sv = SVC()
rf.fit(X, y)
gb.fit(X, y)
sv.fit(X, y)
code
325101/cell_6
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
"""
Boiler-Plate/Feature-Engineering to get frame into a testable format
"""
# df: NFL play-by-play frame loaded in cell_2 of this notebook
used_downs = [1, 2, 3]
df = df[df['down'].isin(used_downs)]
valid_plays = ['Pass', 'Run', 'Sack']
df = df[df['PlayType'].isin(valid_plays)]
pass_plays = ['Pass', 'Sack']
df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int')
df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']]
X, test = train_test_split(df, test_size=0.2)
y = X.pop('is_pass')
test_y = test.pop('is_pass')
rf = RandomForestClassifier(n_estimators=10)
gb = GradientBoostingClassifier(n_estimators=10)
sv = SVC()
rf.fit(X, y)
gb.fit(X, y)
sv.fit(X, y)
gb.score(test, test_y)
code
325101/cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC

df = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
df.columns
code
325101/cell_7
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
"""
Boiler-Plate/Feature-Engineering to get frame into a testable format
"""
# df: NFL play-by-play frame loaded in cell_2 of this notebook
used_downs = [1, 2, 3]
df = df[df['down'].isin(used_downs)]
valid_plays = ['Pass', 'Run', 'Sack']
df = df[df['PlayType'].isin(valid_plays)]
pass_plays = ['Pass', 'Sack']
df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int')
df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']]
X, test = train_test_split(df, test_size=0.2)
y = X.pop('is_pass')
test_y = test.pop('is_pass')
rf = RandomForestClassifier(n_estimators=10)
gb = GradientBoostingClassifier(n_estimators=10)
sv = SVC()
rf.fit(X, y)
gb.fit(X, y)
sv.fit(X, y)
sv.score(test, test_y)
code
325101/cell_5
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
"""
Boiler-Plate/Feature-Engineering to get frame into a testable format
"""
# df: NFL play-by-play frame loaded in cell_2 of this notebook
used_downs = [1, 2, 3]
df = df[df['down'].isin(used_downs)]
valid_plays = ['Pass', 'Run', 'Sack']
df = df[df['PlayType'].isin(valid_plays)]
pass_plays = ['Pass', 'Sack']
df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int')
df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']]
X, test = train_test_split(df, test_size=0.2)
y = X.pop('is_pass')
test_y = test.pop('is_pass')
rf = RandomForestClassifier(n_estimators=10)
gb = GradientBoostingClassifier(n_estimators=10)
sv = SVC()
rf.fit(X, y)
gb.fit(X, y)
sv.fit(X, y)
rf.score(test, test_y)
code
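The 325101 cells score each classifier on a single 80/20 split. A hedged alternative (not in the original notebook) is k-fold cross-validation over the same feature frame, which also gives a variance estimate for each model; cv=5 is illustrative.

from sklearn.model_selection import cross_val_score

# Assumes X (features) and y (is_pass) exactly as built in the cells above.
for name, model in [('random forest', rf), ('gradient boosting', gb), ('svm', sv)]:
    scores = cross_val_score(model, X, y, cv=5)
    print(f'{name}: {scores.mean():.3f} +/- {scores.std():.3f}')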
106214685/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
data.isna().sum()
data.Itemname.unique()
data.CustomerID.unique()
data.fillna({'Itemname': 'no', 'CustomerID': 'Others'}, inplace=True)
data.isna().sum()
data.CustomerID.unique()
sns.set_theme()
sns.set(rc={'figure.figsize': (6, 4)})
data.isnull().sum()
data.duplicated().sum()
data.loc[data.duplicated(), :]
code
106214685/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
data.isna().sum()
data.Itemname.unique()
data.CustomerID.unique()
data.fillna({'Itemname': 'no', 'CustomerID': 'Others'}, inplace=True)
data.isna().sum()
data.CustomerID.unique()
code
106214685/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
code
106214685/cell_23
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
data.isna().sum()
data.Itemname.unique()
data.CustomerID.unique()
data.fillna({'Itemname': 'no', 'CustomerID': 'Others'}, inplace=True)
data.isna().sum()
data.CustomerID.unique()
sns.set_theme()
sns.set(rc={'figure.figsize': (6, 4)})
data.isnull().sum()
data.duplicated().sum()
data.loc[data.duplicated(), :]
nv = []
for i in data.columns:
    if data[i].dtypes != 'object':
        nv.append(i)
print(nv)
code
106214685/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
data.isna().sum()
data.Itemname.unique()
data.CustomerID.unique()
data.fillna({'Itemname': 'no', 'CustomerID': 'Others'}, inplace=True)
data.isna().sum()
data.CustomerID.unique()
sns.set_theme()
sns.set(rc={'figure.figsize': (6, 4)})
data.isnull().sum()
data.duplicated().sum()
code
106214685/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
code
106214685/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
data.isna().sum()
data.Itemname.unique()
data.CustomerID.unique()
code
106214685/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106214685/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
code
106214685/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
data.isna().sum()
data.Itemname.unique()
data.CustomerID.unique()
data.fillna({'Itemname': 'no', 'CustomerID': 'Others'}, inplace=True)
data.isna().sum()
data.CustomerID.unique()
sns.set_theme()
sns.set(rc={'figure.figsize': (6, 4)})
data.isnull().sum()
code
106214685/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
code
106214685/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
data.isna().sum()
data.Itemname.unique()
data.CustomerID.unique()
data.fillna({'Itemname': 'no', 'CustomerID': 'Others'}, inplace=True)
data.isna().sum()
data.CustomerID.unique()
data.info()
code
106214685/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
data.isna().sum()
data.Itemname.unique()
data.CustomerID.unique()
data.fillna({'Itemname': 'no', 'CustomerID': 'Others'}, inplace=True)
data.isna().sum()
data.CustomerID.unique()
sns.set_theme()
sns.set(rc={'figure.figsize': (6, 4)})
print(f'This DataSet Contains {data.shape[0]} rows & {data.shape[1]} columns.')
code
106214685/cell_3
[ "text_plain_output_1.png" ]
import os
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore')

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn

import pandas as pd
import datetime
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
from pandasql import sqldf
pysqldf = lambda q: sqldf(q, globals())
import seaborn as sns
sns.set(style='ticks', color_codes=True, font_scale=1.5)
color = sns.color_palette()
sns.set_style('darkgrid')
from mpl_toolkits.mplot3d import Axes3D
import plotly as py
import plotly.graph_objs as go
py.offline.init_notebook_mode()
from scipy import stats
from scipy.stats import skew, norm, probplot, boxcox
from sklearn import preprocessing
import math
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.compose import make_column_transformer
from sklearn import set_config
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
import seaborn as dg
code
106214685/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
data.isna().sum()
data.Itemname.unique()
data.CustomerID.unique()
data.fillna({'Itemname': 'no', 'CustomerID': 'Others'}, inplace=True)
data.isna().sum()
data.CustomerID.unique()
data.describe()
code
106214685/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.columns
data.Date.unique()
data.groupby('Itemname').mean()
data[data.Itemname == '12 COLOURED PARTY BALLOONS']
data.isna().sum()
data.Itemname.unique()
data.CustomerID.unique()
data.fillna({'Itemname': 'no', 'CustomerID': 'Others'}, inplace=True)
data.isna().sum()
code
106214685/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_excel('/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx')
data.head()
code
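The 106214685 cells clean the transactions but stop before the actual market-basket step. A minimal sketch of frequent-itemset mining with mlxtend follows (editorial, not in the original notebook); the invoice column 'BillNo' and the 'Quantity' column are assumptions about this Excel file and may need adjusting, and min_support is illustrative.

from mlxtend.frequent_patterns import apriori, association_rules

# One row per invoice, one boolean column per item (True = the item appears on that bill).
basket = (data.groupby(['BillNo', 'Itemname'])['Quantity'].sum()
              .unstack(fill_value=0) > 0)

frequent = apriori(basket, min_support=0.02, use_colnames=True)
rules = association_rules(frequent, metric='lift', min_threshold=1.0)
print(rules.sort_values('lift', ascending=False).head())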
32068762/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd

np.random.seed(2019)
os.environ['PYTHONHASHSEED'] = '2019'
plt.style.use('seaborn-ticks')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['font.size'] = 11.0
plt.rcParams['figure.figsize'] = (9, 6)
pd.set_option('display.max_colwidth', 1000)

population_raw = pd.read_csv('/kaggle/input/covid19-global-forecasting-locations-population/locations_population.csv')
pd.DataFrame(population_raw.isnull().sum()).T
df = population_raw.copy()
df = df.rename({'Province.State': 'Province', 'Country.Region': 'Country'}, axis=1)
cols = ['Country', 'Province', 'Population']
df = df.loc[:, cols].fillna('-')
df.loc[df['Country'] == df['Province'], 'Province'] = '-'
_total_df = df.loc[df['Province'] != '-', :].groupby('Country').sum()
_total_df = _total_df.reset_index().assign(Province='-')
df = pd.concat([df, _total_df], axis=0, sort=True)
df = df.drop_duplicates(subset=['Country', 'Province'], keep='first')
global_value = df.loc[df['Province'] == '-', 'Population'].sum()
df = df.append(pd.Series(['Global', '-', global_value], index=cols), ignore_index=True)
df = df.sort_values('Population', ascending=False).reset_index(drop=True)
df = df.loc[:, cols]
population_df = df.copy()
df = population_df.loc[population_df['Province'] == '-', :]
population_dict = df.set_index('Country').to_dict()['Population']
population_dict

pyramid_csv_list = list()
for dirname, _, filenames in os.walk('/kaggle/input/population-pyramid-2019/'):
    for filename in filenames:
        name = os.path.join(dirname, filename)
        df = pd.read_csv(name)
        df['Country'], df['Year'], _ = filename.replace('.', '-').split('-')
        pyramid_csv_list.append(df)
pyramid_raw = pd.concat(pyramid_csv_list, sort=True)
pyramid_raw.head()
code
32068762/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd

np.random.seed(2019)
os.environ['PYTHONHASHSEED'] = '2019'
plt.style.use('seaborn-ticks')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['font.size'] = 11.0
plt.rcParams['figure.figsize'] = (9, 6)
pd.set_option('display.max_colwidth', 1000)

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068762/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd

np.random.seed(2019)
os.environ['PYTHONHASHSEED'] = '2019'
plt.style.use('seaborn-ticks')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['font.size'] = 11.0
plt.rcParams['figure.figsize'] = (9, 6)
pd.set_option('display.max_colwidth', 1000)

population_raw = pd.read_csv('/kaggle/input/covid19-global-forecasting-locations-population/locations_population.csv')
pd.DataFrame(population_raw.isnull().sum()).T
df = population_raw.copy()
df = df.rename({'Province.State': 'Province', 'Country.Region': 'Country'}, axis=1)
cols = ['Country', 'Province', 'Population']
df = df.loc[:, cols].fillna('-')
df.loc[df['Country'] == df['Province'], 'Province'] = '-'
_total_df = df.loc[df['Province'] != '-', :].groupby('Country').sum()
_total_df = _total_df.reset_index().assign(Province='-')
df = pd.concat([df, _total_df], axis=0, sort=True)
df = df.drop_duplicates(subset=['Country', 'Province'], keep='first')
global_value = df.loc[df['Province'] == '-', 'Population'].sum()
df = df.append(pd.Series(['Global', '-', global_value], index=cols), ignore_index=True)
df = df.sort_values('Population', ascending=False).reset_index(drop=True)
df = df.loc[:, cols]
population_df = df.copy()
df = population_df.loc[population_df['Province'] == '-', :]
population_dict = df.set_index('Country').to_dict()['Population']
population_dict

pyramid_csv_list = list()
for dirname, _, filenames in os.walk('/kaggle/input/population-pyramid-2019/'):
    for filename in filenames:
        name = os.path.join(dirname, filename)
        df = pd.read_csv(name)
        df['Country'], df['Year'], _ = filename.replace('.', '-').split('-')
        pyramid_csv_list.append(df)
pyramid_raw = pd.concat(pyramid_csv_list, sort=True)

df = pyramid_raw.copy()
df['Country'] = df['Country'].replace({'United States of America': 'US', 'United Kingdom': 'UK'})
_male = [349432556, 342927576, 331497486, 316642222, 308286775, 306059387, 309236984, 276447037, 249389688, 241232876, 222609691, 192215395, 157180267, 128939392, 87185982, 54754941, 33648953, 15756942, 5327866, 1077791, 124144]
_female = [328509234, 321511867, 309769906, 295553758, 289100903, 288632766, 296293748, 268371754, 244399176, 238133281, 223162982, 195633743, 164961323, 140704320, 101491347, 69026831, 48281201, 26429329, 11352182, 3055845, 449279]
_df = pd.DataFrame({'Age': df['Age'].unique(), 'Country': 'Global', 'F': _female, 'M': _male, 'Year': 2019})
df = pd.concat([df, _df], axis=0, ignore_index=True, sort=True)
_male = [307116, 304759, 296771, 270840, 291723, 376952, 343311, 315086, 312017, 336452, 342117, 306949, 279609, 265511, 273061, 195029, 113166, 61775, 26170, 6768, 415]
_female = [290553, 288817, 280944, 257677, 274760, 361526, 330153, 300752, 301288, 327453, 331458, 300084, 280009, 272149, 286879, 212480, 143654, 97633, 52624, 18130, 1771]
_df = pd.DataFrame({'Age': df['Age'].unique(), 'Country': 'Sweden', 'F': _female, 'M': _male, 'Year': 2019})
df = pd.concat([df, _df], axis=0, ignore_index=True, sort=True)
_male = [5534962, 5820604, 5538414, 5383822, 5149849, 4710777, 4061897, 3581091, 3237426, 2832825, 2482953, 2015857, 1556935, 1082875, 668107, 364200, 199400, 73508, 17327, 3035, 208]
_female = [5240508, 5541514, 5273495, 5029137, 4896316, 4589506, 3982681, 3544279, 3191565, 2825286, 2521463, 2112380, 1714689, 1285782, 895866, 567282, 360751, 155294, 57969, 13376, 1411]
_df = pd.DataFrame({'Age': df['Age'].unique(), 'Country': 'Philippines', 'F': _female, 'M': _male, 'Year': 2019})
df = pd.concat([df, _df], axis=0, ignore_index=True, sort=True)
df['Population'] = df['F'] + df['M']
df = df.pivot_table(index='Age', columns=['Country'], values='Population', aggfunc='last')
df = df.astype(np.int64).reset_index().rename({'Age': 'Age_bin'}, axis=1)
series = df['Age_bin'].str.replace('+', '-122')
df[['Age_first', 'Age_last']] = series.str.split('-', expand=True).astype(np.int64)
df = df.drop('Age_bin', axis=1)
series = df['Age_last']
df = df.apply(lambda x: x[:-2] / (x[-1] - x[-2] + 1), axis=1)
df['Age'] = series
df = pd.merge(df, pd.DataFrame({'Age': np.arange(0, 123, 1)}), on='Age', how='right', sort=True)
df = df.fillna(method='bfill').astype(np.int64)
df = df.set_index('Age')
pyramid_df = df.copy()
pyramid_df
code
32068762/cell_2
[ "text_plain_output_1.png" ]
from datetime import datetime
from datetime import datetime

time_format = '%d%b%Y %H:%M'
datetime.now().strftime(time_format)
code
32068762/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd

np.random.seed(2019)
os.environ['PYTHONHASHSEED'] = '2019'
plt.style.use('seaborn-ticks')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['font.size'] = 11.0
plt.rcParams['figure.figsize'] = (9, 6)
pd.set_option('display.max_colwidth', 1000)

population_raw = pd.read_csv('/kaggle/input/covid19-global-forecasting-locations-population/locations_population.csv')
pd.DataFrame(population_raw.isnull().sum()).T
df = population_raw.copy()
df = df.rename({'Province.State': 'Province', 'Country.Region': 'Country'}, axis=1)
cols = ['Country', 'Province', 'Population']
df = df.loc[:, cols].fillna('-')
df.loc[df['Country'] == df['Province'], 'Province'] = '-'
_total_df = df.loc[df['Province'] != '-', :].groupby('Country').sum()
_total_df = _total_df.reset_index().assign(Province='-')
df = pd.concat([df, _total_df], axis=0, sort=True)
df = df.drop_duplicates(subset=['Country', 'Province'], keep='first')
global_value = df.loc[df['Province'] == '-', 'Population'].sum()
df = df.append(pd.Series(['Global', '-', global_value], index=cols), ignore_index=True)
df = df.sort_values('Population', ascending=False).reset_index(drop=True)
df = df.loc[:, cols]
population_df = df.copy()
df = population_df.loc[population_df['Province'] == '-', :]
population_dict = df.set_index('Country').to_dict()['Population']
population_dict
code
32068762/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd

np.random.seed(2019)
os.environ['PYTHONHASHSEED'] = '2019'
plt.style.use('seaborn-ticks')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['font.size'] = 11.0
plt.rcParams['figure.figsize'] = (9, 6)
pd.set_option('display.max_colwidth', 1000)

population_raw = pd.read_csv('/kaggle/input/covid19-global-forecasting-locations-population/locations_population.csv')
pd.DataFrame(population_raw.isnull().sum()).T
df = population_raw.copy()
df = df.rename({'Province.State': 'Province', 'Country.Region': 'Country'}, axis=1)
cols = ['Country', 'Province', 'Population']
df = df.loc[:, cols].fillna('-')
df.loc[df['Country'] == df['Province'], 'Province'] = '-'
_total_df = df.loc[df['Province'] != '-', :].groupby('Country').sum()
_total_df = _total_df.reset_index().assign(Province='-')
df = pd.concat([df, _total_df], axis=0, sort=True)
df = df.drop_duplicates(subset=['Country', 'Province'], keep='first')
global_value = df.loc[df['Province'] == '-', 'Population'].sum()
df = df.append(pd.Series(['Global', '-', global_value], index=cols), ignore_index=True)
df = df.sort_values('Population', ascending=False).reset_index(drop=True)
df = df.loc[:, cols]
population_df = df.copy()
population_df.head()
code
32068762/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd

np.random.seed(2019)
os.environ['PYTHONHASHSEED'] = '2019'
plt.style.use('seaborn-ticks')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['font.size'] = 11.0
plt.rcParams['figure.figsize'] = (9, 6)
pd.set_option('display.max_colwidth', 1000)

population_raw = pd.read_csv('/kaggle/input/covid19-global-forecasting-locations-population/locations_population.csv')
population_raw.head()
code
32068762/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd

np.random.seed(2019)
os.environ['PYTHONHASHSEED'] = '2019'
plt.style.use('seaborn-ticks')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['font.size'] = 11.0
plt.rcParams['figure.figsize'] = (9, 6)
pd.set_option('display.max_colwidth', 1000)

population_raw = pd.read_csv('/kaggle/input/covid19-global-forecasting-locations-population/locations_population.csv')
pd.DataFrame(population_raw.isnull().sum()).T
code
32068762/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd

np.random.seed(2019)
os.environ['PYTHONHASHSEED'] = '2019'
plt.style.use('seaborn-ticks')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['font.size'] = 11.0
plt.rcParams['figure.figsize'] = (9, 6)
pd.set_option('display.max_colwidth', 1000)

population_raw = pd.read_csv('/kaggle/input/covid19-global-forecasting-locations-population/locations_population.csv')
pd.DataFrame(population_raw.isnull().sum()).T
df = population_raw.copy()
df = df.rename({'Province.State': 'Province', 'Country.Region': 'Country'}, axis=1)
cols = ['Country', 'Province', 'Population']
df = df.loc[:, cols].fillna('-')
df.loc[df['Country'] == df['Province'], 'Province'] = '-'
_total_df = df.loc[df['Province'] != '-', :].groupby('Country').sum()
_total_df = _total_df.reset_index().assign(Province='-')
df = pd.concat([df, _total_df], axis=0, sort=True)
df = df.drop_duplicates(subset=['Country', 'Province'], keep='first')
global_value = df.loc[df['Province'] == '-', 'Population'].sum()
df = df.append(pd.Series(['Global', '-', global_value], index=cols), ignore_index=True)
df = df.sort_values('Population', ascending=False).reset_index(drop=True)
df = df.loc[:, cols]
population_df = df.copy()
df = population_df.loc[population_df['Province'] == '-', :]
population_dict = df.set_index('Country').to_dict()['Population']
population_dict

pyramid_csv_list = list()
for dirname, _, filenames in os.walk('/kaggle/input/population-pyramid-2019/'):
    for filename in filenames:
        name = os.path.join(dirname, filename)
        df = pd.read_csv(name)
        df['Country'], df['Year'], _ = filename.replace('.', '-').split('-')
        pyramid_csv_list.append(df)
pyramid_raw = pd.concat(pyramid_csv_list, sort=True)
pyramid_raw['Country'].unique()
code
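The 32068762 cells build population_dict and an age pyramid, but this dump does not show them being applied. A hypothetical use (illustration data only, not from the notebook) is per-capita normalisation of case counts:

import pandas as pd

# `population_dict` comes from cell_19 above; the `cases` frame here is made up for illustration.
cases = pd.DataFrame({'Country': ['Italy', 'Japan'], 'Confirmed': [120000, 15000]})
cases['Population'] = cases['Country'].map(population_dict)
cases['Confirmed_per_100k'] = cases['Confirmed'] / cases['Population'] * 1e5
print(cases)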
17108052/cell_9
[ "image_output_1.png" ]
import pandas as pd import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) train.dtypes
code
17108052/cell_6
[ "image_output_1.png" ]
import pandas as pd import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) train['Price'].describe()
code
17108052/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) print(train_csv.columns)
code
17108052/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) train.dtypes corcolm = ['SMA', 'MACD', 'MACD_Hist', 'SlowD', 'FastK', 'RSI', 'FatD', 'FatK', 'WILLR', 'ADX', 'ADXR', 'PPO', 'MOM', 'BOP', 'CCI', 'CMO', 'ROC', 'ROCR', 'Aroon Down', 'Aroon Up', 'MFI', 'TRIX', 'ULTOSC', 'DX', 'MINUS_DI', 'PLUS_DI', 'MINUS_DM', 'PLUS_DM', 'Real Lower Band', 'MIDPOINT', 'MIDPRICE', 'SAR', 'ATR', 'Chaikin A/D', 'ADOSC', 'OBV', 'HT_TRENDLINE', 'LEAD SINE', 'SINE', 'TRENDMODE', 'DCPERIOD', 'HT_DCPHASE', 'PHASE', 'QUADRATURE', 'Price'] corrmat = train[corcolm].corr() f, ax = plt.subplots(figsize=(15, 15)) sns.heatmap(corrmat, vmax=0.8, square=True)
code
17108052/cell_7
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) sns.distplot(train['Price'])
code
17108052/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) train.dtypes var = 'Aroon Up' data = pd.concat([train['Price'], train[var]], axis=1) corcolm = ['SMA', 'MACD', 'MACD_Hist', 'SlowD', 'FastK', 'RSI', 'FatD', 'FatK', 'WILLR', 'ADX', 'ADXR', 'PPO', 'MOM', 'BOP', 'CCI', 'CMO', 'ROC', 'ROCR', 'Aroon Down', 'Aroon Up', 'MFI', 'TRIX', 'ULTOSC', 'DX', 'MINUS_DI', 'PLUS_DI', 'MINUS_DM', 'PLUS_DM', 'Real Lower Band', 'MIDPOINT', 'MIDPRICE', 'SAR', 'ATR', 'Chaikin A/D', 'ADOSC', 'OBV', 'HT_TRENDLINE', 'LEAD SINE', 'SINE', 'TRENDMODE', 'DCPERIOD', 'HT_DCPHASE', 'PHASE', 'QUADRATURE', 'Price']; #correlation matrix corrmat = train[corcolm].corr() f, ax = plt.subplots(figsize=(15, 15)) sns.heatmap(corrmat, vmax=.8, square=True); train = train[corcolm] total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) #box plot overallqual/saleprice var = 'Fuel_Type' data = pd.concat([train['Price'], train[var]], axis=1) f, ax = plt.subplots(figsize=(8, 6)) fig = sns.boxplot(x=var, y="Price", data=data) fig.axis(ymin=0, ymax=160); #correlation matrix corrmat = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(corrmat, vmax=.8, square=True); total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) train = train.drop(missing_data[missing_data['Total'] > 36].index, 1) train = train.drop(train.loc[train['Engine'].isnull()].index) train = train.drop(train.loc[train['Mileage'].isnull()].index) train.isnull().sum().max()
code
17108052/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) print('Skewness: %f' % train['Price'].skew()) print('Kurtosis: %f' % train['Price'].kurt())
code
17108052/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) train.dtypes var = 'Aroon Up' data = pd.concat([train['Price'], train[var]], axis=1) corcolm = ['SMA', 'MACD', 'MACD_Hist', 'SlowD', 'FastK', 'RSI', 'FatD', 'FatK', 'WILLR', 'ADX', 'ADXR', 'PPO', 'MOM', 'BOP', 'CCI', 'CMO', 'ROC', 'ROCR', 'Aroon Down', 'Aroon Up', 'MFI', 'TRIX', 'ULTOSC', 'DX', 'MINUS_DI', 'PLUS_DI', 'MINUS_DM', 'PLUS_DM', 'Real Lower Band', 'MIDPOINT', 'MIDPRICE', 'SAR', 'ATR', 'Chaikin A/D', 'ADOSC', 'OBV', 'HT_TRENDLINE', 'LEAD SINE', 'SINE', 'TRENDMODE', 'DCPERIOD', 'HT_DCPHASE', 'PHASE', 'QUADRATURE', 'Price']; #correlation matrix corrmat = train[corcolm].corr() f, ax = plt.subplots(figsize=(15, 15)) sns.heatmap(corrmat, vmax=.8, square=True); train = train[corcolm] total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) #box plot overallqual/saleprice var = 'Fuel_Type' data = pd.concat([train['Price'], train[var]], axis=1) f, ax = plt.subplots(figsize=(8, 6)) fig = sns.boxplot(x=var, y="Price", data=data) fig.axis(ymin=0, ymax=160); corrmat = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(corrmat, vmax=0.8, square=True)
code
17108052/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) train.dtypes var = 'Aroon Up' data = pd.concat([train['Price'], train[var]], axis=1) corcolm = ['SMA', 'MACD', 'MACD_Hist', 'SlowD', 'FastK', 'RSI', 'FatD', 'FatK', 'WILLR', 'ADX', 'ADXR', 'PPO', 'MOM', 'BOP', 'CCI', 'CMO', 'ROC', 'ROCR', 'Aroon Down', 'Aroon Up', 'MFI', 'TRIX', 'ULTOSC', 'DX', 'MINUS_DI', 'PLUS_DI', 'MINUS_DM', 'PLUS_DM', 'Real Lower Band', 'MIDPOINT', 'MIDPRICE', 'SAR', 'ATR', 'Chaikin A/D', 'ADOSC', 'OBV', 'HT_TRENDLINE', 'LEAD SINE', 'SINE', 'TRENDMODE', 'DCPERIOD', 'HT_DCPHASE', 'PHASE', 'QUADRATURE', 'Price']; #correlation matrix corrmat = train[corcolm].corr() f, ax = plt.subplots(figsize=(15, 15)) sns.heatmap(corrmat, vmax=.8, square=True); train = train[corcolm] total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) #box plot overallqual/saleprice var = 'Fuel_Type' data = pd.concat([train['Price'], train[var]], axis=1) f, ax = plt.subplots(figsize=(8, 6)) fig = sns.boxplot(x=var, y="Price", data=data) fig.axis(ymin=0, ymax=160); #correlation matrix corrmat = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(corrmat, vmax=.8, square=True); k = 7 cols = corrmat.nlargest(k, 'Price')['Price'].index cm = np.corrcoef(train[cols].values.T) sns.set(font_scale=1.25) hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values) plt.show()
code
17108052/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) train.dtypes var = 'Aroon Up' data = pd.concat([train['Price'], train[var]], axis=1) corcolm = ['SMA', 'MACD', 'MACD_Hist', 'SlowD', 'FastK', 'RSI', 'FatD', 'FatK', 'WILLR', 'ADX', 'ADXR', 'PPO', 'MOM', 'BOP', 'CCI', 'CMO', 'ROC', 'ROCR', 'Aroon Down', 'Aroon Up', 'MFI', 'TRIX', 'ULTOSC', 'DX', 'MINUS_DI', 'PLUS_DI', 'MINUS_DM', 'PLUS_DM', 'Real Lower Band', 'MIDPOINT', 'MIDPRICE', 'SAR', 'ATR', 'Chaikin A/D', 'ADOSC', 'OBV', 'HT_TRENDLINE', 'LEAD SINE', 'SINE', 'TRENDMODE', 'DCPERIOD', 'HT_DCPHASE', 'PHASE', 'QUADRATURE', 'Price']; #correlation matrix corrmat = train[corcolm].corr() f, ax = plt.subplots(figsize=(15, 15)) sns.heatmap(corrmat, vmax=.8, square=True); train = train[corcolm] total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) #box plot overallqual/saleprice var = 'Fuel_Type' data = pd.concat([train['Price'], train[var]], axis=1) f, ax = plt.subplots(figsize=(8, 6)) fig = sns.boxplot(x=var, y="Price", data=data) fig.axis(ymin=0, ymax=160); #correlation matrix corrmat = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(corrmat, vmax=.8, square=True); total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) missing_data.head(20)
code
17108052/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) train.dtypes var = 'Aroon Up' data = pd.concat([train['Price'], train[var]], axis=1) corcolm = ['SMA', 'MACD', 'MACD_Hist', 'SlowD', 'FastK', 'RSI', 'FatD', 'FatK', 'WILLR', 'ADX', 'ADXR', 'PPO', 'MOM', 'BOP', 'CCI', 'CMO', 'ROC', 'ROCR', 'Aroon Down', 'Aroon Up', 'MFI', 'TRIX', 'ULTOSC', 'DX', 'MINUS_DI', 'PLUS_DI', 'MINUS_DM', 'PLUS_DM', 'Real Lower Band', 'MIDPOINT', 'MIDPRICE', 'SAR', 'ATR', 'Chaikin A/D', 'ADOSC', 'OBV', 'HT_TRENDLINE', 'LEAD SINE', 'SINE', 'TRENDMODE', 'DCPERIOD', 'HT_DCPHASE', 'PHASE', 'QUADRATURE', 'Price']; #correlation matrix corrmat = train[corcolm].corr() f, ax = plt.subplots(figsize=(15, 15)) sns.heatmap(corrmat, vmax=.8, square=True); train = train[corcolm] total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) var = 'Fuel_Type' data = pd.concat([train['Price'], train[var]], axis=1) f, ax = plt.subplots(figsize=(8, 6)) fig = sns.boxplot(x=var, y='Price', data=data) fig.axis(ymin=0, ymax=160)
code
17108052/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) train.dtypes var = 'Aroon Up' data = pd.concat([train['Price'], train[var]], axis=1) data.plot.scatter(x=var, y='Price', ylim=(0, 702))
code
17108052/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) train.dtypes var = 'Aroon Up' data = pd.concat([train['Price'], train[var]], axis=1) corcolm = ['SMA', 'MACD', 'MACD_Hist', 'SlowD', 'FastK', 'RSI', 'FatD', 'FatK', 'WILLR', 'ADX', 'ADXR', 'PPO', 'MOM', 'BOP', 'CCI', 'CMO', 'ROC', 'ROCR', 'Aroon Down', 'Aroon Up', 'MFI', 'TRIX', 'ULTOSC', 'DX', 'MINUS_DI', 'PLUS_DI', 'MINUS_DM', 'PLUS_DM', 'Real Lower Band', 'MIDPOINT', 'MIDPRICE', 'SAR', 'ATR', 'Chaikin A/D', 'ADOSC', 'OBV', 'HT_TRENDLINE', 'LEAD SINE', 'SINE', 'TRENDMODE', 'DCPERIOD', 'HT_DCPHASE', 'PHASE', 'QUADRATURE', 'Price']; #correlation matrix corrmat = train[corcolm].corr() f, ax = plt.subplots(figsize=(15, 15)) sns.heatmap(corrmat, vmax=.8, square=True); train = train[corcolm] total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) missing_data.head(45)
code
17108052/cell_5
[ "image_output_1.png" ]
import pandas as pd import warnings import numpy as np import pandas as pd import torch from torch.utils.data import TensorDataset, DataLoader, Dataset import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import torch.optim as optim from torch.optim import lr_scheduler from sklearn.metrics import accuracy_score import json from sklearn.tree import DecisionTreeRegressor from sklearn import tree from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.impute import SimpleImputer from collections import Counter from sklearn.preprocessing import LabelEncoder, scale from sklearn.datasets import load_boston from sklearn.metrics import r2_score from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.metrics import mean_squared_error from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import torch.utils.data from sklearn.model_selection import train_test_split import torch import matplotlib import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_columns', 500) train_csv = pd.read_csv('../input/train.csv', keep_default_na=False) test_csv = pd.read_csv('../input/test.csv', keep_default_na=False) def preprocess_data(dataset): dataset = dataset.replace('NaN', '') for col in list(dataset.columns): if col != 'Company ' and col != 'Date': dataset[col] = pd.to_numeric(dataset[col]) dataset = dataset.drop(['ID'], axis=1) return dataset train = preprocess_data(train_csv) test = preprocess_data(test_csv) display(train.head())
code
73063106/cell_21
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer
code
73063106/cell_9
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.head(5)
code
73063106/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer.drop(['customer', 'order'], inplace=True, axis=1) import matplotlib.pyplot as plt df_corr = df_customer.corr() fig, ax = plt.subplots(figsize=(16, 16)) ax = sns.heatmap(df_corr, annot=True)
code
73063106/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape
code
73063106/cell_34
[ "image_output_1.png" ]
from sklearn.cluster import KMeans from sklearn.cluster import KMeans from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer.drop(['customer', 'order'], inplace=True, axis=1) scaler = StandardScaler() scaled_features = scaler.fit_transform(df_customer) import matplotlib.pyplot as plt df_corr = df_customer.corr() fig, ax = plt.subplots(figsize=(16, 16)) ax = sns.heatmap(df_corr, annot=True) dff = scaled_features from sklearn.cluster import KMeans wcss = [] for k in range(2, 11): kmeanModel = KMeans(n_clusters=k, init='k-means++') kmeanModel.fit(dff) wcss.append(kmeanModel.inertia_) silhouette_coefficients = [] for k in range(2, 11): kmeans = KMeans(n_clusters=k, init='random', n_init=10, max_iter=300, random_state=42) kmeans.fit(dff) score = silhouette_score(scaled_features, kmeans.labels_) silhouette_coefficients.append(score) from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=7, random_state=0) df_customer['cluster'] = kmeans.fit_predict(df_customer[['food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent']])
code
73063106/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer.drop(['customer', 'order'], inplace=True, axis=1)
code
73063106/cell_33
[ "image_output_1.png" ]
from kneed import KneeLocator from sklearn.cluster import KMeans from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer.drop(['customer', 'order'], inplace=True, axis=1) scaler = StandardScaler() scaled_features = scaler.fit_transform(df_customer) import matplotlib.pyplot as plt df_corr = df_customer.corr() fig, ax = plt.subplots(figsize=(16, 16)) ax = sns.heatmap(df_corr, annot=True) dff = scaled_features from sklearn.cluster import KMeans wcss = [] for k in range(2, 11): kmeanModel = KMeans(n_clusters=k, init='k-means++') kmeanModel.fit(dff) wcss.append(kmeanModel.inertia_) silhouette_coefficients = [] for k in range(2, 11): kmeans = KMeans(n_clusters=k, init='random', n_init=10, max_iter=300, random_state=42) kmeans.fit(dff) score = silhouette_score(scaled_features, kmeans.labels_) silhouette_coefficients.append(score) from kneed import KneeLocator kl = KneeLocator(x=range(2, 11), y=silhouette_coefficients, curve='convex', direction='decreasing') kl.elbow kl = KneeLocator(x=range(2, 11), y=wcss, curve='convex', direction='decreasing') kl.elbow
code
73063106/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum()
code
73063106/cell_29
[ "text_html_output_1.png" ]
pip install kneed
code
73063106/cell_39
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer.drop(['customer', 'order'], inplace=True, axis=1) import matplotlib.pyplot as plt df_corr = df_customer.corr() fig, ax = plt.subplots(figsize=(16, 16)) ax = sns.heatmap(df_corr, annot=True) df_customer
code
73063106/cell_26
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans from sklearn.cluster import KMeans from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer.drop(['customer', 'order'], inplace=True, axis=1) scaler = StandardScaler() scaled_features = scaler.fit_transform(df_customer) import matplotlib.pyplot as plt df_corr = df_customer.corr() fig, ax = plt.subplots(figsize=(16, 16)) ax = sns.heatmap(df_corr, annot=True) dff = scaled_features from sklearn.cluster import KMeans wcss = [] for k in range(2, 11): kmeanModel = KMeans(n_clusters=k, init='k-means++') kmeanModel.fit(dff) wcss.append(kmeanModel.inertia_) plt.figure(figsize=(16, 8)) plt.plot(range(2, 11), wcss, 'bx-') plt.xlabel('k') plt.ylabel('Distortion') plt.title('The Elbow Method showing the optimal k') plt.show()
code
73063106/cell_41
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer.drop(['customer', 'order'], inplace=True, axis=1) import matplotlib.pyplot as plt df_corr = df_customer.corr() fig, ax = plt.subplots(figsize=(16, 16)) ax = sns.heatmap(df_corr, annot=True) temp_df = df_customer[df_customer.cluster == 1] dff = df_customer.drop('cluster', axis=1) df_customer
code
73063106/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean()
code
73063106/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
73063106/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.info()
code
73063106/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates())
code
73063106/cell_28
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer.drop(['customer', 'order'], inplace=True, axis=1) scaler = StandardScaler() scaled_features = scaler.fit_transform(df_customer) import matplotlib.pyplot as plt df_corr = df_customer.corr() fig, ax = plt.subplots(figsize=(16, 16)) ax = sns.heatmap(df_corr, annot=True) dff = scaled_features from sklearn.cluster import KMeans wcss = [] for k in range(2, 11): kmeanModel = KMeans(n_clusters=k, init='k-means++') kmeanModel.fit(dff) wcss.append(kmeanModel.inertia_) silhouette_coefficients = [] for k in range(2, 11): kmeans = KMeans(n_clusters=k, init='random', n_init=10, max_iter=300, random_state=42) kmeans.fit(dff) score = silhouette_score(scaled_features, kmeans.labels_) silhouette_coefficients.append(score) plt.style.use('fivethirtyeight') plt.plot(range(2, 11), silhouette_coefficients) plt.xticks(range(2, 11)) plt.xlabel('Number of Clusters') plt.ylabel('Silhouette Coefficient') plt.show()
code
73063106/cell_15
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] df_weekends = df[(df['weekday'] == 7) | (df['weekday'] == 6)] df_weekdays = df[(df['weekday'] != 7) & (df['weekday'] != 6)] df_weekdays.shape[0]
code
73063106/cell_17
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df)
code
73063106/cell_31
[ "application_vnd.jupyter.stderr_output_1.png" ]
from kneed import KneeLocator from sklearn.cluster import KMeans from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer.drop(['customer', 'order'], inplace=True, axis=1) scaler = StandardScaler() scaled_features = scaler.fit_transform(df_customer) import matplotlib.pyplot as plt df_corr = df_customer.corr() fig, ax = plt.subplots(figsize=(16, 16)) ax = sns.heatmap(df_corr, annot=True) dff = scaled_features from sklearn.cluster import KMeans wcss = [] for k in range(2, 11): kmeanModel = KMeans(n_clusters=k, init='k-means++') kmeanModel.fit(dff) wcss.append(kmeanModel.inertia_) silhouette_coefficients = [] for k in range(2, 11): kmeans = KMeans(n_clusters=k, init='random', n_init=10, max_iter=300, random_state=42) kmeans.fit(dff) score = silhouette_score(scaled_features, kmeans.labels_) silhouette_coefficients.append(score) from kneed import KneeLocator kl = KneeLocator(x=range(2, 11), y=silhouette_coefficients, curve='convex', direction='decreasing') kl.elbow
code
73063106/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] df_weekends = df[(df['weekday'] == 7) | (df['weekday'] == 6)] df_weekdays = df[(df['weekday'] != 7) & (df['weekday'] != 6)] df_weekends.shape[0]
code
73063106/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.describe()
code
73063106/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()]
code
73063106/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns
code
73063106/cell_36
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/ulabox-orders-with-categories-partials-2017/ulabox_orders_with_categories_partials_2017.csv') df.shape df.columns df.isnull().sum() df.columns = ['customer', 'order', 'total_items', 'discount_percent', 'weekday', 'hour', 'food_percent', 'fresh_percent', 'drinks_percent', 'home_percent', 'beauty_percent', 'health_percent', 'baby_percent', 'pets_percent'] df.discount_percent.mean() df[df['total_items'] == df.total_items.max()] len(df) - len(df.drop_duplicates()) df_customer = df.drop_duplicates('customer', keep='first') df_customer.drop(['customer', 'order'], inplace=True, axis=1) import matplotlib.pyplot as plt df_corr = df_customer.corr() fig, ax = plt.subplots(figsize=(16, 16)) ax = sns.heatmap(df_corr, annot=True) u_labels = df_customer['cluster'].unique() print(u_labels)
code
34133142/cell_20
[ "text_plain_output_1.png" ]
from catboost import CatBoostRegressor from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.tokenize import RegexpTokenizer from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer from sklearn.preprocessing import OneHotEncoder import gensim import nltk import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/airbnbdm/train.csv') test = pd.read_csv('/kaggle/input/airbnbdm/test.csv') submission = pd.DataFrame() submission['Id'] = test['id'].copy() df.columns verification_methods = ['phone', 'email', 'reviews', 'government_id', 'jumio', 'offline_government_id', 'kba', 'facebook', 'selfie', 'work_email', 'identity_manual', 'google', 'manual_offline', 'manual_online', 'sent_id', 'None', 'weibo', 'zhima_selfie', 'sesame_offline', 'sesame'] def clean(df, test): cols_to_drop = ['id', 'name', 'summary', 'space', 'description', 'experiences_offered', 'neighborhood_overview', 'notes', 'access', 'interaction', 'house_rules', 'host_id', 'host_name', 'host_location', 'host_about', 'host_neighbourhood', 'neighbourhood_group_cleansed', 'city', 'state', 'zipcode', 'country_code', 'square_feet', 'host_listings_count', 'reviews_per_month', 'first_review', 'last_review', 'beds', 'host_verifications'] df['extra_people'] = df['extra_people'].str.strip('$').apply(lambda x: float(x)) test['extra_people'] = test['extra_people'].str.strip('$').apply(lambda x: float(x)) df['host_acceptance_rate'] = df['host_acceptance_rate'].fillna('0%').str.strip('%').apply(lambda x: float(x)) test['host_acceptance_rate'] = test['host_acceptance_rate'].fillna('0%').str.strip('%').apply(lambda x: float(x)) df['host_response_rate'] = df['host_response_rate'].fillna('0%').str.strip('%').apply(lambda x: float(x)) test['host_response_rate'] = test['host_response_rate'].fillna('0%').str.strip('%').apply(lambda x: float(x)) df['host_in_neighbourhood'] = (df['host_neighbourhood'] == df['neighbourhood_group_cleansed']).fillna(False) test['host_in_neighbourhood'] = (test['host_neighbourhood'] == test['neighbourhood_group_cleansed']).fillna(False) df['host_verifications'].str.strip('[').str.strip(']')[df['host_verifications'].str.strip('[').str.strip(']') == ''] = '[None]' test['host_verifications'].str.strip('[').str.strip(']')[test['host_verifications'].str.strip('[').str.strip(']') == ''] = '[None]' for method in verification_methods: df['is_' + method] = df['host_verifications'].str.contains(method) test['is_' + method] = test['host_verifications'].str.contains(method) def helper(x): if pd.isnull(x): return 0 if x == 'within an hour': return 4 if x == 'within a few hours': return 3 if x == 'within a day': return 2 if x == 'a few days or more': return 1 return 0 df['host_response_time'] = df['host_response_time'].apply(lambda x: helper(x)) test['host_response_time'] = test['host_response_time'].apply(lambda x: helper(x)) stored_ix = df[~df['description'].isnull()].index stored_ix2 = test[~test['description'].isnull()].index num_non_null_df = df[~df['description'].isnull()].shape[0] stemmer = SnowballStemmer('english') def preprocess_text(corpus): """Takes a corpus in list format and applies basic preprocessing steps of word tokenization, removing of english stop words, lower case and lemmatization.""" processed_corpus = [] english_words = set(nltk.corpus.words.words()) english_stopwords = set(stopwords.words('english')) wordnet_lemmatizer = WordNetLemmatizer() 
tokenizer = RegexpTokenizer('[\\w|!]+') for row in corpus: word_tokens = tokenizer.tokenize(row) word_tokens_lower = [t.lower() for t in word_tokens] word_tokens_lower_english = [t for t in word_tokens_lower if t in english_words or not t.isalpha()] word_tokens_no_stops = [t for t in word_tokens_lower_english if not t in english_stopwords] word_tokens_no_stops_lemmatized = [wordnet_lemmatizer.lemmatize(t) for t in word_tokens_no_stops] processed_corpus.append(word_tokens_no_stops_lemmatized) return processed_corpus stemmed_stopped = preprocess_text(df['description'].dropna().append(test['description'].dropna(), ignore_index=True)) dictionary = gensim.corpora.Dictionary(stemmed_stopped) dictionary.filter_extremes(no_below=15, keep_n=100000) bow_corpus = [dictionary.doc2bow(doc) for doc in stemmed_stopped] lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=3, id2word=dictionary, passes=2, workers=4) results = {0: [], 1: [], 2: []} for doc in bow_corpus[0:num_non_null_df]: done = [] for i in lda_model[doc]: results[i[0]].append(i[1]) done.append(i[0]) if 0 not in done: results[0].append(0) if 1 not in done: results[1].append(0) if 2 not in done: results[2].append(0) labels = pd.DataFrame(results) labels.index = stored_ix labels.columns = ['Cluster0', 'Cluster1', 'Cluster2'] df = df.join(labels) results = {0: [], 1: [], 2: []} for doc in bow_corpus[num_non_null_df:]: done = [] for i in lda_model[doc]: results[i[0]].append(i[1]) done.append(i[0]) if 0 not in done: results[0].append(0) if 1 not in done: results[1].append(0) if 2 not in done: results[2].append(0) labels = pd.DataFrame(results) labels.index = stored_ix2 labels.columns = ['Cluster0', 'Cluster1', 'Cluster2'] test = test.join(labels) df['listing_time'] = df['number_of_reviews'] / df['reviews_per_month'] df['listing_time'] = df['listing_time'].fillna(0) df = df.drop(cols_to_drop, axis=1) df['is_train'] = df['transit'].str.lower().str.contains('train') df['is_bus'] = df['transit'].str.lower().str.contains('bus') df['is_subway'] = df['transit'].str.lower().str.contains('subway') df['is_cab'] = df['transit'].str.lower().str.contains('cab') | df['transit'].str.lower().str.contains('car') | df['transit'].str.lower().str.contains('uber') | df['transit'].str.lower().str.contains('taxi') df['is_metro'] = df['transit'].str.lower().str.contains('metro') df['is_walk'] = df['transit'].str.lower().str.contains('walk') df['is_wifi'] = df['amenities'].str.lower().str.contains('wifi') | df['amenities'].str.lower().str.contains('internet') df['is_kitchen'] = df['amenities'].str.lower().str.contains('kitchen') df['is_heating'] = df['amenities'].str.lower().str.contains('heat') df['is_ac'] = df['amenities'].str.lower().str.contains('air conditioning') df['is_washer'] = df['amenities'].str.lower().str.contains('washer') | df['amenities'].str.lower().str.contains('dryer') | df['amenities'].str.lower().str.contains('dishwasher') df['is_tv'] = df['amenities'].str.lower().str.contains('tv') df['is_gym'] = df['amenities'].str.lower().str.contains('gym') df['is_pets'] = df['amenities'].str.lower().str.contains('pet') df['is_balcony'] = df['amenities'].str.lower().str.contains('balcony') df['is_linen'] = df['amenities'].str.lower().str.contains('linen') df['is_breakfast'] = df['amenities'].str.lower().str.contains('breakfast') df['is_coffee'] = df['amenities'].str.lower().str.contains('coffee') df['is_cooking'] = df['amenities'].str.lower().str.contains('cooking') df['is_pool'] = df['amenities'].str.lower().str.contains('pool') df['amenities'] = 
df['amenities'].str.strip('{').str.strip('}').str.split(',').apply(lambda x: len(x) if x[0] != '' else 0) df['host_is_superhost'] = df['host_is_superhost'].replace({'f': False, 't': True}) df = df.drop(['transit'], axis=1) for col in df[pd.Series(df.columns)[pd.Series(df.columns).str.contains('review')].to_list()].columns: to_fill = pd.Series(df[col].sample(df[col].isnull().sum())) to_fill.index = df[df[col].isnull()].index df[col] = df[col].fillna(to_fill) df[df.columns[df.columns.str.contains('^is')]] = df[df.columns[df.columns.str.contains('^is')]].fillna('False') df = df.fillna(0) df = df.reset_index(drop=True) test['listing_time'] = test['number_of_reviews'] / test['reviews_per_month'] test['listing_time'] = test['listing_time'].fillna(0) test = test.drop(cols_to_drop, axis=1) test['is_train'] = test['transit'].str.lower().str.contains('train') test['is_bus'] = test['transit'].str.lower().str.contains('bus') test['is_subway'] = test['transit'].str.lower().str.contains('subway') test['is_cab'] = test['transit'].str.lower().str.contains('cab') | test['transit'].str.lower().str.contains('car') | test['transit'].str.lower().str.contains('uber') | test['transit'].str.lower().str.contains('taxi') test['is_metro'] = test['transit'].str.lower().str.contains('metro') test['is_walk'] = test['transit'].str.lower().str.contains('walk') test['is_wifi'] = test['amenities'].str.lower().str.contains('wifi') | test['amenities'].str.lower().str.contains('internet') test['is_kitchen'] = test['amenities'].str.lower().str.contains('kitchen') test['is_heating'] = test['amenities'].str.lower().str.contains('heat') test['is_ac'] = test['amenities'].str.lower().str.contains('air conditioning') test['is_washer'] = test['amenities'].str.lower().str.contains('washer') | test['amenities'].str.lower().str.contains('dryer') | test['amenities'].str.lower().str.contains('dishwasher') test['is_tv'] = test['amenities'].str.lower().str.contains('tv') test['is_gym'] = test['amenities'].str.lower().str.contains('gym') test['is_pets'] = test['amenities'].str.lower().str.contains('pet') test['is_balcony'] = test['amenities'].str.lower().str.contains('balcony') test['is_linen'] = test['amenities'].str.lower().str.contains('linen') test['is_breakfast'] = test['amenities'].str.lower().str.contains('breakfast') test['is_coffee'] = test['amenities'].str.lower().str.contains('coffee') test['is_cooking'] = test['amenities'].str.lower().str.contains('cooking') test['is_pool'] = test['amenities'].str.lower().str.contains('pool') test['amenities'] = test['amenities'].str.strip('{').str.strip('}').str.split(',').apply(lambda x: len(x) if x[0] != '' else 0) test['host_is_superhost'] = test['host_is_superhost'].replace({'f': False, 't': True}) test = test.drop(['transit'], axis=1) for col in test[pd.Series(test.columns)[pd.Series(test.columns).str.contains('review')].to_list()].columns: to_fill = pd.Series(test[col].sample(test[col].isnull().sum())) to_fill.index = test[test[col].isnull()].index test[col] = test[col].fillna(to_fill) test[test.columns[test.columns.str.contains('^is')]] = test[test.columns[test.columns.str.contains('^is')]].fillna('False') test = test.fillna(0) df['bedrooms'] = df['bedrooms'].apply(lambda x: str(x)) df['bathrooms'] = df['bathrooms'].apply(lambda x: str(x)) test['bedrooms'] = test['bedrooms'].apply(lambda x: str(x)) test['bathrooms'] = test['bathrooms'].apply(lambda x: str(x)) test = test.reset_index(drop=True) return (df.replace({False: 0, True: 1, 'False': 0, 'True': 1, 'f': 0, 't': 1}), 
test.replace({False: 0, True: 1, 'False': 0, 'True': 1, 'f': 0, 't': 1})) df.columns df.shape scores = [] for i in range(20): time_features = ['host_since'] time_converter = Pipeline(steps=[('ft', FunctionTransformer(lambda x: (2020 - pd.to_datetime(x['host_since']).apply(lambda u: u.year)).values.reshape(-1, 1)))]) ohe_features = ['neighbourhood_cleansed', 'property_type', 'room_type', 'bed_type', 'cancellation_policy', 'bathrooms', 'bedrooms', 'market', 'country'] ohe_converter = Pipeline(steps=[('ohe', OneHotEncoder(handle_unknown='ignore'))]) preproc = ColumnTransformer(transformers=[('time', time_converter, time_features)], remainder='passthrough') catt_features = [df.drop(['price'], axis=1).columns.get_loc(col) for col in ohe_features] pl = Pipeline(steps=[('preprocessor', preproc), ('regressor', CatBoostRegressor(cat_features=catt_features, silent=True))]) break submission['Predicted'] = pd.Series(pl.predict(test)) preprocessed = pl['preprocessor'].fit_transform(df.drop('price', axis=1)) preprocessed = preprocessed preprocessed
code
34133142/cell_1
[ "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import os import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.preprocessing import FunctionTransformer from sklearn.preprocessing import OneHotEncoder from sklearn.pipeline import Pipeline from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OrdinalEncoder from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import QuantileTransformer from sklearn.decomposition import PCA from sklearn.linear_model import LinearRegression from sklearn.linear_model import Lasso from sklearn.linear_model import ElasticNet from sklearn.svm import SVR from sklearn.model_selection import GridSearchCV from sklearn.metrics import mean_squared_error as rmse from sklearn.metrics import accuracy_score from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from lightgbm import LGBMRegressor from xgboost import XGBRegressor from catboost import CatBoostRegressor import gensim from gensim import corpora, models from gensim.utils import simple_preprocess from gensim.parsing.preprocessing import STOPWORDS from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.stem.porter import * import numpy as np np.random.seed(2018) import nltk from nltk.corpus import wordnet from nltk.corpus import stopwords from nltk.tokenize import RegexpTokenizer import ast import os for dirname, _, filenames in os.walk('/kaggle/input/airbnbdm'): for filename in filenames: print(os.path.join(dirname, filename))
code
34133142/cell_7
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.tokenize import RegexpTokenizer import gensim import nltk import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/airbnbdm/train.csv') test = pd.read_csv('/kaggle/input/airbnbdm/test.csv') submission = pd.DataFrame() submission['Id'] = test['id'].copy() df.columns verification_methods = ['phone', 'email', 'reviews', 'government_id', 'jumio', 'offline_government_id', 'kba', 'facebook', 'selfie', 'work_email', 'identity_manual', 'google', 'manual_offline', 'manual_online', 'sent_id', 'None', 'weibo', 'zhima_selfie', 'sesame_offline', 'sesame'] def clean(df, test): cols_to_drop = ['id', 'name', 'summary', 'space', 'description', 'experiences_offered', 'neighborhood_overview', 'notes', 'access', 'interaction', 'house_rules', 'host_id', 'host_name', 'host_location', 'host_about', 'host_neighbourhood', 'neighbourhood_group_cleansed', 'city', 'state', 'zipcode', 'country_code', 'square_feet', 'host_listings_count', 'reviews_per_month', 'first_review', 'last_review', 'beds', 'host_verifications'] df['extra_people'] = df['extra_people'].str.strip('$').apply(lambda x: float(x)) test['extra_people'] = test['extra_people'].str.strip('$').apply(lambda x: float(x)) df['host_acceptance_rate'] = df['host_acceptance_rate'].fillna('0%').str.strip('%').apply(lambda x: float(x)) test['host_acceptance_rate'] = test['host_acceptance_rate'].fillna('0%').str.strip('%').apply(lambda x: float(x)) df['host_response_rate'] = df['host_response_rate'].fillna('0%').str.strip('%').apply(lambda x: float(x)) test['host_response_rate'] = test['host_response_rate'].fillna('0%').str.strip('%').apply(lambda x: float(x)) df['host_in_neighbourhood'] = (df['host_neighbourhood'] == df['neighbourhood_group_cleansed']).fillna(False) test['host_in_neighbourhood'] = (test['host_neighbourhood'] == test['neighbourhood_group_cleansed']).fillna(False) df['host_verifications'].str.strip('[').str.strip(']')[df['host_verifications'].str.strip('[').str.strip(']') == ''] = '[None]' test['host_verifications'].str.strip('[').str.strip(']')[test['host_verifications'].str.strip('[').str.strip(']') == ''] = '[None]' for method in verification_methods: df['is_' + method] = df['host_verifications'].str.contains(method) test['is_' + method] = test['host_verifications'].str.contains(method) def helper(x): if pd.isnull(x): return 0 if x == 'within an hour': return 4 if x == 'within a few hours': return 3 if x == 'within a day': return 2 if x == 'a few days or more': return 1 return 0 df['host_response_time'] = df['host_response_time'].apply(lambda x: helper(x)) test['host_response_time'] = test['host_response_time'].apply(lambda x: helper(x)) stored_ix = df[~df['description'].isnull()].index stored_ix2 = test[~test['description'].isnull()].index num_non_null_df = df[~df['description'].isnull()].shape[0] stemmer = SnowballStemmer('english') def preprocess_text(corpus): """Takes a corpus in list format and applies basic preprocessing steps of word tokenization, removing of english stop words, lower case and lemmatization.""" processed_corpus = [] english_words = set(nltk.corpus.words.words()) english_stopwords = set(stopwords.words('english')) wordnet_lemmatizer = WordNetLemmatizer() tokenizer = RegexpTokenizer('[\\w|!]+') for row in corpus: word_tokens = tokenizer.tokenize(row) word_tokens_lower = [t.lower() for t in word_tokens] word_tokens_lower_english = [t for t in word_tokens_lower if t in english_words 
or not t.isalpha()] word_tokens_no_stops = [t for t in word_tokens_lower_english if not t in english_stopwords] word_tokens_no_stops_lemmatized = [wordnet_lemmatizer.lemmatize(t) for t in word_tokens_no_stops] processed_corpus.append(word_tokens_no_stops_lemmatized) return processed_corpus stemmed_stopped = preprocess_text(df['description'].dropna().append(test['description'].dropna(), ignore_index=True)) dictionary = gensim.corpora.Dictionary(stemmed_stopped) dictionary.filter_extremes(no_below=15, keep_n=100000) bow_corpus = [dictionary.doc2bow(doc) for doc in stemmed_stopped] lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=3, id2word=dictionary, passes=2, workers=4) results = {0: [], 1: [], 2: []} for doc in bow_corpus[0:num_non_null_df]: done = [] for i in lda_model[doc]: results[i[0]].append(i[1]) done.append(i[0]) if 0 not in done: results[0].append(0) if 1 not in done: results[1].append(0) if 2 not in done: results[2].append(0) labels = pd.DataFrame(results) labels.index = stored_ix labels.columns = ['Cluster0', 'Cluster1', 'Cluster2'] df = df.join(labels) results = {0: [], 1: [], 2: []} for doc in bow_corpus[num_non_null_df:]: done = [] for i in lda_model[doc]: results[i[0]].append(i[1]) done.append(i[0]) if 0 not in done: results[0].append(0) if 1 not in done: results[1].append(0) if 2 not in done: results[2].append(0) labels = pd.DataFrame(results) labels.index = stored_ix2 labels.columns = ['Cluster0', 'Cluster1', 'Cluster2'] test = test.join(labels) df['listing_time'] = df['number_of_reviews'] / df['reviews_per_month'] df['listing_time'] = df['listing_time'].fillna(0) df = df.drop(cols_to_drop, axis=1) df['is_train'] = df['transit'].str.lower().str.contains('train') df['is_bus'] = df['transit'].str.lower().str.contains('bus') df['is_subway'] = df['transit'].str.lower().str.contains('subway') df['is_cab'] = df['transit'].str.lower().str.contains('cab') | df['transit'].str.lower().str.contains('car') | df['transit'].str.lower().str.contains('uber') | df['transit'].str.lower().str.contains('taxi') df['is_metro'] = df['transit'].str.lower().str.contains('metro') df['is_walk'] = df['transit'].str.lower().str.contains('walk') df['is_wifi'] = df['amenities'].str.lower().str.contains('wifi') | df['amenities'].str.lower().str.contains('internet') df['is_kitchen'] = df['amenities'].str.lower().str.contains('kitchen') df['is_heating'] = df['amenities'].str.lower().str.contains('heat') df['is_ac'] = df['amenities'].str.lower().str.contains('air conditioning') df['is_washer'] = df['amenities'].str.lower().str.contains('washer') | df['amenities'].str.lower().str.contains('dryer') | df['amenities'].str.lower().str.contains('dishwasher') df['is_tv'] = df['amenities'].str.lower().str.contains('tv') df['is_gym'] = df['amenities'].str.lower().str.contains('gym') df['is_pets'] = df['amenities'].str.lower().str.contains('pet') df['is_balcony'] = df['amenities'].str.lower().str.contains('balcony') df['is_linen'] = df['amenities'].str.lower().str.contains('linen') df['is_breakfast'] = df['amenities'].str.lower().str.contains('breakfast') df['is_coffee'] = df['amenities'].str.lower().str.contains('coffee') df['is_cooking'] = df['amenities'].str.lower().str.contains('cooking') df['is_pool'] = df['amenities'].str.lower().str.contains('pool') df['amenities'] = df['amenities'].str.strip('{').str.strip('}').str.split(',').apply(lambda x: len(x) if x[0] != '' else 0) df['host_is_superhost'] = df['host_is_superhost'].replace({'f': False, 't': True}) df = df.drop(['transit'], axis=1) for col in 
df[pd.Series(df.columns)[pd.Series(df.columns).str.contains('review')].to_list()].columns: to_fill = pd.Series(df[col].sample(df[col].isnull().sum())) to_fill.index = df[df[col].isnull()].index df[col] = df[col].fillna(to_fill) df[df.columns[df.columns.str.contains('^is')]] = df[df.columns[df.columns.str.contains('^is')]].fillna('False') df = df.fillna(0) df = df.reset_index(drop=True) test['listing_time'] = test['number_of_reviews'] / test['reviews_per_month'] test['listing_time'] = test['listing_time'].fillna(0) test = test.drop(cols_to_drop, axis=1) test['is_train'] = test['transit'].str.lower().str.contains('train') test['is_bus'] = test['transit'].str.lower().str.contains('bus') test['is_subway'] = test['transit'].str.lower().str.contains('subway') test['is_cab'] = test['transit'].str.lower().str.contains('cab') | test['transit'].str.lower().str.contains('car') | test['transit'].str.lower().str.contains('uber') | test['transit'].str.lower().str.contains('taxi') test['is_metro'] = test['transit'].str.lower().str.contains('metro') test['is_walk'] = test['transit'].str.lower().str.contains('walk') test['is_wifi'] = test['amenities'].str.lower().str.contains('wifi') | test['amenities'].str.lower().str.contains('internet') test['is_kitchen'] = test['amenities'].str.lower().str.contains('kitchen') test['is_heating'] = test['amenities'].str.lower().str.contains('heat') test['is_ac'] = test['amenities'].str.lower().str.contains('air conditioning') test['is_washer'] = test['amenities'].str.lower().str.contains('washer') | test['amenities'].str.lower().str.contains('dryer') | test['amenities'].str.lower().str.contains('dishwasher') test['is_tv'] = test['amenities'].str.lower().str.contains('tv') test['is_gym'] = test['amenities'].str.lower().str.contains('gym') test['is_pets'] = test['amenities'].str.lower().str.contains('pet') test['is_balcony'] = test['amenities'].str.lower().str.contains('balcony') test['is_linen'] = test['amenities'].str.lower().str.contains('linen') test['is_breakfast'] = test['amenities'].str.lower().str.contains('breakfast') test['is_coffee'] = test['amenities'].str.lower().str.contains('coffee') test['is_cooking'] = test['amenities'].str.lower().str.contains('cooking') test['is_pool'] = test['amenities'].str.lower().str.contains('pool') test['amenities'] = test['amenities'].str.strip('{').str.strip('}').str.split(',').apply(lambda x: len(x) if x[0] != '' else 0) test['host_is_superhost'] = test['host_is_superhost'].replace({'f': False, 't': True}) test = test.drop(['transit'], axis=1) for col in test[pd.Series(test.columns)[pd.Series(test.columns).str.contains('review')].to_list()].columns: to_fill = pd.Series(test[col].sample(test[col].isnull().sum())) to_fill.index = test[test[col].isnull()].index test[col] = test[col].fillna(to_fill) test[test.columns[test.columns.str.contains('^is')]] = test[test.columns[test.columns.str.contains('^is')]].fillna('False') test = test.fillna(0) df['bedrooms'] = df['bedrooms'].apply(lambda x: str(x)) df['bathrooms'] = df['bathrooms'].apply(lambda x: str(x)) test['bedrooms'] = test['bedrooms'].apply(lambda x: str(x)) test['bathrooms'] = test['bathrooms'].apply(lambda x: str(x)) test = test.reset_index(drop=True) return (df.replace({False: 0, True: 1, 'False': 0, 'True': 1, 'f': 0, 't': 1}), test.replace({False: 0, True: 1, 'False': 0, 'True': 1, 'f': 0, 't': 1})) df, test = clean(df, test)
code
34133142/cell_3
[ "text_plain_output_5.png", "text_plain_output_9.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_8.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/airbnbdm/train.csv') test = pd.read_csv('/kaggle/input/airbnbdm/test.csv') submission = pd.DataFrame() submission['Id'] = test['id'].copy() df.columns
code
33111475/cell_13
[ "text_plain_output_1.png" ]
from datetime import date import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv') from datetime import date import datetime as dt df['Dates'] = pd.to_datetime(df['Date']) df['Year'] = df.Dates.dt.year df['Month_name'] = df.Dates.dt.month_name() df['Day_name'] = df.Dates.dt.day_name() df['Month'] = df.Dates.dt.month df['Week'] = df.Dates.dt.week df['Day_of_year'] = df.Dates.dt.dayofyear Nigeria = df.loc[df.Country == 'Nigeria'] d1 = date(2014, 8, 29) d2 = date(2016, 3, 23) delta = d2 - d1 Nigeria.groupby('Month_name')['No. of confirmed cases', 'No. of confirmed deaths'].sum() Nigeria.groupby('Month_name')['No. of confirmed cases'].sum().nlargest(3) Nigeria.groupby('Month_name')['No. of confirmed deaths'].sum().nlargest(3) plt.tight_layout() Nigeria.groupby('Month_name')['No. of suspected cases'].sum().nlargest(3) Nigeria.groupby('Month_name')['No. of suspected deaths'].sum().nlargest(3)
code
33111475/cell_9
[ "text_plain_output_1.png" ]
from datetime import date import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv') from datetime import date import datetime as dt df['Dates'] = pd.to_datetime(df['Date']) df['Year'] = df.Dates.dt.year df['Month_name'] = df.Dates.dt.month_name() df['Day_name'] = df.Dates.dt.day_name() df['Month'] = df.Dates.dt.month df['Week'] = df.Dates.dt.week df['Day_of_year'] = df.Dates.dt.dayofyear Nigeria = df.loc[df.Country == 'Nigeria'] d1 = date(2014, 8, 29) d2 = date(2016, 3, 23) delta = d2 - d1 Nigeria.groupby('Month_name')['No. of confirmed cases', 'No. of confirmed deaths'].sum() Nigeria.groupby('Month_name')['No. of confirmed cases'].sum().nlargest(3)
code
33111475/cell_6
[ "text_plain_output_1.png" ]
from datetime import date d1 = date(2014, 8, 29) d2 = date(2016, 3, 23) delta = d2 - d1 print(delta)
code
33111475/cell_11
[ "text_html_output_1.png" ]
from datetime import date import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv') from datetime import date import datetime as dt df['Dates'] = pd.to_datetime(df['Date']) df['Year'] = df.Dates.dt.year df['Month_name'] = df.Dates.dt.month_name() df['Day_name'] = df.Dates.dt.day_name() df['Month'] = df.Dates.dt.month df['Week'] = df.Dates.dt.week df['Day_of_year'] = df.Dates.dt.dayofyear Nigeria = df.loc[df.Country == 'Nigeria'] d1 = date(2014, 8, 29) d2 = date(2016, 3, 23) delta = d2 - d1 Nigeria.groupby('Month_name')['No. of confirmed cases', 'No. of confirmed deaths'].sum() Nigeria.groupby('Month_name')['No. of confirmed cases'].sum().nlargest(3) Nigeria.groupby('Month_name')['No. of confirmed deaths'].sum().nlargest(3) plt.subplot(1, 2, 1) Nigeria.groupby('Month_name')['No. of confirmed cases'].sum().nlargest(3).plot(kind='bar', grid=True) plt.title('Confirmed cases (3)') plt.xlabel('Months') plt.ylabel('No. of probable cases') plt.subplot(1, 2, 2) Nigeria.groupby('Month_name')['No. of confirmed deaths'].sum().nlargest(3).plot(kind='bar', grid=True, color='red') plt.title('Confirmed deaths (3)') plt.xlabel('Months') plt.ylabel('No. of probable deaths') plt.tight_layout() plt.show()
code
33111475/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
33111475/cell_7
[ "image_output_1.png" ]
from datetime import date import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv') from datetime import date import datetime as dt df['Dates'] = pd.to_datetime(df['Date']) df['Year'] = df.Dates.dt.year df['Month_name'] = df.Dates.dt.month_name() df['Day_name'] = df.Dates.dt.day_name() df['Month'] = df.Dates.dt.month df['Week'] = df.Dates.dt.week df['Day_of_year'] = df.Dates.dt.dayofyear Nigeria = df.loc[df.Country == 'Nigeria'] d1 = date(2014, 8, 29) d2 = date(2016, 3, 23) delta = d2 - d1 print('The date of Nigeria data is from', Nigeria.Dates.min(), 'to', Nigeria.Dates.max(), ',a total number of', delta) print('The total number of confirmed cases in Nigeria is', Nigeria['No. of confirmed cases'].sum()) print('The total number of confirmed deaths in Nigeria is', Nigeria['No. of confirmed deaths'].sum()) print('The total number of suspected cases in Nigeria is', Nigeria['No. of suspected cases'].sum()) print('The total number of suspected deaths in Nigeria is', Nigeria['No. of suspected deaths'].sum()) print('The total number of probable cases in Nigeria is', Nigeria['No. of probable cases'].sum()) print('The total number of probable deaths in Nigeria is', Nigeria['No. of probable deaths'].sum())
code
33111475/cell_8
[ "image_output_1.png" ]
from datetime import date
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
from datetime import date
import datetime as dt
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.week
df['Day_of_year'] = df.Dates.dt.dayofyear
Nigeria = df.loc[df.Country == 'Nigeria']
d1 = date(2014, 8, 29)
d2 = date(2016, 3, 23)
delta = d2 - d1
Nigeria.groupby('Month_name')['No. of confirmed cases', 'No. of confirmed deaths'].sum()
code
33111475/cell_15
[ "text_plain_output_1.png" ]
from datetime import date
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # needed for the plt calls below
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
from datetime import date
import datetime as dt
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.week
df['Day_of_year'] = df.Dates.dt.dayofyear
Nigeria = df.loc[df.Country == 'Nigeria']
d1 = date(2014, 8, 29)
d2 = date(2016, 3, 23)
delta = d2 - d1
Nigeria.groupby('Month_name')['No. of confirmed cases', 'No. of confirmed deaths'].sum()
Nigeria.groupby('Month_name')['No. of confirmed cases'].sum().nlargest(3)
Nigeria.groupby('Month_name')['No. of confirmed deaths'].sum().nlargest(3)
plt.tight_layout()
Nigeria.groupby('Month_name')['No. of suspected cases'].sum().nlargest(3)
Nigeria.groupby('Month_name')['No. of suspected deaths'].sum().nlargest(3)
plt.subplot(1, 2, 1)
Nigeria.groupby('Month_name')['No. of probable cases'].sum().nlargest(3).plot(kind='bar', grid=True)
plt.title('Probable cases (3)')
plt.xlabel('Months')
plt.ylabel('No. of probable cases')
plt.subplot(1, 2, 2)
Nigeria.groupby('Month_name')['No. of probable deaths'].sum().nlargest(3).plot(kind='bar', grid=True, color='red')
plt.title('Probable deaths (3)')
plt.xlabel('Months')
plt.ylabel('No. of probable deaths')
plt.tight_layout()
plt.show()
code
33111475/cell_3
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
df.head()
code
33111475/cell_14
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
from datetime import date
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # needed for the plt calls below
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
from datetime import date
import datetime as dt
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.week
df['Day_of_year'] = df.Dates.dt.dayofyear
Nigeria = df.loc[df.Country == 'Nigeria']
d1 = date(2014, 8, 29)
d2 = date(2016, 3, 23)
delta = d2 - d1
Nigeria.groupby('Month_name')['No. of confirmed cases', 'No. of confirmed deaths'].sum()
Nigeria.groupby('Month_name')['No. of confirmed cases'].sum().nlargest(3)
Nigeria.groupby('Month_name')['No. of confirmed deaths'].sum().nlargest(3)
plt.tight_layout()
Nigeria.groupby('Month_name')['No. of suspected cases'].sum().nlargest(3)
Nigeria.groupby('Month_name')['No. of suspected deaths'].sum().nlargest(3)
plt.subplot(1, 2, 1)
Nigeria.groupby('Month_name')['No. of suspected cases'].sum().nlargest(3).plot(kind='bar', grid=True)
plt.title('Suspected cases (3)')
plt.xlabel('Months')
plt.ylabel('No. of suspected cases')
plt.show()
code
33111475/cell_10
[ "text_html_output_1.png" ]
from datetime import date
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
from datetime import date
import datetime as dt
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.week
df['Day_of_year'] = df.Dates.dt.dayofyear
Nigeria = df.loc[df.Country == 'Nigeria']
d1 = date(2014, 8, 29)
d2 = date(2016, 3, 23)
delta = d2 - d1
Nigeria.groupby('Month_name')['No. of confirmed cases', 'No. of confirmed deaths'].sum()
Nigeria.groupby('Month_name')['No. of confirmed cases'].sum().nlargest(3)
Nigeria.groupby('Month_name')['No. of confirmed deaths'].sum().nlargest(3)
code
33111475/cell_12
[ "text_plain_output_1.png" ]
from datetime import date
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # needed for the plt call below
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
from datetime import date
import datetime as dt
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.week
df['Day_of_year'] = df.Dates.dt.dayofyear
Nigeria = df.loc[df.Country == 'Nigeria']
d1 = date(2014, 8, 29)
d2 = date(2016, 3, 23)
delta = d2 - d1
Nigeria.groupby('Month_name')['No. of confirmed cases', 'No. of confirmed deaths'].sum()
Nigeria.groupby('Month_name')['No. of confirmed cases'].sum().nlargest(3)
Nigeria.groupby('Month_name')['No. of confirmed deaths'].sum().nlargest(3)
plt.tight_layout()
Nigeria.groupby('Month_name')['No. of suspected cases'].sum().nlargest(3)
code
33111475/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
from datetime import date
import datetime as dt
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.week
df['Day_of_year'] = df.Dates.dt.dayofyear
Nigeria = df.loc[df.Country == 'Nigeria']
Nigeria.head()
code
49115994/cell_9
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
Aquifer_Doganella = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Doganella.csv')
Aquifer_Auser = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Auser.csv')
Water_Spring_Amiata = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Amiata.csv')
Lake_Bilancino = pd.read_csv('/kaggle/input/acea-water-prediction/Lake_Bilancino.csv')
Water_Spring_Madonna_di_Canneto = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Madonna_di_Canneto.csv')
Aquifer_Luco = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Luco.csv')
Aquifer_Petrignano = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Petrignano.csv')
Water_Spring_Lupa = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Lupa.csv')
River_Arno = pd.read_csv('/kaggle/input/acea-water-prediction/River_Arno.csv')
Aquifer_Doganella['Date'] = pd.to_datetime(Aquifer_Doganella['Date'])
Aquifer_Auser['Date'] = pd.to_datetime(Aquifer_Auser['Date'])
Water_Spring_Amiata['Date'] = pd.to_datetime(Water_Spring_Amiata['Date'])
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'])
Water_Spring_Madonna_di_Canneto['Date'] = pd.to_datetime(Water_Spring_Madonna_di_Canneto['Date'])
Aquifer_Luco['Date'] = pd.to_datetime(Aquifer_Luco['Date'])
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'])
Water_Spring_Lupa['Date'] = pd.to_datetime(Water_Spring_Lupa['Date'])
River_Arno['Date'] = pd.to_datetime(River_Arno['Date'])
def get_datefeatures(df):
    df['month'] = df.Date.dt.month
    df['day'] = df.Date.dt.day
    df['week'] = df.Date.dt.week
    df['year'] = df.Date.dt.year
    return df
Aquifer_Doganella = get_datefeatures(df=Aquifer_Doganella)
Aquifer_Auser = get_datefeatures(df=Aquifer_Auser)
Water_Spring_Amiata = get_datefeatures(df=Water_Spring_Amiata)
Lake_Bilancino = get_datefeatures(df=Lake_Bilancino)
Water_Spring_Madonna_di_Canneto = get_datefeatures(df=Water_Spring_Madonna_di_Canneto)
Aquifer_Luco = get_datefeatures(df=Aquifer_Luco)
Aquifer_Petrignano = get_datefeatures(df=Aquifer_Petrignano)
Water_Spring_Lupa = get_datefeatures(df=Water_Spring_Lupa)
River_Arno = Water_Spring_Lupa = get_datefeatures(df=River_Arno)
def check_duplicates(df, df_name):
    print(f'{df_name}')
    print(f'{df.Date.duplicated().value_counts()}')
    print('')
check_duplicates(df=Aquifer_Doganella, df_name='Aquifer_Doganella')
check_duplicates(df=Aquifer_Auser, df_name='Aquifer_Auser')
check_duplicates(df=Water_Spring_Amiata, df_name='Water_Spring_Amiata')
check_duplicates(df=Lake_Bilancino, df_name='Lake_Bilancino')
check_duplicates(df=Water_Spring_Madonna_di_Canneto, df_name='Water_Spring_Madonna_di_Canneto')
check_duplicates(df=Aquifer_Luco, df_name='Aquifer_Luco')
check_duplicates(df=Aquifer_Petrignano, df_name='Aquifer_Petrignano')
check_duplicates(df=Water_Spring_Lupa, df_name='Water_Spring_Lupa')
check_duplicates(df=River_Arno, df_name='River_Arno')
code
49115994/cell_7
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_9.png" ]
import pandas as pd
Aquifer_Doganella = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Doganella.csv')
Aquifer_Auser = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Auser.csv')
Water_Spring_Amiata = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Amiata.csv')
Lake_Bilancino = pd.read_csv('/kaggle/input/acea-water-prediction/Lake_Bilancino.csv')
Water_Spring_Madonna_di_Canneto = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Madonna_di_Canneto.csv')
Aquifer_Luco = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Luco.csv')
Aquifer_Petrignano = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Petrignano.csv')
Water_Spring_Lupa = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Lupa.csv')
River_Arno = pd.read_csv('/kaggle/input/acea-water-prediction/River_Arno.csv')
Aquifer_Doganella['Date'] = pd.to_datetime(Aquifer_Doganella['Date'])
Aquifer_Auser['Date'] = pd.to_datetime(Aquifer_Auser['Date'])
Water_Spring_Amiata['Date'] = pd.to_datetime(Water_Spring_Amiata['Date'])
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'])
Water_Spring_Madonna_di_Canneto['Date'] = pd.to_datetime(Water_Spring_Madonna_di_Canneto['Date'])
Aquifer_Luco['Date'] = pd.to_datetime(Aquifer_Luco['Date'])
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'])
Water_Spring_Lupa['Date'] = pd.to_datetime(Water_Spring_Lupa['Date'])
River_Arno['Date'] = pd.to_datetime(River_Arno['Date'])
def get_datefeatures(df):
    df['month'] = df.Date.dt.month
    df['day'] = df.Date.dt.day
    df['week'] = df.Date.dt.week
    df['year'] = df.Date.dt.year
    return df
Aquifer_Doganella = get_datefeatures(df=Aquifer_Doganella)
Aquifer_Auser = get_datefeatures(df=Aquifer_Auser)
Water_Spring_Amiata = get_datefeatures(df=Water_Spring_Amiata)
Lake_Bilancino = get_datefeatures(df=Lake_Bilancino)
Water_Spring_Madonna_di_Canneto = get_datefeatures(df=Water_Spring_Madonna_di_Canneto)
Aquifer_Luco = get_datefeatures(df=Aquifer_Luco)
Aquifer_Petrignano = get_datefeatures(df=Aquifer_Petrignano)
Water_Spring_Lupa = get_datefeatures(df=Water_Spring_Lupa)
River_Arno = Water_Spring_Lupa = get_datefeatures(df=River_Arno)
def date_range_of_data(df, df_name):
    print(f'The date range for ## {df_name} ## is from')
    print(f"{df['Date'].min()} to {df['Date'].max()}")
    print(f"which is a total of {(df['Date'].max() - df['Date'].min()).days} days")
    print('')
date_range_of_data(df=Aquifer_Doganella, df_name='Aquifer_Doganella')
date_range_of_data(df=Aquifer_Auser, df_name='Aquifer_Auser')
date_range_of_data(df=Water_Spring_Amiata, df_name='Water_Spring_Amiata')
date_range_of_data(df=Lake_Bilancino, df_name='Lake_Bilancino')
date_range_of_data(df=Water_Spring_Madonna_di_Canneto, df_name='Water_Spring_Madonna_di_Canneto')
date_range_of_data(df=Aquifer_Luco, df_name='Aquifer_Luco')
date_range_of_data(df=Aquifer_Petrignano, df_name='Aquifer_Petrignano')
date_range_of_data(df=Water_Spring_Lupa, df_name='Water_Spring_Lupa')
date_range_of_data(df=River_Arno, df_name='River_Arno')
code
49115994/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
Aquifer_Doganella = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Doganella.csv')
Aquifer_Auser = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Auser.csv')
Water_Spring_Amiata = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Amiata.csv')
Lake_Bilancino = pd.read_csv('/kaggle/input/acea-water-prediction/Lake_Bilancino.csv')
Water_Spring_Madonna_di_Canneto = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Madonna_di_Canneto.csv')
Aquifer_Luco = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Luco.csv')
Aquifer_Petrignano = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Petrignano.csv')
Water_Spring_Lupa = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Lupa.csv')
River_Arno = pd.read_csv('/kaggle/input/acea-water-prediction/River_Arno.csv')
Aquifer_Doganella['Date'] = pd.to_datetime(Aquifer_Doganella['Date'])
Aquifer_Auser['Date'] = pd.to_datetime(Aquifer_Auser['Date'])
Water_Spring_Amiata['Date'] = pd.to_datetime(Water_Spring_Amiata['Date'])
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'])
Water_Spring_Madonna_di_Canneto['Date'] = pd.to_datetime(Water_Spring_Madonna_di_Canneto['Date'])
Aquifer_Luco['Date'] = pd.to_datetime(Aquifer_Luco['Date'])
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'])
Water_Spring_Lupa['Date'] = pd.to_datetime(Water_Spring_Lupa['Date'])
River_Arno['Date'] = pd.to_datetime(River_Arno['Date'])
def get_datefeatures(df):
    df['month'] = df.Date.dt.month
    df['day'] = df.Date.dt.day
    df['week'] = df.Date.dt.week
    df['year'] = df.Date.dt.year
    return df
Aquifer_Doganella = get_datefeatures(df=Aquifer_Doganella)
Aquifer_Auser = get_datefeatures(df=Aquifer_Auser)
Water_Spring_Amiata = get_datefeatures(df=Water_Spring_Amiata)
Lake_Bilancino = get_datefeatures(df=Lake_Bilancino)
Water_Spring_Madonna_di_Canneto = get_datefeatures(df=Water_Spring_Madonna_di_Canneto)
Aquifer_Luco = get_datefeatures(df=Aquifer_Luco)
Aquifer_Petrignano = get_datefeatures(df=Aquifer_Petrignano)
Water_Spring_Lupa = get_datefeatures(df=Water_Spring_Lupa)
River_Arno = Water_Spring_Lupa = get_datefeatures(df=River_Arno)
def check_duplicates(df, df_name):
    pass
check_duplicates(df=Aquifer_Doganella, df_name='Aquifer_Doganella')
check_duplicates(df=Aquifer_Auser, df_name='Aquifer_Auser')
check_duplicates(df=Water_Spring_Amiata, df_name='Water_Spring_Amiata')
check_duplicates(df=Lake_Bilancino, df_name='Lake_Bilancino')
check_duplicates(df=Water_Spring_Madonna_di_Canneto, df_name='Water_Spring_Madonna_di_Canneto')
check_duplicates(df=Aquifer_Luco, df_name='Aquifer_Luco')
check_duplicates(df=Aquifer_Petrignano, df_name='Aquifer_Petrignano')
check_duplicates(df=Water_Spring_Lupa, df_name='Water_Spring_Lupa')
check_duplicates(df=River_Arno, df_name='River_Arno')
def find_total_missing_days(df, df_name):
    daily_date = pd.date_range(start=df.Date.min(), end=df.Date.max(), freq='D')
    uniq_days = df.Date.nunique()
find_total_missing_days(df=Aquifer_Doganella, df_name='Aquifer_Doganella')
find_total_missing_days(df=Aquifer_Auser, df_name='Aquifer_Auser')
find_total_missing_days(df=Water_Spring_Amiata, df_name='Water_Spring_Amiata')
find_total_missing_days(df=Lake_Bilancino, df_name='Lake_Bilancino')
find_total_missing_days(df=Water_Spring_Madonna_di_Canneto, df_name='Water_Spring_Madonna_di_Canneto')
find_total_missing_days(df=Aquifer_Luco, df_name='Aquifer_Luco')
find_total_missing_days(df=Aquifer_Petrignano, df_name='Aquifer_Petrignano')
find_total_missing_days(df=Water_Spring_Lupa, df_name='Water_Spring_Lupa')
find_total_missing_days(df=River_Arno, df_name='River_Arno')
def total_missing_days_pattern(df, df_name):
    daily_date = pd.date_range(start=df.Date.min(), end=df.Date.max(), freq='D')
    daily_data = pd.DataFrame({'Date': daily_date})
    temp = df[['Date']].copy()
    temp['missing'] = 0
    final = daily_data.merge(temp, on=['Date'], how='left')
    final.fillna(1, inplace=True)
total_missing_days_pattern(df=Aquifer_Doganella, df_name='Aquifer_Doganella')
total_missing_days_pattern(df=Aquifer_Auser, df_name='Aquifer_Auser')
total_missing_days_pattern(df=Water_Spring_Amiata, df_name='Water_Spring_Amiata')
total_missing_days_pattern(df=Lake_Bilancino, df_name='Lake_Bilancino')
total_missing_days_pattern(df=Water_Spring_Madonna_di_Canneto, df_name='Water_Spring_Madonna_di_Canneto')
total_missing_days_pattern(df=Aquifer_Luco, df_name='Aquifer_Luco')
total_missing_days_pattern(df=Aquifer_Petrignano, df_name='Aquifer_Petrignano')
total_missing_days_pattern(df=Water_Spring_Lupa, df_name='Water_Spring_Lupa')
total_missing_days_pattern(df=River_Arno, df_name='River_Arno')
def Null_analysis(df, title):
    temp = df.isnull().sum()
    plt.figure(figsize=(15, 5))
    g = sns.barplot(temp.index, temp.values)
    plt.xticks(rotation=90)
    plt.ylim(0, temp.values.max() + 1000)
    plt.title(title)
    for p in g.patches:
        g.annotate('{:.0f}\n{:.2f}%'.format(p.get_height(), p.get_height() / df.shape[0]), (p.get_x() + 0.4, p.get_height() + 10), ha='center', va='bottom', color='black')
    plt.show()
Null_analysis(df=Aquifer_Doganella, title='Aquifer_Doganella')
Null_analysis(df=Aquifer_Auser, title='Aquifer_Auser')
Null_analysis(df=Water_Spring_Amiata, title='Water_Spring_Amiata')
Null_analysis(df=Lake_Bilancino, title='Lake_Bilancino')
Null_analysis(df=Water_Spring_Madonna_di_Canneto, title='Water_Spring_Madonna_di_Canneto')
Null_analysis(df=Aquifer_Luco, title='Aquifer_Luco')
Null_analysis(df=Aquifer_Petrignano, title='Aquifer_Petrignano')
Null_analysis(df=Water_Spring_Lupa, title='Water_Spring_Lupa')
Null_analysis(df=River_Arno, title='River_Arno')
code
49115994/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
Aquifer_Doganella = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Doganella.csv')
Aquifer_Auser = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Auser.csv')
Water_Spring_Amiata = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Amiata.csv')
Lake_Bilancino = pd.read_csv('/kaggle/input/acea-water-prediction/Lake_Bilancino.csv')
Water_Spring_Madonna_di_Canneto = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Madonna_di_Canneto.csv')
Aquifer_Luco = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Luco.csv')
Aquifer_Petrignano = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Petrignano.csv')
Water_Spring_Lupa = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Lupa.csv')
River_Arno = pd.read_csv('/kaggle/input/acea-water-prediction/River_Arno.csv')
Aquifer_Doganella['Date'] = pd.to_datetime(Aquifer_Doganella['Date'])
Aquifer_Auser['Date'] = pd.to_datetime(Aquifer_Auser['Date'])
Water_Spring_Amiata['Date'] = pd.to_datetime(Water_Spring_Amiata['Date'])
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'])
Water_Spring_Madonna_di_Canneto['Date'] = pd.to_datetime(Water_Spring_Madonna_di_Canneto['Date'])
Aquifer_Luco['Date'] = pd.to_datetime(Aquifer_Luco['Date'])
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'])
Water_Spring_Lupa['Date'] = pd.to_datetime(Water_Spring_Lupa['Date'])
River_Arno['Date'] = pd.to_datetime(River_Arno['Date'])
def get_datefeatures(df):
    df['month'] = df.Date.dt.month
    df['day'] = df.Date.dt.day
    df['week'] = df.Date.dt.week
    df['year'] = df.Date.dt.year
    return df
Aquifer_Doganella = get_datefeatures(df=Aquifer_Doganella)
Aquifer_Auser = get_datefeatures(df=Aquifer_Auser)
Water_Spring_Amiata = get_datefeatures(df=Water_Spring_Amiata)
Lake_Bilancino = get_datefeatures(df=Lake_Bilancino)
Water_Spring_Madonna_di_Canneto = get_datefeatures(df=Water_Spring_Madonna_di_Canneto)
Aquifer_Luco = get_datefeatures(df=Aquifer_Luco)
Aquifer_Petrignano = get_datefeatures(df=Aquifer_Petrignano)
Water_Spring_Lupa = get_datefeatures(df=Water_Spring_Lupa)
River_Arno = Water_Spring_Lupa = get_datefeatures(df=River_Arno)
def check_duplicates(df, df_name):
    pass
check_duplicates(df=Aquifer_Doganella, df_name='Aquifer_Doganella')
check_duplicates(df=Aquifer_Auser, df_name='Aquifer_Auser')
check_duplicates(df=Water_Spring_Amiata, df_name='Water_Spring_Amiata')
check_duplicates(df=Lake_Bilancino, df_name='Lake_Bilancino')
check_duplicates(df=Water_Spring_Madonna_di_Canneto, df_name='Water_Spring_Madonna_di_Canneto')
check_duplicates(df=Aquifer_Luco, df_name='Aquifer_Luco')
check_duplicates(df=Aquifer_Petrignano, df_name='Aquifer_Petrignano')
check_duplicates(df=Water_Spring_Lupa, df_name='Water_Spring_Lupa')
check_duplicates(df=River_Arno, df_name='River_Arno')
def find_total_missing_days(df, df_name):
    daily_date = pd.date_range(start=df.Date.min(), end=df.Date.max(), freq='D')
    uniq_days = df.Date.nunique()
find_total_missing_days(df=Aquifer_Doganella, df_name='Aquifer_Doganella')
find_total_missing_days(df=Aquifer_Auser, df_name='Aquifer_Auser')
find_total_missing_days(df=Water_Spring_Amiata, df_name='Water_Spring_Amiata')
find_total_missing_days(df=Lake_Bilancino, df_name='Lake_Bilancino')
find_total_missing_days(df=Water_Spring_Madonna_di_Canneto, df_name='Water_Spring_Madonna_di_Canneto')
find_total_missing_days(df=Aquifer_Luco, df_name='Aquifer_Luco')
find_total_missing_days(df=Aquifer_Petrignano, df_name='Aquifer_Petrignano')
find_total_missing_days(df=Water_Spring_Lupa, df_name='Water_Spring_Lupa')
find_total_missing_days(df=River_Arno, df_name='River_Arno')
def total_missing_days_pattern(df, df_name):
    daily_date = pd.date_range(start=df.Date.min(), end=df.Date.max(), freq='D')
    daily_data = pd.DataFrame({'Date': daily_date})
    temp = df[['Date']].copy()
    temp['missing'] = 0
    final = daily_data.merge(temp, on=['Date'], how='left')
    final.fillna(1, inplace=True)
    plt.figure(figsize=(20, 5))
    sns.scatterplot(final.Date, final.missing, hue=final.missing)
    plt.title(df_name)
total_missing_days_pattern(df=Aquifer_Doganella, df_name='Aquifer_Doganella')
total_missing_days_pattern(df=Aquifer_Auser, df_name='Aquifer_Auser')
total_missing_days_pattern(df=Water_Spring_Amiata, df_name='Water_Spring_Amiata')
total_missing_days_pattern(df=Lake_Bilancino, df_name='Lake_Bilancino')
total_missing_days_pattern(df=Water_Spring_Madonna_di_Canneto, df_name='Water_Spring_Madonna_di_Canneto')
total_missing_days_pattern(df=Aquifer_Luco, df_name='Aquifer_Luco')
total_missing_days_pattern(df=Aquifer_Petrignano, df_name='Aquifer_Petrignano')
total_missing_days_pattern(df=Water_Spring_Lupa, df_name='Water_Spring_Lupa')
total_missing_days_pattern(df=River_Arno, df_name='River_Arno')
code
49115994/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
Aquifer_Doganella = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Doganella.csv')
Aquifer_Auser = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Auser.csv')
Water_Spring_Amiata = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Amiata.csv')
Lake_Bilancino = pd.read_csv('/kaggle/input/acea-water-prediction/Lake_Bilancino.csv')
Water_Spring_Madonna_di_Canneto = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Madonna_di_Canneto.csv')
Aquifer_Luco = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Luco.csv')
Aquifer_Petrignano = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Petrignano.csv')
Water_Spring_Lupa = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Lupa.csv')
River_Arno = pd.read_csv('/kaggle/input/acea-water-prediction/River_Arno.csv')
Aquifer_Doganella['Date'] = pd.to_datetime(Aquifer_Doganella['Date'])
Aquifer_Auser['Date'] = pd.to_datetime(Aquifer_Auser['Date'])
Water_Spring_Amiata['Date'] = pd.to_datetime(Water_Spring_Amiata['Date'])
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'])
Water_Spring_Madonna_di_Canneto['Date'] = pd.to_datetime(Water_Spring_Madonna_di_Canneto['Date'])
Aquifer_Luco['Date'] = pd.to_datetime(Aquifer_Luco['Date'])
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'])
Water_Spring_Lupa['Date'] = pd.to_datetime(Water_Spring_Lupa['Date'])
River_Arno['Date'] = pd.to_datetime(River_Arno['Date'])
def get_datefeatures(df):
    df['month'] = df.Date.dt.month
    df['day'] = df.Date.dt.day
    df['week'] = df.Date.dt.week
    df['year'] = df.Date.dt.year
    return df
Aquifer_Doganella = get_datefeatures(df=Aquifer_Doganella)
Aquifer_Auser = get_datefeatures(df=Aquifer_Auser)
Water_Spring_Amiata = get_datefeatures(df=Water_Spring_Amiata)
Lake_Bilancino = get_datefeatures(df=Lake_Bilancino)
Water_Spring_Madonna_di_Canneto = get_datefeatures(df=Water_Spring_Madonna_di_Canneto)
Aquifer_Luco = get_datefeatures(df=Aquifer_Luco)
Aquifer_Petrignano = get_datefeatures(df=Aquifer_Petrignano)
Water_Spring_Lupa = get_datefeatures(df=Water_Spring_Lupa)
River_Arno = Water_Spring_Lupa = get_datefeatures(df=River_Arno)
def check_duplicates(df, df_name):
    pass
check_duplicates(df=Aquifer_Doganella, df_name='Aquifer_Doganella')
check_duplicates(df=Aquifer_Auser, df_name='Aquifer_Auser')
check_duplicates(df=Water_Spring_Amiata, df_name='Water_Spring_Amiata')
check_duplicates(df=Lake_Bilancino, df_name='Lake_Bilancino')
check_duplicates(df=Water_Spring_Madonna_di_Canneto, df_name='Water_Spring_Madonna_di_Canneto')
check_duplicates(df=Aquifer_Luco, df_name='Aquifer_Luco')
check_duplicates(df=Aquifer_Petrignano, df_name='Aquifer_Petrignano')
check_duplicates(df=Water_Spring_Lupa, df_name='Water_Spring_Lupa')
check_duplicates(df=River_Arno, df_name='River_Arno')
def find_total_missing_days(df, df_name):
    daily_date = pd.date_range(start=df.Date.min(), end=df.Date.max(), freq='D')
    uniq_days = df.Date.nunique()
    print(f'#### {df_name}')
    print(f'Total unique days we have data: {uniq_days}')
    print(f'Total days missing: {len(daily_date) - uniq_days}')
    print('')
find_total_missing_days(df=Aquifer_Doganella, df_name='Aquifer_Doganella')
find_total_missing_days(df=Aquifer_Auser, df_name='Aquifer_Auser')
find_total_missing_days(df=Water_Spring_Amiata, df_name='Water_Spring_Amiata')
find_total_missing_days(df=Lake_Bilancino, df_name='Lake_Bilancino')
find_total_missing_days(df=Water_Spring_Madonna_di_Canneto, df_name='Water_Spring_Madonna_di_Canneto')
find_total_missing_days(df=Aquifer_Luco, df_name='Aquifer_Luco')
find_total_missing_days(df=Aquifer_Petrignano, df_name='Aquifer_Petrignano')
find_total_missing_days(df=Water_Spring_Lupa, df_name='Water_Spring_Lupa')
find_total_missing_days(df=River_Arno, df_name='River_Arno')
code
49115994/cell_5
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_9.png" ]
import pandas as pd
Aquifer_Doganella = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Doganella.csv')
Aquifer_Auser = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Auser.csv')
Water_Spring_Amiata = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Amiata.csv')
Lake_Bilancino = pd.read_csv('/kaggle/input/acea-water-prediction/Lake_Bilancino.csv')
Water_Spring_Madonna_di_Canneto = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Madonna_di_Canneto.csv')
Aquifer_Luco = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Luco.csv')
Aquifer_Petrignano = pd.read_csv('/kaggle/input/acea-water-prediction/Aquifer_Petrignano.csv')
Water_Spring_Lupa = pd.read_csv('/kaggle/input/acea-water-prediction/Water_Spring_Lupa.csv')
River_Arno = pd.read_csv('/kaggle/input/acea-water-prediction/River_Arno.csv')
Aquifer_Doganella['Date'] = pd.to_datetime(Aquifer_Doganella['Date'])
Aquifer_Auser['Date'] = pd.to_datetime(Aquifer_Auser['Date'])
Water_Spring_Amiata['Date'] = pd.to_datetime(Water_Spring_Amiata['Date'])
Lake_Bilancino['Date'] = pd.to_datetime(Lake_Bilancino['Date'])
Water_Spring_Madonna_di_Canneto['Date'] = pd.to_datetime(Water_Spring_Madonna_di_Canneto['Date'])
Aquifer_Luco['Date'] = pd.to_datetime(Aquifer_Luco['Date'])
Aquifer_Petrignano['Date'] = pd.to_datetime(Aquifer_Petrignano['Date'])
Water_Spring_Lupa['Date'] = pd.to_datetime(Water_Spring_Lupa['Date'])
River_Arno['Date'] = pd.to_datetime(River_Arno['Date'])
def get_datefeatures(df):
    df['month'] = df.Date.dt.month
    df['day'] = df.Date.dt.day
    df['week'] = df.Date.dt.week
    df['year'] = df.Date.dt.year
    return df
Aquifer_Doganella = get_datefeatures(df=Aquifer_Doganella)
Aquifer_Auser = get_datefeatures(df=Aquifer_Auser)
Water_Spring_Amiata = get_datefeatures(df=Water_Spring_Amiata)
Lake_Bilancino = get_datefeatures(df=Lake_Bilancino)
Water_Spring_Madonna_di_Canneto = get_datefeatures(df=Water_Spring_Madonna_di_Canneto)
Aquifer_Luco = get_datefeatures(df=Aquifer_Luco)
Aquifer_Petrignano = get_datefeatures(df=Aquifer_Petrignano)
Water_Spring_Lupa = get_datefeatures(df=Water_Spring_Lupa)
River_Arno = Water_Spring_Lupa = get_datefeatures(df=River_Arno)
code
89130056/cell_13
[ "text_html_output_1.png" ]
from sklearn import preprocessing
import pandas as pd
import re
import zipfile
import itertools
import zipfile
import re
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import cv2
from PIL import Image
from skimage.feature import hog
from sklearn import preprocessing
from sklearn import svm
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torch.utils.data import SubsetRandomSampler, DataLoader
from torchvision import transforms, models
path = '../input/painter-by-numbers/'
df = pd.read_csv(path + 'all_data_info.csv')
file_path = '../input/painter-by-numbers/'
archive = zipfile.ZipFile(file_path + 'replacements_for_corrupted_files.zip', 'r')
corrupted_ids = set()
for item in archive.namelist():
    ID = re.sub('[^0-9]', '', item)
    if ID != '':
        corrupted_ids.add(ID)
drop_idx = []
for index, row in df.iterrows():
    id_check = re.sub('[^0-9]', '', row['new_filename'])
    if id_check in corrupted_ids:
        drop_idx.append(index)
df = df.drop(drop_idx)
painter_dict = {'Kandinsky': '', 'Dali': '', 'Picasso': '', 'Delacroix': '', 'Rembrandt': '', 'Gogh': '', 'Kuniyoshi': '', 'Dore': '', 'Steinlen': '', 'Saryan': '', 'Goya': '', 'Lautrec': '', 'Modigliani': '', 'Beksinski': '', 'Pissarro': '', 'Kirchner': '', 'Renoir': '', 'Piranesi': '', 'Degas': '', 'Chagall': ''}
paintings_dict = painter_dict.copy()
for artist in painter_dict:
    for painter in df['artist']:
        if artist in painter:
            painter_dict[artist] = painter
            paintings = df[df['artist'] == painter].shape[0]
            paintings_dict[artist] = paintings
            break
sample_size = min(paintings_dict.values())
min_a = list(paintings_dict.keys())[list(paintings_dict.values()).index(sample_size)]
active_df = pd.DataFrame({})
for artist in painter_dict.values():
    tr_df = df[df['artist'] == artist].sort_values(by=['in_train', 'size_bytes'], ascending=[False, True])
    active_df = pd.concat([active_df, tr_df.iloc[:sample_size]])
artists = list(painter_dict.values())
LabEnc = preprocessing.LabelEncoder()
LabEnc.fit(artists)
code