path: string (lengths 13 to 17)
screenshot_names: sequence (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string (1 class: code)
32068850/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
code
32068850/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College']
sbc.loc[['Cooper Union', 'Harvey Mudd College', 'Amherst College', 'Auburn University']]
code
32068850/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.head()
code
32068850/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
code
32068850/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbm.tail(10)
code
32068850/cell_22
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
code
32068850/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
dataFrameList = [sbm, sbc, sbr]
for df in dataFrameList:
    for col in df.columns:
        i = 0
        while i < df.shape[0]:
            if isinstance(df[col].iloc[i], str) and df[col].iloc[i][0] == '$':
                df[col].iloc[i] = df[col].iloc[i].replace(',', '')
                df[col].iloc[i] = df[col].iloc[i].replace('$', '')
                df[col].iloc[i] = float(df[col].iloc[i])
            i = i + 1
code
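Note on 32068850/cell_10: the element-wise loop assigns through chained df[col].iloc[i] indexing, which pandas may apply to a temporary copy rather than the original frame. A vectorized sketch of the same dollar-string cleanup, assuming the three frames above are in scope:

# Strip '$' and ',' from currency strings, then coerce to numeric in one pass
for frame in (sbm, sbc, sbr):
    for col in frame.columns:
        if frame[col].dtype == object:
            frame[col] = pd.to_numeric(frame[col].str.replace(r'[$,]', '', regex=True), errors='coerce')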
32068850/cell_37
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College']
sbc.loc[['Cooper Union', 'Harvey Mudd College', 'Amherst College', 'Auburn University']]
sbc.iloc[100:-100]
code
32068850/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbm.head()
code
1007484/cell_11
[ "text_html_output_1.png" ]
from scipy import stats, optimize
import numpy as np
import pandas as pd

df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
df = df[df.gender == 'mens']
pre = df[df.playin_flag == 1]
data = []
for region in pre.team_region.unique():
    for seed in range(2, 17):
        res = pre[(pre.team_region == region) & pre.team_seed.isin([str(seed) + 'a', str(seed) + 'b'])]
        if res.shape[0] > 1:
            data.append([])
            for _, row in res.iterrows():
                data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == 0]
for region in post.team_region.unique():
    for matchup in matchups:
        res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
        if res.shape[0] > 1:
            data.append([])
            for _, row in res.iterrows():
                data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob'])
match['delta'] = match.Team1_Rating - match.Team2_Rating
match['win_extra'] = match.Team1_Prob - 0.5

def matcher(std, diff, prob):
    p = stats.norm.cdf(0, diff, std)
    return np.abs(p - prob)

stds = []
for _, row in match.iterrows():
    x0 = 1
    res = optimize.minimize(matcher, x0=x0, args=(row.delta, row.Team1_Prob))
    while res.status != 0 or res.x == x0:
        x0 *= 5
        res = optimize.minimize(matcher, x0=x0, args=(row.delta, row.Team1_Prob))
        if x0 > 1000:
            break
    stds.append(res.x)
stds
code
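Note on 1007484/cell_11: the numerical search inverts prob = stats.norm.cdf(0, diff, std) for std. Under that same Gaussian model the inverse has a closed form, std = -diff / norm.ppf(prob), so the per-row optimize.minimize loop can be replaced; a sketch (undefined when prob is exactly 0.5, where ppf is zero):

# Closed-form inversion of prob = norm.cdf(0, loc=diff, scale=std)
match['std_analytic'] = -match['delta'] / stats.norm.ppf(match['Team1_Prob'])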
1007484/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
df = df[df.gender == 'mens']
pre = df[df.playin_flag == 1]
data = []
for region in pre.team_region.unique():
    for seed in range(2, 17):
        res = pre[(pre.team_region == region) & pre.team_seed.isin([str(seed) + 'a', str(seed) + 'b'])]
        if res.shape[0] > 1:
            data.append([])
            for _, row in res.iterrows():
                data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == 0]
for region in post.team_region.unique():
    for matchup in matchups:
        res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
        if res.shape[0] > 1:
            data.append([])
            for _, row in res.iterrows():
                data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob'])
match['delta'] = match.Team1_Rating - match.Team2_Rating
match['win_extra'] = match.Team1_Prob - 0.5
sns.regplot('delta', 'win_extra', data=match, order=2)
code
1007484/cell_3
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
df.head()
code
90152893/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90152893/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.describe()
code
73078417/cell_21
[ "image_output_1.png" ]
import pandas as pd

f = pd.read_csv('../input/time-series-forecasting-with-yahoo-stock-price/yahoo_stock.csv')
air_passengers = pd.read_csv('../input/air-passengers/AirPassengers.csv', header=0, parse_dates=[0], names=['Month', 'Passengers'], index_col=0)
air_passengers.plot()
code
73078417/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from matplotlib.pylab import plt
from statsmodels.tsa import stattools
import numpy as np

grid = np.linspace(0, 720, 500)
noise = np.random.rand(500)
result_curve = noise
acf_result = stattools.acf(result_curve)
plt.plot(acf_result)
plt.axhline(y=0, linestyle='--')
plt.axhline(y=-1.96 / np.sqrt(len(result_curve)), linestyle='--')
plt.axhline(y=1.96 / np.sqrt(len(result_curve)), linestyle='--')
code
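Note on 73078417/cell_9: the axhline calls hand-draw the approximate 95% white-noise band at plus or minus 1.96 / sqrt(N) around zero. statsmodels can draw the ACF with that band directly; a sketch:

from statsmodels.graphics.tsaplots import plot_acf

# plot_acf shades a ~95% confidence interval by default (alpha=0.05)
plot_acf(result_curve, lags=40)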
73078417/cell_2
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from matplotlib.pylab import plt

plt.rcParams['figure.figsize'] = (10, 6)
import pandas as pd
import numpy as np
code
73078417/cell_11
[ "text_plain_output_1.png" ]
from matplotlib.pylab import plt
from statsmodels.tsa import stattools
import numpy as np

grid = np.linspace(0, 720, 500)
noise = np.random.rand(500)
result_curve = noise
acf_result = stattools.acf(result_curve)
grid = np.linspace(0, 100, 1000)
sin5 = np.sin(grid)
result_curve = sin5
plt.plot(grid, result_curve)
code
73078417/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib.pylab import plt
from statsmodels.tsa import stattools
import numpy as np
import pandas as pd

f = pd.read_csv('../input/time-series-forecasting-with-yahoo-stock-price/yahoo_stock.csv')
grid = np.linspace(0, 720, 500)
noise = np.random.rand(500)
result_curve = noise
acf_result = stattools.acf(result_curve)
grid = np.linspace(0, 100, 1000)
sin5 = np.sin(grid)
result_curve = sin5
grid = np.linspace(0, 100, 100)
sin5 = np.sin(grid)
result_curve = sin5
acf_result = stattools.acf(result_curve, nlags=100)
acf_result = stattools.acf(f['Open'], nlags=1000)
diff_f = f.Open - f.Open.shift()
diff_f.dropna(inplace=True)
acf_result = stattools.acf(diff_f)
plt.subplot(121)
plt.plot(acf_result)
plt.axhline(y=0, linestyle='--')
plt.axhline(y=-1.96 / np.sqrt(len(diff_f)), linestyle='--')
plt.axhline(y=1.96 / np.sqrt(len(diff_f)), linestyle='--')
code
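Note on 73078417/cell_18: first-differencing the Open series is meant to remove the trend before re-reading the ACF. Stationarity of the differenced series can also be checked explicitly with an augmented Dickey-Fuller test; a sketch, assuming diff_f from the cell above:

from statsmodels.tsa.stattools import adfuller

# adfuller returns (test statistic, p-value, ...); a small p-value supports stationarity
adf_stat, p_value, *rest = adfuller(diff_f)
print(adf_stat, p_value)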
73078417/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib.pylab import plt
import numpy as np

grid = np.linspace(0, 720, 500)
noise = np.random.rand(500)
result_curve = noise
plt.plot(grid, result_curve)
code
73078417/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib.pylab import plt
from statsmodels.tsa import stattools
import numpy as np
import pandas as pd

f = pd.read_csv('../input/time-series-forecasting-with-yahoo-stock-price/yahoo_stock.csv')
grid = np.linspace(0, 720, 500)
noise = np.random.rand(500)
result_curve = noise
acf_result = stattools.acf(result_curve)
grid = np.linspace(0, 100, 1000)
sin5 = np.sin(grid)
result_curve = sin5
grid = np.linspace(0, 100, 100)
sin5 = np.sin(grid)
result_curve = sin5
acf_result = stattools.acf(result_curve, nlags=100)
plt.plot(f['Open'])
code
73078417/cell_16
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from matplotlib.pylab import plt
from statsmodels.tsa import stattools
import numpy as np
import pandas as pd

f = pd.read_csv('../input/time-series-forecasting-with-yahoo-stock-price/yahoo_stock.csv')
grid = np.linspace(0, 720, 500)
noise = np.random.rand(500)
result_curve = noise
acf_result = stattools.acf(result_curve)
grid = np.linspace(0, 100, 1000)
sin5 = np.sin(grid)
result_curve = sin5
grid = np.linspace(0, 100, 100)
sin5 = np.sin(grid)
result_curve = sin5
acf_result = stattools.acf(result_curve, nlags=100)
acf_result = stattools.acf(f['Open'], nlags=1000)
plt.subplot(121)
plt.plot(acf_result)
plt.axhline(y=0, linestyle='--')
plt.axhline(y=-1.96 / np.sqrt(len(f['Open'])), linestyle='--')
plt.axhline(y=1.96 / np.sqrt(len(f['Open'])), linestyle='--')
code
73078417/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

f = pd.read_csv('../input/time-series-forecasting-with-yahoo-stock-price/yahoo_stock.csv')
diff_f = f.Open - f.Open.shift()
diff_f.plot()
diff_f.dropna(inplace=True)
code
73078417/cell_14
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd

f = pd.read_csv('../input/time-series-forecasting-with-yahoo-stock-price/yahoo_stock.csv')
f.head()
code
73078417/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib.pylab import plt
from statsmodels.tsa import stattools
import numpy as np

grid = np.linspace(0, 720, 500)
noise = np.random.rand(500)
result_curve = noise
acf_result = stattools.acf(result_curve)
grid = np.linspace(0, 100, 1000)
sin5 = np.sin(grid)
result_curve = sin5
grid = np.linspace(0, 100, 100)
sin5 = np.sin(grid)
result_curve = sin5
acf_result = stattools.acf(result_curve, nlags=100)
plt.plot(acf_result)
plt.axhline(y=0, linestyle='--')
plt.axhline(y=-1.96 / np.sqrt(len(result_curve)), linestyle='--')
plt.axhline(y=1.96 / np.sqrt(len(result_curve)), linestyle='--')
code
18154941/cell_21
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
import numpy as np  # linear algebra

tfms = get_transforms(do_flip=True, flip_vert=True, max_warp=0)
data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
arch = models.vgg16_bn
kappa = KappaScore()
kappa.weights = 'quadratic'
learn = cnn_learner(data, arch, metrics=[kappa], model_dir='../saved_models')
learn.model
learn.lr_find()
learn.recorder.plot(suggestion=True)
code
18154941/cell_13
[ "image_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
import numpy as np  # linear algebra

tfms = get_transforms(do_flip=True, flip_vert=True, max_warp=0)
data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
code
18154941/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
fname = os.path.join(data_path, train_label_file)
df = pd.read_csv(fname)
df.head(5)
code
18154941/cell_4
[ "text_plain_output_1.png" ]
import torch

print('Make sure cudnn is enabled:', torch.backends.cudnn.enabled, torch.backends.cudnn.deterministic)
torch.backends.cudnn.deterministic = True
print('Make sure cudnn is enabled:', torch.backends.cudnn.enabled, torch.backends.cudnn.deterministic)
code
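Note on 18154941/cell_4: setting torch.backends.cudnn.deterministic alone does not pin down all randomness; the Python, NumPy, and torch RNGs also need seeding. A minimal sketch of a fuller setup (an addition, not part of the original notebook):

import random
import numpy as np
import torch

def seed_everything(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False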
18154941/cell_23
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
from fastai.callbacks import SaveModelCallback  # assumed fastai v1 import
import numpy as np  # linear algebra

tfms = get_transforms(do_flip=True, flip_vert=True, max_warp=0)
data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
arch = models.vgg16_bn
kappa = KappaScore()
kappa.weights = 'quadratic'
learn = cnn_learner(data, arch, metrics=[kappa], model_dir='../saved_models')
learn.model
learn.lr_find()
lr = 0.01 / 2
learn.fit_one_cycle(5, lr, callbacks=[SaveModelCallback(learn, monitor='kappa_score', mode='max', name='headonly_vgg16_bn')])
code
18154941/cell_20
[ "image_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
import numpy as np  # linear algebra

tfms = get_transforms(do_flip=True, flip_vert=True, max_warp=0)
data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
arch = models.vgg16_bn
kappa = KappaScore()
kappa.weights = 'quadratic'
learn = cnn_learner(data, arch, metrics=[kappa], model_dir='../saved_models')
learn.model
code
18154941/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
from fastai.callbacks import SaveModelCallback  # assumed fastai v1 import
import numpy as np  # linear algebra

tfms = get_transforms(do_flip=True, flip_vert=True, max_warp=0)
data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
arch = models.vgg16_bn
kappa = KappaScore()
kappa.weights = 'quadratic'
learn = cnn_learner(data, arch, metrics=[kappa], model_dir='../saved_models')
learn.model
learn.lr_find()
lr = 0.01 / 2
learn.fit_one_cycle(5, lr, callbacks=[SaveModelCallback(learn, monitor='kappa_score', mode='max', name='headonly_vgg16_bn')])
learn.load('headonly_vgg16_bn')
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(5, slice(0.0001 / 2, lr / 10), callbacks=[SaveModelCallback(learn, monitor='kappa_score', mode='max', name='best_model_vgg16_bn_224')])
learn.recorder.plot_losses()
learn.recorder.plot_metrics()
code
18154941/cell_11
[ "text_html_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
import numpy as np  # linear algebra

data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
code
18154941/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
18154941/cell_18
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
fname = os.path.join(data_path, train_label_file)
if not os.path.exists('/tmp/.cache/torch/checkpoints/'):
    os.makedirs('/tmp/.cache/torch/checkpoints/')
print(os.listdir('/tmp/.cache/torch/checkpoints'))
code
18154941/cell_32
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
import numpy as np  # linear algebra

tfms = get_transforms(do_flip=True, flip_vert=True, max_warp=0)
data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
bs = 16
size = 448
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
code
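Note on 18154941/cell_32: rebuilding the DataBunch at size=448 with bs=16 is the progressive-resizing step, fine-tuning the 224px weights on larger images. In fastai v1 the usual next step is to point the existing learner at the new data; a sketch, assuming the learner from the earlier cells:

learn.data = data  # swap in the 448px DataBunch
learn.freeze()     # retrain the head first at the new resolution
learn.lr_find()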
18154941/cell_28
[ "image_output_2.png", "image_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
from fastai.callbacks import SaveModelCallback  # assumed fastai v1 import
import numpy as np  # linear algebra

tfms = get_transforms(do_flip=True, flip_vert=True, max_warp=0)
data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
arch = models.vgg16_bn
kappa = KappaScore()
kappa.weights = 'quadratic'
learn = cnn_learner(data, arch, metrics=[kappa], model_dir='../saved_models')
learn.model
learn.lr_find()
lr = 0.01 / 2
learn.fit_one_cycle(5, lr, callbacks=[SaveModelCallback(learn, monitor='kappa_score', mode='max', name='headonly_vgg16_bn')])
learn.load('headonly_vgg16_bn')
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(5, slice(0.0001 / 2, lr / 10), callbacks=[SaveModelCallback(learn, monitor='kappa_score', mode='max', name='best_model_vgg16_bn_224')])
code
18154941/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
from fastai.callbacks import SaveModelCallback  # assumed fastai v1 import
import numpy as np  # linear algebra

tfms = get_transforms(do_flip=True, flip_vert=True, max_warp=0)
data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
arch = models.vgg16_bn
kappa = KappaScore()
kappa.weights = 'quadratic'
learn = cnn_learner(data, arch, metrics=[kappa], model_dir='../saved_models')
learn.model
learn.lr_find()
lr = 0.01 / 2
learn.fit_one_cycle(5, lr, callbacks=[SaveModelCallback(learn, monitor='kappa_score', mode='max', name='headonly_vgg16_bn')])
learn.recorder.plot_losses()
learn.recorder.plot_metrics()
code
18154941/cell_14
[ "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
import numpy as np  # linear algebra

tfms = get_transforms(do_flip=True, flip_vert=True, max_warp=0)
data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
data.show_batch(rows=3, figsize=(7, 6))
code
18154941/cell_10
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
fname = os.path.join(data_path, train_label_file)
df = pd.read_csv(fname)
_ = df.hist()
code
18154941/cell_27
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from fastai.vision import *  # assumed fastai v1 import; the original cell relied on names defined elsewhere in the notebook
from fastai.callbacks import SaveModelCallback  # assumed fastai v1 import
import numpy as np  # linear algebra

tfms = get_transforms(do_flip=True, flip_vert=True, max_warp=0)
data_path = '../input/aptos2019-blindness-detection'
train_label_file = 'train.csv'
train_images_folder = 'train_images'
test_label_file = 'test.csv'
test_images_folder = 'test_images'
image_suffix = '.png'
split_pct = 0.1
bs = 64
size = 224
np.random.seed(42)
src = ImageList.from_csv(data_path, train_label_file, folder=train_images_folder, suffix=image_suffix).split_by_rand_pct(split_pct).label_from_df()
src
data = src.transform(tfms, size=size).databunch(bs=bs).normalize(imagenet_stats)
(data.c, data.classes)
arch = models.vgg16_bn
kappa = KappaScore()
kappa.weights = 'quadratic'
learn = cnn_learner(data, arch, metrics=[kappa], model_dir='../saved_models')
learn.model
learn.lr_find()
lr = 0.01 / 2
learn.fit_one_cycle(5, lr, callbacks=[SaveModelCallback(learn, monitor='kappa_score', mode='max', name='headonly_vgg16_bn')])
learn.load('headonly_vgg16_bn')
learn.unfreeze()
learn.lr_find()
learn.recorder.plot(suggestion=True)
code
128008350/cell_3
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

stimulus = pd.read_excel('/kaggle/input/young-adults-affective-data-ecg-and-gsr-signals/ECG_GSR_Emotions/Stimulus_Description.xlsx')
stimulus['Target Emotion'] = stimulus['Target Emotion'].str.title()
stimulus.info()
stimulus.head()
code
128008350/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

stimulus = pd.read_excel('/kaggle/input/young-adults-affective-data-ecg-and-gsr-signals/ECG_GSR_Emotions/Stimulus_Description.xlsx')
stimulus['Target Emotion'] = stimulus['Target Emotion'].str.title()
stimulus.info()
stimulus.head()
ecg_data = pd.read_excel('/kaggle/input/young-adults-affective-data-ecg-and-gsr-signals/ECG_GSR_Emotions/Self-Annotation Labels/Self-annotation Single Modal_Use.xlsx')
ecg_data['filename'] = 'ECGdata_s' + ecg_data['Session Id'].astype(str) + 'p' + ecg_data['Participant Id'].astype(str) + 'v' + ecg_data['Video Id'].astype(str) + '.dat'
PATH_2 = '/kaggle/input/young-adults-affective-data-ecg-and-gsr-signals/ECG_GSR_Emotions/Raw Data/Single Modal/ECG/'
ecg_data['ECG_list'] = ecg_data['filename'].apply(lambda x: list(pd.read_table(PATH_2 + x, sep=',')))
ecg_data = ecg_data.merge(stimulus.iloc[:, 0:3], left_on=['Session Id', 'Video Id'], right_on=['Session ID', 'Video ID'], how='left')
ecg_data.to_csv('ecg_data.csv', index=False)
ecg_data = pd.read_csv('/kaggle/working/ecg_data.csv')
ecg_data.info()
ecg_data.head()
code
33098890/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dfs = {}
for name in ['train', 'test']:
    df = pd.read_csv('/kaggle/input/bike-sharing-demand/%s.csv' % name)
    df['_data'] = name
    dfs[name] = df
df = dfs['train'].append(dfs['test'])
df.columns = map(str.lower, df.columns)
for col in ['casual', 'registered', 'count']:
    df['%s_log' % col] = np.log(df[col] + 1)
dt = pd.DatetimeIndex(df['datetime'])
df.set_index(dt, inplace=True)
df['date'] = dt.date
df['day'] = dt.day
df['month'] = dt.month
df['year'] = dt.year
df['hour'] = dt.hour
df['dow'] = dt.dayofweek
df['woy'] = dt.weekofyear

def get_rmsle(y_pred, y_actual):
    diff = np.log(y_pred + 1) - np.log(y_actual + 1)
    mean_error = np.square(diff).mean()
    return np.sqrt(mean_error)

def get_data():
    data = df[df['_data'] == 'train'].copy()
    return data

def custom_train_test_split(data, cutoff_day=15):
    train = data[data['day'] <= cutoff_day]
    test = data[data['day'] > cutoff_day]
    return (train, test)

def prep_data(data, input_cols):
    X = data[input_cols]
    y_r = data['registered_log']
    y_c = data['casual_log']
    return (X, y_r, y_c)

def predict_on_validation_set(model, input_cols):
    data = get_data()
    train, test = custom_train_test_split(data)
    X_train, y_train_r, y_train_c = prep_data(train, input_cols)
    X_test, y_test_r, y_test_c = prep_data(test, input_cols)
    model_r = model.fit(X_train, y_train_r)
    y_pred_r = np.exp(model_r.predict(X_test)) - 1
    model_c = model.fit(X_train, y_train_c)
    y_pred_c = np.exp(model_c.predict(X_test)) - 1
    y_pred_comb = np.round(y_pred_r + y_pred_c)
    y_pred_comb[y_pred_comb < 0] = 0
    y_test_comb = np.exp(y_test_r) + np.exp(y_test_c) - 2
    score = get_rmsle(y_pred_comb, y_test_comb)
    return (y_pred_comb, y_test_comb, score)

df_test = df[df['_data'] == 'test'].copy()

def predict_on_test_set(model, x_cols):
    df_train = df[df['_data'] == 'train'].copy()
    X_train = df_train[x_cols]
    y_train_cas = df_train['casual_log']
    y_train_reg = df_train['registered_log']
    X_test = df_test[x_cols]
    casual_model = model.fit(X_train, y_train_cas)
    y_pred_cas = casual_model.predict(X_test)
    y_pred_cas = np.exp(y_pred_cas) - 1
    registered_model = model.fit(X_train, y_train_reg)
    y_pred_reg = registered_model.predict(X_test)
    y_pred_reg = np.exp(y_pred_reg) - 1
    return y_pred_cas + y_pred_reg

params = {'n_estimators': 1000, 'max_depth': 15, 'random_state': 0, 'min_samples_split': 5, 'n_jobs': -1}
rf_model = RandomForestRegressor(**params)
rf_cols = ['weather', 'temp', 'atemp', 'windspeed', 'workingday', 'season', 'holiday', 'sticky', 'hour', 'dow', 'woy', 'peak']
rf_p, rf_t, rf_score = predict_on_validation_set(rf_model, rf_cols)
params = {'n_estimators': 150, 'max_depth': 5, 'random_state': 0, 'min_samples_leaf': 10, 'learning_rate': 0.1, 'subsample': 0.7, 'loss': 'ls'}
gbm_model = GradientBoostingRegressor(**params)
gbm_cols = ['weather', 'temp', 'atemp', 'humidity', 'windspeed', 'holiday', 'workingday', 'season', 'hour', 'dow', 'year', 'ideal', 'count_season']
gbm_p, gbm_t, gbm_score = predict_on_validation_set(gbm_model, gbm_cols)
y_p = np.round(0.2 * rf_p + 0.8 * gbm_p)
rf_pred = predict_on_test_set(rf_model, rf_cols)
gbm_pred = predict_on_test_set(gbm_model, gbm_cols)
code
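Note on 33098890/cell_13: get_rmsle computes sqrt(mean((log(pred + 1) - log(actual + 1))^2)) by hand; numpy's log1p and scikit-learn's mean_squared_log_error express the same quantity more directly. A sketch, assuming nonnegative inputs:

import numpy as np
from sklearn.metrics import mean_squared_log_error

def get_rmsle_alt(y_pred, y_actual):
    # equivalent to the hand-rolled version above
    return np.sqrt(mean_squared_log_error(y_actual, y_pred))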
33098890/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dfs = {}
for name in ['train', 'test']:
    df = pd.read_csv('/kaggle/input/bike-sharing-demand/%s.csv' % name)
    df['_data'] = name
    dfs[name] = df
df = dfs['train'].append(dfs['test'])
df.columns = map(str.lower, df.columns)
dt = pd.DatetimeIndex(df['datetime'])
df.set_index(dt, inplace=True)
df['date'] = dt.date
df['day'] = dt.day
df['month'] = dt.month
df['year'] = dt.year
df['hour'] = dt.hour
df['dow'] = dt.dayofweek
df['woy'] = dt.weekofyear
df['ideal'] = df[['temp', 'windspeed']].apply(lambda x: (0, 1)[x['temp'] > 27 and x['windspeed'] < 30], axis=1)
df['sticky'] = df[['humidity', 'workingday']].apply(lambda x: (0, 1)[x['workingday'] == 1 and x['humidity'] >= 60], axis=1)
code
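Note on 33098890/cell_9: the row-wise apply/lambda flags can be written as vectorized comparisons, which is both faster and easier to read. A sketch covering the two features in this cell:

# Equivalent vectorized versions of the 'ideal' and 'sticky' flags
df['ideal'] = ((df['temp'] > 27) & (df['windspeed'] < 30)).astype(int)
df['sticky'] = ((df['workingday'] == 1) & (df['humidity'] >= 60)).astype(int)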
33098890/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dfs = {}
for name in ['train', 'test']:
    df = pd.read_csv('/kaggle/input/bike-sharing-demand/%s.csv' % name)
    df['_data'] = name
    dfs[name] = df
df = dfs['train'].append(dfs['test'])
df.columns = map(str.lower, df.columns)
for col in ['casual', 'registered', 'count']:
    df['%s_log' % col] = np.log(df[col] + 1)
dt = pd.DatetimeIndex(df['datetime'])
df.set_index(dt, inplace=True)
df['date'] = dt.date
df['day'] = dt.day
df['month'] = dt.month
df['year'] = dt.year
df['hour'] = dt.hour
df['dow'] = dt.dayofweek
df['woy'] = dt.weekofyear

def get_rmsle(y_pred, y_actual):
    diff = np.log(y_pred + 1) - np.log(y_actual + 1)
    mean_error = np.square(diff).mean()
    return np.sqrt(mean_error)

def get_data():
    data = df[df['_data'] == 'train'].copy()
    return data

def custom_train_test_split(data, cutoff_day=15):
    train = data[data['day'] <= cutoff_day]
    test = data[data['day'] > cutoff_day]
    return (train, test)

def prep_data(data, input_cols):
    X = data[input_cols]
    y_r = data['registered_log']
    y_c = data['casual_log']
    return (X, y_r, y_c)

def predict_on_validation_set(model, input_cols):
    data = get_data()
    train, test = custom_train_test_split(data)
    X_train, y_train_r, y_train_c = prep_data(train, input_cols)
    X_test, y_test_r, y_test_c = prep_data(test, input_cols)
    model_r = model.fit(X_train, y_train_r)
    y_pred_r = np.exp(model_r.predict(X_test)) - 1
    model_c = model.fit(X_train, y_train_c)
    y_pred_c = np.exp(model_c.predict(X_test)) - 1
    y_pred_comb = np.round(y_pred_r + y_pred_c)
    y_pred_comb[y_pred_comb < 0] = 0
    y_test_comb = np.exp(y_test_r) + np.exp(y_test_c) - 2
    score = get_rmsle(y_pred_comb, y_test_comb)
    return (y_pred_comb, y_test_comb, score)

df_test = df[df['_data'] == 'test'].copy()

def predict_on_test_set(model, x_cols):
    df_train = df[df['_data'] == 'train'].copy()
    X_train = df_train[x_cols]
    y_train_cas = df_train['casual_log']
    y_train_reg = df_train['registered_log']
    X_test = df_test[x_cols]
    casual_model = model.fit(X_train, y_train_cas)
    y_pred_cas = casual_model.predict(X_test)
    y_pred_cas = np.exp(y_pred_cas) - 1
    registered_model = model.fit(X_train, y_train_reg)
    y_pred_reg = registered_model.predict(X_test)
    y_pred_reg = np.exp(y_pred_reg) - 1
    return y_pred_cas + y_pred_reg

params = {'n_estimators': 1000, 'max_depth': 15, 'random_state': 0, 'min_samples_split': 5, 'n_jobs': -1}
rf_model = RandomForestRegressor(**params)
rf_cols = ['weather', 'temp', 'atemp', 'windspeed', 'workingday', 'season', 'holiday', 'sticky', 'hour', 'dow', 'woy', 'peak']
rf_p, rf_t, rf_score = predict_on_validation_set(rf_model, rf_cols)
print(rf_score)
code
33098890/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import matplotlib.pyplot as plt

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33098890/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dfs = {}
for name in ['train', 'test']:
    df = pd.read_csv('/kaggle/input/bike-sharing-demand/%s.csv' % name)
    df['_data'] = name
    dfs[name] = df
df = dfs['train'].append(dfs['test'])
df.columns = map(str.lower, df.columns)
dt = pd.DatetimeIndex(df['datetime'])
df.set_index(dt, inplace=True)
df['date'] = dt.date
df['day'] = dt.day
df['month'] = dt.month
df['year'] = dt.year
df['hour'] = dt.hour
df['dow'] = dt.dayofweek
df['woy'] = dt.weekofyear
df['peak'] = df[['hour', 'workingday']].apply(lambda x: (0, 1)[x['workingday'] == 1 and (x['hour'] == 8 or 17 <= x['hour'] <= 18 or 12 <= x['hour'] <= 13) or (x['workingday'] == 0 and 10 <= x['hour'] <= 19)], axis=1)
code
33098890/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dfs = {}
for name in ['train', 'test']:
    df = pd.read_csv('/kaggle/input/bike-sharing-demand/%s.csv' % name)
    df['_data'] = name
    dfs[name] = df
df = dfs['train'].append(dfs['test'])
df.columns = map(str.lower, df.columns)
dt = pd.DatetimeIndex(df['datetime'])
df.set_index(dt, inplace=True)
df['date'] = dt.date
df['day'] = dt.day
df['month'] = dt.month
df['year'] = dt.year
df['hour'] = dt.hour
df['dow'] = dt.dayofweek
df['woy'] = dt.weekofyear
df['holiday'] = df[['month', 'day', 'holiday', 'year']].apply(lambda x: (x['holiday'], 1)[x['year'] == 2012 and x['month'] == 10 and (x['day'] in [30])], axis=1)
df['holiday'] = df[['month', 'day', 'holiday']].apply(lambda x: (x['holiday'], 1)[x['month'] == 12 and x['day'] in [24, 26, 31]], axis=1)
df['workingday'] = df[['month', 'day', 'workingday']].apply(lambda x: (x['workingday'], 0)[x['month'] == 12 and x['day'] in [24, 31]], axis=1)
code
33098890/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dfs = {}
for name in ['train', 'test']:
    df = pd.read_csv('/kaggle/input/bike-sharing-demand/%s.csv' % name)
    df['_data'] = name
    dfs[name] = df
df = dfs['train'].append(dfs['test'])
df.columns = map(str.lower, df.columns)
for col in ['casual', 'registered', 'count']:
    df['%s_log' % col] = np.log(df[col] + 1)
dt = pd.DatetimeIndex(df['datetime'])
df.set_index(dt, inplace=True)
df['date'] = dt.date
df['day'] = dt.day
df['month'] = dt.month
df['year'] = dt.year
df['hour'] = dt.hour
df['dow'] = dt.dayofweek
df['woy'] = dt.weekofyear

def get_rmsle(y_pred, y_actual):
    diff = np.log(y_pred + 1) - np.log(y_actual + 1)
    mean_error = np.square(diff).mean()
    return np.sqrt(mean_error)

def get_data():
    data = df[df['_data'] == 'train'].copy()
    return data

def custom_train_test_split(data, cutoff_day=15):
    train = data[data['day'] <= cutoff_day]
    test = data[data['day'] > cutoff_day]
    return (train, test)

def prep_data(data, input_cols):
    X = data[input_cols]
    y_r = data['registered_log']
    y_c = data['casual_log']
    return (X, y_r, y_c)

def predict_on_validation_set(model, input_cols):
    data = get_data()
    train, test = custom_train_test_split(data)
    X_train, y_train_r, y_train_c = prep_data(train, input_cols)
    X_test, y_test_r, y_test_c = prep_data(test, input_cols)
    model_r = model.fit(X_train, y_train_r)
    y_pred_r = np.exp(model_r.predict(X_test)) - 1
    model_c = model.fit(X_train, y_train_c)
    y_pred_c = np.exp(model_c.predict(X_test)) - 1
    y_pred_comb = np.round(y_pred_r + y_pred_c)
    y_pred_comb[y_pred_comb < 0] = 0
    y_test_comb = np.exp(y_test_r) + np.exp(y_test_c) - 2
    score = get_rmsle(y_pred_comb, y_test_comb)
    return (y_pred_comb, y_test_comb, score)

df_test = df[df['_data'] == 'test'].copy()

def predict_on_test_set(model, x_cols):
    df_train = df[df['_data'] == 'train'].copy()
    X_train = df_train[x_cols]
    y_train_cas = df_train['casual_log']
    y_train_reg = df_train['registered_log']
    X_test = df_test[x_cols]
    casual_model = model.fit(X_train, y_train_cas)
    y_pred_cas = casual_model.predict(X_test)
    y_pred_cas = np.exp(y_pred_cas) - 1
    registered_model = model.fit(X_train, y_train_reg)
    y_pred_reg = registered_model.predict(X_test)
    y_pred_reg = np.exp(y_pred_reg) - 1
    return y_pred_cas + y_pred_reg

params = {'n_estimators': 1000, 'max_depth': 15, 'random_state': 0, 'min_samples_split': 5, 'n_jobs': -1}
rf_model = RandomForestRegressor(**params)
rf_cols = ['weather', 'temp', 'atemp', 'windspeed', 'workingday', 'season', 'holiday', 'sticky', 'hour', 'dow', 'woy', 'peak']
rf_p, rf_t, rf_score = predict_on_validation_set(rf_model, rf_cols)
params = {'n_estimators': 150, 'max_depth': 5, 'random_state': 0, 'min_samples_leaf': 10, 'learning_rate': 0.1, 'subsample': 0.7, 'loss': 'ls'}
gbm_model = GradientBoostingRegressor(**params)
gbm_cols = ['weather', 'temp', 'atemp', 'humidity', 'windspeed', 'holiday', 'workingday', 'season', 'hour', 'dow', 'year', 'ideal', 'count_season']
gbm_p, gbm_t, gbm_score = predict_on_validation_set(gbm_model, gbm_cols)
y_p = np.round(0.2 * rf_p + 0.8 * gbm_p)
rf_pred = predict_on_test_set(rf_model, rf_cols)
gbm_pred = predict_on_test_set(gbm_model, gbm_cols)
y_pred = np.round(0.2 * rf_pred + 0.8 * gbm_pred)
df_test['count'] = y_pred
final_df = df_test[['datetime', 'count']].copy()
final_df.to_csv('output5.csv', index=False)
code
33098890/cell_14
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dfs = {}
for name in ['train', 'test']:
    df = pd.read_csv('/kaggle/input/bike-sharing-demand/%s.csv' % name)
    df['_data'] = name
    dfs[name] = df
df = dfs['train'].append(dfs['test'])
df.columns = map(str.lower, df.columns)
for col in ['casual', 'registered', 'count']:
    df['%s_log' % col] = np.log(df[col] + 1)
dt = pd.DatetimeIndex(df['datetime'])
df.set_index(dt, inplace=True)
df['date'] = dt.date
df['day'] = dt.day
df['month'] = dt.month
df['year'] = dt.year
df['hour'] = dt.hour
df['dow'] = dt.dayofweek
df['woy'] = dt.weekofyear

def get_rmsle(y_pred, y_actual):
    diff = np.log(y_pred + 1) - np.log(y_actual + 1)
    mean_error = np.square(diff).mean()
    return np.sqrt(mean_error)

def get_data():
    data = df[df['_data'] == 'train'].copy()
    return data

def custom_train_test_split(data, cutoff_day=15):
    train = data[data['day'] <= cutoff_day]
    test = data[data['day'] > cutoff_day]
    return (train, test)

def prep_data(data, input_cols):
    X = data[input_cols]
    y_r = data['registered_log']
    y_c = data['casual_log']
    return (X, y_r, y_c)

def predict_on_validation_set(model, input_cols):
    data = get_data()
    train, test = custom_train_test_split(data)
    X_train, y_train_r, y_train_c = prep_data(train, input_cols)
    X_test, y_test_r, y_test_c = prep_data(test, input_cols)
    model_r = model.fit(X_train, y_train_r)
    y_pred_r = np.exp(model_r.predict(X_test)) - 1
    model_c = model.fit(X_train, y_train_c)
    y_pred_c = np.exp(model_c.predict(X_test)) - 1
    y_pred_comb = np.round(y_pred_r + y_pred_c)
    y_pred_comb[y_pred_comb < 0] = 0
    y_test_comb = np.exp(y_test_r) + np.exp(y_test_c) - 2
    score = get_rmsle(y_pred_comb, y_test_comb)
    return (y_pred_comb, y_test_comb, score)

df_test = df[df['_data'] == 'test'].copy()

def predict_on_test_set(model, x_cols):
    df_train = df[df['_data'] == 'train'].copy()
    X_train = df_train[x_cols]
    y_train_cas = df_train['casual_log']
    y_train_reg = df_train['registered_log']
    X_test = df_test[x_cols]
    casual_model = model.fit(X_train, y_train_cas)
    y_pred_cas = casual_model.predict(X_test)
    y_pred_cas = np.exp(y_pred_cas) - 1
    registered_model = model.fit(X_train, y_train_reg)
    y_pred_reg = registered_model.predict(X_test)
    y_pred_reg = np.exp(y_pred_reg) - 1
    return y_pred_cas + y_pred_reg

params = {'n_estimators': 1000, 'max_depth': 15, 'random_state': 0, 'min_samples_split': 5, 'n_jobs': -1}
rf_model = RandomForestRegressor(**params)
rf_cols = ['weather', 'temp', 'atemp', 'windspeed', 'workingday', 'season', 'holiday', 'sticky', 'hour', 'dow', 'woy', 'peak']
rf_p, rf_t, rf_score = predict_on_validation_set(rf_model, rf_cols)
params = {'n_estimators': 150, 'max_depth': 5, 'random_state': 0, 'min_samples_leaf': 10, 'learning_rate': 0.1, 'subsample': 0.7, 'loss': 'ls'}
gbm_model = GradientBoostingRegressor(**params)
gbm_cols = ['weather', 'temp', 'atemp', 'humidity', 'windspeed', 'holiday', 'workingday', 'season', 'hour', 'dow', 'year', 'ideal', 'count_season']
gbm_p, gbm_t, gbm_score = predict_on_validation_set(gbm_model, gbm_cols)
y_p = np.round(0.2 * rf_p + 0.8 * gbm_p)
rf_pred = predict_on_test_set(rf_model, rf_cols)
gbm_pred = predict_on_test_set(gbm_model, gbm_cols)
y_pred = np.round(0.2 * rf_pred + 0.8 * gbm_pred)
code
33098890/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dfs = {}
for name in ['train', 'test']:
    df = pd.read_csv('/kaggle/input/bike-sharing-demand/%s.csv' % name)
    df['_data'] = name
    dfs[name] = df
df = dfs['train'].append(dfs['test'])
df.columns = map(str.lower, df.columns)
for col in ['casual', 'registered', 'count']:
    df['%s_log' % col] = np.log(df[col] + 1)
dt = pd.DatetimeIndex(df['datetime'])
df.set_index(dt, inplace=True)
df['date'] = dt.date
df['day'] = dt.day
df['month'] = dt.month
df['year'] = dt.year
df['hour'] = dt.hour
df['dow'] = dt.dayofweek
df['woy'] = dt.weekofyear

def get_rmsle(y_pred, y_actual):
    diff = np.log(y_pred + 1) - np.log(y_actual + 1)
    mean_error = np.square(diff).mean()
    return np.sqrt(mean_error)

def get_data():
    data = df[df['_data'] == 'train'].copy()
    return data

def custom_train_test_split(data, cutoff_day=15):
    train = data[data['day'] <= cutoff_day]
    test = data[data['day'] > cutoff_day]
    return (train, test)

def prep_data(data, input_cols):
    X = data[input_cols]
    y_r = data['registered_log']
    y_c = data['casual_log']
    return (X, y_r, y_c)

def predict_on_validation_set(model, input_cols):
    data = get_data()
    train, test = custom_train_test_split(data)
    X_train, y_train_r, y_train_c = prep_data(train, input_cols)
    X_test, y_test_r, y_test_c = prep_data(test, input_cols)
    model_r = model.fit(X_train, y_train_r)
    y_pred_r = np.exp(model_r.predict(X_test)) - 1
    model_c = model.fit(X_train, y_train_c)
    y_pred_c = np.exp(model_c.predict(X_test)) - 1
    y_pred_comb = np.round(y_pred_r + y_pred_c)
    y_pred_comb[y_pred_comb < 0] = 0
    y_test_comb = np.exp(y_test_r) + np.exp(y_test_c) - 2
    score = get_rmsle(y_pred_comb, y_test_comb)
    return (y_pred_comb, y_test_comb, score)

df_test = df[df['_data'] == 'test'].copy()

def predict_on_test_set(model, x_cols):
    df_train = df[df['_data'] == 'train'].copy()
    X_train = df_train[x_cols]
    y_train_cas = df_train['casual_log']
    y_train_reg = df_train['registered_log']
    X_test = df_test[x_cols]
    casual_model = model.fit(X_train, y_train_cas)
    y_pred_cas = casual_model.predict(X_test)
    y_pred_cas = np.exp(y_pred_cas) - 1
    registered_model = model.fit(X_train, y_train_reg)
    y_pred_reg = registered_model.predict(X_test)
    y_pred_reg = np.exp(y_pred_reg) - 1
    return y_pred_cas + y_pred_reg

params = {'n_estimators': 1000, 'max_depth': 15, 'random_state': 0, 'min_samples_split': 5, 'n_jobs': -1}
rf_model = RandomForestRegressor(**params)
rf_cols = ['weather', 'temp', 'atemp', 'windspeed', 'workingday', 'season', 'holiday', 'sticky', 'hour', 'dow', 'woy', 'peak']
rf_p, rf_t, rf_score = predict_on_validation_set(rf_model, rf_cols)
params = {'n_estimators': 150, 'max_depth': 5, 'random_state': 0, 'min_samples_leaf': 10, 'learning_rate': 0.1, 'subsample': 0.7, 'loss': 'ls'}
gbm_model = GradientBoostingRegressor(**params)
gbm_cols = ['weather', 'temp', 'atemp', 'humidity', 'windspeed', 'holiday', 'workingday', 'season', 'hour', 'dow', 'year', 'ideal', 'count_season']
gbm_p, gbm_t, gbm_score = predict_on_validation_set(gbm_model, gbm_cols)
print(gbm_score)
y_p = np.round(0.2 * rf_p + 0.8 * gbm_p)
print(get_rmsle(y_p, rf_t))
code
2022076/cell_21
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, precision_recall_curve
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
clf = LogisticRegression()
clf.fit(X, y)
y_hat = clf.predict(X)
plt.style.use('fivethirtyeight')
precision, recall, _ = precision_recall_curve(y, y_hat)
fig, ax = plt.subplots(1, figsize=(12, 6))
ax.step(recall, precision, color='steelblue', where='post')
ax.fill_between(recall, precision, step='post', color='lightgray')
plt.suptitle('Precision-Recall Tradeoff for Seattle Rain Prediction')
plt.xlabel('Recall')
plt.ylabel('Precision')
fpr, tpr, thresholds = roc_curve(y, y_hat)
fig, ax = plt.subplots(1, figsize=(12, 6))
plt.plot(fpr, tpr, color='darkorange', label='Model Performance')
plt.plot([0, 1], [0, 1], color='gray', label='Random Performance')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Seattle Rain ROC Curve')
plt.legend(loc='lower right')
code
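Note on 2022076/cell_21: roc_curve is given the hard 0/1 predictions, which collapses the ROC to a single operating point plus the endpoints. Feeding it probability scores lets the curve sweep over thresholds; a sketch using the fitted classifier above:

# Probability of the positive class, not the thresholded label
y_score = clf.predict_proba(X)[:, 1]
fpr, tpr, thresholds = roc_curve(y, y_score)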
2022076/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import hamming_loss
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
clf = LogisticRegression()
clf.fit(X, y)
y_hat = clf.predict(X)
hamming_loss(y, y_hat)
code
2022076/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df.head()
code
2022076/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import fbeta_score
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
clf = LogisticRegression()
clf.fit(X, y)
y_hat = clf.predict(X)
fbeta_score(y, y_hat, beta=1)
code
2022076/cell_8
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
clf = LogisticRegression()
clf.fit(X, y)
y_hat = clf.predict(X)
confusion_matrix(y, y_hat)
code
2022076/cell_15
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, precision_recall_curve
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
clf = LogisticRegression()
clf.fit(X, y)
y_hat = clf.predict(X)
print(precision_score(y, y_hat))
print(recall_score(y, y_hat))
code
2022076/cell_3
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
clf = LogisticRegression()
clf.fit(X, y)
y_hat = clf.predict(X)
code
2022076/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, precision_recall_curve
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
clf = LogisticRegression()
clf.fit(X, y)
y_hat = clf.predict(X)
plt.style.use('fivethirtyeight')
precision, recall, _ = precision_recall_curve(y, y_hat)
fig, ax = plt.subplots(1, figsize=(12, 6))
ax.step(recall, precision, color='steelblue', where='post')
ax.fill_between(recall, precision, step='post', color='lightgray')
plt.suptitle('Precision-Recall Tradeoff for Seattle Rain Prediction')
plt.xlabel('Recall')
plt.ylabel('Precision')
code
2022076/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
clf = LogisticRegression()
clf.fit(X, y)
y_hat = clf.predict(X)
roc_auc_score(y, y_hat)
code
2022076/cell_10
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import seaborn as sns
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
clf = LogisticRegression()
clf.fit(X, y)
y_hat = clf.predict(X)
sns.heatmap(confusion_matrix(y, y_hat) / len(y), cmap='Blues', annot=True)
code
2022076/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import numpy as np
import pandas as pd

df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
clf = LogisticRegression()
clf.fit(X, y)
y_hat = clf.predict(X)
accuracy_score(y, y_hat)
code
2026938/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import Ridge
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
import numpy as np
import pandas as pd
import time

train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')

def handle_missing(dataset):
    dataset['category_name'].fillna(value='NA/NA/NA', inplace=True)
    dataset.brand_name.fillna(value='missing', inplace=True)
    dataset.item_description.fillna(value='missing', inplace=True)
    return dataset

def split_cat(dataset):
    dataset['cat1'], dataset['cat2'], dataset['cat3'] = zip(*dataset['category_name'].str.split('/', n=2))
    return dataset

def label_maker(dataset):
    lb = LabelBinarizer(sparse_output=True)
    cat1 = lb.fit_transform(dataset['cat1'])
    cat2 = lb.fit_transform(dataset['cat2'])
    cat3 = lb.fit_transform(dataset['cat3'])
    brand_name = lb.fit_transform(dataset['brand_name'])
    del lb
    return (cat1, cat2, cat3, brand_name)

def get_dums(dataset):
    X_dummies = csr_matrix(pd.get_dummies(dataset[['item_condition_id', 'shipping']], sparse=True).values)
    return X_dummies

def text_processing(dataset):
    cv = CountVectorizer()
    name = cv.fit_transform(dataset['name'])
    tv = TfidfVectorizer(max_features=51000, ngram_range=(1, 3), stop_words='english')
    description = tv.fit_transform(dataset['item_description'])
    del cv, tv
    return (name, description)

nrow_train = train.shape[0]
merge: pd.DataFrame = pd.concat([train, test])
submission: pd.DataFrame = test[['test_id']]
start_time = time.time()
merge = handle_missing(merge)
merge = split_cat(merge)
cat1, cat2, cat3, brand_name = label_maker(merge)
X_dummies = get_dums(merge)
name, description = text_processing(merge)
sparse_merge = hstack((cat1, cat2, cat3, brand_name, X_dummies, name, description)).tocsr()
X_train = sparse_merge[:nrow_train]
X_test = sparse_merge[nrow_train:]

def rmsle(y, y0):
    assert len(y) == len(y0)
    return np.sqrt(np.mean(np.power(np.log1p(y) - np.log1p(y0), 2)))

def model_testing(model, X_train, y_train, X_test, y_test):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    error = rmsle(y_test, y_pred)
    return model

ridge_model = Ridge(alpha=1.0, fit_intercept=True, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)
start_time = time.time()
y_train = train['log_price'] = np.log(train['price'] + 1)
print('train test splitting...')
X_t, X_v, y_t, y_v = train_test_split(X_train, y_train, test_size=0.01)
print('training model...')
ridge_model.fit(X_train, y_train)
print('TIME:', time.time() - start_time)
code
2026938/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
import pandas as pd

train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')

def handle_missing(dataset):
    dataset['category_name'].fillna(value='NA/NA/NA', inplace=True)
    dataset.brand_name.fillna(value='missing', inplace=True)
    dataset.item_description.fillna(value='missing', inplace=True)
    return dataset

def split_cat(dataset):
    dataset['cat1'], dataset['cat2'], dataset['cat3'] = zip(*dataset['category_name'].str.split('/', n=2))
    return dataset

def label_maker(dataset):
    lb = LabelBinarizer(sparse_output=True)
    cat1 = lb.fit_transform(dataset['cat1'])
    cat2 = lb.fit_transform(dataset['cat2'])
    cat3 = lb.fit_transform(dataset['cat3'])
    brand_name = lb.fit_transform(dataset['brand_name'])
    del lb
    return (cat1, cat2, cat3, brand_name)

def get_dums(dataset):
    X_dummies = csr_matrix(pd.get_dummies(dataset[['item_condition_id', 'shipping']], sparse=True).values)
    return X_dummies

def text_processing(dataset):
    cv = CountVectorizer()
    name = cv.fit_transform(dataset['name'])
    tv = TfidfVectorizer(max_features=51000, ngram_range=(1, 3), stop_words='english')
    description = tv.fit_transform(dataset['item_description'])
    del cv, tv
    return (name, description)

nrow_train = train.shape[0]
merge: pd.DataFrame = pd.concat([train, test])
submission: pd.DataFrame = test[['test_id']]
code
2026938/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
import pandas as pd
import time

train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')

def handle_missing(dataset):
    dataset['category_name'].fillna(value='NA/NA/NA', inplace=True)
    dataset.brand_name.fillna(value='missing', inplace=True)
    dataset.item_description.fillna(value='missing', inplace=True)
    return dataset

def split_cat(dataset):
    dataset['cat1'], dataset['cat2'], dataset['cat3'] = zip(*dataset['category_name'].str.split('/', 2))
    return dataset

def label_maker(dataset):
    lb = LabelBinarizer(sparse_output=True)
    cat1 = lb.fit_transform(dataset['cat1'])
    cat2 = lb.fit_transform(dataset['cat2'])
    cat3 = lb.fit_transform(dataset['cat3'])
    brand_name = lb.fit_transform(dataset['brand_name'])
    del lb
    return (cat1, cat2, cat3, brand_name)

def get_dums(dataset):
    X_dummies = csr_matrix(pd.get_dummies(dataset[['item_condition_id', 'shipping']], sparse=True).values)
    return X_dummies

def text_processing(dataset):
    cv = CountVectorizer()
    name = cv.fit_transform(dataset['name'])
    tv = TfidfVectorizer(max_features=51000, ngram_range=(1, 3), stop_words='english')
    description = tv.fit_transform(dataset['item_description'])
    del cv, tv
    return (name, description)

nrow_train = train.shape[0]
merge: pd.DataFrame = pd.concat([train, test])
submission: pd.DataFrame = test[['test_id']]
start_time = time.time()
merge = handle_missing(merge)
merge = split_cat(merge)
cat1, cat2, cat3, brand_name = label_maker(merge)
X_dummies = get_dums(merge)
name, description = text_processing(merge)
sparse_merge = hstack((cat1, cat2, cat3, brand_name, X_dummies, name, description)).tocsr()
X_train = sparse_merge[:nrow_train]
X_test = sparse_merge[nrow_train:]
code
2026938/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
code
2026938/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import Ridge
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
import numpy as np
import pandas as pd
import time

Time_0 = time.time()  # reference point for the TOTAL TIME report at the end of the cell
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')

def handle_missing(dataset):
    dataset['category_name'].fillna(value='NA/NA/NA', inplace=True)
    dataset.brand_name.fillna(value='missing', inplace=True)
    dataset.item_description.fillna(value='missing', inplace=True)
    return dataset

def split_cat(dataset):
    dataset['cat1'], dataset['cat2'], dataset['cat3'] = zip(*dataset['category_name'].str.split('/', 2))
    return dataset

def label_maker(dataset):
    lb = LabelBinarizer(sparse_output=True)
    cat1 = lb.fit_transform(dataset['cat1'])
    cat2 = lb.fit_transform(dataset['cat2'])
    cat3 = lb.fit_transform(dataset['cat3'])
    brand_name = lb.fit_transform(dataset['brand_name'])
    del lb
    return (cat1, cat2, cat3, brand_name)

def get_dums(dataset):
    X_dummies = csr_matrix(pd.get_dummies(dataset[['item_condition_id', 'shipping']], sparse=True).values)
    return X_dummies

def text_processing(dataset):
    cv = CountVectorizer()
    name = cv.fit_transform(dataset['name'])
    tv = TfidfVectorizer(max_features=51000, ngram_range=(1, 3), stop_words='english')
    description = tv.fit_transform(dataset['item_description'])
    del cv, tv
    return (name, description)

nrow_train = train.shape[0]
merge: pd.DataFrame = pd.concat([train, test])
submission: pd.DataFrame = test[['test_id']]
start_time = time.time()
merge = handle_missing(merge)
merge = split_cat(merge)
cat1, cat2, cat3, brand_name = label_maker(merge)
X_dummies = get_dums(merge)
name, description = text_processing(merge)
sparse_merge = hstack((cat1, cat2, cat3, brand_name, X_dummies, name, description)).tocsr()
X_train = sparse_merge[:nrow_train]
X_test = sparse_merge[nrow_train:]

def model_testing(model, X_train, y_train, X_test, y_test):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    error = rmsle(y_test, y_pred)
    return model

def rmsle(y, y0):
    assert len(y) == len(y0)
    return np.sqrt(np.mean(np.power(np.log1p(y) - np.log1p(y0), 2)))

ridge_model = Ridge(alpha=1.0, fit_intercept=True, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)
start_time = time.time()
y_train = train['log_price'] = np.log(train['price'] + 1)
X_t, X_v, y_t, y_v = train_test_split(X_train, y_train, test_size=0.01)
ridge_model.fit(X_train, y_train)

def create_submission(model, test=X_test, submission=submission, path='./predictions.csv'):
    predictions = model.predict(test)
    # Undo the log target transform before writing prices.
    predictions = pd.Series(np.exp(predictions) - 1)
    submission['price'] = predictions
    submission.to_csv(path, index=False)

start_time = time.time()
create_submission(ridge_model)
print('TIME:', time.time() - start_time)
print('TOTAL TIME:', time.time() - Time_0)
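# Worked example of the target round trip above (illustrative price, not from
# the data): a price of 9 is trained on as log(9 + 1) = ln(10) ~ 2.3026, and
# create_submission maps a prediction of 2.3026 back to exp(2.3026) - 1 ~ 9.0.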
code
2026938/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import time

Time_0 = time.time()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.sparse import csr_matrix, hstack
import re
import math
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import mean_squared_log_error
from sklearn.linear_model import Ridge
from sklearn.ensemble import GradientBoostingRegressor

seed = 90
code
2026938/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
import numpy as np
import pandas as pd
import time

train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')

def handle_missing(dataset):
    dataset['category_name'].fillna(value='NA/NA/NA', inplace=True)
    dataset.brand_name.fillna(value='missing', inplace=True)
    dataset.item_description.fillna(value='missing', inplace=True)
    return dataset

def split_cat(dataset):
    dataset['cat1'], dataset['cat2'], dataset['cat3'] = zip(*dataset['category_name'].str.split('/', 2))
    return dataset

def label_maker(dataset):
    lb = LabelBinarizer(sparse_output=True)
    cat1 = lb.fit_transform(dataset['cat1'])
    cat2 = lb.fit_transform(dataset['cat2'])
    cat3 = lb.fit_transform(dataset['cat3'])
    brand_name = lb.fit_transform(dataset['brand_name'])
    del lb
    return (cat1, cat2, cat3, brand_name)

def get_dums(dataset):
    X_dummies = csr_matrix(pd.get_dummies(dataset[['item_condition_id', 'shipping']], sparse=True).values)
    return X_dummies

def text_processing(dataset):
    cv = CountVectorizer()
    name = cv.fit_transform(dataset['name'])
    tv = TfidfVectorizer(max_features=51000, ngram_range=(1, 3), stop_words='english')
    description = tv.fit_transform(dataset['item_description'])
    del cv, tv
    return (name, description)

nrow_train = train.shape[0]
merge: pd.DataFrame = pd.concat([train, test])
submission: pd.DataFrame = test[['test_id']]
start_time = time.time()
merge = handle_missing(merge)
merge = split_cat(merge)
cat1, cat2, cat3, brand_name = label_maker(merge)
X_dummies = get_dums(merge)
name, description = text_processing(merge)
sparse_merge = hstack((cat1, cat2, cat3, brand_name, X_dummies, name, description)).tocsr()
X_train = sparse_merge[:nrow_train]
X_test = sparse_merge[nrow_train:]

def model_testing(model, X_train, y_train, X_test, y_test):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    error = rmsle(y_test, y_pred)
    print(error)
    return model

def rmsle(y, y0):
    assert len(y) == len(y0)
    return np.sqrt(np.mean(np.power(np.log1p(y) - np.log1p(y0), 2)))
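# Quick sanity check of rmsle (illustrative values, not executed here):
# rmsle(np.array([9.0]), np.array([9.0])) == 0.0 for a perfect prediction,
# while rmsle(np.array([9.0]), np.array([0.0])) = |log1p(9) - log1p(0)| =
# ln(10) ~ 2.30, so the metric penalizes relative rather than absolute error.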
code
2026938/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import Ridge

ridge_model = Ridge(alpha=1.0, fit_intercept=True, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)
code
2026938/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import Ridge
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
import numpy as np
import pandas as pd
import time

train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')

def handle_missing(dataset):
    dataset['category_name'].fillna(value='NA/NA/NA', inplace=True)
    dataset.brand_name.fillna(value='missing', inplace=True)
    dataset.item_description.fillna(value='missing', inplace=True)
    return dataset

def split_cat(dataset):
    dataset['cat1'], dataset['cat2'], dataset['cat3'] = zip(*dataset['category_name'].str.split('/', 2))
    return dataset

def label_maker(dataset):
    lb = LabelBinarizer(sparse_output=True)
    cat1 = lb.fit_transform(dataset['cat1'])
    cat2 = lb.fit_transform(dataset['cat2'])
    cat3 = lb.fit_transform(dataset['cat3'])
    brand_name = lb.fit_transform(dataset['brand_name'])
    del lb
    return (cat1, cat2, cat3, brand_name)

def get_dums(dataset):
    X_dummies = csr_matrix(pd.get_dummies(dataset[['item_condition_id', 'shipping']], sparse=True).values)
    return X_dummies

def text_processing(dataset):
    cv = CountVectorizer()
    name = cv.fit_transform(dataset['name'])
    tv = TfidfVectorizer(max_features=51000, ngram_range=(1, 3), stop_words='english')
    description = tv.fit_transform(dataset['item_description'])
    del cv, tv
    return (name, description)

nrow_train = train.shape[0]
merge: pd.DataFrame = pd.concat([train, test])
submission: pd.DataFrame = test[['test_id']]
start_time = time.time()
merge = handle_missing(merge)
merge = split_cat(merge)
cat1, cat2, cat3, brand_name = label_maker(merge)
X_dummies = get_dums(merge)
name, description = text_processing(merge)
sparse_merge = hstack((cat1, cat2, cat3, brand_name, X_dummies, name, description)).tocsr()
X_train = sparse_merge[:nrow_train]
X_test = sparse_merge[nrow_train:]

def model_testing(model, X_train, y_train, X_test, y_test):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    error = rmsle(y_test, y_pred)
    return model

def rmsle(y, y0):
    assert len(y) == len(y0)
    return np.sqrt(np.mean(np.power(np.log1p(y) - np.log1p(y0), 2)))

ridge_model = Ridge(alpha=1.0, fit_intercept=True, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)
start_time = time.time()
y_train = train['log_price'] = np.log(train['price'] + 1)
X_t, X_v, y_t, y_v = train_test_split(X_train, y_train, test_size=0.01)
ridge_model.fit(X_train, y_train)

def create_submission(model, test=X_test, submission=submission, path='./predictions.csv'):
    predictions = model.predict(test)
    predictions = pd.Series(np.exp(predictions) - 1)
    submission['price'] = predictions
    submission.to_csv(path, index=False)

print(submission.describe())
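# Behavioural sketch (not run in this cell): create_submission(ridge_model)
# would predict on X_test, undo the log transform with np.exp(pred) - 1, and
# write ./predictions.csv with the test_id and price columns.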
code
2026938/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
import pandas as pd
import time

train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')

def handle_missing(dataset):
    dataset['category_name'].fillna(value='NA/NA/NA', inplace=True)
    dataset.brand_name.fillna(value='missing', inplace=True)
    dataset.item_description.fillna(value='missing', inplace=True)
    return dataset

def split_cat(dataset):
    dataset['cat1'], dataset['cat2'], dataset['cat3'] = zip(*dataset['category_name'].str.split('/', 2))
    return dataset

def label_maker(dataset):
    lb = LabelBinarizer(sparse_output=True)
    cat1 = lb.fit_transform(dataset['cat1'])
    cat2 = lb.fit_transform(dataset['cat2'])
    cat3 = lb.fit_transform(dataset['cat3'])
    brand_name = lb.fit_transform(dataset['brand_name'])
    del lb
    return (cat1, cat2, cat3, brand_name)

def get_dums(dataset):
    X_dummies = csr_matrix(pd.get_dummies(dataset[['item_condition_id', 'shipping']], sparse=True).values)
    return X_dummies

def text_processing(dataset):
    cv = CountVectorizer()
    name = cv.fit_transform(dataset['name'])
    tv = TfidfVectorizer(max_features=51000, ngram_range=(1, 3), stop_words='english')
    description = tv.fit_transform(dataset['item_description'])
    del cv, tv
    return (name, description)

# Combine train and test so the encoders see every category level.
merge: pd.DataFrame = pd.concat([train, test])
start_time = time.time()
print('Handle Missing...')
merge = handle_missing(merge)
print('splitting cat...')
merge = split_cat(merge)
print('making labels...')
cat1, cat2, cat3, brand_name = label_maker(merge)
print('getting dummies...')
X_dummies = get_dums(merge)
print('processing text...')
name, description = text_processing(merge)
print('stacking train...')
sparse_merge = hstack((cat1, cat2, cat3, brand_name, X_dummies, name, description)).tocsr()
print('TIME:', time.time() - start_time)
code
1010539/cell_13
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

data = pd.read_hdf('../input/train.h5')

def myticks(x, pos):
    exponent = abs(int(np.log10(np.abs(x))))
    return exponent

def plot_exp(data, title):
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.plot(data.t16_exp, data.timestamp)
    ax.set_title(title)
    ax.set_xlabel('Negative Power of Technical_16')
    ax.set_ylabel('Timestamp')
    plt.show()

data.technical_16.fillna(0, inplace=True)
data['t16_exp'] = data.technical_16.map(lambda z: int(np.log10(np.abs(z))) - 1 if z != 0 else 0)
plot_exp(data.loc[(data.id == 1201) & (data.t16_exp != 0.0) & ~data.t16_exp.isnull(), ['timestamp', 't16_exp']], 'id = 1201')
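# Worked example of the exponent extraction above (illustrative value, not
# taken from the data): for technical_16 = -0.0003, np.log10(np.abs(z)) is
# about -3.52, int() truncates toward zero to -3, and subtracting 1 gives
# t16_exp = -4, i.e. the value lives at the 1e-4 scale.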
code
1010539/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

data = pd.read_hdf('../input/train.h5')

def myticks(x, pos):
    exponent = abs(int(np.log10(np.abs(x))))
    return exponent

def plot_exp(data, title):
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.plot(data.t16_exp, data.timestamp)
    ax.set_title(title)
    ax.set_xlabel('Negative Power of Technical_16')
    ax.set_ylabel('Timestamp')
    plt.show()

data.technical_16.fillna(0, inplace=True)
data['t16_exp'] = data.technical_16.map(lambda z: int(np.log10(np.abs(z))) - 1 if z != 0 else 0)
data['t16_first_number'] = data.technical_16 * (10 ** data.t16_exp.abs()).astype('int')

def plot_first_numb(data, title):
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.plot(data.timestamp, data.t16_first_number, marker='v', mfc='g')
    ax.set_title(title)
    ax.set_yticks(range(-10, 10))
    ax.set_xlabel('Timestamp')
    ax.set_ylabel('First_number')
    plt.show()

plot_first_numb(data.loc[(data.id == 1201) & (data.t16_exp != 0), ['timestamp', 't16_first_number']], 'id = 1201')
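# Continuing the illustrative example: with technical_16 = -0.0003 and
# t16_exp = -4, the t16_first_number line computes -0.0003 * 10**4 = -3.0,
# so it recovers the leading digit (with sign) of technical_16.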
code
1010539/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

data = pd.read_hdf('../input/train.h5')

def myticks(x, pos):
    exponent = abs(int(np.log10(np.abs(x))))
    return exponent

def plot_exp(data, title):
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.plot(data.t16_exp, data.timestamp)
    ax.set_title(title)
    ax.set_xlabel('Negative Power of Technical_16')
    ax.set_ylabel('Timestamp')
    plt.show()

data.technical_16.fillna(0, inplace=True)
data['t16_exp'] = data.technical_16.map(lambda z: int(np.log10(np.abs(z))) - 1 if z != 0 else 0)
plot_exp(data.loc[(data.id == 288) & (data.t16_exp != 0) & ~data.t16_exp.isnull(), ['timestamp', 't16_exp']], 'id = 288')
code
1010539/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

data = pd.read_hdf('../input/train.h5')

def myticks(x, pos):
    exponent = abs(int(np.log10(np.abs(x))))
    return exponent

def plot_exp(data, title):
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.plot(data.t16_exp, data.timestamp)
    ax.set_title(title)
    ax.set_xlabel('Negative Power of Technical_16')
    ax.set_ylabel('Timestamp')
    plt.show()

data.technical_16.fillna(0, inplace=True)
data['t16_exp'] = data.technical_16.map(lambda z: int(np.log10(np.abs(z))) - 1 if z != 0 else 0)
data['t16_first_number'] = data.technical_16 * (10 ** data.t16_exp.abs()).astype('int')

def plot_first_numb(data, title):
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.plot(data.timestamp, data.t16_first_number, marker='v', mfc='g')
    ax.set_title(title)
    ax.set_yticks(range(-10, 10))
    ax.set_xlabel('Timestamp')
    ax.set_ylabel('First_number')
    plt.show()

plot_first_numb(data.loc[(data.id == 300) & (data.t16_exp != 0), ['timestamp', 't16_first_number']], 'id = 300')
code
1010539/cell_3
[ "image_output_1.png" ]
import pandas as pd

data = pd.read_hdf('../input/train.h5')
code
50239241/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
pd.crosstab(df.cp, df.target)
pd.crosstab(df.age, df.trestbps)
df.corr()
code
50239241/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
pd.crosstab(df.cp, df.target)
code
50239241/cell_9
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
code
50239241/cell_4
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.head()
code
50239241/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
pd.crosstab(df.cp, df.target)
pd.crosstab(df.age, df.trestbps)
df.corr()
corr_matrix = df.corr()
plt.figure(figsize=(15, 10))
sns.heatmap(corr_matrix, fmt='.2f', linewidth=0.5, annot=True, cmap='Blues')
code
50239241/cell_6
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
code
50239241/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
df['target'].value_counts()
code
50239241/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
pd.crosstab(df.cp, df.target)
pd.crosstab(df.age, df.trestbps)
plt.figure(figsize=(10, 6))
plt.scatter(df.trestbps[df.target == 1], df.age[df.target == 1])
plt.scatter(df.trestbps[df.target == 0], df.age[df.target == 0])
plt.xlabel('Resting Blood Pressure (mm Hg)')
plt.ylabel('Age (Years)')
plt.title('Relation between the Resting Blood Pressure vs Age')
plt.legend(['Disease', 'No-Disease'])
code
50239241/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
pd.crosstab(df.cp, df.target)
pd.crosstab(df.age, df.trestbps)
code
50239241/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
pd.crosstab(df.cp, df.target)
pd.crosstab(df.cp, df.target).plot(kind='bar', rot=0, xlabel='Chest Pain', ylabel='Frequency', title='Frequency Graph between the Chest Pain and Target', colormap='tab20c')
plt.legend(['No-Disease', 'Disease'])
code
50239241/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
pd.crosstab(df.sex, df.target).plot(kind='bar', rot=0, ylabel='Frequency', xlabel='Sex', title='Frequency graph between the Sex and Target', colormap='tab20c')
plt.legend(['No-Disease', 'Disease'])
code
50239241/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.describe()
code
34123364/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

laureates_data = pd.read_csv('/kaggle/input/nobel-laureates/archive.csv')
print(laureates_data[laureates_data['Laureate Type'] != 'Individual']['Laureate Type'])
code
34123364/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

laureates_data = pd.read_csv('/kaggle/input/nobel-laureates/archive.csv')
print(laureates_data.columns)
print(laureates_data[laureates_data.isnull().any(axis=1)])
code
34123364/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

laureates_data = pd.read_csv('/kaggle/input/nobel-laureates/archive.csv')
print(laureates_data[laureates_data['Laureate Type'] != 'Individual']['Category'].unique())
code
34123364/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

laureates_data = pd.read_csv('/kaggle/input/nobel-laureates/archive.csv')
print(laureates_data.head())
print(laureates_data.dtypes)
code
34123364/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

laureates_data = pd.read_csv('/kaggle/input/nobel-laureates/archive.csv')
print('The number of entries: %d \n\n' % laureates_data[laureates_data['Full Name'].str.contains('Marie Curie')].shape[0])
print(laureates_data[laureates_data['Full Name'].str.contains('Marie Curie')])
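# For reference: Marie Curie is the classic repeat laureate, so this filter
# should match her 1903 Physics prize (shared with Pierre Curie and Henri
# Becquerel) and her 1911 Chemistry prize (unshared).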
code
34123364/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

laureates_data = pd.read_csv('/kaggle/input/nobel-laureates/archive.csv')
name_counts = laureates_data['Full Name'].value_counts()
multi_name = list(name_counts[name_counts > 1].index)
for name in multi_name:
    temp = laureates_data[laureates_data['Full Name'] == name].Year.unique()
    if len(temp) > 1:
        print(name, ' ', temp, '\n')
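# Assuming the dataset records each name identically across years, the output
# should include well-known repeat winners such as John Bardeen (Physics 1956,
# 1972), Linus Pauling (Chemistry 1954, Peace 1962), Frederick Sanger
# (Chemistry 1958, 1980), and repeat organizations like the International
# Committee of the Red Cross.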
code
16132425/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()

def remove_duplicates(bkshare_df1):
    bkshare_df1.drop_duplicates(inplace=True)

def descibe_df(bkshare_df1):
    pass

def bkshare_corr(bkshare_df1):
    pass

remove_duplicates(bkshare_df1)
descibe_df(bkshare_df1)
bkshare_corr(bkshare_df1)
plt.figure(figsize=(10, 5))
ax = sns.heatmap(bkshare_df1.corr(), annot=True)
plt.show()
code
16132425/cell_2
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()
bkshare_df1.info()
code
16132425/cell_1
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

print(os.listdir('../input'))
bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()
bkshare_df1.head()
code
16132425/cell_3
[ "text_html_output_2.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()

def remove_duplicates(bkshare_df1):
    bkshare_df1.drop_duplicates(inplace=True)

def descibe_df(bkshare_df1):
    print('Describing Dataset')
    print('------------------')
    display(bkshare_df1.describe())

def bkshare_corr(bkshare_df1):
    print('Correlation')
    print('------------')
    display(bkshare_df1.corr())

remove_duplicates(bkshare_df1)
descibe_df(bkshare_df1)
bkshare_corr(bkshare_df1)
code