Dataset columns:
  path              string    (length 13 to 17)
  screenshot_names  sequence  (length 1 to 873)
  code              string    (length 0 to 40.4k)
  cell_type         string    (1 distinct value)
129001471/cell_9
[ "image_output_1.png" ]
import os
import json
import pytz
import talib
import pickle
import numpy as np
import pandas as pd
import datetime as dt
import vectorbt as vbt
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from histDataHandler import loadSuchData
from botsFactoryLib import processData, genExposure, genExposureDown, genPreds, genShiftedSMA, loadModel

current_directory = os.getcwd()
parent_directory = os.path.dirname(current_directory)
os.chdir(parent_directory)

vbt.settings.set_theme('dark')
vbt.settings['plotting']['layout']['width'] = 700
vbt.settings['plotting']['layout']['height'] = 350
subplots = ['trades', 'trade_pnl', 'cum_returns', 'underwater', 'net_exposure']

data = pd.read_csv('backtesting_ohlcv_data.csv', index_col=0, parse_dates=True)
data

shiftWindow = 48
modelName = f'articleModelSMA{shiftWindow}'
model, modelParamsDict, targetScaler, scalers = loadModel(modelName, data)

preds = pd.concat([genPreds(data, model, modelParamsDict, targetScaler, scalers),
                   talib.SMA(data['Close'], timeperiod=shiftWindow).rename('Current_SMA')], axis=1).dropna()
exposure = genExposure(pd.concat([preds, data['Close'].shift(1).rename('Shifted_Close')], axis=1), 2.5, 10, 'Prediction')

preds_SMA = pd.concat([genShiftedSMA(data, shiftWindow, shiftWindow + 1).rename('Prediction'),
                       talib.SMA(data['Close'], timeperiod=shiftWindow).rename('Current_SMA')], axis=1).dropna()
exposure_SMA = genExposure(pd.concat([preds_SMA, data['Close'].shift(1).rename('Shifted_Close')], axis=1), 1.25, 10, 'Prediction')

pf_model = vbt.Portfolio.from_orders(data['Open'][exposure.index], exposure,
                                     size_type='targetpercent', freq=modelParamsDict['frequency'])
pf_shiftedSMA = vbt.Portfolio.from_orders(data['Open'][exposure_SMA.index], exposure_SMA,
                                          size_type='targetpercent', freq=modelParamsDict['frequency'])

pf_model.stats()
figSpot_model = go.Figure(pf_model.plot(subplots=subplots))
figSpot_model.show()
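# Hedged aside (not in the original cell): pf_shiftedSMA is built above but never
# inspected here; a minimal comparison, assuming the same vectorbt subplots list, could be:
# pf_shiftedSMA.stats()
# go.Figure(pf_shiftedSMA.plot(subplots=subplots)).show()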
code
129001471/cell_2
[ "text_html_output_1.png" ]
import os
import json
import pytz
import talib
import pickle
import numpy as np
import pandas as pd
import datetime as dt
import vectorbt as vbt
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from histDataHandler import loadSuchData
from botsFactoryLib import processData, genExposure, genExposureDown, genPreds, genShiftedSMA, loadModel

current_directory = os.getcwd()
parent_directory = os.path.dirname(current_directory)
os.chdir(parent_directory)

vbt.settings.set_theme('dark')
vbt.settings['plotting']['layout']['width'] = 700
vbt.settings['plotting']['layout']['height'] = 350
subplots = ['trades', 'trade_pnl', 'cum_returns', 'underwater', 'net_exposure']

data = pd.read_csv('backtesting_ohlcv_data.csv', index_col=0, parse_dates=True)
data
code
129001471/cell_8
[ "text_plain_output_1.png" ]
import os
import json
import pytz
import talib
import pickle
import numpy as np
import pandas as pd
import datetime as dt
import vectorbt as vbt
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from histDataHandler import loadSuchData
from botsFactoryLib import processData, genExposure, genExposureDown, genPreds, genShiftedSMA, loadModel

current_directory = os.getcwd()
parent_directory = os.path.dirname(current_directory)
os.chdir(parent_directory)

vbt.settings.set_theme('dark')
vbt.settings['plotting']['layout']['width'] = 700
vbt.settings['plotting']['layout']['height'] = 350
subplots = ['trades', 'trade_pnl', 'cum_returns', 'underwater', 'net_exposure']

data = pd.read_csv('backtesting_ohlcv_data.csv', index_col=0, parse_dates=True)
data

shiftWindow = 48
modelName = f'articleModelSMA{shiftWindow}'
model, modelParamsDict, targetScaler, scalers = loadModel(modelName, data)

preds = pd.concat([genPreds(data, model, modelParamsDict, targetScaler, scalers),
                   talib.SMA(data['Close'], timeperiod=shiftWindow).rename('Current_SMA')], axis=1).dropna()
exposure = genExposure(pd.concat([preds, data['Close'].shift(1).rename('Shifted_Close')], axis=1), 2.5, 10, 'Prediction')

preds_SMA = pd.concat([genShiftedSMA(data, shiftWindow, shiftWindow + 1).rename('Prediction'),
                       talib.SMA(data['Close'], timeperiod=shiftWindow).rename('Current_SMA')], axis=1).dropna()
exposure_SMA = genExposure(pd.concat([preds_SMA, data['Close'].shift(1).rename('Shifted_Close')], axis=1), 1.25, 10, 'Prediction')

pf_model = vbt.Portfolio.from_orders(data['Open'][exposure.index], exposure,
                                     size_type='targetpercent', freq=modelParamsDict['frequency'])
pf_shiftedSMA = vbt.Portfolio.from_orders(data['Open'][exposure_SMA.index], exposure_SMA,
                                          size_type='targetpercent', freq=modelParamsDict['frequency'])

pf_model.stats()
code
32071289/cell_9
[ "text_plain_output_1.png" ]
"""mae = mean_absolute_error(y_valid_cc, preds_cc['preds']) msle = mean_squared_log_error(y_valid_cc, preds_cc['preds']) print("CC MAE: %f MSLE %f" % (mae, msle)) mae = mean_absolute_error(y_valid_ft, preds_ft['preds']) msle = mean_squared_log_error(y_valid_ft, preds_ft['preds']) print("FT MAE: %f MSLE %f" % (mae, msle))""" '\nCC MAE: 53.621829 MSLE 0.032919\nFT MAE: 3.685815 MSLE 0.008674\n'
code
32071289/cell_4
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

covid_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv', index_col='Id', parse_dates=['Date'])
covid_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv', index_col='ForecastId', parse_dates=['Date'])
last_register = pd.to_datetime(covid_train['Date'].iloc[covid_train.shape[0] - 1])

def adjustState(row):
    # Fall back to the country name when no province/state is given
    if pd.isna(row['Province_State']):
        row['Province_State'] = row['Country_Region']
    return row

covid_train = covid_train.apply(adjustState, axis=1)
covid_test = covid_test.apply(adjustState, axis=1)
covid_train.fillna('NA', inplace=True)
covid_test.fillna('NA', inplace=True)

n_cases_cc = 50
n_cases_ft = 50

# For every (country, state) pair, record the date on which confirmed cases /
# fatalities first reached the threshold (or the last training date if never)
data_mark_date = pd.DataFrame(columns=['Country_Region', 'Province_State', 'Date_cc', 'Date_ft'])
for country in covid_train['Country_Region'].unique():
    for state in covid_train[covid_train['Country_Region'] == country]['Province_State'].unique():
        data_df = covid_train[(covid_train['Country_Region'] == country) & (covid_train['Province_State'] == state)]
        if data_df[data_df['ConfirmedCases'] >= n_cases_cc].shape[0] > 0:
            date_cc = data_df[data_df['ConfirmedCases'] >= n_cases_cc].iloc[0]['Date']
        else:
            date_cc = last_register
        if data_df[data_df['Fatalities'] >= n_cases_ft].shape[0] > 0:
            date_ft = data_df[data_df['Fatalities'] >= n_cases_ft].iloc[0]['Date']
        else:
            date_ft = last_register
        data_state = pd.DataFrame({'Country_Region': [country], 'Province_State': [state],
                                   'Date_cc': [date_cc], 'Date_ft': [date_ft]})
        # pd.concat replaces the removed DataFrame.append
        data_mark_date = pd.concat([data_mark_date, data_state.iloc[[0]]])

def mark_date(row):
    # Days elapsed since the threshold date for this row's (country, state)
    data_df = data_mark_date[(data_mark_date['Country_Region'] == row['Country_Region']) &
                             (data_mark_date['Province_State'] == row['Province_State'])].iloc[0]
    if not pd.isna(data_df['Date_cc']):
        row['Date_cc'] = (row['Date'] - data_df['Date_cc']).days
    if not pd.isna(data_df['Date_ft']):
        row['Date_ft'] = (row['Date'] - data_df['Date_ft']).days
    return row

covid_train = covid_train[(covid_train['ConfirmedCases'] > 0) | (covid_train['Fatalities'] > 0)]
covid_train['Date_cc'] = [0 for i in range(covid_train.shape[0])]
covid_train['Date_ft'] = [0 for i in range(covid_train.shape[0])]
covid_train = covid_train.apply(mark_date, axis=1)
covid_test['Date_cc'] = [0 for i in range(covid_test.shape[0])]
covid_test['Date_ft'] = [0 for i in range(covid_test.shape[0])]
covid_test = covid_test.apply(mark_date, axis=1)

covid_train['Date_st'] = covid_train['Date'].map(lambda x: x.timestamp())
covid_test['Date_st'] = covid_test['Date'].map(lambda x: x.timestamp())

enc = LabelEncoder()
covid_train['Province_State_enc'] = enc.fit_transform(covid_train['Province_State'])
covid_test['Province_State_enc'] = enc.transform(covid_test['Province_State'])
enc = LabelEncoder()
covid_train['Country_Region_enc'] = enc.fit_transform(covid_train['Country_Region'])
covid_test['Country_Region_enc'] = enc.transform(covid_test['Country_Region'])

X_features = ['Province_State', 'Country_Region', 'Date_st', 'Date_cc', 'Date_ft']
X = covid_train[X_features]
y_cc = covid_train['ConfirmedCases']
y_ft = covid_train['Fatalities']
print('Adjust data complete')
code
32071289/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

covid_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv', index_col='Id', parse_dates=['Date'])
covid_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv', index_col='ForecastId', parse_dates=['Date'])
last_register = pd.to_datetime(covid_train['Date'].iloc[covid_train.shape[0] - 1])
print('Len train %d, Len test %d' % (covid_train.shape[0], covid_test.shape[0]))
print('Last train "Date": ', last_register)
code
32071289/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_log_error
print('Setup complete')
code
32071289/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

covid_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv', index_col='Id', parse_dates=['Date'])
covid_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv', index_col='ForecastId', parse_dates=['Date'])
last_register = pd.to_datetime(covid_train['Date'].iloc[covid_train.shape[0] - 1])
covid_train.info()
code
32071289/cell_10
[ "text_plain_output_1.png" ]
"""X_train_2, X_valid_2, y_train_cc_2, y_valid_cc_2 = train_test_split(covid_train[X_features_cc], y_cc, random_state=42) model_cc_2 = RandomForestRegressor(n_estimators=100, random_state=42) model_cc_2.fit(X_train_2, y_train_cc_2) predic = model_cc_2.predict(X_valid_2) mae = mean_absolute_error(y_valid_cc_2, predic) msle = mean_squared_log_error(y_valid_cc_2, predic) print("CC MAE: %f MSLE %f" % (mae, msle)) X_train_2, X_valid_2, y_train_ft_2, y_valid_ft_2 = train_test_split(covid_train[X_features_ft], y_ft, random_state=42) model_ft_2 = RandomForestRegressor(n_estimators=50, random_state=42) model_ft_2.fit(X_train_2, y_train_ft_2) predic = model_ft_2.predict(X_valid_2) mae = mean_absolute_error(y_valid_ft_2, predic) msle = mean_squared_log_error(y_valid_ft_2, predic) print("CC MAE: %f MSLE %f" % (mae, msle))""" '\nCC MAE: 217.231728 MSLE 0.881407\nCC MAE: 14.093100 MSLE 0.503043\n'
code
49116605/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

order = pd.read_csv('/kaggle/input/market-basket-id-ndsc-2020/association_order.csv')
order
code
49116605/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
2036189/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import itertools
import random
import numpy as np  # linear algebra

"""
create sample data
user and product are segmented
"""
random.seed(0)
NN_word = 2000
NN_sentence = 10000
NN_SEG = 7

class Seq(object):

    def __init__(self, neg_sample=5, batch_size=32, stop=None, common_prods=[50, 100, 200, 500, 1000]):
        self.common_prods = common_prods
        self.batch_size = batch_size
        self.product_list = [ee + 1 for ee in range(NN_word)]
        self.product_set = set(self.product_list)
        self.neg_sample = neg_sample
        nn_each_group, amari = divmod(len(self.product_list), NN_SEG)
        if amari != 0:
            nn_each_group += 1
        del_none = lambda l: filter(lambda x: x is not None, l)
        # Split the product list into NN_SEG roughly equal groups
        self.product_group = [[e1 for e1 in del_none(ee)]
                              for ee in itertools.zip_longest(*[iter(self.product_list)] * nn_each_group)]
        self.user_list = [ee + 1 for ee in range(NN_sentence)]
        self.user_list_org = [ee for ee in self.user_list]
        if stop is not None:
            self.user_list = self.user_list[:stop]
        self.user_set = set(self.user_list)
        self.user_id_next = self.user_list[0]
        self.create_seed()
        # estimate self length
        self.initialize_it()
        self.len = 1
        for _ in self.it:
            self.len += 1
        self.initialize_it()

    def initialize_it(self):
        self.it = iter(range(0, len(self.user_list), self.batch_size))
        self.idx_next = next(self.it)

    def create_seed(self):
        self.seeds = {}
        for ii, user_id in enumerate(self.user_list):
            self.seeds[user_id] = ii

    def __len__(self):
        return self.len

    def __iter__(self):
        return self

    def __next__(self):
        idx = self.idx_next
        self.user_ids_part = self.user_list[idx:idx + self.batch_size if idx + self.batch_size < len(self.user_list) else len(self.user_list)]
        res = self.getpart(self.user_ids_part)
        try:
            self.idx_next = next(self.it)
        except StopIteration:
            self.initialize_it()
        return res

    def __getitem__(self, user_id):
        ret_users, ret_prods, ret_y = self.get_data(user_id)
        return ({'input_user': np.array(ret_users), 'input_prod': np.array(ret_prods)}, ret_y)

    def get_data(self, user_id):
        random.seed(self.seeds[user_id])
        nword = random.randint(5, 20)
        a, _ = divmod(len(self.user_list_org), NN_SEG)
        ii = int(user_id / (a + 1))
        prods = random.sample(self.product_group[ii], nword)
        prods.extend(self.common_prods)
        prods = list(set(prods))
        neg = self.get_neg(prods)
        ret_users = [user_id] * (len(prods) * (1 + self.neg_sample))
        ret_prods = prods + neg
        ret_y = [1] * len(prods) + [0] * len(neg)
        return (ret_users, ret_prods, ret_y)

    def get_neg(self, prods):
        # Negatives: sample from every product the user did not buy
        o = self.product_set.difference(prods)
        random.seed()
        neg = random.sample(list(o), len(prods) * self.neg_sample)  # list(): random.sample no longer accepts sets
        return neg

    def getpart(self, user_ids_part):
        x_input_user = []
        x_input_prod = []
        y = []
        for user_id in user_ids_part:
            x_train, y_train = self[user_id]
            x_input_prod.extend(x_train['input_prod'].tolist())
            x_input_user.extend(x_train['input_user'].tolist())
            y.append(y_train)
        return ({'input_prod': np.array(x_input_prod), 'input_user': np.array(x_input_user)}, np.concatenate(y))

seq = Seq(neg_sample=1, common_prods=[])
print(len(seq))

# Build a dense user x product purchase matrix from the positive samples
X_list = [np.zeros((1, NN_word + 1))]
for iuser in seq.user_list:
    x_train, y_train = seq[iuser]
    prods = x_train['input_prod'][np.array(y_train) == 1]
    irow = np.zeros((1, NN_word + 1))
    irow[0, prods] = 1
    X_list.append(irow)
X = np.concatenate(X_list)
print(X.shape)
X
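# Usage sketch (not in the original cell): Seq is an iterator, so one Keras-style
# batch can be drawn with next(); the dict keys follow getpart() above.
# x_batch, y_batch = next(seq)
# print(x_batch['input_user'].shape, x_batch['input_prod'].shape, y_batch.shape)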
code
2036189/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import itertools
import random
import numpy as np  # linear algebra
import matplotlib.pyplot as plt

"""
create sample data
user and product are segmented
"""
random.seed(0)
NN_word = 2000
NN_sentence = 10000
NN_SEG = 7

class Seq(object):

    def __init__(self, neg_sample=5, batch_size=32, stop=None, common_prods=[50, 100, 200, 500, 1000]):
        self.common_prods = common_prods
        self.batch_size = batch_size
        self.product_list = [ee + 1 for ee in range(NN_word)]
        self.product_set = set(self.product_list)
        self.neg_sample = neg_sample
        nn_each_group, amari = divmod(len(self.product_list), NN_SEG)
        if amari != 0:
            nn_each_group += 1
        del_none = lambda l: filter(lambda x: x is not None, l)
        self.product_group = [[e1 for e1 in del_none(ee)]
                              for ee in itertools.zip_longest(*[iter(self.product_list)] * nn_each_group)]
        self.user_list = [ee + 1 for ee in range(NN_sentence)]
        self.user_list_org = [ee for ee in self.user_list]
        if stop is not None:
            self.user_list = self.user_list[:stop]
        self.user_set = set(self.user_list)
        self.user_id_next = self.user_list[0]
        self.create_seed()
        # estimate self length
        self.initialize_it()
        self.len = 1
        for _ in self.it:
            self.len += 1
        self.initialize_it()

    def initialize_it(self):
        self.it = iter(range(0, len(self.user_list), self.batch_size))
        self.idx_next = next(self.it)

    def create_seed(self):
        self.seeds = {}
        for ii, user_id in enumerate(self.user_list):
            self.seeds[user_id] = ii

    def __len__(self):
        return self.len

    def __iter__(self):
        return self

    def __next__(self):
        idx = self.idx_next
        self.user_ids_part = self.user_list[idx:idx + self.batch_size if idx + self.batch_size < len(self.user_list) else len(self.user_list)]
        res = self.getpart(self.user_ids_part)
        try:
            self.idx_next = next(self.it)
        except StopIteration:
            self.initialize_it()
        return res

    def __getitem__(self, user_id):
        ret_users, ret_prods, ret_y = self.get_data(user_id)
        return ({'input_user': np.array(ret_users), 'input_prod': np.array(ret_prods)}, ret_y)

    def get_data(self, user_id):
        random.seed(self.seeds[user_id])
        nword = random.randint(5, 20)
        a, _ = divmod(len(self.user_list_org), NN_SEG)
        ii = int(user_id / (a + 1))
        prods = random.sample(self.product_group[ii], nword)
        prods.extend(self.common_prods)
        prods = list(set(prods))
        neg = self.get_neg(prods)
        ret_users = [user_id] * (len(prods) * (1 + self.neg_sample))
        ret_prods = prods + neg
        ret_y = [1] * len(prods) + [0] * len(neg)
        return (ret_users, ret_prods, ret_y)

    def get_neg(self, prods):
        o = self.product_set.difference(prods)
        random.seed()
        neg = random.sample(list(o), len(prods) * self.neg_sample)  # list(): random.sample no longer accepts sets
        return neg

    def getpart(self, user_ids_part):
        x_input_user = []
        x_input_prod = []
        y = []
        for user_id in user_ids_part:
            x_train, y_train = self[user_id]
            x_input_prod.extend(x_train['input_prod'].tolist())
            x_input_user.extend(x_train['input_user'].tolist())
            y.append(y_train)
        return ({'input_prod': np.array(x_input_prod), 'input_user': np.array(x_input_user)}, np.concatenate(y))

seq = Seq(neg_sample=1, common_prods=[])

X_list = [np.zeros((1, NN_word + 1))]
for iuser in seq.user_list:
    x_train, y_train = seq[iuser]
    prods = x_train['input_prod'][np.array(y_train) == 1]
    irow = np.zeros((1, NN_word + 1))
    irow[0, prods] = 1
    X_list.append(irow)
X = np.concatenate(X_list)
X

# Visualise the user x product matrix
plt.figure(figsize=(10, 10))
plt.imshow(X)
code
2036189/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
code
90124843/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
import scipy.stats as st

population = np.random.normal(size=10000000, loc=173, scale=10)
population_mean = np.round(np.mean(population), 2)
print('The population mean is', population_mean, 'and nobody knows this value.')
code
90124843/cell_6
[ "text_plain_output_1.png" ]
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
import scipy.stats as st

population = np.random.normal(size=10000000, loc=173, scale=10)
population_mean = np.round(np.mean(population), 2)
sample = np.random.choice(population, size=10)
print('\nThe sample mean is', np.round(np.mean(sample), 2),
      '\n\nWe collected the data from the sample and we know this value.')
code
90124843/cell_8
[ "text_plain_output_1.png" ]
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
import scipy.stats as st

population = np.random.normal(size=10000000, loc=173, scale=10)
population_mean = np.round(np.mean(population), 2)
sample = np.random.choice(population, size=10)

def confidence_interval(sample, prob):
    # t-interval around the sample mean with n - 1 degrees of freedom
    return st.t.interval(prob, len(sample) - 1, loc=np.mean(sample), scale=st.sem(sample))

print('The 0.95 confidence interval generated from this sample is', confidence_interval(sample, prob=0.95))
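# Equivalent by hand (a sketch, not in the original cell): mean +/- t_crit * SEM with
# t_crit = st.t.ppf(0.975, n - 1) for a two-sided 95% interval.
# n = len(sample)
# t_crit = st.t.ppf(0.975, n - 1)
# (np.mean(sample) - t_crit * st.sem(sample), np.mean(sample) + t_crit * st.sem(sample))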
code
90124843/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
import scipy.stats as st

population = np.random.normal(size=10000000, loc=173, scale=10)

plt.figure(figsize=(15, 5))
plt.hist(population, bins=100)
plt.grid()
plt.title('Population distribution')
plt.show()
code
90124843/cell_14
[ "image_output_1.png" ]
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
import scipy.stats as st

population = np.random.normal(size=10000000, loc=173, scale=10)
population_mean = np.round(np.mean(population), 2)
sample = np.random.choice(population, size=10)

def confidence_interval(sample, prob):
    return st.t.interval(prob, len(sample) - 1, loc=np.mean(sample), scale=st.sem(sample))

# Draw 100 independent samples and count how many 95% intervals cover the true mean
number_of_correct_guesses = 0
for i in range(100):
    sample = np.random.choice(population, size=10)
    conf_interval = confidence_interval(sample, prob=0.95)
    if population_mean >= conf_interval[0] and population_mean <= conf_interval[1]:
        number_of_correct_guesses += 1
print(f'Out of 100 samples, the confidence intervals for {number_of_correct_guesses} of them contained the population mean.')
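# Sanity check (a sketch, not in the original cell): with true coverage 0.95 the hit
# count is Binomial(100, 0.95), so roughly 90-99 covered intervals are plausible:
# st.binom.interval(0.95, 100, 0.95)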
code
73067313/cell_4
[ "image_output_1.png" ]
import numpy as np
import matplotlib.pyplot as plt

t = np.arange(0, 11)
x = 0.85 ** t

plt.figure(figsize=(12, 8))

plt.subplot(2, 2, 1)
plt.title('Analog Signal', fontsize=20)
plt.plot(t, x, linewidth=3, label='x(t) = (0.85)^t')
plt.xlabel('t', fontsize=15)
plt.ylabel('amplitude', fontsize=15)
plt.legend(loc='upper right')

plt.subplot(2, 2, 2)
plt.title('Sampling', fontsize=20)
plt.plot(t, x, linewidth=3, label='x(t) = (0.85)^t')
n = t
markerline, stemlines, baseline = plt.stem(n, x, label='x(n) = (0.85)^n')
plt.setp(stemlines, 'linewidth', 3)
plt.xlabel('n', fontsize=15)
plt.ylabel('amplitude', fontsize=15)
plt.legend(loc='upper right')

plt.subplot(2, 2, 3)
plt.title('Quantization', fontsize=20)
plt.plot(t, x, linewidth=3)
markerline, stemlines, baseline = plt.stem(n, x)
plt.setp(stemlines, 'linewidth', 3)
plt.xlabel('n', fontsize=15)
plt.ylabel('Range of Quantizer', fontsize=15)
# Quantizer decision levels at steps of 0.1
for level in np.linspace(0.1, 1.0, 10):
    plt.axhline(y=level, xmin=0, xmax=10, color='r', linewidth=3.0)

plt.subplot(2, 2, 4)
plt.title('Quantized Signal', fontsize=20)
xq = np.around(x, 1)  # round each sample to the nearest 0.1 level
markerline, stemlines, baseline = plt.stem(n, xq)
plt.setp(stemlines, 'linewidth', 3)
plt.xlabel('n', fontsize=15)
plt.ylabel('Range of Quantizer', fontsize=15)
for level in np.linspace(0.1, 1.0, 10):
    plt.axhline(y=level, xmin=0, xmax=10, color='r', linewidth=3.0)

plt.tight_layout()
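# Quick check (not in the original cell): rounding to one decimal keeps every sample
# within half a quantization step of the original, so this prints a value <= 0.05.
# print(np.abs(x - xq).max())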
code
73067313/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

# Context carried over from the previous cell
t = np.arange(0, 11)
x = 0.85 ** t
n = t
markerline, stemlines, baseline = plt.stem(n, x, label='x(n) = (0.85)^n')
plt.setp(stemlines, 'linewidth', 3)
markerline, stemlines, baseline = plt.stem(n, x)
plt.setp(stemlines, 'linewidth', 3)
xq = np.around(x, 1)
markerline, stemlines, baseline = plt.stem(n, xq)
plt.setp(stemlines, 'linewidth', 3)
plt.tight_layout()

impulse = signal.unit_impulse(10, 'mid')
shifted_impulse = signal.unit_impulse(7, 2)
t = np.linspace(0, 10, 100)
amp = 5
f = 50
x = amp * np.sin(2 * np.pi * f * t)
x_ = amp * np.exp(-t)

plt.figure(figsize=(10, 6))

plt.subplot(2, 2, 1)
plt.plot(np.arange(-5, 5), impulse, linewidth=3, label='Unit impulse function')
plt.ylim(-0.01, 1)
plt.xlabel('time.', fontsize=15)
plt.ylabel('Amplitude', fontsize=15)
plt.legend(fontsize=10, loc='upper right')

plt.subplot(2, 2, 2)
plt.plot(shifted_impulse, linewidth=3, label='Shifted Unit impulse function')
plt.xlabel('time.', fontsize=15)
plt.ylabel('Amplitude', fontsize=15)
plt.legend(fontsize=10, loc='upper right')

plt.subplot(2, 2, 3)
plt.plot(t, x, linewidth=3, label='Sine wave')
plt.xlabel('time.', fontsize=15)
plt.ylabel('Amplitude', fontsize=15)
plt.legend(fontsize=10, loc='upper right')

plt.subplot(2, 2, 4)
plt.plot(t, x_, linewidth=3, label='Exponential Signal')
plt.xlabel('time.', fontsize=15)
plt.ylabel('Amplitude', fontsize=15)
plt.legend(fontsize=10, loc='upper right')

plt.tight_layout()
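# Note (not in the original cell): t = np.linspace(0, 10, 100) samples at roughly 10 Hz,
# far below the Nyquist rate 2*f = 100 Hz for the 50 Hz sine, so the plotted curve is
# heavily aliased. A faithful plot would need something like:
# t = np.linspace(0, 0.1, 1000)  # ~10 kHz sampling over a few cycles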
code
73067313/cell_10
[ "image_output_1.png" ]
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

# Context carried over from the previous cells
t = np.arange(0, 11)
x = 0.85 ** t
n = t
markerline, stemlines, baseline = plt.stem(n, x, label='x(n) = (0.85)^n')
plt.setp(stemlines, 'linewidth', 3)
markerline, stemlines, baseline = plt.stem(n, x)
plt.setp(stemlines, 'linewidth', 3)
xq = np.around(x, 1)
markerline, stemlines, baseline = plt.stem(n, xq)
plt.setp(stemlines, 'linewidth', 3)
plt.tight_layout()

impulse = signal.unit_impulse(10, 'mid')
shifted_impulse = signal.unit_impulse(7, 2)
t = np.linspace(0, 10, 100)
amp = 5
f = 50
x = amp * np.sin(2 * np.pi * f * t)
x_ = amp * np.exp(-t)
plt.ylim(-0.01, 1)
plt.tight_layout()

# Discrete versions of the sine and exponential signals
n = np.linspace(0, 10, 100)
amp = 5
f = 50
x = amp * np.sin(2 * np.pi * f * n)
x_ = amp * np.exp(-n)

plt.figure(figsize=(12, 8))

plt.subplot(2, 2, 1)
plt.stem(n, x, 'yo', label='Sine wave')
plt.xlabel('time.', fontsize=15)
plt.ylabel('Amplitude', fontsize=15)
plt.legend(fontsize=10, loc='upper right')

plt.subplot(2, 2, 2)
plt.stem(n, x_, 'yo', label='Exponential Signal')
plt.xlabel('time.', fontsize=15)
plt.ylabel('Amplitude', fontsize=15)
plt.legend(fontsize=10, loc='upper right')
code
128010001/cell_9
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
stationsdf.head(10)
code
128010001/cell_25
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
stationsdf.isna().sum()
stationsdf.count()
stationsdf['rental_methods'].value_counts()
code
128010001/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd

# Resolve the data directory depending on whether we run on Kaggle
if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False
print(DATA_PATH)
code
128010001/cell_23
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()
tripsdf.info()
code
128010001/cell_40
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()

sns.set(rc={'figure.figsize': (15, 30)})
sns.set(rc={'figure.figsize': (10, 5)})
sns.countplot(y=tripsdf['gender'])
code
128010001/cell_48
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()

sns.set(rc={'figure.figsize': (15, 30)})
sns.set(rc={'figure.figsize': (10, 5)})
sns.set(rc={'figure.figsize': (10, 5)})
sns.set(rc={'figure.figsize': (15, 5)})
sns.countplot(data=tripsdf, x=tripsdf['cust_hour'], hue='gender')
code
128010001/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128010001/cell_19
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()
code
128010001/cell_18
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
stationsdf.isna().sum()
stationsdf.count()
code
128010001/cell_51
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()

sns.set(rc={'figure.figsize': (15, 30)})
tripsdf['tripduration'].value_counts()
code
128010001/cell_59
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()

sns.set(rc={'figure.figsize': (15, 30)})
tripsdf.sort_values(by='manhattan_distance', ascending=True)
code
128010001/cell_58
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()

sns.set(rc={'figure.figsize': (15, 30)})
print(tripsdf['manhattan_distance'].min())
code
128010001/cell_28
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()
tripsdf.describe()
code
128010001/cell_15
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
code
128010001/cell_43
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()

sns.set(rc={'figure.figsize': (15, 30)})
sns.set(rc={'figure.figsize': (10, 5)})
sns.set(rc={'figure.figsize': (10, 5)})
sns.countplot(y=tripsdf['usertype'])
code
128010001/cell_31
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()

plt.figure(figsize=(15, 20))
sns.countplot(y=tripsdf['birth_year'])
code
128010001/cell_46
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()

sns.set(rc={'figure.figsize': (15, 30)})
tripsdf['cust_hour'].value_counts().sort_index()
code
128010001/cell_14
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
stationsdf.isna().sum()
code
128010001/cell_22
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
stationsdf.isna().sum()
stationsdf.count()
stationsdf.info()
code
128010001/cell_53
[ "text_plain_output_1.png" ]
import os
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()

sns.set(rc={'figure.figsize': (15, 30)})
sns.set(rc={'figure.figsize': (10, 5)})
sns.set(rc={'figure.figsize': (10, 5)})
sns.set(rc={'figure.figsize': (15, 5)})
sns.set_style('whitegrid')

sns.displot(data=tripsdf, x=np.log10(tripsdf['tripduration']), kind='hist', aspect=5,
            hue='gender', palette='Blues').set_axis_labels('log trip_duration')
sns.displot(data=tripsdf, x=np.log10(tripsdf['tripduration']), kind='ecdf', aspect=5,
            hue='gender').set_axis_labels('log trip_duration')
code
128010001/cell_27
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
stationsdf.isna().sum()
stationsdf.count()
stationsdf.describe()
code
128010001/cell_37
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
tripsdf.isna().sum()
tripsdf.count()

sns.set(rc={'figure.figsize': (15, 30)})
sns.countplot(data=tripsdf, y=tripsdf[tripsdf.cust_age <= 99]['cust_age'], hue='gender', dodge=True)
code
128010001/cell_12
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

if os.path.exists('/kaggle/input'):
    DATA_PATH = '/kaggle/input/citibike-sampled-data-2013-2017/'
    on_kaggle = True
else:
    DATA_PATH = './'
    on_kaggle = False

stationsdf = pd.read_csv(DATA_PATH + 'citibike-stations.csv')
tripsdf = pd.read_csv(DATA_PATH + 'citibike-trips.csv')
tripsdf[tripsdf.start_station_id == 3233]
code
129024960/cell_42
[ "image_output_1.png" ]
grid_result_lasso.best_params_
code
129024960/cell_21
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]
df.describe()
code
129024960/cell_56
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]

# Five violin plots in a single row
fig, ax = plt.subplots(1, 5, figsize=(10, 5))
# Tune the spacing between the subplots
plt.subplots_adjust(wspace=0.5)
# Violin plot for S1_Light in the 1st subplot
sns.violinplot(data=df['S1_Light'], ax=ax[0], color='brown')
ax[0].set_xlabel('S1_Light')
# Violin plot for S2_Light in the 2nd subplot
sns.violinplot(data=df['S2_Light'], ax=ax[1], color='g')
ax[1].set_xlabel('S2_Light')
# Violin plot for S3_Light in the 3rd subplot
sns.violinplot(data=df['S3_Light'], ax=ax[2])
ax[2].set_xlabel('S3_Light')
# Violin plot for S4_Light in the 4th subplot
sns.violinplot(data=df['S4_Light'], ax=ax[3], color='y')
ax[3].set_xlabel('S4_Light')
# Violin plot for S5_CO2 in the 5th subplot
sns.violinplot(data=df['S5_CO2'], ax=ax[4], color='b')
ax[4].set_xlabel('S5_CO2')
# By default each subplot shows an x-tick label of 0;
# remove it by setting the labels to an empty list
for subplot in ax:
    subplot.set_xticklabels([])
plt.show()

# Boxplot of every feature against the room occupancy count
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(20, 15))
for idx, feat in enumerate(df.columns.to_list(), start=0):
    ax = axes[int(idx / 4), idx % 4]
    sns.boxplot(x='Room_Occupancy_Count', y=feat, data=df, ax=ax)
    ax.set_xlabel('')
    ax.set_ylabel(feat)
fig.tight_layout()

X = df.drop(['Room_Occupancy_Count'], axis=1)
y = df[['Room_Occupancy_Count']]

grid_result_lasso.best_params_
coef = grid_result_lasso.best_estimator_.coef_
coef
X.columns[coef == 0]

# Correlation matrix heatmap
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(10, 6))
sns.heatmap(corr_matrix,
            cmap=sns.diverging_palette(220, 10, as_cmap=True),
            annot=True,
            annot_kws={'fontsize': 7})
plt.xticks(rotation=45, ha='right', fontsize=7)
plt.yticks(fontsize=7)
plt.show()

def get_correlated_variables(dataset, threshold):
    # Collect, for each highly correlated pair, the column that comes later in the frame
    corr_columns = set()
    corr_matrix = dataset.corr()
    for i in range(len(corr_matrix.columns)):
        for j in range(i):
            if abs(corr_matrix.iloc[i, j]) > threshold:
                column_name = corr_matrix.columns[i]
                corr_columns.add(column_name)
    return corr_columns

corr_features = get_correlated_variables(X, 0.8)
corr_features
X_final = X[corr_features]
X_final.head()
code
129024960/cell_34
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]

# Five violin plots in a single row
fig, ax = plt.subplots(1, 5, figsize=(10, 5))
# Tune the spacing between the subplots
plt.subplots_adjust(wspace=0.5)
# Violin plot for S1_Light in the 1st subplot
sns.violinplot(data=df['S1_Light'], ax=ax[0], color='brown')
ax[0].set_xlabel('S1_Light')
# Violin plot for S2_Light in the 2nd subplot
sns.violinplot(data=df['S2_Light'], ax=ax[1], color='g')
ax[1].set_xlabel('S2_Light')
# Violin plot for S3_Light in the 3rd subplot
sns.violinplot(data=df['S3_Light'], ax=ax[2])
ax[2].set_xlabel('S3_Light')
# Violin plot for S4_Light in the 4th subplot
sns.violinplot(data=df['S4_Light'], ax=ax[3], color='y')
ax[3].set_xlabel('S4_Light')
# Violin plot for S5_CO2 in the 5th subplot
sns.violinplot(data=df['S5_CO2'], ax=ax[4], color='b')
ax[4].set_xlabel('S5_CO2')
# By default each subplot shows an x-tick label of 0;
# remove it by setting the labels to an empty list
for subplot in ax:
    subplot.set_xticklabels([])
plt.show()

# Boxplot of every feature against the room occupancy count
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(20, 15))
for idx, feat in enumerate(df.columns.to_list(), start=0):
    ax = axes[int(idx / 4), idx % 4]
    sns.boxplot(x='Room_Occupancy_Count', y=feat, data=df, ax=ax)
    ax.set_xlabel('')
    ax.set_ylabel(feat)
fig.tight_layout()
code
129024960/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]

fig, ax = plt.subplots(1, 5, figsize=(10, 5))
plt.subplots_adjust(wspace=0.5)
sns.violinplot(data=df['S1_Light'], ax=ax[0], color='brown')
ax[0].set_xlabel('S1_Light')
sns.violinplot(data=df['S2_Light'], ax=ax[1], color='g')
ax[1].set_xlabel('S2_Light')
sns.violinplot(data=df['S3_Light'], ax=ax[2])
ax[2].set_xlabel('S3_Light')
sns.violinplot(data=df['S4_Light'], ax=ax[3], color='y')
ax[3].set_xlabel('S4_Light')
sns.violinplot(data=df['S5_CO2'], ax=ax[4], color='b')
ax[4].set_xlabel('S5_CO2')
for subplot in ax:
    subplot.set_xticklabels([])
plt.show()
code
129024960/cell_30
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]

# Five violin plots in a single row
fig, ax = plt.subplots(1, 5, figsize=(10, 5))
# Tune the spacing between the subplots
plt.subplots_adjust(wspace=0.5)
# Violin plot for S1_Light in the 1st subplot
sns.violinplot(data=df['S1_Light'], ax=ax[0], color='brown')
ax[0].set_xlabel('S1_Light')
# Violin plot for S2_Light in the 2nd subplot
sns.violinplot(data=df['S2_Light'], ax=ax[1], color='g')
ax[1].set_xlabel('S2_Light')
# Violin plot for S3_Light in the 3rd subplot
sns.violinplot(data=df['S3_Light'], ax=ax[2])
ax[2].set_xlabel('S3_Light')
# Violin plot for S4_Light in the 4th subplot
sns.violinplot(data=df['S4_Light'], ax=ax[3], color='y')
ax[3].set_xlabel('S4_Light')
# Violin plot for S5_CO2 in the 5th subplot
sns.violinplot(data=df['S5_CO2'], ax=ax[4], color='b')
ax[4].set_xlabel('S5_CO2')
# By default each subplot shows an x-tick label of 0;
# remove it by setting the labels to an empty list
for subplot in ax:
    subplot.set_xticklabels([])
plt.show()

sns.pairplot(data=df, hue='Room_Occupancy_Count')
code
129024960/cell_44
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]

# Five violin plots in a single row
fig, ax = plt.subplots(1, 5, figsize=(10, 5))
# Tune the spacing between the subplots
plt.subplots_adjust(wspace=0.5)
# Violin plot for S1_Light in the 1st subplot
sns.violinplot(data=df['S1_Light'], ax=ax[0], color='brown')
ax[0].set_xlabel('S1_Light')
# Violin plot for S2_Light in the 2nd subplot
sns.violinplot(data=df['S2_Light'], ax=ax[1], color='g')
ax[1].set_xlabel('S2_Light')
# Violin plot for S3_Light in the 3rd subplot
sns.violinplot(data=df['S3_Light'], ax=ax[2])
ax[2].set_xlabel('S3_Light')
# Violin plot for S4_Light in the 4th subplot
sns.violinplot(data=df['S4_Light'], ax=ax[3], color='y')
ax[3].set_xlabel('S4_Light')
# Violin plot for S5_CO2 in the 5th subplot
sns.violinplot(data=df['S5_CO2'], ax=ax[4], color='b')
ax[4].set_xlabel('S5_CO2')
# By default each subplot shows an x-tick label of 0;
# remove it by setting the labels to an empty list
for subplot in ax:
    subplot.set_xticklabels([])
plt.show()

# Boxplot of every feature against the room occupancy count
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(20, 15))
for idx, feat in enumerate(df.columns.to_list(), start=0):
    ax = axes[int(idx / 4), idx % 4]
    sns.boxplot(x='Room_Occupancy_Count', y=feat, data=df, ax=ax)
    ax.set_xlabel('')
    ax.set_ylabel(feat)
fig.tight_layout()

X = df.drop(['Room_Occupancy_Count'], axis=1)
y = df[['Room_Occupancy_Count']]

grid_result_lasso.best_params_
coef = grid_result_lasso.best_estimator_.coef_
coef
X.columns[coef == 0]
code
129024960/cell_55
[ "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier  # assumed missing import for the estimator below
from mlxtend.feature_selection import SequentialFeatureSelector as SFS

# Sequential floating forward selection over all feature-count budgets
sffs = SFS(RandomForestClassifier(),
           k_features=(1, len(X.columns)),
           forward=True,
           floating=True,
           scoring='accuracy',
           cv=5)
sffs.fit(X, y)
corr_features = list(sffs.k_feature_names_)
corr_features
code
129024960/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.head()
code
129024960/cell_40
[ "text_html_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]

# Five violin plots in a single row
fig, ax = plt.subplots(1, 5, figsize=(10, 5))
# Tune the spacing between the subplots
plt.subplots_adjust(wspace=0.5)
# Violin plot for S1_Light in the 1st subplot
sns.violinplot(data=df['S1_Light'], ax=ax[0], color='brown')
ax[0].set_xlabel('S1_Light')
# Violin plot for S2_Light in the 2nd subplot
sns.violinplot(data=df['S2_Light'], ax=ax[1], color='g')
ax[1].set_xlabel('S2_Light')
# Violin plot for S3_Light in the 3rd subplot
sns.violinplot(data=df['S3_Light'], ax=ax[2])
ax[2].set_xlabel('S3_Light')
# Violin plot for S4_Light in the 4th subplot
sns.violinplot(data=df['S4_Light'], ax=ax[3], color='y')
ax[3].set_xlabel('S4_Light')
# Violin plot for S5_CO2 in the 5th subplot
sns.violinplot(data=df['S5_CO2'], ax=ax[4], color='b')
ax[4].set_xlabel('S5_CO2')
# By default each subplot shows an x-tick label of 0;
# remove it by setting the labels to an empty list
for subplot in ax:
    subplot.set_xticklabels([])
plt.show()

# Boxplot of every feature against the room occupancy count
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(20, 15))
for idx, feat in enumerate(df.columns.to_list(), start=0):
    ax = axes[int(idx / 4), idx % 4]
    sns.boxplot(x='Room_Occupancy_Count', y=feat, data=df, ax=ax)
    ax.set_xlabel('')
    ax.set_ylabel(feat)
fig.tight_layout()

X = df.drop(['Room_Occupancy_Count'], axis=1)
y = df[['Room_Occupancy_Count']]

# Standardise the features before fitting the Lasso
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
lasso_reg = Lasso()
lasso_reg.fit(X_scaled, y)
code
129024960/cell_48
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]

# Five violin plots in a single row
fig, ax = plt.subplots(1, 5, figsize=(10, 5))
# Tune the spacing between the subplots
plt.subplots_adjust(wspace=0.5)
# Violin plot for S1_Light in the 1st subplot
sns.violinplot(data=df['S1_Light'], ax=ax[0], color='brown')
ax[0].set_xlabel('S1_Light')
# Violin plot for S2_Light in the 2nd subplot
sns.violinplot(data=df['S2_Light'], ax=ax[1], color='g')
ax[1].set_xlabel('S2_Light')
# Violin plot for S3_Light in the 3rd subplot
sns.violinplot(data=df['S3_Light'], ax=ax[2])
ax[2].set_xlabel('S3_Light')
# Violin plot for S4_Light in the 4th subplot
sns.violinplot(data=df['S4_Light'], ax=ax[3], color='y')
ax[3].set_xlabel('S4_Light')
# Violin plot for S5_CO2 in the 5th subplot
sns.violinplot(data=df['S5_CO2'], ax=ax[4], color='b')
ax[4].set_xlabel('S5_CO2')
# By default each subplot shows an x-tick label of 0;
# remove it by setting the labels to an empty list
for subplot in ax:
    subplot.set_xticklabels([])
plt.show()

# Boxplot of every feature against the room occupancy count
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(20, 15))
for idx, feat in enumerate(df.columns.to_list(), start=0):
    ax = axes[int(idx / 4), idx % 4]
    sns.boxplot(x='Room_Occupancy_Count', y=feat, data=df, ax=ax)
    ax.set_xlabel('')
    ax.set_ylabel(feat)
fig.tight_layout()

X = df.drop(['Room_Occupancy_Count'], axis=1)
y = df[['Room_Occupancy_Count']]

# Correlation matrix heatmap
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(10, 6))
sns.heatmap(corr_matrix,
            cmap=sns.diverging_palette(220, 10, as_cmap=True),
            annot=True,
            annot_kws={'fontsize': 7})
plt.xticks(rotation=45, ha='right', fontsize=7)
plt.yticks(fontsize=7)
plt.show()
code
129024960/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold  # assumed missing imports

grid = {'alpha': [0.0001, 0.001, 0.01, 0.1, 1]}
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=42)
# Caveat: Lasso predicts continuous values, so scoring='accuracy' is an odd
# choice here; kept as in the source.
grid_search_lasso = GridSearchCV(estimator=lasso_reg, param_grid=grid, n_jobs=-1,
                                 cv=cv, scoring='accuracy', error_score=0)
grid_result_lasso = grid_search_lasso.fit(X_scaled, y)
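# Inspection sketch (not in the original cell; standard sklearn attributes assumed):
# print(grid_result_lasso.best_score_, grid_result_lasso.best_params_)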
code
129024960/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
code
129024960/cell_49
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]

# Five violin plots in a single row
fig, ax = plt.subplots(1, 5, figsize=(10, 5))
# Tune the spacing between the subplots
plt.subplots_adjust(wspace=0.5)
# Violin plot for S1_Light in the 1st subplot
sns.violinplot(data=df['S1_Light'], ax=ax[0], color='brown')
ax[0].set_xlabel('S1_Light')
# Violin plot for S2_Light in the 2nd subplot
sns.violinplot(data=df['S2_Light'], ax=ax[1], color='g')
ax[1].set_xlabel('S2_Light')
# Violin plot for S3_Light in the 3rd subplot
sns.violinplot(data=df['S3_Light'], ax=ax[2])
ax[2].set_xlabel('S3_Light')
# Violin plot for S4_Light in the 4th subplot
sns.violinplot(data=df['S4_Light'], ax=ax[3], color='y')
ax[3].set_xlabel('S4_Light')
# Violin plot for S5_CO2 in the 5th subplot
sns.violinplot(data=df['S5_CO2'], ax=ax[4], color='b')
ax[4].set_xlabel('S5_CO2')
# By default each subplot shows an x-tick label of 0;
# remove it by setting the labels to an empty list
for subplot in ax:
    subplot.set_xticklabels([])
plt.show()

# Boxplot of every feature against the room occupancy count
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(20, 15))
for idx, feat in enumerate(df.columns.to_list(), start=0):
    ax = axes[int(idx / 4), idx % 4]
    sns.boxplot(x='Room_Occupancy_Count', y=feat, data=df, ax=ax)
    ax.set_xlabel('')
    ax.set_ylabel(feat)
fig.tight_layout()

X = df.drop(['Room_Occupancy_Count'], axis=1)
y = df[['Room_Occupancy_Count']]

# Correlation matrix heatmap
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(10, 6))
sns.heatmap(corr_matrix,
            cmap=sns.diverging_palette(220, 10, as_cmap=True),
            annot=True,
            annot_kws={'fontsize': 7})
plt.xticks(rotation=45, ha='right', fontsize=7)
plt.yticks(fontsize=7)
plt.show()

df.shape
code
129024960/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]
df.info()
code
129024960/cell_51
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]

# Five violin plots in a single row
fig, ax = plt.subplots(1, 5, figsize=(10, 5))
# Tune the spacing between the subplots
plt.subplots_adjust(wspace=0.5)
# Violin plot for S1_Light in the 1st subplot
sns.violinplot(data=df['S1_Light'], ax=ax[0], color='brown')
ax[0].set_xlabel('S1_Light')
# Violin plot for S2_Light in the 2nd subplot
sns.violinplot(data=df['S2_Light'], ax=ax[1], color='g')
ax[1].set_xlabel('S2_Light')
# Violin plot for S3_Light in the 3rd subplot
sns.violinplot(data=df['S3_Light'], ax=ax[2])
ax[2].set_xlabel('S3_Light')
# Violin plot for S4_Light in the 4th subplot
sns.violinplot(data=df['S4_Light'], ax=ax[3], color='y')
ax[3].set_xlabel('S4_Light')
# Violin plot for S5_CO2 in the 5th subplot
sns.violinplot(data=df['S5_CO2'], ax=ax[4], color='b')
ax[4].set_xlabel('S5_CO2')
# By default each subplot shows an x-tick label of 0;
# remove it by setting the labels to an empty list
for subplot in ax:
    subplot.set_xticklabels([])
plt.show()

# Boxplot of every feature against the room occupancy count
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(20, 15))
for idx, feat in enumerate(df.columns.to_list(), start=0):
    ax = axes[int(idx / 4), idx % 4]
    sns.boxplot(x='Room_Occupancy_Count', y=feat, data=df, ax=ax)
    ax.set_xlabel('')
    ax.set_ylabel(feat)
fig.tight_layout()

X = df.drop(['Room_Occupancy_Count'], axis=1)
y = df[['Room_Occupancy_Count']]

grid_result_lasso.best_params_
coef = grid_result_lasso.best_estimator_.coef_
coef
X.columns[coef == 0]

# Correlation matrix heatmap
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(10, 6))
sns.heatmap(corr_matrix,
            cmap=sns.diverging_palette(220, 10, as_cmap=True),
            annot=True,
            annot_kws={'fontsize': 7})
plt.xticks(rotation=45, ha='right', fontsize=7)
plt.yticks(fontsize=7)
plt.show()

def get_correlated_variables(dataset, threshold):
    # Collect, for each highly correlated pair, the column that comes later in the frame
    corr_columns = set()
    corr_matrix = dataset.corr()
    for i in range(len(corr_matrix.columns)):
        for j in range(i):
            if abs(corr_matrix.iloc[i, j]) > threshold:
                column_name = corr_matrix.columns[i]
                corr_columns.add(column_name)
    return corr_columns

corr_features = get_correlated_variables(X, 0.8)
corr_features
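# Behaviour sketch (not in the original cell): the helper flags, for each correlated
# pair, the column that appears later in the frame, e.g.:
# get_correlated_variables(pd.DataFrame({'a': [1, 2, 3], 'b': [2, 4, 6]}), 0.8)  # {'b'}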
code
129024960/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df['Room_Occupancy_Count'].value_counts().plot(kind='pie')
code
129024960/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]
code
129024960/cell_43
[ "text_html_output_1.png" ]
grid_result_lasso.best_params_
coef = grid_result_lasso.best_estimator_.coef_
coef
code
129024960/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
df.drop(columns=['Date', 'Time'], axis=1, inplace=True)
df[df.duplicated()]
df.head()
code
129024960/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('room_occupancy_estimation_dataset.csv')
df.shape
df[df.duplicated()]
code
2022682/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
iris_main.describe
tmp = iris_main.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
code
2022682/cell_4
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
code
2022682/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
iris_main.describe
iris_main.info()
code
2022682/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
iris_main.describe
tmp = iris_main.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
X = iris_main.drop(['Id', 'Species'], axis=1)
Y = iris_main['Species']
X.head()
code
2022682/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_palette('husl')
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2022682/cell_7
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
iris_main.describe
iris_main.describe()
code
2022682/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
iris_main.describe
iris_main['Species'].value_counts()
code
2022682/cell_15
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
iris_main.describe
tmp = iris_main.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
K_range = list(range(1, 26))
scores = []
for k in K_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, Y_train)
    Y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(Y_test, Y_pred))
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
print('the accuracy score for logistic regression is : ', metrics.accuracy_score(Y_test, Y_pred))
code
2022682/cell_16
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
iris_main.describe
tmp = iris_main.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
K_range = list(range(1, 26))
scores = []
for k in K_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, Y_train)
    Y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(Y_test, Y_pred))
knn = KNeighborsClassifier(n_neighbors=12)
knn.fit(X_train, Y_train)
knn.predict([[2, 5, 1, 1.5]])
code
2022682/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
iris_main = pd.read_csv('../input/Iris.csv')
iris_main
code
2022682/cell_14
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
iris_main.describe
tmp = iris_main.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
K_range = list(range(1, 26))
scores = []
for k in K_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, Y_train)
    Y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(Y_test, Y_pred))
plt.plot(K_range, scores)
plt.xlabel('value of K for knn')
plt.ylabel('Accuracy scores')
plt.title('accuracy scores with respect to each value of K for knn')
plt.show()
code
2022682/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
iris_main.describe
tmp = iris_main.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
X = iris_main.drop(['Id', 'Species'], axis=1)
Y = iris_main['Species']
Y.head()
code
2022682/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
iris_main = pd.read_csv('../input/Iris.csv')
iris_main.shape
iris_main.describe
code
34150026/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
train.head()
code
34150026/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
id = test['qid']
id.shape
code
34150026/cell_57
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(max_iter=1000)
logit.fit(X_train, y_train)
score = logit.score(X_train, y_train)
y = logit.predict(X_test)
from sklearn.metrics import classification_report
print('classification_report of X_test data is : ', classification_report(y_test, y))
code
34150026/cell_44
[ "text_plain_output_1.png" ]
from bs4 import BeautifulSoup
from keras.preprocessing import text, sequence
from nltk.corpus import stopwords
from string import punctuation
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import re      # used by the regex helpers below; missing from the original imports
import string  # used for string.punctuation below; missing from the original imports
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
train.rename(columns={'question_text': 'text'}, inplace=True)
train.isnull().sum()
train.drop('qid', inplace=True, axis=1)
test.rename(columns={'question_text': 'text'}, inplace=True)
test.isnull().sum()
test.drop('qid', inplace=True, axis=1)
stop = set(stopwords.words('english'))
punctuation = list(string.punctuation)
stop.update(punctuation)

def strip_html(text):
    soup = BeautifulSoup(text, 'html.parser')
    return soup.get_text()

def remove_between_square_brackets(text):
    return re.sub('\\[[^]]*\\]', '', text)

def remove_urls(text):
    return re.sub('http\\S+', '', text)

def remove_hash(text):
    text = ' '.join((word.strip() for word in re.split('#|_', text)))
    return text

def remove_stopwords(text):
    final_text = []
    for i in text.split():
        if i.strip().lower() not in stop:
            final_text.append(i.strip())
    return ' '.join(final_text)

def denoise_text(text):
    text = strip_html(text)
    text = remove_between_square_brackets(text)
    text = remove_urls(text)
    text = remove_hash(text)
    text = remove_stopwords(text)
    return text

max_features = 20000
maxlen = 100
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(train.text)
tokenized_train = tokenizer.texts_to_sequences(train.text)
X = sequence.pad_sequences(tokenized_train, maxlen=maxlen)
tokenized_test = tokenizer.texts_to_sequences(test.text)
sub_test = sequence.pad_sequences(tokenized_test, maxlen=maxlen)
sub_test.shape
code
34150026/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
train.rename(columns={'question_text': 'text'}, inplace=True)
train.isnull().sum()
train.head()
code
34150026/cell_55
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(max_iter=1000)
logit.fit(X_train, y_train)
score = logit.score(X_train, y_train)
y = logit.predict(X_test)
from sklearn.metrics import accuracy_score
print('accuracy of X_test data is : ', accuracy_score(y_test, y))
code
34150026/cell_26
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
test.rename(columns={'question_text': 'text'}, inplace=True)
test.isnull().sum()
code
34150026/cell_61
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
submission.head()
code
34150026/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
sns.barplot(x.index, x)
plt.gca().set_ylabel('samples')
code
34150026/cell_60
[ "text_plain_output_1.png" ]
from bs4 import BeautifulSoup
from keras.preprocessing import text, sequence
from nltk.corpus import stopwords
from sklearn.linear_model import LogisticRegression
from string import punctuation
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import re      # used by the regex helpers below; missing from the original imports
import string  # used for string.punctuation below; missing from the original imports
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
train.rename(columns={'question_text': 'text'}, inplace=True)
train.isnull().sum()
train.drop('qid', inplace=True, axis=1)
test.rename(columns={'question_text': 'text'}, inplace=True)
test.isnull().sum()
test.drop('qid', inplace=True, axis=1)
stop = set(stopwords.words('english'))
punctuation = list(string.punctuation)
stop.update(punctuation)

def strip_html(text):
    soup = BeautifulSoup(text, 'html.parser')
    return soup.get_text()

def remove_between_square_brackets(text):
    return re.sub('\\[[^]]*\\]', '', text)

def remove_urls(text):
    return re.sub('http\\S+', '', text)

def remove_hash(text):
    text = ' '.join((word.strip() for word in re.split('#|_', text)))
    return text

def remove_stopwords(text):
    final_text = []
    for i in text.split():
        if i.strip().lower() not in stop:
            final_text.append(i.strip())
    return ' '.join(final_text)

def denoise_text(text):
    text = strip_html(text)
    text = remove_between_square_brackets(text)
    text = remove_urls(text)
    text = remove_hash(text)
    text = remove_stopwords(text)
    return text

max_features = 20000
maxlen = 100
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(train.text)
tokenized_train = tokenizer.texts_to_sequences(train.text)
X = sequence.pad_sequences(tokenized_train, maxlen=maxlen)
tokenized_test = tokenizer.texts_to_sequences(test.text)
sub_test = sequence.pad_sequences(tokenized_test, maxlen=maxlen)
sub_test.shape
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(max_iter=1000)
logit.fit(X_train, y_train)
score = logit.score(X_train, y_train)
y = logit.predict(X_test)
final = logit.predict(sub_test)
final.shape
code
34150026/cell_52
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(max_iter=1000)
logit.fit(X_train, y_train)
score = logit.score(X_train, y_train)
score
code
34150026/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34150026/cell_49
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(max_iter=1000)
logit.fit(X_train, y_train)
code
34150026/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
train.rename(columns={'question_text': 'text'}, inplace=True)
train.isnull().sum()
code
34150026/cell_62
[ "text_html_output_1.png" ]
from bs4 import BeautifulSoup
from keras.preprocessing import text, sequence
from nltk.corpus import stopwords
from string import punctuation
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import re      # used by the regex helpers below; missing from the original imports
import string  # used for string.punctuation below; missing from the original imports
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
train.rename(columns={'question_text': 'text'}, inplace=True)
train.isnull().sum()
train.drop('qid', inplace=True, axis=1)
test.rename(columns={'question_text': 'text'}, inplace=True)
test.isnull().sum()
test.drop('qid', inplace=True, axis=1)
stop = set(stopwords.words('english'))
punctuation = list(string.punctuation)
stop.update(punctuation)

def strip_html(text):
    soup = BeautifulSoup(text, 'html.parser')
    return soup.get_text()

def remove_between_square_brackets(text):
    return re.sub('\\[[^]]*\\]', '', text)

def remove_urls(text):
    return re.sub('http\\S+', '', text)

def remove_hash(text):
    text = ' '.join((word.strip() for word in re.split('#|_', text)))
    return text

def remove_stopwords(text):
    final_text = []
    for i in text.split():
        if i.strip().lower() not in stop:
            final_text.append(i.strip())
    return ' '.join(final_text)

def denoise_text(text):
    text = strip_html(text)
    text = remove_between_square_brackets(text)
    text = remove_urls(text)
    text = remove_hash(text)
    text = remove_stopwords(text)
    return text

max_features = 20000
maxlen = 100
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(train.text)
tokenized_train = tokenizer.texts_to_sequences(train.text)
X = sequence.pad_sequences(tokenized_train, maxlen=maxlen)
tokenized_test = tokenizer.texts_to_sequences(test.text)
sub_test = sequence.pad_sequences(tokenized_test, maxlen=maxlen)
test.head()
code
34150026/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
train.rename(columns={'question_text': 'text'}, inplace=True)
train['target'].value_counts()
code
34150026/cell_66
[ "text_html_output_1.png" ]
from bs4 import BeautifulSoup
from keras.preprocessing import text, sequence
from nltk.corpus import stopwords
from sklearn.linear_model import LogisticRegression
from string import punctuation
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import re      # used by the regex helpers below; missing from the original imports
import string  # used for string.punctuation below; missing from the original imports
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
id = test['qid']
id.shape
x = train.target.value_counts()
train.rename(columns={'question_text': 'text'}, inplace=True)
train.isnull().sum()
train.drop('qid', inplace=True, axis=1)
test.rename(columns={'question_text': 'text'}, inplace=True)
test.isnull().sum()
test.drop('qid', inplace=True, axis=1)
stop = set(stopwords.words('english'))
punctuation = list(string.punctuation)
stop.update(punctuation)

def strip_html(text):
    soup = BeautifulSoup(text, 'html.parser')
    return soup.get_text()

def remove_between_square_brackets(text):
    return re.sub('\\[[^]]*\\]', '', text)

def remove_urls(text):
    return re.sub('http\\S+', '', text)

def remove_hash(text):
    text = ' '.join((word.strip() for word in re.split('#|_', text)))
    return text

def remove_stopwords(text):
    final_text = []
    for i in text.split():
        if i.strip().lower() not in stop:
            final_text.append(i.strip())
    return ' '.join(final_text)

def denoise_text(text):
    text = strip_html(text)
    text = remove_between_square_brackets(text)
    text = remove_urls(text)
    text = remove_hash(text)
    text = remove_stopwords(text)
    return text

max_features = 20000
maxlen = 100
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(train.text)
tokenized_train = tokenizer.texts_to_sequences(train.text)
X = sequence.pad_sequences(tokenized_train, maxlen=maxlen)
tokenized_test = tokenizer.texts_to_sequences(test.text)
sub_test = sequence.pad_sequences(tokenized_test, maxlen=maxlen)
sub_test.shape
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(max_iter=1000)
logit.fit(X_train, y_train)
score = logit.score(X_train, y_train)
y = logit.predict(X_test)
final = logit.predict(sub_test)
final.shape
submission = pd.DataFrame({'qid': id, 'prediction': final})
sub = pd.read_csv('samplesubmission.csv')  # 'samplesubmission.csv' is presumably written by an earlier cell not captured here
sub.head()
code
34150026/cell_43
[ "text_html_output_1.png" ]
from bs4 import BeautifulSoup
from keras.preprocessing import text, sequence
from nltk.corpus import stopwords
from string import punctuation
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import re      # used by the regex helpers below; missing from the original imports
import string  # used for string.punctuation below; missing from the original imports
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
train.rename(columns={'question_text': 'text'}, inplace=True)
train.isnull().sum()
train.drop('qid', inplace=True, axis=1)
stop = set(stopwords.words('english'))
punctuation = list(string.punctuation)
stop.update(punctuation)

def strip_html(text):
    soup = BeautifulSoup(text, 'html.parser')
    return soup.get_text()

def remove_between_square_brackets(text):
    return re.sub('\\[[^]]*\\]', '', text)

def remove_urls(text):
    return re.sub('http\\S+', '', text)

def remove_hash(text):
    text = ' '.join((word.strip() for word in re.split('#|_', text)))
    return text

def remove_stopwords(text):
    final_text = []
    for i in text.split():
        if i.strip().lower() not in stop:
            final_text.append(i.strip())
    return ' '.join(final_text)

def denoise_text(text):
    text = strip_html(text)
    text = remove_between_square_brackets(text)
    text = remove_urls(text)
    text = remove_hash(text)
    text = remove_stopwords(text)
    return text

max_features = 20000
maxlen = 100
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(train.text)
tokenized_train = tokenizer.texts_to_sequences(train.text)
X = sequence.pad_sequences(tokenized_train, maxlen=maxlen)
X.shape
code
34150026/cell_31
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
x = train.target.value_counts()
train.rename(columns={'question_text': 'text'}, inplace=True)
train.isnull().sum()
train.drop('qid', inplace=True, axis=1)
train.head()
code
34150026/cell_24
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
test.head()
code
34150026/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/quora-insincere-questions-classification/train.csv')
test = pd.read_csv('../input/quora-insincere-questions-classification/test.csv')
submission = pd.read_csv('../input/quora-insincere-questions-classification/sample_submission.csv')
print('There are {} rows and {} columns in train'.format(train.shape[0], train.shape[1]))
print('There are {} rows and {} columns in test'.format(test.shape[0], test.shape[1]))
code