path: string (length 13 to 17)
screenshot_names: sequence of strings (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string class (1 value)
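Each record in this dump is one notebook cell: path is the kernel id plus cell name (e.g. 32068206/cell_41), screenshot_names lists the captured output images for that cell, code holds the cell source flattened to a single string, and cell_type is always code. As a minimal sketch of how such records could be consumed, assuming a hypothetical JSONL export named cells.jsonl with exactly these four fields per line:

import json
from collections import defaultdict

# Group cell records by notebook id. The file name 'cells.jsonl' and the
# one-JSON-object-per-line layout are assumptions for illustration only.
notebooks = defaultdict(list)
with open('cells.jsonl', encoding='utf-8') as f:
    for line in f:
        row = json.loads(line)
        kernel_id, cell_name = row['path'].split('/', 1)  # e.g. '32068206', 'cell_41'
        notebooks[kernel_id].append({
            'cell': cell_name,
            'code': row['code'],
            'screenshots': row['screenshot_names'],
        })

# Example: number of captured cells per notebook
for kernel_id, cells in notebooks.items():
    print(kernel_id, len(cells))

Grouping on the kernel-id prefix of path keeps each notebook's cells together, mirroring how the rows below are grouped by notebook.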
32068206/cell_41
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') sns.set(rc={'figure.figsize':(11.7,12)}) ax = sns.countplot(y = 'item_category_id', data = items, order = items['item_category_id'].value_counts(ascending=True).index) sales_train.groupby('shop_id').mean() sns.set(rc={'figure.figsize':(13,13)}) ax = sns.barplot(x=sales_train.groupby('shop_id').mean().index, y=sales_train.groupby('shop_id').mean()['item_cnt_day'], color="salmon") sales_train.groupby('shop_id').sum() sub_sales_df = sales_train.groupby('shop_id').sum() sub_sales_df['index_shop'] = sub_sales_df.index sub_sales_df = sub_sales_df.sort_values(['item_cnt_day']).reset_index(drop=True) sns.set(rc={'figure.figsize': (13, 13)}) ax = sns.barplot(x=sub_sales_df['index_shop'], y=sub_sales_df['item_cnt_day'], order=sub_sales_df['index_shop'], color='salmon')
code
32068206/cell_2
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
32068206/cell_11
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') sns.set(rc={'figure.figsize': (11.7, 12)}) ax = sns.countplot(y='item_category_id', data=items, order=items['item_category_id'].value_counts(ascending=True).index)
code
32068206/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') item_categories.head(5)
code
32068206/cell_50
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') sns.set(rc={'figure.figsize':(11.7,12)}) ax = sns.countplot(y = 'item_category_id', data = items, order = items['item_category_id'].value_counts(ascending=True).index) sales_train.groupby('shop_id').mean() sns.set(rc={'figure.figsize':(13,13)}) ax = sns.barplot(x=sales_train.groupby('shop_id').mean().index, y=sales_train.groupby('shop_id').mean()['item_cnt_day'], color="salmon") sales_train.groupby('shop_id').sum() sub_sales_df = sales_train.groupby('shop_id').sum() sub_sales_df['index_shop'] = sub_sales_df.index sub_sales_df = sub_sales_df.sort_values(['item_cnt_day']).reset_index(drop=True) print('Count of time blocks: ', len(sales_train['date_block_num'].unique()))
code
32068206/cell_49
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') sns.set(rc={'figure.figsize':(11.7,12)}) ax = sns.countplot(y = 'item_category_id', data = items, order = items['item_category_id'].value_counts(ascending=True).index) sales_train.groupby('shop_id').mean() sns.set(rc={'figure.figsize':(13,13)}) ax = sns.barplot(x=sales_train.groupby('shop_id').mean().index, y=sales_train.groupby('shop_id').mean()['item_cnt_day'], color="salmon") sales_train.groupby('shop_id').sum() sub_sales_df = sales_train.groupby('shop_id').sum() sub_sales_df['index_shop'] = sub_sales_df.index sub_sales_df = sub_sales_df.sort_values(['item_cnt_day']).reset_index(drop=True) sns.set(rc={'figure.figsize':(13,13)}) ax = sns.barplot(x=sub_sales_df['index_shop'], y=sub_sales_df['item_cnt_day'], order=sub_sales_df['index_shop'],color="salmon") sns.set(rc={'figure.figsize':(10,10)}) ax = sns.kdeplot(sales_train['item_price'], color="black", shade=True) sns.set(rc={'figure.figsize':(10,10)}) ax = sns.kdeplot(sales_train['item_cnt_day'], color="green", bw=1.5, shade=True) sns.set(rc={'figure.figsize': (12, 10)}) ax = sns.pointplot(sales_train['date'], sales_train['date_block_num'], color='red') ax.set_xlabel('')
code
32068206/cell_28
[ "text_html_output_1.png" ]
import pandas as pd items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') shops.describe()
code
32068206/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') items.head(5)
code
32068206/cell_38
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') sns.set(rc={'figure.figsize':(11.7,12)}) ax = sns.countplot(y = 'item_category_id', data = items, order = items['item_category_id'].value_counts(ascending=True).index) sales_train.groupby('shop_id').mean() sns.set(rc={'figure.figsize': (13, 13)}) ax = sns.barplot(x=sales_train.groupby('shop_id').mean().index, y=sales_train.groupby('shop_id').mean()['item_cnt_day'], color='salmon')
code
32068206/cell_47
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') sns.set(rc={'figure.figsize':(11.7,12)}) ax = sns.countplot(y = 'item_category_id', data = items, order = items['item_category_id'].value_counts(ascending=True).index) sales_train.groupby('shop_id').mean() sns.set(rc={'figure.figsize':(13,13)}) ax = sns.barplot(x=sales_train.groupby('shop_id').mean().index, y=sales_train.groupby('shop_id').mean()['item_cnt_day'], color="salmon") sales_train.groupby('shop_id').sum() sub_sales_df = sales_train.groupby('shop_id').sum() sub_sales_df['index_shop'] = sub_sales_df.index sub_sales_df = sub_sales_df.sort_values(['item_cnt_day']).reset_index(drop=True) print('Count of items overall:', len(sales_train)) print('Count of items < 0:', len(sales_train[sales_train['item_cnt_day'] < 0])) print('Count of items < 10:', len(sales_train[sales_train['item_cnt_day'] < 10])) print('Count of items 10 <= x <= 100:', len(sales_train) - len(sales_train[sales_train['item_cnt_day'] > 100]) - len(sales_train[sales_train['item_cnt_day'] < 10])) print('Count of items > 100:', len(sales_train[sales_train['item_cnt_day'] > 100]))
code
32068206/cell_35
[ "text_html_output_1.png" ]
import pandas as pd items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') sales_train.describe()
code
32068206/cell_43
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') sns.set(rc={'figure.figsize':(11.7,12)}) ax = sns.countplot(y = 'item_category_id', data = items, order = items['item_category_id'].value_counts(ascending=True).index) sales_train.groupby('shop_id').mean() sns.set(rc={'figure.figsize':(13,13)}) ax = sns.barplot(x=sales_train.groupby('shop_id').mean().index, y=sales_train.groupby('shop_id').mean()['item_cnt_day'], color="salmon") sales_train.groupby('shop_id').sum() sub_sales_df = sales_train.groupby('shop_id').sum() sub_sales_df['index_shop'] = sub_sales_df.index sub_sales_df = sub_sales_df.sort_values(['item_cnt_day']).reset_index(drop=True) sns.set(rc={'figure.figsize':(13,13)}) ax = sns.barplot(x=sub_sales_df['index_shop'], y=sub_sales_df['item_cnt_day'], order=sub_sales_df['index_shop'],color="salmon") sns.set(rc={'figure.figsize': (10, 10)}) ax = sns.kdeplot(sales_train['item_price'], color='black', shade=True)
code
32068206/cell_46
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') sns.set(rc={'figure.figsize':(11.7,12)}) ax = sns.countplot(y = 'item_category_id', data = items, order = items['item_category_id'].value_counts(ascending=True).index) sales_train.groupby('shop_id').mean() sns.set(rc={'figure.figsize':(13,13)}) ax = sns.barplot(x=sales_train.groupby('shop_id').mean().index, y=sales_train.groupby('shop_id').mean()['item_cnt_day'], color="salmon") sales_train.groupby('shop_id').sum() sub_sales_df = sales_train.groupby('shop_id').sum() sub_sales_df['index_shop'] = sub_sales_df.index sub_sales_df = sub_sales_df.sort_values(['item_cnt_day']).reset_index(drop=True) sns.set(rc={'figure.figsize':(13,13)}) ax = sns.barplot(x=sub_sales_df['index_shop'], y=sub_sales_df['item_cnt_day'], order=sub_sales_df['index_shop'],color="salmon") sns.set(rc={'figure.figsize':(10,10)}) ax = sns.kdeplot(sales_train['item_price'], color="black", shade=True) sns.set(rc={'figure.figsize': (10, 10)}) ax = sns.kdeplot(sales_train['item_cnt_day'], color='green', bw=1.5, shade=True)
code
32068206/cell_14
[ "image_output_1.png" ]
import pandas as pd items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') print('Count of categories with count of items < 100 is', len(list(filter(lambda x: x < 100, items['item_category_id'].value_counts(ascending=True)))))
code
32068206/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') items.describe()
code
32068206/cell_27
[ "text_html_output_1.png" ]
import pandas as pd items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') shops.info()
code
32068206/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') sales_train.groupby('shop_id').mean()
code
32068206/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv') sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv') item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv') test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv') shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv') sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv') print('Count of categories with count of items > 1000 is', len(list(filter(lambda x: x > 1000, items['item_category_id'].value_counts(ascending=True)))))
code
122259364/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd sample = pd.read_csv('sample_solution.csv') test_data = pd.read_csv('test_data.csv', index_col=0) train_data = pd.read_csv('train_data.csv', index_col=0) sample.head()
code
329837/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import chisquare from sklearn.decomposition import PCA from sklearn.decomposition import PCA import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[chars]: group = df[chars].groupby(feat) for otherfeat in df[chars].drop(feat, axis=1): summary = group[otherfeat].count() if chisquare(summary)[1] < 0.05: flags.append(feat) flags.append(otherfeat) flags = set(flags) from sklearn.decomposition import PCA dums = df[chars].select_dtypes(include=['bool']).astype(float) dums = dums.join(pd.get_dummies(df[[i for i in chars if i not in dums.columns.values]])) from sklearn.decomposition import PCA pca = PCA(n_components=2) components = pca.fit_transform(dums) import numpy as np components = {} index = 0 for feature in dums.columns.values: components[feature] = [pca.components_[0][index]] index += 1 sortedcomps = np.sort(pca.components_[0]) maxcap = sortedcomps[-3] mincap = sortedcomps[2] components = {i: x for i, x in components.items() if x[0] >= maxcap or x[0] <= mincap} components = pd.DataFrame(components) components.plot(kind='bar', figsize=(12, 4))
code
329837/cell_4
[ "text_plain_output_1.png" ]
from scipy.stats import chisquare import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[chars]: group = df[chars].groupby(feat) for otherfeat in df[chars].drop(feat, axis=1): summary = group[otherfeat].count() if chisquare(summary)[1] < 0.05: flags.append(feat) flags.append(otherfeat) flags = set(flags) print('It looks like {}% of the characteristics might be related to one another.'.format(len(flags) / len(chars) * 100))
code
329837/cell_6
[ "text_plain_output_1.png" ]
from scipy.stats import chisquare import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[chars]: group = df[chars].groupby(feat) for otherfeat in df[chars].drop(feat, axis=1): summary = group[otherfeat].count() if chisquare(summary)[1] < 0.05: flags.append(feat) flags.append(otherfeat) flags = set(flags) from sklearn.decomposition import PCA dums = df[chars].select_dtypes(include=['bool']).astype(float) dums = dums.join(pd.get_dummies(df[[i for i in chars if i not in dums.columns.values]])) print('Before PCA the full size of the characteristics is {} features'.format(len(dums.columns.values)))
code
329837/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') print(df.head())
code
329837/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import chisquare from sklearn.decomposition import PCA from sklearn.decomposition import PCA import pandas as pd import pandas as pd df = pd.read_csv('../input/people.csv') from scipy.stats import chisquare chars = [i for i in df.columns.values if 'char_' in i] flags = [] for feat in df[chars]: group = df[chars].groupby(feat) for otherfeat in df[chars].drop(feat, axis=1): summary = group[otherfeat].count() if chisquare(summary)[1] < 0.05: flags.append(feat) flags.append(otherfeat) flags = set(flags) from sklearn.decomposition import PCA dums = df[chars].select_dtypes(include=['bool']).astype(float) dums = dums.join(pd.get_dummies(df[[i for i in chars if i not in dums.columns.values]])) from sklearn.decomposition import PCA pca = PCA(n_components=2) components = pca.fit_transform(dums) print(pca.explained_variance_ratio_)
code
318221/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd import sqlite3 con = sqlite3.connect('../input/database.sqlite') post = pd.read_sql_query('SELECT * FROM post', con) comment = pd.read_sql_query('SELECT * FROM comment', con) like = pd.read_sql_query('SELECT * FROM like', con) rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con) comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left') comment['gid'].value_counts()
code
318221/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd import sqlite3 con = sqlite3.connect('../input/database.sqlite') post = pd.read_sql_query('SELECT * FROM post', con) comment = pd.read_sql_query('SELECT * FROM comment', con) like = pd.read_sql_query('SELECT * FROM like', con) rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con) comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left') comment[(comment.gid == '117291968282998') & (comment.rid == '')]['name'].value_counts().head(10)
code
318221/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd import sqlite3 con = sqlite3.connect('../input/database.sqlite') post = pd.read_sql_query('SELECT * FROM post', con) comment = pd.read_sql_query('SELECT * FROM comment', con) like = pd.read_sql_query('SELECT * FROM like', con) rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con) comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left') comment[(comment.gid == '117291968282998') & (comment.rid != '')]['rname'].value_counts().head(10)
code
318221/cell_5
[ "text_html_output_1.png" ]
import pandas as pd import sqlite3 con = sqlite3.connect('../input/database.sqlite') post = pd.read_sql_query('SELECT * FROM post', con) comment = pd.read_sql_query('SELECT * FROM comment', con) like = pd.read_sql_query('SELECT * FROM like', con) rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con) comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left') rmember.head(3)
code
106205992/cell_9
[ "text_html_output_2.png" ]
from fastai.tabular.all import * import pandas as pd import random train = pd.DataFrame(X, columns=[f'feat_{idx + 1}' for idx in range(X.shape[1])]) num_cols = train.columns.tolist() categories = ['a'] * 50 + ['b'] * 25 + ['c'] * 25 random.shuffle(categories) train['feat_5'] = categories cat_cols = ['feat_5'] target = pd.DataFrame(y, columns=[f'target_{idx + 1}' for idx in range(y.shape[1])]) y_names = target.columns.tolist() train = pd.concat([train, target], axis=1) splits = RandomSplitter(valid_pct=0.2)(range_of(train)) to = TabularPandas(train, procs=[Categorify, Normalize], cat_names=cat_cols, cont_names=num_cols, y_names=y_names, splits=splits) dls = to.dataloaders(bs=16).to('cuda') xb_cat, xb_cont, yb = dls.one_batch() (xb_cat.shape, xb_cont.shape, yb.shape) # X and y are assumed to come from an earlier cell that is not captured in this dump
code
106205992/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd import random train = pd.DataFrame(X, columns=[f'feat_{idx + 1}' for idx in range(X.shape[1])]) num_cols = train.columns.tolist() categories = ['a'] * 50 + ['b'] * 25 + ['c'] * 25 random.shuffle(categories) train['feat_5'] = categories cat_cols = ['feat_5'] target = pd.DataFrame(y, columns=[f'target_{idx + 1}' for idx in range(y.shape[1])]) y_names = target.columns.tolist() target.head(2) # X and y are assumed to come from an earlier cell that is not captured in this dump
code
106205992/cell_11
[ "text_html_output_1.png" ]
from fastai.tabular.all import * import pandas as pd import random train = pd.DataFrame(X, columns=[f'feat_{idx + 1}' for idx in range(X.shape[1])]) num_cols = train.columns.tolist() categories = ['a'] * 50 + ['b'] * 25 + ['c'] * 25 random.shuffle(categories) train['feat_5'] = categories cat_cols = ['feat_5'] target = pd.DataFrame(y, columns=[f'target_{idx + 1}' for idx in range(y.shape[1])]) y_names = target.columns.tolist() train = pd.concat([train, target], axis=1) splits = RandomSplitter(valid_pct=0.2)(range_of(train)) to = TabularPandas(train, procs=[Categorify, Normalize], cat_names=cat_cols, cont_names=num_cols, y_names=y_names, splits=splits) dls = to.dataloaders(bs=16).to('cuda') xb_cat, xb_cont, yb = dls.one_batch() (xb_cat.shape, xb_cont.shape, yb.shape) learn = tabular_learner(dls, metrics=[accuracy_multi], loss_func=BCEWithLogitsLossFlat()) learn.lr_find() learn.fit_one_cycle(5, 0.001) # X and y are assumed to come from an earlier cell that is not captured in this dump
code
106205992/cell_3
[ "text_html_output_1.png" ]
import pandas as pd import random train = pd.DataFrame(X, columns=[f'feat_{idx + 1}' for idx in range(X.shape[1])]) num_cols = train.columns.tolist() categories = ['a'] * 50 + ['b'] * 25 + ['c'] * 25 random.shuffle(categories) train['feat_5'] = categories cat_cols = ['feat_5'] train.head(2) # X is assumed to come from an earlier cell that is not captured in this dump
code
106205992/cell_10
[ "text_html_output_1.png" ]
from fastai.tabular.all import * import pandas as pd import random train = pd.DataFrame(X, columns=[f'feat_{idx + 1}' for idx in range(X.shape[1])]) num_cols = train.columns.tolist() categories = ['a'] * 50 + ['b'] * 25 + ['c'] * 25 random.shuffle(categories) train['feat_5'] = categories cat_cols = ['feat_5'] target = pd.DataFrame(y, columns=[f'target_{idx + 1}' for idx in range(y.shape[1])]) y_names = target.columns.tolist() train = pd.concat([train, target], axis=1) splits = RandomSplitter(valid_pct=0.2)(range_of(train)) to = TabularPandas(train, procs=[Categorify, Normalize], cat_names=cat_cols, cont_names=num_cols, y_names=y_names, splits=splits) dls = to.dataloaders(bs=16).to('cuda') xb_cat, xb_cont, yb = dls.one_batch() (xb_cat.shape, xb_cont.shape, yb.shape) learn = tabular_learner(dls, metrics=[accuracy_multi], loss_func=BCEWithLogitsLossFlat()) learn.lr_find() # X and y are assumed to come from an earlier cell that is not captured in this dump
code
106205992/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import random train = pd.DataFrame(X, columns=[f'feat_{idx + 1}' for idx in range(X.shape[1])]) num_cols = train.columns.tolist() categories = ['a'] * 50 + ['b'] * 25 + ['c'] * 25 random.shuffle(categories) train['feat_5'] = categories cat_cols = ['feat_5'] target = pd.DataFrame(y, columns=[f'target_{idx + 1}' for idx in range(y.shape[1])]) y_names = target.columns.tolist() train = pd.concat([train, target], axis=1) train.head(2) # X and y are assumed to come from an earlier cell that is not captured in this dump
code
32065262/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape
code
32065262/cell_57
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales.groupby('Product Type').sum() sales.groupby('Product Type')['Returns'].sum().sort_values() sales.groupby('Product Type').size() sales.groupby('Product Type').size().plot() sales.groupby('Product Type')['Total Net Sales'].sum().sort_values().plot.barh() sales.groupby('Product Type')['Net Quantity'].sum().sort_values(ascending=False).head(3)
code
32065262/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales['Total Net Sales'].sort_values(ascending=False).head(10)
code
32065262/cell_44
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales.groupby('Product Type').sum()
code
32065262/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales['Returns'].loc['Basket']
code
32065262/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.head()
code
32065262/cell_29
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum()
code
32065262/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales['Total Net Sales'].max()
code
32065262/cell_41
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales['Total Net Sales'].idxmax()
code
32065262/cell_61
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales.groupby('Product Type').sum() sales.groupby('Product Type')['Returns'].sum().sort_values() sales.groupby('Product Type').size() sales.groupby('Product Type').size().plot() sales.groupby('Product Type')['Total Net Sales'].sum().sort_values().plot.barh() sales.groupby('Product Type')['Net Quantity'].sum().sort_values(ascending=False).head(3) a = sales.groupby('Product Type').sum() (a['Discounts'] / a['Net Quantity']).round(2).sort_values() a = sales.groupby('Product Type').sum() a
code
32065262/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns
code
32065262/cell_50
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales.groupby('Product Type').sum() sales.groupby('Product Type')['Returns'].sum().sort_values() sales.groupby('Product Type').size() sales.groupby('Product Type').size().plot()
code
32065262/cell_52
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales.groupby('Product Type').sum() sales.groupby('Product Type')['Returns'].sum().sort_values() sales.groupby('Product Type').size() sales.groupby('Product Type').size().plot() sales.groupby('Product Type').size().plot.pie()
code
32065262/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
32065262/cell_45
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales.groupby('Product Type').sum() sales.groupby('Product Type')['Returns'].sum().sort_values()
code
32065262/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket']
code
32065262/cell_59
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales.groupby('Product Type').sum() sales.groupby('Product Type')['Returns'].sum().sort_values() sales.groupby('Product Type').size() sales.groupby('Product Type').size().plot() sales.groupby('Product Type')['Total Net Sales'].sum().sort_values().plot.barh() sales.groupby('Product Type')['Net Quantity'].sum().sort_values(ascending=False).head(3) a = sales.groupby('Product Type').sum() (a['Discounts'] / a['Net Quantity']).round(2).sort_values()
code
32065262/cell_8
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.tail(10)
code
32065262/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales['Returns']
code
32065262/cell_47
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales.groupby('Product Type').sum() sales.groupby('Product Type')['Returns'].sum().sort_values() sales.groupby('Product Type').size()
code
32065262/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean()
code
32065262/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales[['Gross Sales', 'Returns']].loc['Basket']
code
32065262/cell_22
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns']
code
32065262/cell_53
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales.groupby('Product Type').sum() sales.groupby('Product Type')['Returns'].sum().sort_values() sales.groupby('Product Type').size() sales.groupby('Product Type').size().plot() sales.groupby('Product Type')['Total Net Sales'].sum().sort_values().plot.barh()
code
32065262/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum()
code
32065262/cell_36
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) sales = pd.read_csv('/kaggle/input/retail-business-sales-20172019/business.retailsales.csv', index_col=0) sales.columns sales.shape sales.loc['Basket'] sales.loc['Basket']['Returns'] sales.sum() sales.loc['Basket'].sum() sales.loc['Basket'].mean() sales['Total Net Sales'].sort_values(ascending=False).head(10)
code
33119952/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
50239348/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') train.shape
code
50239348/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random import tensorflow as tf import tensorflow_addons as tfa train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') train.shape Y_train = train['label'] X_train = train.drop('label', axis=1).values test = test.values RANDOM_STATE = 75 def random_seed(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) tf.random.set_seed(seed) def show_image(image): plt.rcParams['axes.grid'] = False plt.axis('off') AUTOTUNE = tf.data.experimental.AUTOTUNE EPOCHS = 25 SHUFFLE_BUFFER_SIZE = 5000 BATCH_SIZE = 64 @tf.function def reshape(image, label): return (tf.reshape(image, (28, 28, 1)), label) @tf.function def normalize(image, label): image = tf.cast(image, tf.float32) return (image / 255.0, label) @tf.function def rotate_tf(image): random_angles = tf.random.uniform(shape=(), minval=-np.pi / 15, maxval=np.pi / 15) return tfa.image.rotate(image, random_angles) @tf.function def augment(image, label): image = rotate_tf(image) return (image, label) def get_data_set(images, labels, is_training=False): dataset = tf.data.Dataset.from_tensor_slices((images, labels)) dataset = dataset.map(reshape, num_parallel_calls=AUTOTUNE) dataset = dataset.map(normalize, num_parallel_calls=AUTOTUNE) if is_training: dataset = dataset.map(augment, num_parallel_calls=AUTOTUNE) dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTOTUNE) return dataset train_dataset = get_data_set(X_train, y_train, is_training=True) test_dataset = get_data_set(X_val, y_val) sample_x, sample_y = next(iter(train_dataset)) show_image(sample_x[0][:, :, 0]) # X_val, y_val and y_train are assumed to come from a train/validation split cell that is not captured in this dump
code
50239348/cell_18
[ "text_plain_output_1.png" ]
import numpy as np import os import pandas as pd import random import tensorflow as tf import tensorflow_addons as tfa train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') train.shape Y_train = train['label'] X_train = train.drop('label', axis=1).values test = test.values RANDOM_STATE = 75 def random_seed(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) tf.random.set_seed(seed) AUTOTUNE = tf.data.experimental.AUTOTUNE EPOCHS = 25 SHUFFLE_BUFFER_SIZE = 5000 BATCH_SIZE = 64 @tf.function def reshape(image, label): return (tf.reshape(image, (28, 28, 1)), label) @tf.function def normalize(image, label): image = tf.cast(image, tf.float32) return (image / 255.0, label) @tf.function def rotate_tf(image): random_angles = tf.random.uniform(shape=(), minval=-np.pi / 15, maxval=np.pi / 15) return tfa.image.rotate(image, random_angles) @tf.function def augment(image, label): image = rotate_tf(image) return (image, label) def get_data_set(images, labels, is_training=False): dataset = tf.data.Dataset.from_tensor_slices((images, labels)) dataset = dataset.map(reshape, num_parallel_calls=AUTOTUNE) dataset = dataset.map(normalize, num_parallel_calls=AUTOTUNE) if is_training: dataset = dataset.map(augment, num_parallel_calls=AUTOTUNE) dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTOTUNE) return dataset train_dataset = get_data_set(X_train, y_train, is_training=True) test_dataset = get_data_set(X_val, y_val) sample_x, sample_y = next(iter(train_dataset)) print(sample_x.shape) print(sample_y.shape) # X_val, y_val and y_train are assumed to come from a train/validation split cell that is not captured in this dump
code
50239348/cell_3
[ "text_plain_output_1.png" ]
import tensorflow as tf print(tf.__version__)
code
50239348/cell_5
[ "image_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') train.head()
code
73072014/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import bernoulli from scipy.stats import binom from scipy.stats import norm from scipy.stats import norm from scipy.stats import poisson from scipy.stats import uniform import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) sns.set(rc={'figure.figsize': (9.5, 5)}) # import uniform distribution from scipy.stats import uniform # generate random numbers from a uniform distribution sample_size = 10000 param_loc = 5 # left-hand endpoint of the domain interval param_scale = 10 # width of the domain interval data_uniform = uniform.rvs(size=sample_size, loc=param_loc, scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_uniform[0:5]) # plot a histogram of the output ax = sns.distplot(data_uniform, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Uniform Distribution: Sample Size = {sample_size}. loc={param_loc}, scale={param_scale}'); from scipy.stats import norm # generate random numbers from a normal distribution sample_size = 100 param_loc = 3 # mean param_scale = 2 # standard deviation data_normal = norm.rvs(size=sample_size,loc=param_loc,scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_normal[0:5]) # plot a histogram of the output ax = sns.distplot(data_normal, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Normal Distribution: Sample Size = {sample_size}, loc={param_loc}, scale={param_scale}'); # import bernoulli from scipy.stats import bernoulli # generate bernoulli data sample_size = 100000 param_p = 0.3 # probability of success data_bern = bernoulli.rvs(size=sample_size,p=param_p) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_bern[0:5]) # Create the Plot ax = sns.distplot(data_bern, kde=False, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Bernoulli Distribution: Sample Size = {sample_size}, p={param_p}'); ax.legend(); from scipy.stats import binom # Generate Binomial Data sample_size = 10000 param_n = 10 # number of trials param_p = 0.7 # probability of success in one trial data_binom = binom.rvs(size=sample_size, n=param_n,p=param_p,) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_binom[0:5]) # Create the Plot ax = sns.distplot(data_binom, kde=False, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Binomial Distribution: n={param_n}, p={param_p}') ax.legend(); from scipy.stats import poisson sample_size = 10000 param_mu = 3 data_poisson = poisson.rvs(size=sample_size, mu=param_mu) print('The first 5 values from this distribution:') print(data_poisson[0:5]) ax = sns.distplot(data_poisson, kde=False, hist_kws={'label': 'Histogram'}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Poisson Distribution: Sample Size = {sample_size}, mu={param_mu}') ax.legend()
code
73072014/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import uniform import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) sns.set(rc={'figure.figsize': (9.5, 5)}) from scipy.stats import uniform sample_size = 10000 param_loc = 5 param_scale = 10 data_uniform = uniform.rvs(size=sample_size, loc=param_loc, scale=param_scale) print('The first 5 values from this distribution:') print(data_uniform[0:5]) ax = sns.distplot(data_uniform, bins=100, kde_kws={'label': 'KDE'}, hist_kws={'label': 'Histogram'}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Uniform Distribution: Sample Size = {sample_size}. loc={param_loc}, scale={param_scale}')
code
73072014/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import norm from scipy.stats import norm from scipy.stats import uniform import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) sns.set(rc={'figure.figsize': (9.5, 5)}) # import uniform distribution from scipy.stats import uniform # generate random numbers from a uniform distribution sample_size = 10000 param_loc = 5 # left-hand endpoint of the domain interval param_scale = 10 # width of the domain interval data_uniform = uniform.rvs(size=sample_size, loc=param_loc, scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_uniform[0:5]) # plot a histogram of the output ax = sns.distplot(data_uniform, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Uniform Distribution: Sample Size = {sample_size}. loc={param_loc}, scale={param_scale}'); from scipy.stats import norm # generate random numbers from a normal distribution sample_size = 100 param_loc = 3 # mean param_scale = 2 # standard deviation data_normal = norm.rvs(size=sample_size,loc=param_loc,scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_normal[0:5]) # plot a histogram of the output ax = sns.distplot(data_normal, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Normal Distribution: Sample Size = {sample_size}, loc={param_loc}, scale={param_scale}'); from scipy.stats import norm data_normal = norm.rvs(size=100, loc=3, scale=2) print(data_normal)
code
73072014/cell_11
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import bernoulli from scipy.stats import beta from scipy.stats import beta from scipy.stats import binom from scipy.stats import norm from scipy.stats import norm from scipy.stats import poisson from scipy.stats import uniform import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) sns.set(rc={'figure.figsize': (9.5, 5)}) # import uniform distribution from scipy.stats import uniform # generate random numbers from a uniform distribution sample_size = 10000 param_loc = 5 # left-hand endpoint of the domain interval param_scale = 10 # width of the domain interval data_uniform = uniform.rvs(size=sample_size, loc=param_loc, scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_uniform[0:5]) # plot a histogram of the output ax = sns.distplot(data_uniform, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Uniform Distribution: Sample Size = {sample_size}. loc={param_loc}, scale={param_scale}'); from scipy.stats import norm # generate random numbers from a normal distribution sample_size = 100 param_loc = 3 # mean param_scale = 2 # standard deviation data_normal = norm.rvs(size=sample_size,loc=param_loc,scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_normal[0:5]) # plot a histogram of the output ax = sns.distplot(data_normal, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Normal Distribution: Sample Size = {sample_size}, loc={param_loc}, scale={param_scale}'); # import bernoulli from scipy.stats import bernoulli # generate bernoulli data sample_size = 100000 param_p = 0.3 # probability of success data_bern = bernoulli.rvs(size=sample_size,p=param_p) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_bern[0:5]) # Create the Plot ax = sns.distplot(data_bern, kde=False, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Bernoulli Distribution: Sample Size = {sample_size}, p={param_p}'); ax.legend(); from scipy.stats import binom # Generate Binomial Data sample_size = 10000 param_n = 10 # number of trials param_p = 0.7 # probability of success in one trial data_binom = binom.rvs(size=sample_size, n=param_n,p=param_p,) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_binom[0:5]) # Create the Plot ax = sns.distplot(data_binom, kde=False, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Binomial Distribution: n={param_n}, p={param_p}') ax.legend(); from scipy.stats import poisson # Generate Poisson Data sample_size = 10000 param_mu = 3 #(rate of events per time, often denoted lambda) data_poisson = poisson.rvs(size=sample_size, mu=param_mu) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_poisson[0:5]) # Create the Plot ax = sns.distplot(data_poisson, kde=False, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Poisson Distribution: Sample Size = {sample_size}, mu={param_mu}'); ax.legend(); from scipy.stats import beta # Generate beta Data sample_size = 100000 param_a = 1 param_b = 5 data_beta = beta.rvs(param_a, param_b, size=sample_size) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_beta[0:5]) # Create the Plot ax = sns.distplot(data_beta, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Beta({param_a},{param_b}) Distribution: Sample Size = {sample_size}'); ax.legend(); from scipy.stats import beta sample_size = 100000 param_a = 0.5 param_b = 0.5 data_beta = beta.rvs(param_a, param_b, size=sample_size) print('The first 5 values from this distribution:') print(data_beta[0:5]) ax = sns.distplot(data_beta, kde_kws={'label': 'KDE'}, hist_kws={'label': 'Histogram'}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Beta({param_a},{param_b}) Distribution: Sample Size = {sample_size}') ax.legend()
code
73072014/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import bernoulli from scipy.stats import norm from scipy.stats import norm from scipy.stats import uniform import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) sns.set(rc={'figure.figsize': (9.5, 5)}) # import uniform distribution from scipy.stats import uniform # generate random numbers from a uniform distribution sample_size = 10000 param_loc = 5 # left-hand endpoint of the domain interval param_scale = 10 # width of the domain interval data_uniform = uniform.rvs(size=sample_size, loc=param_loc, scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_uniform[0:5]) # plot a histogram of the output ax = sns.distplot(data_uniform, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Uniform Distribution: Sample Size = {sample_size}. loc={param_loc}, scale={param_scale}'); from scipy.stats import norm # generate random numbers from a normal distribution sample_size = 100 param_loc = 3 # mean param_scale = 2 # standard deviation data_normal = norm.rvs(size=sample_size,loc=param_loc,scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_normal[0:5]) # plot a histogram of the output ax = sns.distplot(data_normal, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Normal Distribution: Sample Size = {sample_size}, loc={param_loc}, scale={param_scale}'); from scipy.stats import bernoulli sample_size = 100000 param_p = 0.3 data_bern = bernoulli.rvs(size=sample_size, p=param_p) print('The first 5 values from this distribution:') print(data_bern[0:5]) ax = sns.distplot(data_bern, kde=False, hist_kws={'label': 'Histogram'}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Bernoulli Distribution: Sample Size = {sample_size}, p={param_p}') ax.legend()
code
73072014/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import bernoulli from scipy.stats import binom from scipy.stats import norm from scipy.stats import norm from scipy.stats import uniform import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) sns.set(rc={'figure.figsize': (9.5, 5)}) # import uniform distribution from scipy.stats import uniform # generate random numbers from a uniform distribution sample_size = 10000 param_loc = 5 # left-hand endpoint of the domain interval param_scale = 10 # width of the domain interval data_uniform = uniform.rvs(size=sample_size, loc=param_loc, scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_uniform[0:5]) # plot a histogram of the output ax = sns.distplot(data_uniform, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Uniform Distribution: Sample Size = {sample_size}. loc={param_loc}, scale={param_scale}'); from scipy.stats import norm # generate random numbers from a normal distribution sample_size = 100 param_loc = 3 # mean param_scale = 2 # standard deviation data_normal = norm.rvs(size=sample_size,loc=param_loc,scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_normal[0:5]) # plot a histogram of the output ax = sns.distplot(data_normal, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Normal Distribution: Sample Size = {sample_size}, loc={param_loc}, scale={param_scale}'); # import bernoulli from scipy.stats import bernoulli # generate bernoulli data sample_size = 100000 param_p = 0.3 # probability of success data_bern = bernoulli.rvs(size=sample_size,p=param_p) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_bern[0:5]) # Create the Plot ax = sns.distplot(data_bern, kde=False, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Bernoulli Distribution: Sample Size = {sample_size}, p={param_p}'); ax.legend(); from scipy.stats import binom sample_size = 10000 param_n = 10 param_p = 0.7 data_binom = binom.rvs(size=sample_size, n=param_n, p=param_p) print('The first 5 values from this distribution:') print(data_binom[0:5]) ax = sns.distplot(data_binom, kde=False, hist_kws={'label': 'Histogram'}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Binomial Distribution: n={param_n}, p={param_p}') ax.legend()
code
73072014/cell_10
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import bernoulli from scipy.stats import beta from scipy.stats import binom from scipy.stats import norm from scipy.stats import norm from scipy.stats import poisson from scipy.stats import uniform import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) sns.set(rc={'figure.figsize': (9.5, 5)}) # import uniform distribution from scipy.stats import uniform # generate random numbers from a uniform distribution sample_size = 10000 param_loc = 5 # left-hand endpoint of the domain interval param_scale = 10 # width of the domain interval data_uniform = uniform.rvs(size=sample_size, loc=param_loc, scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_uniform[0:5]) # plot a histogram of the output ax = sns.distplot(data_uniform, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Uniform Distribution: Sample Size = {sample_size}, loc={param_loc}, scale={param_scale}'); from scipy.stats import norm # generate random numbers from a normal distribution sample_size = 100 param_loc = 3 # mean param_scale = 2 # standard deviation data_normal = norm.rvs(size=sample_size, loc=param_loc, scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_normal[0:5]) # plot a histogram of the output ax = sns.distplot(data_normal, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Normal Distribution: Sample Size = {sample_size}, loc={param_loc}, scale={param_scale}'); # import bernoulli from scipy.stats import bernoulli # generate bernoulli data sample_size = 100000 param_p = 0.3 # probability of success data_bern = bernoulli.rvs(size=sample_size, p=param_p) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_bern[0:5]) # Create the Plot ax = sns.distplot(data_bern, kde=False, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Bernoulli Distribution: Sample Size = {sample_size}, p={param_p}'); ax.legend(); from scipy.stats import binom # Generate Binomial Data sample_size = 10000 param_n = 10 # number of trials param_p = 0.7 # probability of success in one trial data_binom = binom.rvs(size=sample_size, n=param_n, p=param_p) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_binom[0:5]) # Create the Plot ax = sns.distplot(data_binom, kde=False, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Binomial Distribution: n={param_n}, p={param_p}') ax.legend(); from scipy.stats import poisson # Generate Poisson Data sample_size = 10000 param_mu = 3 # rate of events per unit time, often denoted lambda data_poisson = poisson.rvs(size=sample_size, mu=param_mu) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_poisson[0:5]) # Create the Plot ax = sns.distplot(data_poisson, kde=False, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Poisson Distribution: Sample Size = {sample_size}, mu={param_mu}'); ax.legend(); from scipy.stats import beta # Generate Beta Data sample_size = 100000 param_a = 1 # first shape parameter (alpha) param_b = 5 # second shape parameter (beta) data_beta = beta.rvs(param_a, param_b, size=sample_size) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_beta[0:5])
# Create the Plot
ax = sns.distplot(data_beta, kde_kws={'label': 'KDE'}, hist_kws={'label': 'Histogram'})
ax.set(xlabel='x ', ylabel='Frequency', title=f'Beta({param_a},{param_b}) Distribution: Sample Size = {sample_size}')
ax.legend()
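# Optional sanity check (an illustrative sketch, using only the variables defined above):
# a Beta(a, b) variable has mean a / (a + b), so Beta(1, 5) should average near 1/6.
print(f'Sample mean: {data_beta.mean():.4f} vs. theoretical a/(a+b) = {param_a / (param_a + param_b):.4f}')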
code
73072014/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import norm from scipy.stats import uniform import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) sns.set(rc={'figure.figsize': (9.5, 5)}) # import uniform distribution from scipy.stats import uniform # generate random numbers from a uniform distribution sample_size = 10000 param_loc = 5 # left-hand endpoint of the domain interval param_scale = 10 # width of the domain interval data_uniform = uniform.rvs(size=sample_size, loc=param_loc, scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_uniform[0:5]) # plot a histogram of the output ax = sns.distplot(data_uniform, bins=100, kde_kws={"label": "KDE"}, hist_kws={"label": "Histogram"}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Uniform Distribution: Sample Size = {sample_size}, loc={param_loc}, scale={param_scale}'); # generate random numbers from a normal distribution from scipy.stats import norm sample_size = 100 param_loc = 3 # mean param_scale = 2 # standard deviation data_normal = norm.rvs(size=sample_size, loc=param_loc, scale=param_scale) # print a few values from the distribution: print('The first 5 values from this distribution:') print(data_normal[0:5]) # plot a histogram of the output ax = sns.distplot(data_normal, bins=100, kde_kws={'label': 'KDE'}, hist_kws={'label': 'Histogram'}) ax.set(xlabel='x ', ylabel='Frequency', title=f'Normal Distribution: Sample Size = {sample_size}, loc={param_loc}, scale={param_scale}')
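# Optional sanity check (an illustrative sketch, using only the variables defined above):
# the empirical mean and standard deviation should sit near loc and scale,
# up to sampling noise at this small sample size.
print(f'Sample mean: {data_normal.mean():.3f} (loc = {param_loc}), sample std: {data_normal.std():.3f} (scale = {param_scale})')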
code
106194764/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split import gc import numpy as np # linear algebra import os import pandas as pd import shutil import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error import numpy as np import pandas as pd import gc from tqdm import tqdm cols = {'countryCode': np.float32, 'c2': np.float32, 'c4': np.float32, 'size': np.float32, 'mediationProviderVersion': np.int32, 'bidFloorPrice': np.float32, 'sentPrice': np.float32, 'winBid': np.float32, 'c1std_bidWin': np.float32, 'c1max_bidWin': np.float32, 'c1min_bidWin': np.float32, 'c1mean_bidWin': np.float32, 'c1median_bidWin': np.float32, 'c1sem_bidWin': np.float32, 'c1var_bidWin': np.float32, 'deviceIdstd_bidWin': np.float32, 'deviceIdmax_bidWin': np.float32, 'deviceIdmin_bidWin': np.float32, 'deviceIdmean_bidWin': np.float32, 'deviceIdmedian_bidWin': np.float32, 'deviceIdsem_bidWin': np.float32, 'deviceIdvar_bidWin': np.float32, 'unitDisplayTypestd_bidWin': np.float32, 'unitDisplayTypemax_bidWin': np.float32, 'unitDisplayTypemean_bidWin': np.float32, 'unitDisplayTypemedian_bidWin': np.float32, 'unitDisplayTypesem_bidWin': np.float32, 'unitDisplayTypevar_bidWin': np.float32, 'bundleIdstd_bidWin': np.float32, 'bundleIdmax_bidWin': np.float32, 'bundleIdmin_bidWin': np.float32, 'bundleIdmean_bidWin': np.float32, 'bundleIdmedian_bidWin': np.float32, 'bundleIdsem_bidWin': np.float32, 'bundleIdvar_bidWin': np.float32, 'ver': np.float32, 'unitDisplayType_0': np.uint8, 'unitDisplayType_1': np.uint8, 'unitDisplayType_2': np.uint8, 'connectionType_0': np.uint8, 'connectionType_1': np.uint8, 'connectionType_2': np.uint8, 'connectionType_3': np.uint8, 'bundleId_0': np.uint8, 'bundleId_1': np.uint8, 'bundleId_2': np.uint8, 'bundleId_3': np.uint8, 'bundleId_4': np.uint8, 'bundleId_5': np.uint8, 'bundleId_6': np.uint8, 'bundleId_7': np.uint8, 'bundleId_8': np.uint8, 'bundleId_9': np.uint8, 'bundleId_10': np.uint8, 'bundleId_11': np.uint8, 'bundleId_12': np.uint8, 'bundleId_13': np.uint8, 'bundleId_14': np.uint8, 'bundleId_15': np.uint8, 'bundleId_16': np.uint8, 'bundleId_17': np.uint8} for x in range(50): cols[f'c1_{x}'] = np.uint8 df = pd.read_csv('../input/fork-of-eda-item-price/prepared_train.csv', float_precision='round_trip', dtype=cols) s = df.deviceIdstd_bidWin.mean() df['deviceIdstd_bidWin'].fillna(s, inplace=True) s = df.deviceIdvar_bidWin.mean() df['deviceIdvar_bidWin'].fillna(s, inplace=True) s = df.deviceIdsem_bidWin.mean() df['deviceIdsem_bidWin'].fillna(s, inplace=True) train, val = train_test_split(df, test_size=0.33, random_state=42) len(train.columns) import tensorflow as tf import glob, math from IPython.display import clear_output class Auction_Sequence(tf.keras.utils.Sequence): def __init__(self, data, batch_size): self.data = data self.batch_size = batch_size def __len__(self): return math.ceil(len(self.data) / self.batch_size) def __getitem__(self, idx): dc = ['winBid'] batch = self.data[idx * self.batch_size:(idx + 1) * self.batch_size] batch_x = batch.drop(dc, axis=1) batch_y = batch['winBid'] return (batch_x.values, batch_y.values) def on_epoch_end(self): self.data = self.data.sample(frac=1, ignore_index=True, random_state=11) train_seq = Auction_Sequence(train, 4096) test_seq = Auction_Sequence(val, 4096) def get_model(size): tf.random.set_seed(11) np.random.seed(11) activation = tf.keras.layers.LeakyReLU(alpha=0.3) feature = tf.keras.Input(shape=(size,)) hidden_state = tf.keras.layers.Dense(128, activation=activation, 
name='ind')(feature)
    hidden_state = tf.keras.layers.Dense(64, activation=activation)(hidden_state)
    hidden_state = tf.keras.layers.Dense(32, activation=activation)(hidden_state)
    hidden_state = tf.keras.layers.Dense(16, activation=activation)(hidden_state)
    hidden_state = tf.keras.layers.Dense(8, activation=activation)(hidden_state)
    hidden_state = tf.keras.layers.Dense(4, activation=activation)(hidden_state)
    outputs = tf.keras.layers.Dense(1, activation=None)(hidden_state)
    model = tf.keras.models.Model(inputs=feature, outputs=outputs)
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss='mse')
    return model
from tqdm import tqdm
from sklearn.metrics import f1_score
import shutil
import os
path = f'./models/train_model/'
os.makedirs(path, exist_ok=True)
model = get_model(len(train.columns) - 1)
checkpoint = tf.keras.callbacks.ModelCheckpoint(path + 'model-{val_loss:03f}-{epoch:03d}-.h5', verbose=1, monitor='val_loss', save_weights_only=True, save_best_only=True, mode='min')
model.fit(train_seq, epochs=100, validation_data=test_seq, workers=-1, max_queue_size=10, use_multiprocessing=True, callbacks=[checkpoint], verbose=2)
files = glob.glob(os.path.expanduser(f'{path}*'))
best_weight = sorted(files, key=os.path.getmtime)[-1]
print(best_weight)
model.load_weights(best_weight)
shutil.rmtree(path)
score = float(best_weight.split('-')[1])
model.save(f'train_{score}.h5')
del model, best_weight
gc.collect()
tf.keras.backend.clear_session()
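# Note (an editor's sketch): the score parsing above relies on the checkpoint
# filename pattern 'model-{val_loss:03f}-{epoch:03d}-.h5'; for example:
example_name = 'model-0.123456-007-.h5'  # hypothetical filename for illustration
print(float(example_name.split('-')[1]))  # -> 0.123456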
code
106194764/cell_4
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split import numpy as np # linear algebra import pandas as pd from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error import numpy as np import pandas as pd import gc from tqdm import tqdm cols = {'countryCode': np.float32, 'c2': np.float32, 'c4': np.float32, 'size': np.float32, 'mediationProviderVersion': np.int32, 'bidFloorPrice': np.float32, 'sentPrice': np.float32, 'winBid': np.float32, 'c1std_bidWin': np.float32, 'c1max_bidWin': np.float32, 'c1min_bidWin': np.float32, 'c1mean_bidWin': np.float32, 'c1median_bidWin': np.float32, 'c1sem_bidWin': np.float32, 'c1var_bidWin': np.float32, 'deviceIdstd_bidWin': np.float32, 'deviceIdmax_bidWin': np.float32, 'deviceIdmin_bidWin': np.float32, 'deviceIdmean_bidWin': np.float32, 'deviceIdmedian_bidWin': np.float32, 'deviceIdsem_bidWin': np.float32, 'deviceIdvar_bidWin': np.float32, 'unitDisplayTypestd_bidWin': np.float32, 'unitDisplayTypemax_bidWin': np.float32, 'unitDisplayTypemean_bidWin': np.float32, 'unitDisplayTypemedian_bidWin': np.float32, 'unitDisplayTypesem_bidWin': np.float32, 'unitDisplayTypevar_bidWin': np.float32, 'bundleIdstd_bidWin': np.float32, 'bundleIdmax_bidWin': np.float32, 'bundleIdmin_bidWin': np.float32, 'bundleIdmean_bidWin': np.float32, 'bundleIdmedian_bidWin': np.float32, 'bundleIdsem_bidWin': np.float32, 'bundleIdvar_bidWin': np.float32, 'ver': np.float32, 'unitDisplayType_0': np.uint8, 'unitDisplayType_1': np.uint8, 'unitDisplayType_2': np.uint8, 'connectionType_0': np.uint8, 'connectionType_1': np.uint8, 'connectionType_2': np.uint8, 'connectionType_3': np.uint8, 'bundleId_0': np.uint8, 'bundleId_1': np.uint8, 'bundleId_2': np.uint8, 'bundleId_3': np.uint8, 'bundleId_4': np.uint8, 'bundleId_5': np.uint8, 'bundleId_6': np.uint8, 'bundleId_7': np.uint8, 'bundleId_8': np.uint8, 'bundleId_9': np.uint8, 'bundleId_10': np.uint8, 'bundleId_11': np.uint8, 'bundleId_12': np.uint8, 'bundleId_13': np.uint8, 'bundleId_14': np.uint8, 'bundleId_15': np.uint8, 'bundleId_16': np.uint8, 'bundleId_17': np.uint8} for x in range(50): cols[f'c1_{x}'] = np.uint8 df = pd.read_csv('../input/fork-of-eda-item-price/prepared_train.csv', float_precision='round_trip', dtype=cols) s = df.deviceIdstd_bidWin.mean() df['deviceIdstd_bidWin'].fillna(s, inplace=True) s = df.deviceIdvar_bidWin.mean() df['deviceIdvar_bidWin'].fillna(s, inplace=True) s = df.deviceIdsem_bidWin.mean() df['deviceIdsem_bidWin'].fillna(s, inplace=True) train, val = train_test_split(df, test_size=0.33, random_state=42) df.isnull().sum().sum()
code
106194764/cell_5
[ "text_plain_output_1.png" ]
len(train.columns)
code
16131921/cell_9
[ "text_html_output_1.png" ]
import numpy as np # linear algebra import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/insurance.csv') data.describe().T num_data = data.select_dtypes(include=np.number) cat_data = data.select_dtypes(exclude=np.number) encode_cat_data = pd.get_dummies(cat_data) fin_df = [num_data, encode_cat_data] fin_data = pd.concat(fin_df, axis=1) fin_data.head()
code
16131921/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/insurance.csv') data.info()
code
16131921/cell_6
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/insurance.csv') data.describe().T graphs = sns.pairplot(data) graphs.set()
code
16131921/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
!pwd
!ls /kaggle/input
import seaborn as sns
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
code
16131921/cell_11
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/insurance.csv') data.describe().T graphs = sns.pairplot(data) graphs.set() num_data = data.select_dtypes(include=np.number) cat_data = data.select_dtypes(exclude=np.number) encode_cat_data = pd.get_dummies(cat_data) fin_df = [num_data, encode_cat_data] fin_data = pd.concat(fin_df, axis=1) graphs = sns.pairplot(fin_data) graphs.set() boxP = sns.boxplot(data=fin_data.age, orient='h', color='red')
code
16131921/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input'))
code
16131921/cell_10
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/insurance.csv') data.describe().T graphs = sns.pairplot(data) graphs.set() num_data = data.select_dtypes(include=np.number) cat_data = data.select_dtypes(exclude=np.number) encode_cat_data = pd.get_dummies(cat_data) fin_df = [num_data, encode_cat_data] fin_data = pd.concat(fin_df, axis=1) graphs = sns.pairplot(fin_data) graphs.set()
code
16131921/cell_12
[ "text_html_output_1.png" ]
import numpy as np # linear algebra import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/insurance.csv') data.describe().T graphs = sns.pairplot(data) graphs.set() num_data = data.select_dtypes(include=np.number) cat_data = data.select_dtypes(exclude=np.number) encode_cat_data = pd.get_dummies(cat_data) fin_df = [num_data, encode_cat_data] fin_data = pd.concat(fin_df, axis=1) graphs = sns.pairplot(fin_data) graphs.set() boxP = sns.boxplot(data=fin_data.age, orient='h', color='red') fin_data.corr()
code
16131921/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/insurance.csv') data.describe().T
code
16136283/cell_21
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer X_train.shape from sklearn.feature_extraction.text import CountVectorizer vect = CountVectorizer().fit(X_train) len(vect.get_feature_names()) vect.get_feature_names()[0:10] vect.get_feature_names()[::3000]
code
16136283/cell_13
[ "text_html_output_1.png" ]
(y_train[0], X_train[0])
code
16136283/cell_25
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer X_train.shape from sklearn.feature_extraction.text import CountVectorizer vect = CountVectorizer().fit(X_train) len(vect.get_feature_names()) vect.get_feature_names()[0:10] vect.get_feature_names()[::3000] X_train_vectorized = vect.transform(X_train) X_train_vectorized X_train_vectorized.shape
code
16136283/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/Amazon_Unlocked_Mobile.csv') df.info()
code
16136283/cell_30
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/Amazon_Unlocked_Mobile.csv') df.dropna(inplace=True) df = df[df['Rating'] != 3] df['Positively Rated'] = np.where(df['Rating'] > 3, 1, 0) X_train.shape from sklearn.feature_extraction.text import CountVectorizer vect = CountVectorizer().fit(X_train) len(vect.get_feature_names()) vect.get_feature_names()[0:10] vect.get_feature_names()[::3000] X_train_vectorized = vect.transform(X_train) X_train_vectorized X_train_vectorized.shape from sklearn.linear_model import LogisticRegression model = LogisticRegression() model.fit(X_train_vectorized, y_train) pred = model.predict(vect.transform(X_test)) feature_names = np.array(vect.get_feature_names()) sorted_coef_index = model.coef_[0].argsort() print('Smallest Coefs:\n{}\n'.format(feature_names[sorted_coef_index[:10]])) print('Largest Coefs: \n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))
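# Note (sketch): model.coef_[0].argsort() orders feature indices from the most
# negative coefficient to the most positive, so the first ten names lean toward
# the negative ("not positively rated") class and the last ten toward the positive one.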
code
16136283/cell_20
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer X_train.shape from sklearn.feature_extraction.text import CountVectorizer vect = CountVectorizer().fit(X_train) len(vect.get_feature_names()) vect.get_feature_names()[0:10]
code
16136283/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/Amazon_Unlocked_Mobile.csv') df['Brand Name'].value_counts().head()
code
16136283/cell_26
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression X_train.shape from sklearn.feature_extraction.text import CountVectorizer vect = CountVectorizer().fit(X_train) len(vect.get_feature_names()) vect.get_feature_names()[0:10] vect.get_feature_names()[::3000] X_train_vectorized = vect.transform(X_train) X_train_vectorized X_train_vectorized.shape from sklearn.linear_model import LogisticRegression model = LogisticRegression() model.fit(X_train_vectorized, y_train)
code
16136283/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/Amazon_Unlocked_Mobile.csv') df.dropna(inplace=True) df = df[df['Rating'] != 3] df['Positively Rated'].mean()
code
16136283/cell_19
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer X_train.shape from sklearn.feature_extraction.text import CountVectorizer vect = CountVectorizer().fit(X_train) len(vect.get_feature_names())
code
16136283/cell_18
[ "text_html_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer X_train.shape from sklearn.feature_extraction.text import CountVectorizer vect = CountVectorizer().fit(X_train) vect
code
16136283/cell_28
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score X_train.shape from sklearn.feature_extraction.text import CountVectorizer vect = CountVectorizer().fit(X_train) len(vect.get_feature_names()) vect.get_feature_names()[0:10] vect.get_feature_names()[::3000] X_train_vectorized = vect.transform(X_train) X_train_vectorized X_train_vectorized.shape from sklearn.linear_model import LogisticRegression model = LogisticRegression() model.fit(X_train_vectorized, y_train) pred = model.predict(vect.transform(X_test)) from sklearn.metrics import roc_auc_score roc_auc_score(y_test, pred)
code
16136283/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/Amazon_Unlocked_Mobile.csv') df.head()
code
16136283/cell_24
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer X_train.shape from sklearn.feature_extraction.text import CountVectorizer vect = CountVectorizer().fit(X_train) len(vect.get_feature_names()) vect.get_feature_names()[0:10] vect.get_feature_names()[::3000] X_train_vectorized = vect.transform(X_train) X_train_vectorized
code