path: stringlengths (13 to 17)
screenshot_names: sequencelengths (1 to 873)
code: stringlengths (0 to 40.4k)
cell_type: stringclasses (1 value)
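For context, a minimal sketch of how a preview with this schema could be loaded and inspected using the Hugging Face datasets library; the dataset identifier "user/kaggle-notebook-cells" and the "train" split are hypothetical placeholders, since the actual repository name is not given here.

# Hedged sketch: load a dataset matching the schema above and inspect a few records.
# "user/kaggle-notebook-cells" is a placeholder id, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/kaggle-notebook-cells", split="train")

for record in ds.select(range(3)):
    print(record["path"])              # e.g. "128023684/cell_26"
    print(record["screenshot_names"])  # list of rendered output image names
    print(record["cell_type"])         # always "code" (stringclasses: 1 value)
    print(record["code"][:200])        # first 200 characters of the cell source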
128023684/cell_26
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])['tran_date', 'prod_subcat'].count()
df_third.columns
df_third_part = df_third[df_third.Rate > 0].reset_index()
df_combined = pd.merge(df_third_part, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
md = pd.DataFrame(df_combined[['cust_id', 'prod_cat', 'prod_subcat', 'tran_date']].value_counts())
combined_pair = df_combined[['prod_cat', 'Store_type']].drop_duplicates().to_dict('records')
contribution_dictionary = {}
for j in range(len(combined_pair)):
    df_sub_part = df_combined[(df_combined.prod_cat == combined_pair[j]['prod_cat']) & (df_combined.Store_type == combined_pair[j]['Store_type'])].reset_index()
    contribution_dictionary[combined_pair[j]['prod_cat'], combined_pair[j]['Store_type']] = np.round(100 * df_sub_part['total_amt'].sum() / df_combined['total_amt'].sum(), 2)
import operator
sorted_x = {k: v for k, v in sorted(contribution_dictionary.items(), key=lambda item: item[1], reverse=True)}
df_pie = pd.DataFrame({'item and channel': list(sorted_x.keys()), 'contribution': list(sorted_x.values())}, index=list(sorted_x.keys()))
df_pie
code
128023684/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])['tran_date', 'prod_subcat'].count()
df_combined[['cust_id', 'prod_cat', 'prod_subcat', 'tran_date']].value_counts()
code
128023684/cell_19
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])['tran_date', 'prod_subcat'].count()
df_third.columns
df_third_part = df_third[df_third.Rate > 0].reset_index()
df_combined = pd.merge(df_third_part, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
combined_pair = df_combined[['prod_cat', 'Store_type']].drop_duplicates().to_dict('records')
contribution_dictionary = {}
for j in range(len(combined_pair)):
    df_sub_part = df_combined[(df_combined.prod_cat == combined_pair[j]['prod_cat']) & (df_combined.Store_type == combined_pair[j]['Store_type'])].reset_index()
    contribution_dictionary[combined_pair[j]['prod_cat'], combined_pair[j]['Store_type']] = np.round(100 * df_sub_part['total_amt'].sum() / df_combined['total_amt'].sum(), 2)
contribution_dictionary
code
128023684/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128023684/cell_32
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])['tran_date', 'prod_subcat'].count()
df_third.columns
df_third_part = df_third[df_third.Rate > 0].reset_index()
df_combined = pd.merge(df_third_part, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
md = pd.DataFrame(df_combined[['cust_id', 'prod_cat', 'prod_subcat', 'tran_date']].value_counts())
combined_pair = df_combined[['prod_cat', 'Store_type']].drop_duplicates().to_dict('records')
contribution_dictionary = {}
for j in range(len(combined_pair)):
    df_sub_part = df_combined[(df_combined.prod_cat == combined_pair[j]['prod_cat']) & (df_combined.Store_type == combined_pair[j]['Store_type'])].reset_index()
    contribution_dictionary[combined_pair[j]['prod_cat'], combined_pair[j]['Store_type']] = np.round(100 * df_sub_part['total_amt'].sum() / df_combined['total_amt'].sum(), 2)
import operator
sorted_x = {k: v for k, v in sorted(contribution_dictionary.items(), key=lambda item: item[1], reverse=True)}
df_pie = pd.DataFrame({'item and channel': list(sorted_x.keys()), 'contribution': list(sorted_x.values())}, index=list(sorted_x.keys()))
channel_and_product_wise = {}
for i in range(df_pie.shape[0]):
    df_part = df_combined[(df_combined.prod_cat == str(df_pie[['item and channel']].values.tolist()[i][0][0])) & (df_combined.Store_type == str(df_pie[['item and channel']].values.tolist()[i][0][1]))]
    channel_and_product_wise[df_pie[['item and channel']].values.tolist()[i][0][0] + ',' + df_pie[['item and channel']].values.tolist()[i][0][1]] = np.round(df_part.groupby('cust_id')['total_amt'].sum(), 2).nlargest(100).to_dict()
df_second.keys()
df_second.sample(10)
user_lists = []
for key, values in channel_and_product_wise.items():
    user_lists = list(values.keys())
    df_second_part = df_second[df_second.customer_Id.isin(user_lists)].reset_index()
    print(df_second_part.age.value_counts())
code
128023684/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])['tran_date', 'prod_subcat'].count()
df_third.columns
df_third_part = df_third[df_third.Rate > 0].reset_index()
df_combined = pd.merge(df_third_part, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
md = pd.DataFrame(df_combined[['cust_id', 'prod_cat', 'prod_subcat', 'tran_date']].value_counts())
combined_pair = df_combined[['prod_cat', 'Store_type']].drop_duplicates().to_dict('records')
contribution_dictionary = {}
for j in range(len(combined_pair)):
    df_sub_part = df_combined[(df_combined.prod_cat == combined_pair[j]['prod_cat']) & (df_combined.Store_type == combined_pair[j]['Store_type'])].reset_index()
    contribution_dictionary[combined_pair[j]['prod_cat'], combined_pair[j]['Store_type']] = np.round(100 * df_sub_part['total_amt'].sum() / df_combined['total_amt'].sum(), 2)
import operator
sorted_x = {k: v for k, v in sorted(contribution_dictionary.items(), key=lambda item: item[1], reverse=True)}
df_pie = pd.DataFrame({'item and channel': list(sorted_x.keys()), 'contribution': list(sorted_x.values())}, index=list(sorted_x.keys()))
channel_and_product_wise = {}
for i in range(df_pie.shape[0]):
    print(df_pie[['item and channel']].values.tolist()[i][0][0], '&', df_pie[['item and channel']].values.tolist()[i][0][1])
    df_part = df_combined[(df_combined.prod_cat == str(df_pie[['item and channel']].values.tolist()[i][0][0])) & (df_combined.Store_type == str(df_pie[['item and channel']].values.tolist()[i][0][1]))]
    channel_and_product_wise[df_pie[['item and channel']].values.tolist()[i][0][0] + ',' + df_pie[['item and channel']].values.tolist()[i][0][1]] = np.round(df_part.groupby('cust_id')['total_amt'].sum(), 2).nlargest(100).to_dict()
code
128023684/cell_17
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])['tran_date', 'prod_subcat'].count()
df_third.columns
df_third_part = df_third[df_third.Rate > 0].reset_index()
df_combined = pd.merge(df_third_part, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
cust_id = 272098
tran_date = '13-05-2013'
df_combined[(df_combined['cust_id'] == cust_id) & (df_combined['tran_date'] == tran_date)]
code
128023684/cell_31
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_second.keys()
df_second.sample(10)
code
128023684/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_third.columns
df_third[(df_third['cust_id'] == 268663) & (df_third['tran_date'] == '22-04-2012')]
code
128023684/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])['tran_date', 'prod_subcat'].count()
code
128023684/cell_27
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])['tran_date', 'prod_subcat'].count()
df_third.columns
df_third_part = df_third[df_third.Rate > 0].reset_index()
df_combined = pd.merge(df_third_part, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
combined_pair = df_combined[['prod_cat', 'Store_type']].drop_duplicates().to_dict('records')
contribution_dictionary = {}
for j in range(len(combined_pair)):
    df_sub_part = df_combined[(df_combined.prod_cat == combined_pair[j]['prod_cat']) & (df_combined.Store_type == combined_pair[j]['Store_type'])].reset_index()
    contribution_dictionary[combined_pair[j]['prod_cat'], combined_pair[j]['Store_type']] = np.round(100 * df_sub_part['total_amt'].sum() / df_combined['total_amt'].sum(), 2)
df_combined.head(5)
code
128023684/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])['tran_date', 'prod_subcat'].count()
df_combined[(df_combined['cust_id'] == 268663) & (df_combined['tran_date'] == '22-04-2012')]
code
128023684/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
df_third.head(10)
code
129012029/cell_21
[ "text_html_output_1.png" ]
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
from matplotlib.pyplot import figure
df = yf.download(ticker, start='2020-05-10', end='2021-05-10')
from matplotlib.pyplot import figure
figure(figsize=(15, 7), dpi=80)
plt.plot(df.Close)
plt.show()
code
129012029/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
tesla.head()
code
129012029/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
from matplotlib.pyplot import figure
df = yf.download(ticker, start='2020-05-10', end='2021-05-10')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1d')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='5d')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1mo')
from matplotlib.pyplot import figure
figure(figsize=(15, 7), dpi=80)
plt.plot(df.Close)
plt.show()
code
129012029/cell_23
[ "text_plain_output_1.png" ]
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
from matplotlib.pyplot import figure
df = yf.download(ticker, start='2020-05-10', end='2021-05-10')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1d')
from matplotlib.pyplot import figure
figure(figsize=(15, 7), dpi=80)
plt.plot(df.Close)
plt.show()
code
129012029/cell_20
[ "text_plain_output_1.png" ]
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
df = yf.download(ticker, start='2020-05-10', end='2021-05-10')
df.info()
code
129012029/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
from matplotlib.pyplot import figure
df = yf.download(ticker, start='2020-05-10', end='2021-05-10')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1d')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='5d')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1mo')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='6mo')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1y')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1d', interval='1m')
from matplotlib.pyplot import figure
figure(figsize=(15, 7), dpi=80)
plt.plot(df.Close)
plt.show()
code
129012029/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
from matplotlib.pyplot import figure
df = yf.download(ticker, start='2020-05-10', end='2021-05-10')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1d')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='5d')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1mo')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='6mo')
from matplotlib.pyplot import figure
figure(figsize=(15, 7), dpi=80)
plt.plot(df.Close)
plt.show()
code
129012029/cell_2
[ "image_output_1.png" ]
!pip install yfinance
code
129012029/cell_11
[ "text_plain_output_1.png" ]
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
tesla.describe()
code
129012029/cell_19
[ "text_html_output_1.png" ]
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
df = yf.download(ticker, start='2020-05-10', end='2021-05-10')
df
code
129012029/cell_18
[ "text_plain_output_1.png" ]
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
df = yf.download(ticker, start='2020-05-10', end='2021-05-10')
code
129012029/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
code
129012029/cell_24
[ "image_output_1.png" ]
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
from matplotlib.pyplot import figure
df = yf.download(ticker, start='2020-05-10', end='2021-05-10')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1d')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='5d')
from matplotlib.pyplot import figure
figure(figsize=(15, 7), dpi=80)
plt.plot(df.Close)
plt.show()
code
129012029/cell_14
[ "text_html_output_1.png" ]
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
from matplotlib.pyplot import figure
figure(figsize=(15, 7), dpi=80)
plt.plot(tesla.Close)
plt.show()
code
129012029/cell_10
[ "text_plain_output_1.png" ]
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
tesla.info()
code
129012029/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import yfinance as yf
ticker = 'tsla'
tesla = yf.download(ticker)
from matplotlib.pyplot import figure
df = yf.download(ticker, start='2020-05-10', end='2021-05-10')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1d')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='5d')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1mo')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='6mo')
from matplotlib.pyplot import figure
df = yf.download(ticker, period='1y')
from matplotlib.pyplot import figure
figure(figsize=(15, 7), dpi=80)
plt.plot(df.Close)
plt.show()
code
2017020/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from pandas import DataFrame
from pandas import Series
import matplotlib.pyplot as plt
data = pd.read_csv('../input/Top_hashtag.csv')
data.shape
code
2017020/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2017020/cell_5
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
left = [1, 2, 3, 4, 5]
height = [4967, 6833, 893, 813, 3473]
tick_label = ['love', 'freind', 'beachfamily', 'yellow']
plt.bar(left, height, tick_label=tick_label, width=0.8, color=['blue', 'blue'])
plt.xlabel('x - axis')
plt.ylabel('y - axis')
plt.title('My bar chart!')
plt.show()
code
105187552/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/penguins/penguins.csv')
'In order to take a closer look at our dataset, we will use head() to print\nthe first five observations of our dataset and tail() to print the last five observations.'
"""Now we will find the total no. of rows and columns in the dataset using .shape"""
df.shape
"""Checking the data-types of the columns, along with finding whether they contain null values or not."""
"""describe() function provides us with statistical summary. This function returns the count,mean,standard deviation,minimum and maximum values and the quantiles of the data."""
df.describe()
code
105187552/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/penguins/penguins.csv')
'In order to take a closer look at our dataset, we will use head() to print\nthe first five observations of our dataset and tail() to print the last five observations.'
df.tail()
code
105187552/cell_6
[ "text_plain_output_1.png" ]
"""Here, we conclude that our dataset comprises of 344 observations and 9 characteristics."""
code
105187552/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105187552/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/penguins/penguins.csv')
'In order to take a closer look at our dataset, we will use head() to print\nthe first five observations of our dataset and tail() to print the last five observations.'
"""Now we will find the total no. of rows and columns in the dataset using .shape"""
df.shape
"""Checking the data-types of the columns, along with finding whether they contain null values or not."""
df.info()
code
105187552/cell_8
[ "text_html_output_1.png" ]
"""So the conclusion is that our data contains float, integer and string values(object). Also,there is no null/missing values. """
code
105187552/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/penguins/penguins.csv')
'In order to take a closer look at our dataset, we will use head() to print\nthe first five observations of our dataset and tail() to print the last five observations.'
df.head()
code
105187552/cell_10
[ "text_html_output_1.png" ]
import seaborn as sns
data = sns.load_dataset('penguins')
code
105187552/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/penguins/penguins.csv')
'In order to take a closer look at our dataset, we will use head() to print\nthe first five observations of our dataset and tail() to print the last five observations.'
"""Now we will find the total no. of rows and columns in the dataset using .shape"""
df.shape
code
89130324/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/nytfug/train.csv')
test_data = pd.read_csv('/kaggle/input/nytfug/test.csv')
train_data.shape
train_data.columns
code
89130324/cell_20
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf = clf.fit(X_train, y_train)
clf.score(X_train, y_train)
code
89130324/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/nytfug/train.csv')
test_data = pd.read_csv('/kaggle/input/nytfug/test.csv')
train_data.shape
test_data.shape
train_data.columns
train_data.isnull().sum()
test_data.isnull().sum()
drop_cols = ['id', 'obj_ID', 'run_ID', 'MJD']
train_data.drop(columns=drop_cols, inplace=True)
submission_df = pd.DataFrame({'id': test_data.id})
test_data.drop(columns=drop_cols, inplace=True)
sns.countplot(submission_df.label)
code
89130324/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/nytfug/train.csv')
test_data = pd.read_csv('/kaggle/input/nytfug/test.csv')
train_data.shape
train_data.columns
train_data.isnull().sum()
code
89130324/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89130324/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/nytfug/train.csv')
test_data = pd.read_csv('/kaggle/input/nytfug/test.csv')
train_data.shape
code
89130324/cell_18
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/nytfug/train.csv')
test_data = pd.read_csv('/kaggle/input/nytfug/test.csv')
train_data.shape
test_data.shape
train_data.columns
train_data.isnull().sum()
test_data.isnull().sum()
drop_cols = ['id', 'obj_ID', 'run_ID', 'MJD']
train_data.drop(columns=drop_cols, inplace=True)
submission_df = pd.DataFrame({'id': test_data.id})
test_data.drop(columns=drop_cols, inplace=True)
y = train_data['label']
X = train_data.drop(columns=['label'])
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=18)
print(len(X_train), len(X_test))
code
89130324/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/nytfug/train.csv')
test_data = pd.read_csv('/kaggle/input/nytfug/test.csv')
test_data.shape
code
89130324/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/nytfug/train.csv')
test_data = pd.read_csv('/kaggle/input/nytfug/test.csv')
train_data.shape
test_data.shape
train_data.columns
train_data.isnull().sum()
test_data.isnull().sum()
drop_cols = ['id', 'obj_ID', 'run_ID', 'MJD']
train_data.drop(columns=drop_cols, inplace=True)
submission_df = pd.DataFrame({'id': test_data.id})
test_data.drop(columns=drop_cols, inplace=True)
sns.heatmap(train_data.corr(), annot=True)
code
89130324/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf = clf.fit(X_train, y_train)
clf.score(X_train, y_train)
clf.score(X_test, y_test)
code
89130324/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/nytfug/train.csv')
test_data = pd.read_csv('/kaggle/input/nytfug/test.csv')
train_data.shape
train_data.columns
train_data.head()
code
89130324/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/nytfug/train.csv')
test_data = pd.read_csv('/kaggle/input/nytfug/test.csv')
test_data.shape
test_data.isnull().sum()
code
128017503/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'], ax = axes[1][2])
plt.tight_layout()
plt.show()
plt.figure(figsize=(10, 10))
mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
sns.heatmap(train.corr(), mask=mask, annot=True, cmap='Blues')
plt.show()
code
128017503/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
exercise = pd.read_csv('/kaggle/input/fmendesdat263xdemos/exercise.csv')
calories = pd.read_csv('/kaggle/input/fmendesdat263xdemos/calories.csv')
exercise['Calories_Burned'] = calories['Calories']
exercise = exercise.drop(['User_ID'], axis=1)
exercise
code
128017503/cell_2
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import warnings
import pandas as pd
import numpy as np
import random
import os
import gc
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Ridge
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
code
128017503/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()
code
128017503/cell_19
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'], ax = axes[1][2])
plt.tight_layout()
plt.show()
mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
plt.figure(figsize=(5, 5))
plt.pie(train.Gender.value_counts(), labels=train.Gender.value_counts().index, autopct='%.2f%%')
plt.legend()
plt.show()
code
128017503/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'], ax = axes[1][2])
plt.tight_layout()
plt.show()
mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
plt.plot(train['Age'], train['Calories_Burned'], 'g*')
plt.title('Age vs Calories Burned')
plt.xlabel('Age')
plt.ylabel('Calories Burned')
plt.show()
plt.plot(train['Height'], train['Calories_Burned'], 'g*')
plt.title('Height vs Calories Burned')
plt.xlabel('Height')
plt.ylabel('Calories Burned')
plt.show()
plt.plot(train['Weight'], train['Calories_Burned'], 'g*')
plt.title('Weight vs Calories Burned')
plt.xlabel('Weight')
plt.ylabel('Calories Burned')
plt.show()
code
128017503/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'], ax = axes[1][2])
plt.tight_layout()
plt.show()
mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
plt.plot(train['Heart_Rate'], train['Calories_Burned'], 'go')
plt.title('Heart_Rate vs Calories Burned')
plt.xlabel('Heart_Rate')
plt.ylabel('Calories Burned')
plt.show()
plt.plot(train['Body_Temp'], train['Calories_Burned'], 'go')
plt.title('Body Temp vs Calories Burned')
plt.xlabel('Body Temp')
plt.ylabel('Calories Burned')
plt.show()
plt.plot(train['Duration'], train['Calories_Burned'], 'go')
plt.title('Duration vs Calories Burned')
plt.xlabel('Duration')
plt.ylabel('Calories Burned')
plt.show()
code
128017503/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'], ax = axes[1][2])
plt.tight_layout()
plt.show()
mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
plt.figure(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, cmap='YlOrRd')
plt.show()
code
105196788/cell_21
[ "text_html_output_1.png" ]
from sklearn.linear_model import Ridge, RidgeCV
import numpy as np
import numpy as np # linear algebra
from sklearn.linear_model import Ridge, RidgeCV
alphas = np.random.uniform(0, 10, 50)
ridge_cv = RidgeCV(alphas=alphas, cv=10, normalize=True)
ridge_cv.fit(X_train, y_train)
alpha = ridge_cv.alpha_
alpha
ridge = Ridge(alpha=ridge_cv.alpha_)
ridge.fit(X_train, y_train)
code
105196788/cell_13
[ "image_output_1.png" ]
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
X = df.drop(columns='MEDV', axis=1)
y = df['MEDV']
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif = pd.DataFrame()
vif['VIF'] = [variance_inflation_factor(X_scaled, i) for i in range(X_scaled.shape[1])]
vif['Features'] = X.columns
vif
X.drop(columns=['TAX'], axis=1)
code
105196788/cell_25
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.preprocessing import StandardScaler
from statsmodels.stats.outliers_influence import variance_inflation_factor
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
X = df.drop(columns='MEDV', axis=1)
y = df['MEDV']
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif = pd.DataFrame()
vif['VIF'] = [variance_inflation_factor(X_scaled, i) for i in range(X_scaled.shape[1])]
vif['Features'] = X.columns
vif
X.drop(columns=['TAX'], axis=1)
from sklearn.linear_model import Ridge, RidgeCV
alphas = np.random.uniform(0, 10, 50)
ridge_cv = RidgeCV(alphas=alphas, cv=10, normalize=True)
ridge_cv.fit(X_train, y_train)
alpha = ridge_cv.alpha_
alpha
ridge = Ridge(alpha=ridge_cv.alpha_)
ridge.fit(X_train, y_train)
ridge.score(X_train, y_train)
ridge.score(X_test, y_test)

def adj_r2(X, y, model):
    r2 = model.score(X, y)
    n = X.shape[0]
    p = X.shape[1]
    adjusted_r2 = 1 - (1 - r2) * (n - 1) / (n - p - 1)
    return adjusted_r2

print(adj_r2(X_train, y_train, ridge))
code
105196788/cell_4
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
print(df)
code
105196788/cell_23
[ "text_html_output_1.png" ]
from sklearn.linear_model import Ridge, RidgeCV
import numpy as np
import numpy as np # linear algebra
from sklearn.linear_model import Ridge, RidgeCV
alphas = np.random.uniform(0, 10, 50)
ridge_cv = RidgeCV(alphas=alphas, cv=10, normalize=True)
ridge_cv.fit(X_train, y_train)
alpha = ridge_cv.alpha_
alpha
ridge = Ridge(alpha=ridge_cv.alpha_)
ridge.fit(X_train, y_train)
ridge.score(X_train, y_train)
ridge.score(X_test, y_test)
code
105196788/cell_20
[ "image_output_1.png" ]
from sklearn.linear_model import Ridge, RidgeCV
import numpy as np
import numpy as np # linear algebra
from sklearn.linear_model import Ridge, RidgeCV
alphas = np.random.uniform(0, 10, 50)
ridge_cv = RidgeCV(alphas=alphas, cv=10, normalize=True)
ridge_cv.fit(X_train, y_train)
alpha = ridge_cv.alpha_
alpha
code
105196788/cell_6
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
plt.figure(figsize=(20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.distplot(df[column])
        plt.xlabel(column, fontsize=15)
    plotnumber += 1
plt.tight_layout()
plt.show()
code
105196788/cell_26
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.preprocessing import StandardScaler
from statsmodels.stats.outliers_influence import variance_inflation_factor
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
X = df.drop(columns='MEDV', axis=1)
y = df['MEDV']
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif = pd.DataFrame()
vif['VIF'] = [variance_inflation_factor(X_scaled, i) for i in range(X_scaled.shape[1])]
vif['Features'] = X.columns
vif
X.drop(columns=['TAX'], axis=1)
from sklearn.linear_model import Ridge, RidgeCV
alphas = np.random.uniform(0, 10, 50)
ridge_cv = RidgeCV(alphas=alphas, cv=10, normalize=True)
ridge_cv.fit(X_train, y_train)
alpha = ridge_cv.alpha_
alpha
ridge = Ridge(alpha=ridge_cv.alpha_)
ridge.fit(X_train, y_train)
ridge.score(X_train, y_train)
ridge.score(X_test, y_test)

def adj_r2(X, y, model):
    r2 = model.score(X, y)
    n = X.shape[0]
    p = X.shape[1]
    adjusted_r2 = 1 - (1 - r2) * (n - 1) / (n - p - 1)
    return adjusted_r2

print(adj_r2(X_train, y_train, ridge))
code
105196788/cell_11
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
X = df.drop(columns='MEDV', axis=1)
y = df['MEDV']
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
X_scaled
code
105196788/cell_19
[ "text_html_output_1.png" ]
from sklearn.linear_model import Ridge, RidgeCV
import numpy as np
import numpy as np # linear algebra
from sklearn.linear_model import Ridge, RidgeCV
alphas = np.random.uniform(0, 10, 50)
ridge_cv = RidgeCV(alphas=alphas, cv=10, normalize=True)
ridge_cv.fit(X_train, y_train)
code
105196788/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105196788/cell_7
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.distplot(df[column])
        plt.xlabel(column, fontsize = 15)
    plotnumber += 1
plt.tight_layout()
plt.show()
plt.figure(figsize=(20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.scatterplot(x=df['MEDV'], y=df[column])
    plotnumber += 1
plt.tight_layout()
plt.show()
code
105196788/cell_8
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.distplot(df[column])
        plt.xlabel(column, fontsize = 15)
    plotnumber += 1
plt.tight_layout()
plt.show()
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.scatterplot(x = df['MEDV'], y = df[column])
    plotnumber += 1
plt.tight_layout()
plt.show()
plt.figure(figsize=(20, 8))
sns.boxplot(data=df, width=0.8)
plt.show()
code
105196788/cell_15
[ "image_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.formula.api as smf
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.distplot(df[column])
        plt.xlabel(column, fontsize = 15)
    plotnumber += 1
plt.tight_layout()
plt.show()
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.scatterplot(x = df['MEDV'], y = df[column])
    plotnumber += 1
plt.tight_layout()
plt.show()
X = df.drop(columns='MEDV', axis=1)
y = df['MEDV']
# Heatmap
fig, ax = plt.subplots(figsize = (16, 8))
sns.heatmap(df.corr(), annot = True, fmt = '1.2f', annot_kws = {'size' : 10}, linewidth = 1)
plt.show()
import statsmodels.formula.api as smf
lm = smf.ols(formula='MEDV ~ RAD', data=df).fit()
lm.summary()
code
105196788/cell_16
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.formula.api as smf
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.distplot(df[column])
        plt.xlabel(column, fontsize = 15)
    plotnumber += 1
plt.tight_layout()
plt.show()
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.scatterplot(x = df['MEDV'], y = df[column])
    plotnumber += 1
plt.tight_layout()
plt.show()
X = df.drop(columns='MEDV', axis=1)
y = df['MEDV']
# Heatmap
fig, ax = plt.subplots(figsize = (16, 8))
sns.heatmap(df.corr(), annot = True, fmt = '1.2f', annot_kws = {'size' : 10}, linewidth = 1)
plt.show()
import statsmodels.formula.api as smf
lm = smf.ols(formula='MEDV ~ RAD', data=df).fit()
lm.summary()
lm = smf.ols(formula='MEDV ~ TAX', data=df).fit()
lm.summary()
code
105196788/cell_17
[ "text_html_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.distplot(df[column])
        plt.xlabel(column, fontsize = 15)
    plotnumber += 1
plt.tight_layout()
plt.show()
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.scatterplot(x = df['MEDV'], y = df[column])
    plotnumber += 1
plt.tight_layout()
plt.show()
X = df.drop(columns='MEDV', axis=1)
y = df['MEDV']
# Heatmap
fig, ax = plt.subplots(figsize = (16, 8))
sns.heatmap(df.corr(), annot = True, fmt = '1.2f', annot_kws = {'size' : 10}, linewidth = 1)
plt.show()
df.drop(columns='RAD', axis=1, inplace=True)
df.head()
code
105196788/cell_14
[ "image_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.distplot(df[column])
        plt.xlabel(column, fontsize = 15)
    plotnumber += 1
plt.tight_layout()
plt.show()
plt.figure(figsize = (20, 15))
plotnumber = 1
for column in df:
    if plotnumber <= 14:
        ax = plt.subplot(3, 5, plotnumber)
        sns.scatterplot(x = df['MEDV'], y = df[column])
    plotnumber += 1
plt.tight_layout()
plt.show()
X = df.drop(columns='MEDV', axis=1)
y = df['MEDV']
fig, ax = plt.subplots(figsize=(16, 8))
sns.heatmap(df.corr(), annot=True, fmt='1.2f', annot_kws={'size': 10}, linewidth=1)
plt.show()
code
105196788/cell_22
[ "text_html_output_1.png" ]
from sklearn.linear_model import Ridge, RidgeCV
import numpy as np
import numpy as np # linear algebra
from sklearn.linear_model import Ridge, RidgeCV
alphas = np.random.uniform(0, 10, 50)
ridge_cv = RidgeCV(alphas=alphas, cv=10, normalize=True)
ridge_cv.fit(X_train, y_train)
alpha = ridge_cv.alpha_
alpha
ridge = Ridge(alpha=ridge_cv.alpha_)
ridge.fit(X_train, y_train)
ridge.score(X_train, y_train)
code
105196788/cell_12
[ "text_html_output_1.png" ]
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
X = df.drop(columns='MEDV', axis=1)
y = df['MEDV']
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif = pd.DataFrame()
vif['VIF'] = [variance_inflation_factor(X_scaled, i) for i in range(X_scaled.shape[1])]
vif['Features'] = X.columns
vif
code
105196788/cell_5
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['MEDV'] = data.target
df.head()
code
105210042/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
df_v = pd.DataFrame(data['symboling'].value_counts()).reset_index().rename(columns={'index': 'symboling', 'symboling': 'count'})
plt.figure(figsize=(12, 8))
sns.set_palette('bright')
plt.title('Car Price Plot')
sns.histplot(data['price'])
plt.show()
code
105210042/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
data.head()
code
105210042/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.info()
code
105210042/cell_34
[ "text_plain_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_absolute_error , r2_score , mean_squared_error
import numpy as np
from sklearn.linear_model import LinearRegression
Lr = LinearRegression()
Lr.fit(X_train, y_train)
Lr.score(X_test, y_test)
from sklearn.linear_model import Lasso
LO = Lasso()
LO.fit(X_train, y_train)
LO.score(X_test, y_test)
from sklearn.linear_model import Ridge
RD = Ridge()
RD.fit(X_train, y_train)
RD.score(X_test, y_test)
print('Test RMSE', np.sqrt(mean_squared_error(y_test, RD.predict(X_test))))
print('Train RMSE', np.sqrt(mean_squared_error(y_train, RD.predict(X_train))))
print('R2 Test', r2_score(y_test, RD.predict(X_test)))
code
105210042/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
df_v = pd.DataFrame(data['symboling'].value_counts()).reset_index().rename(columns={'index': 'symboling', 'symboling': 'count'})
data.duplicated().sum()
code
105210042/cell_30
[ "image_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.linear_model import Lasso
LO = Lasso()
LO.fit(X_train, y_train)
LO.score(X_test, y_test)
code
105210042/cell_33
[ "text_plain_output_1.png" ]
from sklearn.linear_model import Ridge
from sklearn.linear_model import Ridge
RD = Ridge()
RD.fit(X_train, y_train)
RD.score(X_test, y_test)
code
105210042/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
df_v = pd.DataFrame(data['symboling'].value_counts()).reset_index().rename(columns={'index': 'symboling', 'symboling': 'count'})
plt.figure(figsize=(15, 7))
sns.heatmap(data.corr(), annot=True, cmap='Blues')
plt.title('Data Correlation', size=15)
plt.ylabel('Columns', size=15)
plt.xlabel('Columns', size=15)
plt.show()
code
105210042/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
data.describe()
code
105210042/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
df_v = pd.DataFrame(data['symboling'].value_counts()).reset_index().rename(columns={'index': 'symboling', 'symboling': 'count'})
sns.pairplot(data[['horsepower', 'price', 'symboling']], hue='symboling')
code
105210042/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
data.describe(include=['O'])
code
105210042/cell_18
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
df_v = pd.DataFrame(data['symboling'].value_counts()).reset_index().rename(columns={'index': 'symboling', 'symboling': 'count'})
sns.scatterplot(x='horsepower', y='price', data=data, color='b')
code
105210042/cell_28
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error , r2_score , mean_squared_error
import numpy as np
from sklearn.linear_model import LinearRegression
Lr = LinearRegression()
Lr.fit(X_train, y_train)
Lr.score(X_test, y_test)
print('Test RMSE', np.sqrt(mean_squared_error(y_test, Lr.predict(X_test))))
print('Train RMSE', np.sqrt(mean_squared_error(y_train, Lr.predict(X_train))))
print('R2 Test', r2_score(y_test, Lr.predict(X_test)))
code
105210042/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
df_v = pd.DataFrame(data['symboling'].value_counts()).reset_index().rename(columns={'index': 'symboling', 'symboling': 'count'})
sns.barplot(x='symboling', y='count', data=df_v)
code
105210042/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
df_v = pd.DataFrame(data['symboling'].value_counts()).reset_index().rename(columns={'index': 'symboling', 'symboling': 'count'})
sns.boxplot(x='symboling', y='price', data=data, palette='Pastel1')
code
105210042/cell_3
[ "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.head(5)
code
105210042/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
df_v = pd.DataFrame(data['symboling'].value_counts()).reset_index().rename(columns={'index': 'symboling', 'symboling': 'count'})
sns.scatterplot(x='wheelbase', y='price', data=data, color='purple')
code
105210042/cell_31
[ "image_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error , r2_score , mean_squared_error
import numpy as np
from sklearn.linear_model import LinearRegression
Lr = LinearRegression()
Lr.fit(X_train, y_train)
Lr.score(X_test, y_test)
from sklearn.linear_model import Lasso
LO = Lasso()
LO.fit(X_train, y_train)
LO.score(X_test, y_test)
print('Test RMSE', np.sqrt(mean_squared_error(y_test, LO.predict(X_test))))
print('Train RMSE', np.sqrt(mean_squared_error(y_train, LO.predict(X_train))))
print('R2 Test', r2_score(y_test, LO.predict(X_test)))
code
105210042/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
sns.boxplot(x='symboling', y='price', data=data, palette='winter_r')
code