Column schema (one record per notebook cell):
  path              string    lengths 13 to 17
  screenshot_names  sequence  lengths 1 to 873
  code              string    lengths 0 to 40.4k
  cell_type         string    1 distinct value ("code")
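A minimal usage sketch for these records, assuming they have already been loaded into a pandas DataFrame with exactly the columns above (the file name records.parquet and the parquet format are assumptions, not part of the dataset description):

import pandas as pd

# Assumed storage format and file name; only the column names come from the schema above.
cells = pd.read_parquet('records.parquet')  # columns: path, screenshot_names, code, cell_type

# The notebook id is the directory component of `path` (e.g. '32071924/cell_9' -> '32071924').
cells['notebook_id'] = cells['path'].str.split('/').str[0]

# Count captured cells per notebook and fetch the source of one specific cell.
print(cells.groupby('notebook_id')['path'].count())
print(cells.loc[cells['path'] == '32071924/cell_9', 'code'].iloc[0])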
32071924/cell_9
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df.isna().sum()
test_df.isna().sum()
all_data = pd.concat([train_df, test_df], axis=0, sort=False)
all_data['Province_State'].fillna('None', inplace=True)
all_data['ConfirmedCases'].fillna(0, inplace=True)
all_data['Fatalities'].fillna(0, inplace=True)
all_data['Id'].fillna(-1, inplace=True)
all_data['ForecastId'].fillna(-1, inplace=True)
all_data.head()
code
32071924/cell_4
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
print(test_df.shape)
test_df.head()
code
32071924/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
test_df.isna().sum()
code
32071924/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd

train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df.isna().sum()
test_df.isna().sum()
all_data = pd.concat([train_df, test_df], axis=0, sort=False)
all_data['Province_State'].fillna('None', inplace=True)
all_data['ConfirmedCases'].fillna(0, inplace=True)
all_data['Fatalities'].fillna(0, inplace=True)
all_data['Id'].fillna(-1, inplace=True)
all_data['ForecastId'].fillna(-1, inplace=True)
le = LabelEncoder()
all_data['Date'] = pd.to_datetime(all_data['Date'])
all_data['Day_num'] = le.fit_transform(all_data.Date)
all_data['Day'] = all_data['Date'].dt.day
all_data['Month'] = all_data['Date'].dt.month
all_data['Year'] = all_data['Date'].dt.year
all_data.head()
code
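In cell_11 above, Day_num is simply an ordinal index over the sorted unique dates. A standalone sketch on toy dates (illustrative data only) shows how that differs from a calendar-day offset when dates are missing:

import pandas as pd

dates = pd.to_datetime(pd.Series(['2020-01-22', '2020-01-23', '2020-01-25']))
# LabelEncoder over dates is equivalent to a dense rank of the sorted unique values: 0, 1, 2.
day_num_label = dates.rank(method='dense').astype(int) - 1
# An explicit offset in days gives 0, 1, 3 instead; the two coincide only when no dates are missing.
day_num_offset = (dates - dates.min()).dt.days
print(day_num_label.tolist(), day_num_offset.tolist())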
32071924/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df.isna().sum()
train_df['Province_State'].unique()
code
32071924/cell_15
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import plotly.express as px

train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df.isna().sum()
test_df.isna().sum()
all_data = pd.concat([train_df, test_df], axis=0, sort=False)
all_data['Province_State'].fillna('None', inplace=True)
all_data['ConfirmedCases'].fillna(0, inplace=True)
all_data['Fatalities'].fillna(0, inplace=True)
all_data['Id'].fillna(-1, inplace=True)
all_data['ForecastId'].fillna(-1, inplace=True)
le = LabelEncoder()
all_data['Date'] = pd.to_datetime(all_data['Date'])
all_data['Day_num'] = le.fit_transform(all_data.Date)
all_data['Day'] = all_data['Date'].dt.day
all_data['Month'] = all_data['Date'].dt.month
all_data['Year'] = all_data['Date'].dt.year
train = all_data[all_data['ForecastId'] == -1.0]
test = all_data[all_data['ForecastId'] != -1.0]
# Select multiple columns after groupby with a list (double brackets); the old tuple form is rejected by current pandas.
temp = train.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
temp = temp.melt(id_vars='Date', value_vars=['ConfirmedCases', 'Fatalities'], var_name='case', value_name='count')
fig = px.line(temp, x='Date', y='count', color='case', title='Total cases over the Date', color_discrete_sequence=['cyan', 'red'])
country_max = (train.groupby(['Date', 'Country_Region'])[['ConfirmedCases', 'Fatalities']].max()
               .reset_index().sort_values(by='ConfirmedCases', ascending=False)
               .groupby('Country_Region').max()
               .reset_index().sort_values(by='ConfirmedCases', ascending=False))
country_max[:20].style.background_gradient(cmap='viridis_r')
code
32071924/cell_16
[ "text_plain_output_1.png" ]
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import plotly.express as px

train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df.isna().sum()
test_df.isna().sum()
all_data = pd.concat([train_df, test_df], axis=0, sort=False)
all_data['Province_State'].fillna('None', inplace=True)
all_data['ConfirmedCases'].fillna(0, inplace=True)
all_data['Fatalities'].fillna(0, inplace=True)
all_data['Id'].fillna(-1, inplace=True)
all_data['ForecastId'].fillna(-1, inplace=True)
le = LabelEncoder()
all_data['Date'] = pd.to_datetime(all_data['Date'])
all_data['Day_num'] = le.fit_transform(all_data.Date)
all_data['Day'] = all_data['Date'].dt.day
all_data['Month'] = all_data['Date'].dt.month
all_data['Year'] = all_data['Date'].dt.year
train = all_data[all_data['ForecastId'] == -1.0]
test = all_data[all_data['ForecastId'] != -1.0]
# Select multiple columns after groupby with a list (double brackets); the old tuple form is rejected by current pandas.
temp = train.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
temp = temp.melt(id_vars='Date', value_vars=['ConfirmedCases', 'Fatalities'], var_name='case', value_name='count')
fig = px.line(temp, x='Date', y='count', color='case', title='Total cases over the Date', color_discrete_sequence=['cyan', 'red'])
country_max = (train.groupby(['Date', 'Country_Region'])[['ConfirmedCases', 'Fatalities']].max()
               .reset_index().sort_values(by='ConfirmedCases', ascending=False)
               .groupby('Country_Region').max()
               .reset_index().sort_values(by='ConfirmedCases', ascending=False))
country_max[:20].style.background_gradient(cmap='viridis_r')
Top_country = train.groupby('Country_Region')[['ConfirmedCases', 'Fatalities']].max().reset_index().sort_values(by='ConfirmedCases', ascending=False).head(15)
fig_c = px.bar(Top_country.sort_values('ConfirmedCases'), x='ConfirmedCases', y='Country_Region', text='ConfirmedCases', orientation='h', color_discrete_sequence=['cyan'])
fig_d = px.bar(Top_country.sort_values('Fatalities'), x='Fatalities', y='Country_Region', text='Fatalities', orientation='h', color_discrete_sequence=['red'])
fig = make_subplots(rows=1, cols=2, shared_xaxes=False, horizontal_spacing=0.14, vertical_spacing=0.08, subplot_titles=('ConfirmedCases', 'Fatalities'))
fig.add_trace(fig_c['data'][0], row=1, col=1)
fig.add_trace(fig_d['data'][0], row=1, col=2)
code
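The two cells above repeatedly select a pair of columns after a groupby. A minimal standalone sketch of the list-selection idiom used in the fix (toy data, column names mirroring the cells above):

import pandas as pd

toy = pd.DataFrame({
    'Date': ['2020-04-01', '2020-04-01', '2020-04-02'],
    'ConfirmedCases': [10, 5, 20],
    'Fatalities': [1, 0, 2],
})

# A list selects multiple columns of the grouped frame; the old tuple form
# grouped['A', 'B'] was deprecated and is rejected by current pandas.
totals = toy.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
print(totals)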
32071924/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
print(train_df.shape)
train_df.tail()
code
32071924/cell_14
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import plotly.express as px

train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df.isna().sum()
test_df.isna().sum()
all_data = pd.concat([train_df, test_df], axis=0, sort=False)
all_data['Province_State'].fillna('None', inplace=True)
all_data['ConfirmedCases'].fillna(0, inplace=True)
all_data['Fatalities'].fillna(0, inplace=True)
all_data['Id'].fillna(-1, inplace=True)
all_data['ForecastId'].fillna(-1, inplace=True)
le = LabelEncoder()
all_data['Date'] = pd.to_datetime(all_data['Date'])
all_data['Day_num'] = le.fit_transform(all_data.Date)
all_data['Day'] = all_data['Date'].dt.day
all_data['Month'] = all_data['Date'].dt.month
all_data['Year'] = all_data['Date'].dt.year
train = all_data[all_data['ForecastId'] == -1.0]
test = all_data[all_data['ForecastId'] != -1.0]
# Select multiple columns after groupby with a list (double brackets); the old tuple form is rejected by current pandas.
temp = train.groupby('Date')[['ConfirmedCases', 'Fatalities']].sum().reset_index()
temp = temp.melt(id_vars='Date', value_vars=['ConfirmedCases', 'Fatalities'], var_name='case', value_name='count')
fig = px.line(temp, x='Date', y='count', color='case', title='Total cases over the Date', color_discrete_sequence=['cyan', 'red'])
fig.show()
code
32071924/cell_5
[ "text_html_output_2.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df.isna().sum()
code
88078973/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) Top_rated_wine = wine_data2.sort_values('rating', ascending=False).head(10) Top_rated_wine top_10_expensive_wine = wine_data2.sort_values('price', ascending=False).head(10) top_10_expensive_wine
code
88078973/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.head()
code
88078973/cell_30
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) Top_rated_wine = wine_data2.sort_values('rating', ascending=False).head(10) Top_rated_wine top_10_expensive_wine = wine_data2.sort_values('price', ascending=False).head(10) top_10_expensive_wine Top_10_cheapest_wine = wine_data2.sort_values('price', ascending=True).head(10) Top_10_cheapest_wine sns.pairplot(wine_data2)
code
88078973/cell_33
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) Top_rated_wine = wine_data2.sort_values('rating', ascending=False).head(10) Top_rated_wine top_10_expensive_wine = wine_data2.sort_values('price', ascending=False).head(10) top_10_expensive_wine Top_10_cheapest_wine = wine_data2.sort_values('price', ascending=True).head(10) Top_10_cheapest_wine wine_data2['rating'].describe()
code
88078973/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes
code
88078973/cell_39
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) Top_rated_wine = wine_data2.sort_values('rating', ascending=False).head(10) Top_rated_wine top_10_expensive_wine = wine_data2.sort_values('price', ascending=False).head(10) top_10_expensive_wine Top_10_cheapest_wine = wine_data2.sort_values('price', ascending=True).head(10) Top_10_cheapest_wine sample = wine_data2.sample(frac=0.01) sns.set_style('whitegrid') sample = wine_data2.sample(frac=0.01) sns.boxplot(x='category', y='rating', data=wine_data2)
code
88078973/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) Top_rated_wine = wine_data2.sort_values('rating', ascending=False).head(10) Top_rated_wine top_10_expensive_wine = wine_data2.sort_values('price', ascending=False).head(10) top_10_expensive_wine Top_10_cheapest_wine = wine_data2.sort_values('price', ascending=True).head(10) Top_10_cheapest_wine sample = wine_data2.sample(frac=0.01) sns.set_style('whitegrid') sample = wine_data2.sample(frac=0.01) sns.set(style='whitegrid') plt.figure(figsize=(12, 10)) boxplot = sns.boxplot(x='category', y='alcohol_num', data=wine_data2) boxplot.set_ylim(0, 100)
code
88078973/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) wine_data2
code
88078973/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
88078973/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) sns.displot(wine_data2.loc[lambda _wine_data: _wine_data['price_num'] < 125]['price_num'], bins=30) plt.show()
code
88078973/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.describe()
code
88078973/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data['alcohol']
code
88078973/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data
code
88078973/cell_35
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) Top_rated_wine = wine_data2.sort_values('rating', ascending=False).head(10) Top_rated_wine top_10_expensive_wine = wine_data2.sort_values('price', ascending=False).head(10) top_10_expensive_wine Top_10_cheapest_wine = wine_data2.sort_values('price', ascending=True).head(10) Top_10_cheapest_wine sample = wine_data2.sample(frac=0.01) sns.regplot(x='alcohol_num', y='rating', data=sample.loc[lambda _df: _df['alcohol_num'] < 20])
code
88078973/cell_31
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) Top_rated_wine = wine_data2.sort_values('rating', ascending=False).head(10) Top_rated_wine top_10_expensive_wine = wine_data2.sort_values('price', ascending=False).head(10) top_10_expensive_wine Top_10_cheapest_wine = wine_data2.sort_values('price', ascending=True).head(10) Top_10_cheapest_wine sns.displot(wine_data2, x='category', height=6)
code
88078973/cell_14
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum()
code
88078973/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) Top_rated_wine = wine_data2.sort_values('rating', ascending=False).head(10) Top_rated_wine
code
88078973/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum()
code
88078973/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) Top_rated_wine = wine_data2.sort_values('rating', ascending=False).head(10) Top_rated_wine top_10_expensive_wine = wine_data2.sort_values('price', ascending=False).head(10) top_10_expensive_wine Top_10_cheapest_wine = wine_data2.sort_values('price', ascending=True).head(10) Top_10_cheapest_wine
code
88078973/cell_37
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns wine_data = pd.read_csv('../input/wine-reviews-data/wine.csv') wine_data wine_data.dtypes wine_data.isnull().sum() wine_data.isnull().sum() wine_data = wine_data.assign(alcohol_num=lambda row: row['alcohol'].replace('%', '', regex=True).astype('float')) wine_data1 = wine_data.dropna(subset=['price']) price_nums = [] for index, row in wine_data1.iterrows(): if row['price'].replace('$', '').isdecimal(): price_nums.append(float(row['price'].replace('$', ''))) else: price_nums.append(np.nan) wine_data2 = wine_data1.copy() wine_data2['price_num'] = price_nums wine_data2 = wine_data2.dropna(subset=['price_num']) Top_rated_wine = wine_data2.sort_values('rating', ascending=False).head(10) Top_rated_wine top_10_expensive_wine = wine_data2.sort_values('price', ascending=False).head(10) top_10_expensive_wine Top_10_cheapest_wine = wine_data2.sort_values('price', ascending=True).head(10) Top_10_cheapest_wine sample = wine_data2.sample(frac=0.01) sns.set_style('whitegrid') sample = wine_data2.sample(frac=0.01) sns.regplot(x='price_num', y='rating', data=sample.loc[lambda _wine_data2: _wine_data2['price_num'] < 220])
code
50222837/cell_13
[ "image_output_1.png" ]
"""import xgboost xgBoost = xgboost.XGBRegressor(max_depth=3, learning_rate=0.1, n_estimators=100, booster='gbtree') xgBoost.fit(X_train, Y_train) print("train score", xgBoost.score(X_train, Y_train)) print("test score", xgBoost.score(X_test, Y_test)) #print("crossVal score", cross_val_score(xgBoost, X, Y, cv=3).mean())"""
code
50222837/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') def NanColums(df): percent_nan = 100 * df.isnull().sum() / len(df) percent_nan = percent_nan[percent_nan > 0].sort_values() return percent_nan nanColums = NanColums(df) plt.xticks(rotation=90) df[['Electrical', 'MasVnrType']] = df[['Electrical', 'MasVnrType']].fillna('None') df[['MasVnrArea']] = df[['MasVnrArea']].fillna(0) str_cols = ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'BsmtQual', 'BsmtCond', 'BsmtFinType1', 'BsmtFinType2', 'BsmtExposure'] df[str_cols] = df[str_cols].fillna('None') df['GarageYrBlt'] = df['GarageYrBlt'].fillna(0) df.drop(['Fence', 'Alley', 'MiscFeature', 'PoolQC'], axis=1, inplace=True) df['FireplaceQu'] = df['FireplaceQu'].fillna('None') df['LotFrontage'] = df.groupby('Neighborhood')['LotFrontage'].transform(lambda val: val.fillna(val.mean())) 'del df["Alley"] #91 delete due to almost all ia NaN\ndel df["PoolQC"] #7\ndel df["MiscFeature"] #54\n\n\ndf["MasVnrType"].fillna(value="0", inplace=True) # 1452\ndf["MasVnrArea"].fillna(value=0.0, inplace=True) # 1452\ndf["BsmtQual"].fillna(value="0", inplace=True) # 1423\ndf["BsmtCond"].fillna(value="0", inplace=True) # 1423\ndf["BsmtExposure"].fillna(value="0", inplace=True) # 1422\ndf["BsmtFinType1"].fillna(value="0", inplace=True) # 1423\ndf["BsmtFinType2"].fillna(value="0", inplace=True) # 1422\ndf["FireplaceQu"].fillna(value="0", inplace=True) #770\n\ndf["Electrical"].fillna(value="0", inplace=True) #1459\ndf["GarageType"].fillna(value="0", inplace=True) #1379 осутствие логично закодировать нулем\ndf["GarageYrBlt"].fillna(value=0.0, inplace=True) # 1379\ndf["GarageFinish"].fillna(value="0", inplace=True) #1379\ndf["GarageQual"].fillna(value="0", inplace=True) #1379\ndf["GarageCond"].fillna(value="0", inplace=True) #1379\ndf["Fence"].fillna(value="0", inplace=True) #281\n\ndf[\'LotFrontage\'] = df.groupby(\'Neighborhood\')[\'LotFrontage\'].transform(lambda val: val.fillna(val.mean()))\n\n#categ data\n#df = pd.get_dummies(df)' sns.heatmap(df.corr(), xticklabels=True, yticklabels=True) plt.show()
code
50222837/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') df.info()
code
50222837/cell_20
[ "text_plain_output_1.png" ]
"""import lightgbm params = [ { # 'regressor__regressor': [lightgbm.LGBMRegressor()], 'regressor__regressor__boosting_type': ['gbdt'], 'regressor__regressor__n_estimators': [100], 'regressor__regressor__max_depth': [20], 'regressor__regressor__learning_rate' : [0.1], 'regressor__regressor__num_leaves' : [31], }, ] gsc = GridSearchCV( estimator=lightgbm.LGBMRegressor(), param_grid=params, cv=320, scoring='r2', verbose=0, n_jobs=-1) grid_result = gsc.fit(X_train, Y_train) print('Best params:', grid_result.best_params_) print('Best score:', grid_result.best_score_) lgbreg = lightgbm.LGBMRegressor(boosting_type='gbdt', num_leaves=31, learning_rate=0.1, n_estimators=100, max_depth = 20) lgbreg.fit(X_train, Y_train) print("train score", lgbreg.score(X_train, Y_train)) print("test score", lgbreg.score(X_test, Y_test)) #print("crossVal score", cross_val_score(lgbreg, X, Y, cv=3).mean())"""
code
50222837/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') def NanColums(df): percent_nan = 100 * df.isnull().sum() / len(df) percent_nan = percent_nan[percent_nan > 0].sort_values() return percent_nan nanColums = NanColums(df) plt.xticks(rotation=90) df[['Electrical', 'MasVnrType']] = df[['Electrical', 'MasVnrType']].fillna('None') df[['MasVnrArea']] = df[['MasVnrArea']].fillna(0) str_cols = ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'BsmtQual', 'BsmtCond', 'BsmtFinType1', 'BsmtFinType2', 'BsmtExposure'] df[str_cols] = df[str_cols].fillna('None') df['GarageYrBlt'] = df['GarageYrBlt'].fillna(0) df.drop(['Fence', 'Alley', 'MiscFeature', 'PoolQC'], axis=1, inplace=True) df['FireplaceQu'] = df['FireplaceQu'].fillna('None') df['LotFrontage'] = df.groupby('Neighborhood')['LotFrontage'].transform(lambda val: val.fillna(val.mean())) 'del df["Alley"] #91 delete due to almost all ia NaN\ndel df["PoolQC"] #7\ndel df["MiscFeature"] #54\ndf["MasVnrType"].fillna(value="0", inplace=True) # 1452\ndf["MasVnrArea"].fillna(value=0.0, inplace=True) # 1452\ndf["BsmtQual"].fillna(value="0", inplace=True) # 1423\ndf["BsmtCond"].fillna(value="0", inplace=True) # 1423\ndf["BsmtExposure"].fillna(value="0", inplace=True) # 1422\ndf["BsmtFinType1"].fillna(value="0", inplace=True) # 1423\ndf["BsmtFinType2"].fillna(value="0", inplace=True) # 1422\ndf["FireplaceQu"].fillna(value="0", inplace=True) #770\ndf["Electrical"].fillna(value="0", inplace=True) #1459\ndf["GarageType"].fillna(value="0", inplace=True) #1379 осутствие логично закодировать нулем\ndf["GarageYrBlt"].fillna(value=0.0, inplace=True) # 1379\ndf["GarageFinish"].fillna(value="0", inplace=True) #1379\ndf["GarageQual"].fillna(value="0", inplace=True) #1379\ndf["GarageCond"].fillna(value="0", inplace=True) #1379\ndf["Fence"].fillna(value="0", inplace=True) #281\ndf[\'LotFrontage\'] = df.groupby(\'Neighborhood\')[\'LotFrontage\'].transform(lambda val: val.fillna(val.mean()))\n#categ data\n#df = pd.get_dummies(df)'
code
50222837/cell_26
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.model_selection import GridSearchCV from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaled_X_train = scaler.fit_transform(X_train) scaled_X_test = scaler.transform(X_test) from sklearn.linear_model import ElasticNet basic_elastic_model = ElasticNet(max_iter=1000000) param_grid = {'alpha': [100, 120, 130, 140, 200], 'l1_ratio': [0.1, 0.7, 0.99, 1]} from sklearn.model_selection import GridSearchCV grid_model = GridSearchCV(estimator=basic_elastic_model, param_grid=param_grid, scoring='neg_mean_squared_error', cv=5, verbose=1) grid_model.fit(scaled_X_train, y_train)
code
50222837/cell_11
[ "image_output_1.png" ]
"""import sklearn sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=100) sklearn_boost.fit(X_train, Y_train) print("train score", sklearn_boost.score(X_train, Y_train)) print("test score", sklearn_boost.score(X_test, Y_test)) #print("crossVal score", cross_val_score(sklearn_boost, X, Y, cv=3).mean())"""
code
50222837/cell_19
[ "text_plain_output_1.png" ]
"""import xgboost params = [ { 'learning_rate' : [0.2], 'n_estimators': [250], 'max_depth': [3], }, ] gsc = GridSearchCV( estimator=xgboost.XGBRegressor(), param_grid=params, cv=3, scoring='r2', verbose=0, n_jobs=-1) grid_result = gsc.fit(X_train, Y_train) print('Best params:', grid_result.best_params_) print('Best score:', grid_result.best_score_) xgBoost = xgboost.XGBRegressor(max_depth=3, learning_rate=0.2, n_estimators=250, booster='gbtree') xgBoost.fit(X_train, Y_train) print("train score", xgBoost.score(X_train, Y_train)) print("test score", xgBoost.score(X_test, Y_test)) #print("crossVal score", cross_val_score(xgBoost, X, Y, cv=3).mean())"""
code
50222837/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
50222837/cell_7
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') def NanColums(df): percent_nan = 100 * df.isnull().sum() / len(df) percent_nan = percent_nan[percent_nan > 0].sort_values() return percent_nan nanColums = NanColums(df) plt.xticks(rotation=90) df[['Electrical', 'MasVnrType']] = df[['Electrical', 'MasVnrType']].fillna('None') df[['MasVnrArea']] = df[['MasVnrArea']].fillna(0) str_cols = ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'BsmtQual', 'BsmtCond', 'BsmtFinType1', 'BsmtFinType2', 'BsmtExposure'] df[str_cols] = df[str_cols].fillna('None') df['GarageYrBlt'] = df['GarageYrBlt'].fillna(0) df.drop(['Fence', 'Alley', 'MiscFeature', 'PoolQC'], axis=1, inplace=True) df['FireplaceQu'] = df['FireplaceQu'].fillna('None') df['LotFrontage'] = df.groupby('Neighborhood')['LotFrontage'].transform(lambda val: val.fillna(val.mean())) 'del df["Alley"] #91 delete due to almost all ia NaN\ndel df["PoolQC"] #7\ndel df["MiscFeature"] #54\n\n\ndf["MasVnrType"].fillna(value="0", inplace=True) # 1452\ndf["MasVnrArea"].fillna(value=0.0, inplace=True) # 1452\ndf["BsmtQual"].fillna(value="0", inplace=True) # 1423\ndf["BsmtCond"].fillna(value="0", inplace=True) # 1423\ndf["BsmtExposure"].fillna(value="0", inplace=True) # 1422\ndf["BsmtFinType1"].fillna(value="0", inplace=True) # 1423\ndf["BsmtFinType2"].fillna(value="0", inplace=True) # 1422\ndf["FireplaceQu"].fillna(value="0", inplace=True) #770\n\ndf["Electrical"].fillna(value="0", inplace=True) #1459\ndf["GarageType"].fillna(value="0", inplace=True) #1379 осутствие логично закодировать нулем\ndf["GarageYrBlt"].fillna(value=0.0, inplace=True) # 1379\ndf["GarageFinish"].fillna(value="0", inplace=True) #1379\ndf["GarageQual"].fillna(value="0", inplace=True) #1379\ndf["GarageCond"].fillna(value="0", inplace=True) #1379\ndf["Fence"].fillna(value="0", inplace=True) #281\n\ndf[\'LotFrontage\'] = df.groupby(\'Neighborhood\')[\'LotFrontage\'].transform(lambda val: val.fillna(val.mean()))\n\n#categ data\n#df = pd.get_dummies(df)' plt.figure(figsize=(10, 8)) sns.distplot(df['SalePrice']) plt.show()
code
50222837/cell_18
[ "text_plain_output_1.png" ]
"""from catboost import CatBoost params = { 'depth': [7], 'learning_rate' : [0.15], 'l2_leaf_reg': [15,20, 25], 'iterations': [300], 'verbose' : [False], #shut up!!! } gsc = GridSearchCV( estimator=catboost.CatBoostRegressor(), param_grid=params, cv=3, scoring='r2', verbose=0, n_jobs=-1) grid_result = gsc.fit(X_train, Y_train) print('Best params:', grid_result.best_params_) print('Best score:', grid_result.best_score_) cboost = catboost.CatBoostRegressor(loss_function='RMSE', verbose=False, learning_rate = 0.15, l2_leaf_reg = 20, iterations = 300) cboost.fit(X_train, Y_train) print("train score", cboost.score(X_train, Y_train)) print("test score", cboost.score(X_test, Y_test)) #print("crossVal score", cross_val_score(cboost, X, Y, cv=3).mean())"""
code
50222837/cell_15
[ "text_plain_output_1.png" ]
"""#стакнем-ка ридж регерссию и метод опорных векторов from sklearn.linear_model import RidgeCV from sklearn.svm import LinearSVR from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import StackingRegressor import warnings warnings.filterwarnings('ignore') #нужна линеризация данных, а мне лень estimators = [('lr', RidgeCV()), ('svr', LinearSVR(random_state=42, max_iter = 1000))] regStack = StackingRegressor(estimators=estimators, final_estimator=RandomForestRegressor(n_estimators=10, random_state=42)) regStack.fit(X_train, Y_train) print("train score", regStack.score(X_train, Y_train)) print("test score", regStack.score(X_test, Y_test)) #print("crossVal score", cross_val_score(regStack, X, Y, cv=3).mean())"""
code
50222837/cell_16
[ "text_plain_output_1.png" ]
"""#среднее по рандомным деревьям показывает неплохой результат from sklearn.ensemble import RandomForestRegressor Begging = RandomForestRegressor(max_depth=30, n_estimators=300) Begging.fit(X_train, Y_train) print("train score", Begging.score(X_train, Y_train)) print("test score", Begging.score(X_test, Y_test)) #print("crossVal score", cross_val_score(Begging, X, Y, cv=3).mean())"""
code
50222837/cell_17
[ "text_plain_output_1.png" ]
"""import sklearn params = { 'learning_rate': [0.05], 'n_estimators' : [200], 'max_depth' : [6] } gsc = GridSearchCV( estimator=ensemble.GradientBoostingRegressor(), param_grid=params, cv=3) grid_result = gsc.fit(X_train, Y_train) print('Best params:', grid_result.best_params_) print('Best score:', grid_result.best_score_) sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=200, max_depth=6) sklearn_boost.fit(X_train, Y_train) print("train score", sklearn_boost.score(X_train, Y_train)) print("test score", sklearn_boost.score(X_test, Y_test))"""
code
50222837/cell_14
[ "image_output_1.png" ]
"""import lightgbm lgbreg = lightgbm.LGBMRegressor(boosting_type='gbdt', num_leaves=31, learning_rate=0.1, n_estimators=100) lgbreg.fit(X_train, Y_train) print("train score", lgbreg.score(X_train, Y_train)) print("test score", lgbreg.score(X_test, Y_test)) #print("crossVal score", cross_val_score(lgbreg, X, Y, cv=3).mean())"""
code
50222837/cell_10
[ "text_plain_output_1.png" ]
"""Y = df["SalePrice"] #value for prediction X = df.drop("SalePrice", axis=1) #data X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=98987)"""
code
50222837/cell_27
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNet from sklearn.model_selection import GridSearchCV from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaled_X_train = scaler.fit_transform(X_train) scaled_X_test = scaler.transform(X_test) from sklearn.linear_model import ElasticNet basic_elastic_model = ElasticNet(max_iter=1000000) param_grid = {'alpha': [100, 120, 130, 140, 200], 'l1_ratio': [0.1, 0.7, 0.99, 1]} from sklearn.model_selection import GridSearchCV grid_model = GridSearchCV(estimator=basic_elastic_model, param_grid=param_grid, scoring='neg_mean_squared_error', cv=5, verbose=1) grid_model.fit(scaled_X_train, y_train) grid_model.best_params_
code
50222837/cell_12
[ "text_plain_output_1.png" ]
"""import catboost cboost = catboost.CatBoostRegressor(loss_function='RMSE', verbose=False) cboost.fit(X_train, Y_train) print("train score", cboost.score(X_train, Y_train)) print("test score", cboost.score(X_test, Y_test)) #print("crossVal score", cross_val_score(cboost, X, Y, cv=3).mean())"""
code
50222837/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') def NanColums(df): percent_nan = 100 * df.isnull().sum() / len(df) percent_nan = percent_nan[percent_nan > 0].sort_values() return percent_nan nanColums = NanColums(df) sns.barplot(x=nanColums.index, y=nanColums) plt.xticks(rotation=90)
code
73075118/cell_9
[ "text_html_output_1.png" ]
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.neighbors import KNeighborsClassifier import pandas as pd glass = pd.read_csv('/kaggle/input/glass/glass.csv') X = glass.copy().drop(['Type'], axis=1) y = glass['Type'].copy() from sklearn.discriminant_analysis import LinearDiscriminantAnalysis lda_model = LinearDiscriminantAnalysis(n_components=2) lda_model.fit(X, y) reduced_X = lda_model.transform(X).T knnmodel = KNeighborsClassifier(n_neighbors=3) knnmodel.fit(reduced_X.T, y)
code
73075118/cell_19
[ "text_plain_output_1.png" ]
from plotly.subplots import make_subplots from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.neighbors import KNeighborsClassifier import pandas as pd import plotly.express as px import plotly.graph_objects as go glass = pd.read_csv('/kaggle/input/glass/glass.csv') X = glass.copy().drop(['Type'], axis=1) y = glass['Type'].copy() from sklearn.discriminant_analysis import LinearDiscriminantAnalysis lda_model = LinearDiscriminantAnalysis(n_components=2) lda_model.fit(X, y) reduced_X = lda_model.transform(X).T fig = px.scatter( glass, x=reduced_X[0], y=reduced_X[1], color="Type", hover_data=['Type'], color_continuous_scale='portland') fig.show() knnmodel = KNeighborsClassifier(n_neighbors=3) knnmodel.fit(reduced_X.T, y) fig = px.scatter(glass, x="Mg", y="Fe", color='Type', color_continuous_scale='portland') fig.show() fig = make_subplots(rows=1, cols=1) fig.add_trace(go.Contour( x=X['Mg'], y=X['Fe'], z=lda_model.predict(X), showscale=False, opacity=0.40, colorscale='portland' ), row=1, col=1) fig.add_trace(go.Scatter( x=X['Mg'], y=X['Fe'], text=y, mode='markers', marker_symbol=y, marker=dict(color=y, colorscale='portland') ), row=1, col=1) fig.update_layout(showlegend=False) fig.show() fig = make_subplots(rows=1, cols=1) fig.add_trace(go.Contour(x=reduced_X[0], y=reduced_X[1], z=lda_model.predict(X), showscale=False, opacity=0.4, colorscale='portland'), row=1, col=1) fig.add_trace(go.Scatter(x=reduced_X[0], y=reduced_X[1], text=y, mode='markers', marker_symbol=y, marker=dict(color=y, colorscale='portland')), row=1, col=1) fig.show()
code
73075118/cell_7
[ "text_html_output_1.png" ]
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis import pandas as pd import plotly.express as px glass = pd.read_csv('/kaggle/input/glass/glass.csv') X = glass.copy().drop(['Type'], axis=1) y = glass['Type'].copy() from sklearn.discriminant_analysis import LinearDiscriminantAnalysis lda_model = LinearDiscriminantAnalysis(n_components=2) lda_model.fit(X, y) reduced_X = lda_model.transform(X).T fig = px.scatter(glass, x=reduced_X[0], y=reduced_X[1], color='Type', hover_data=['Type'], color_continuous_scale='portland') fig.show()
code
73075118/cell_16
[ "text_html_output_1.png" ]
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis import pandas as pd import plotly.express as px glass = pd.read_csv('/kaggle/input/glass/glass.csv') X = glass.copy().drop(['Type'], axis=1) y = glass['Type'].copy() from sklearn.discriminant_analysis import LinearDiscriminantAnalysis lda_model = LinearDiscriminantAnalysis(n_components=2) lda_model.fit(X, y) reduced_X = lda_model.transform(X).T fig = px.scatter( glass, x=reduced_X[0], y=reduced_X[1], color="Type", hover_data=['Type'], color_continuous_scale='portland') fig.show() fig = px.scatter(glass, x='Mg', y='Fe', color='Type', color_continuous_scale='portland') fig.show()
code
73075118/cell_3
[ "text_html_output_1.png" ]
import pandas as pd glass = pd.read_csv('/kaggle/input/glass/glass.csv') glass
code
73075118/cell_17
[ "text_html_output_2.png" ]
from plotly.subplots import make_subplots from sklearn.discriminant_analysis import LinearDiscriminantAnalysis import pandas as pd import plotly.express as px import plotly.graph_objects as go glass = pd.read_csv('/kaggle/input/glass/glass.csv') X = glass.copy().drop(['Type'], axis=1) y = glass['Type'].copy() from sklearn.discriminant_analysis import LinearDiscriminantAnalysis lda_model = LinearDiscriminantAnalysis(n_components=2) lda_model.fit(X, y) reduced_X = lda_model.transform(X).T fig = px.scatter( glass, x=reduced_X[0], y=reduced_X[1], color="Type", hover_data=['Type'], color_continuous_scale='portland') fig.show() fig = px.scatter(glass, x="Mg", y="Fe", color='Type', color_continuous_scale='portland') fig.show() fig = make_subplots(rows=1, cols=1) fig.add_trace(go.Contour(x=X['Mg'], y=X['Fe'], z=lda_model.predict(X), showscale=False, opacity=0.4, colorscale='portland'), row=1, col=1) fig.add_trace(go.Scatter(x=X['Mg'], y=X['Fe'], text=y, mode='markers', marker_symbol=y, marker=dict(color=y, colorscale='portland')), row=1, col=1) fig.update_layout(showlegend=False) fig.show()
code
33108543/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
from pandas.plotting import scatter_matrix import matplotlib.pyplot as plt import pandas as pd import plotly.express as px import seaborn as sns data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv') cleaned_data = data.copy() fig = px.bar(cleaned_data, x="age", y="Mem_Score_Before", title="Mem_Score_Before over Age", color_discrete_sequence=['#F42272']) fig.show() fig = px.bar(cleaned_data, x="age", y="Mem_Score_After", title="Mem_Score_After over Age", log_y=True, color_discrete_sequence=['#F42272']) fig.show() cleaned_data.Drug.unique() fig = px.sunburst(cleaned_data.sort_values(by='age', ascending=False).reset_index(drop=True), path=['first_name'], values='Mem_Score_Before', height=700, title='Sunburst for Mem_Score_Before ', color_discrete_sequence=px.colors.qualitative.Prism) fig.data[0].textinfo = 'label+text+value' fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_Before', y='first_name', title='Patient with Higest Mem_Score_Before', text='Mem_Score_Before', orientation='h') fig.show() fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_After', y='first_name', title='Patient with Higest Mem_Score_After', text='Mem_Score_After', orientation='h') fig.show() labels = ['A', 'S', 'T'] sizes = [] sizes.append(list(cleaned_data['Drug'].value_counts())[0]) sizes.append(list(cleaned_data['Drug'].value_counts())[1]) sizes.append(list(cleaned_data['Drug'].value_counts())[2]) explode = (0, 0.1, 0) colors = ['#ffcc99', '#66b3ff', '#ff9999'] plt.axis('equal') plt.tight_layout() from pandas.plotting import scatter_matrix fig, ax = plt.subplots(figsize=(12,12)) scatter_matrix(cleaned_data, alpha=1, ax=ax) df_plot = cleaned_data[cleaned_data['Diff'] > 0] plt.figure(figsize=(10, 6)) ax = sns.boxplot(x='Drug', y='Mem_Score_After', hue='Drug', data=cleaned_data, palette='Set3')
code
33108543/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import plotly.express as px import seaborn as sns data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv') cleaned_data = data.copy() fig = px.bar(cleaned_data, x="age", y="Mem_Score_Before", title="Mem_Score_Before over Age", color_discrete_sequence=['#F42272']) fig.show() fig = px.bar(cleaned_data, x="age", y="Mem_Score_After", title="Mem_Score_After over Age", log_y=True, color_discrete_sequence=['#F42272']) fig.show() cleaned_data.Drug.unique() fig = px.sunburst(cleaned_data.sort_values(by='age', ascending=False).reset_index(drop=True), path=['first_name'], values='Mem_Score_Before', height=700, title='Sunburst for Mem_Score_Before ', color_discrete_sequence=px.colors.qualitative.Prism) fig.data[0].textinfo = 'label+text+value' fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_Before', y='first_name', title='Patient with Higest Mem_Score_Before', text='Mem_Score_Before', orientation='h') fig.show()
code
33108543/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv') cleaned_data = data.copy() plt.figure(figsize=(16, 6)) sns.barplot(x='Drug', y='Mem_Score_Before', data=cleaned_data, order=cleaned_data.Drug.unique().tolist()) plt.title('Distribution of Drugs')
code
33108543/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv') data.head()
code
33108543/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns

data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv')
cleaned_data = data.copy()
fig = px.bar(cleaned_data, x="age", y="Mem_Score_Before", title="Mem_Score_Before over Age", color_discrete_sequence=['#F42272'])
fig.show()
fig = px.bar(cleaned_data, x="age", y="Mem_Score_After", title="Mem_Score_After over Age", log_y=True, color_discrete_sequence=['#F42272'])
fig.show()
cleaned_data.Drug.unique()
fig = px.sunburst(cleaned_data.sort_values(by='age', ascending=False).reset_index(drop=True), path=['first_name'], values='Mem_Score_Before', height=700, title='Sunburst for Mem_Score_Before', color_discrete_sequence=px.colors.qualitative.Prism)
fig.data[0].textinfo = 'label+text+value'
fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_Before', y='first_name', title='Patient with Highest Mem_Score_Before', text='Mem_Score_Before', orientation='h')
fig.show()
fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_After', y='first_name', title='Patient with Highest Mem_Score_After', text='Mem_Score_After', orientation='h')
fig.show()
df_plot = cleaned_data[cleaned_data['Diff'] > 0]
# 'AgeRange' is not created in any captured cell; it is presumably added earlier in the notebook.
# Pass column names as keyword arguments; recent seaborn versions reject the old positional form.
sns.boxplot(x='AgeRange', y='Diff', data=df_plot)
code
33108543/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv') data.describe()
code
33108543/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv') cleaned_data = data.copy() cleaned_data.Drug.unique()
code
33108543/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import plotly.express as px import seaborn as sns data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv') cleaned_data = data.copy() fig = px.bar(cleaned_data, x="age", y="Mem_Score_Before", title="Mem_Score_Before over Age", color_discrete_sequence=['#F42272']) fig.show() fig = px.bar(cleaned_data, x="age", y="Mem_Score_After", title="Mem_Score_After over Age", log_y=True, color_discrete_sequence=['#F42272']) fig.show() cleaned_data.Drug.unique() fig = px.sunburst(cleaned_data.sort_values(by='age', ascending=False).reset_index(drop=True), path=['first_name'], values='Mem_Score_Before', height=700, title='Sunburst for Mem_Score_Before ', color_discrete_sequence=px.colors.qualitative.Prism) fig.data[0].textinfo = 'label+text+value' fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_Before', y='first_name', title='Patient with Higest Mem_Score_Before', text='Mem_Score_Before', orientation='h') fig.show() fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_After', y='first_name', title='Patient with Higest Mem_Score_After', text='Mem_Score_After', orientation='h') fig.show() cleaned_data
code
33108543/cell_18
[ "text_html_output_1.png" ]
from pandas.plotting import scatter_matrix import matplotlib.pyplot as plt import pandas as pd import plotly.express as px import seaborn as sns data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv') cleaned_data = data.copy() fig = px.bar(cleaned_data, x="age", y="Mem_Score_Before", title="Mem_Score_Before over Age", color_discrete_sequence=['#F42272']) fig.show() fig = px.bar(cleaned_data, x="age", y="Mem_Score_After", title="Mem_Score_After over Age", log_y=True, color_discrete_sequence=['#F42272']) fig.show() cleaned_data.Drug.unique() fig = px.sunburst(cleaned_data.sort_values(by='age', ascending=False).reset_index(drop=True), path=['first_name'], values='Mem_Score_Before', height=700, title='Sunburst for Mem_Score_Before ', color_discrete_sequence=px.colors.qualitative.Prism) fig.data[0].textinfo = 'label+text+value' fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_Before', y='first_name', title='Patient with Higest Mem_Score_Before', text='Mem_Score_Before', orientation='h') fig.show() fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_After', y='first_name', title='Patient with Higest Mem_Score_After', text='Mem_Score_After', orientation='h') fig.show() labels = ['A', 'S', 'T'] sizes = [] sizes.append(list(cleaned_data['Drug'].value_counts())[0]) sizes.append(list(cleaned_data['Drug'].value_counts())[1]) sizes.append(list(cleaned_data['Drug'].value_counts())[2]) explode = (0, 0.1, 0) colors = ['#ffcc99', '#66b3ff', '#ff9999'] plt.axis('equal') plt.tight_layout() from pandas.plotting import scatter_matrix fig, ax = plt.subplots(figsize=(12, 12)) scatter_matrix(cleaned_data, alpha=1, ax=ax)
code
33108543/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import plotly.express as px import seaborn as sns data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv') cleaned_data = data.copy() fig = px.bar(cleaned_data, x="age", y="Mem_Score_Before", title="Mem_Score_Before over Age", color_discrete_sequence=['#F42272']) fig.show() fig = px.bar(cleaned_data, x="age", y="Mem_Score_After", title="Mem_Score_After over Age", log_y=True, color_discrete_sequence=['#F42272']) fig.show() cleaned_data.Drug.unique() fig = px.sunburst(cleaned_data.sort_values(by='age', ascending=False).reset_index(drop=True), path=['first_name'], values='Mem_Score_Before', height=700, title='Sunburst for Mem_Score_Before ', color_discrete_sequence=px.colors.qualitative.Prism) fig.data[0].textinfo = 'label+text+value' fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_Before', y='first_name', title='Patient with Higest Mem_Score_Before', text='Mem_Score_Before', orientation='h') fig.show() fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_After', y='first_name', title='Patient with Higest Mem_Score_After', text='Mem_Score_After', orientation='h') fig.show() labels = ['A', 'S', 'T'] sizes = [] sizes.append(list(cleaned_data['Drug'].value_counts())[0]) sizes.append(list(cleaned_data['Drug'].value_counts())[1]) sizes.append(list(cleaned_data['Drug'].value_counts())[2]) explode = (0, 0.1, 0) colors = ['#ffcc99', '#66b3ff', '#ff9999'] plt.figure(figsize=(15, 10)) plt.title('Distribution of Drug', fontsize=20) plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90) plt.axis('equal') plt.tight_layout()
code
33108543/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import plotly.express as px import seaborn as sns data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv') cleaned_data = data.copy() fig = px.bar(cleaned_data, x="age", y="Mem_Score_Before", title="Mem_Score_Before over Age", color_discrete_sequence=['#F42272']) fig.show() fig = px.bar(cleaned_data, x="age", y="Mem_Score_After", title="Mem_Score_After over Age", log_y=True, color_discrete_sequence=['#F42272']) fig.show() cleaned_data.Drug.unique() fig = px.sunburst(cleaned_data.sort_values(by='age', ascending=False).reset_index(drop=True), path=['first_name'], values='Mem_Score_Before', height=700, title='Sunburst for Mem_Score_Before ', color_discrete_sequence=px.colors.qualitative.Prism) fig.data[0].textinfo = 'label+text+value' fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_Before', y='first_name', title='Patient with Higest Mem_Score_Before', text='Mem_Score_Before', orientation='h') fig.show() fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_After', y='first_name', title='Patient with Higest Mem_Score_After', text='Mem_Score_After', orientation='h') fig.show() sns.pairplot(cleaned_data)
code
33108543/cell_14
[ "text_html_output_2.png", "text_html_output_3.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns
data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv')
cleaned_data = data.copy()
fig = px.bar(cleaned_data, x="age", y="Mem_Score_Before", title="Mem_Score_Before over Age", color_discrete_sequence=['#F42272'])
fig.show()
fig = px.bar(cleaned_data, x="age", y="Mem_Score_After", title="Mem_Score_After over Age", log_y=True, color_discrete_sequence=['#F42272'])
fig.show()
cleaned_data.Drug.unique()
fig = px.sunburst(cleaned_data.sort_values(by='age', ascending=False).reset_index(drop=True), path=['first_name'], values='Mem_Score_Before', height=700, title='Sunburst for Mem_Score_Before ', color_discrete_sequence=px.colors.qualitative.Prism)
fig.data[0].textinfo = 'label+text+value'
fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_Before', y='first_name', title='Patient with Higest Mem_Score_Before', text='Mem_Score_Before', orientation='h')
fig.show()
fig = px.bar(cleaned_data.sort_values('age', ascending=False)[:10][::-1], x='Mem_Score_After', y='first_name', title='Patient with Higest Mem_Score_After', text='Mem_Score_After', orientation='h')
fig.show()
code
33108543/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns
data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv')
cleaned_data = data.copy()
fig = px.bar(cleaned_data, x='age', y='Mem_Score_Before', title='Mem_Score_Before over Age', color_discrete_sequence=['#F42272'])
fig.show()
fig = px.bar(cleaned_data, x='age', y='Mem_Score_After', title='Mem_Score_After over Age', log_y=True, color_discrete_sequence=['#F42272'])
fig.show()
code
33108543/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns
data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv')
cleaned_data = data.copy()
fig = px.bar(cleaned_data, x="age", y="Mem_Score_Before", title="Mem_Score_Before over Age", color_discrete_sequence=['#F42272'])
fig.show()
fig = px.bar(cleaned_data, x="age", y="Mem_Score_After", title="Mem_Score_After over Age", log_y=True, color_discrete_sequence=['#F42272'])
fig.show()
cleaned_data.Drug.unique()
fig = px.sunburst(cleaned_data.sort_values(by='age', ascending=False).reset_index(drop=True), path=['first_name'], values='Mem_Score_Before', height=700, title='Sunburst for Mem_Score_Before ', color_discrete_sequence=px.colors.qualitative.Prism)
fig.data[0].textinfo = 'label+text+value'
fig.show()
code
33108543/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/memory-test-on-drugged-islanders-data/Islander_data.csv')
data.info()
code
130025642/cell_23
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.columns
corr_matrix = df.corr()
df.isnull().sum()
df.isnull().sum()
df_missing = df[df['x_e_out [-]'].isnull()]
df_non_missing = df[~df['x_e_out [-]'].isnull()]
df_missing.count()
df_test = df_missing.drop('x_e_out [-]', axis=1)
df_train = df_non_missing.drop('id', axis=1)
df_test = df_missing.drop('x_e_out [-]', axis=1)
df_train = df_non_missing.drop('id', axis=1)
df_train.isnull().sum()
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
df = df_train
import re
feature_names = df.columns
pattern = '[\\[\\]<>]'
new_feature_names = []
for name in feature_names:
    new_name = re.sub(pattern, '_', name)
    new_feature_names.append(new_name)
df.columns = new_feature_names
df.columns
df = df.dropna(subset=['x_e_out _-_'])
X = df.drop('x_e_out _-_', axis=1)
y = df['x_e_out _-_']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print(f'Root Mean squared error: {np.sqrt(mse):.2f}')
code
130025642/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.columns
corr_matrix = df.corr()
df.isnull().sum()
df.isnull().sum()
df_missing = df[df['x_e_out [-]'].isnull()]
df_non_missing = df[~df['x_e_out [-]'].isnull()]
df_missing.count()
df_test = df_missing.drop('x_e_out [-]', axis=1)
df_train = df_non_missing.drop('id', axis=1)
df_test = df_missing.drop('x_e_out [-]', axis=1)
df_train = df_non_missing.drop('id', axis=1)
df_train.isnull().sum()
code
130025642/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.info()
code
130025642/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.columns
corr_matrix = df.corr()
df.isnull().sum()
code
130025642/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130025642/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.columns
code
130025642/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
code
130025642/cell_17
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.columns
corr_matrix = df.corr()
df.isnull().sum()
df.isnull().sum()
df_missing = df[df['x_e_out [-]'].isnull()]
df_non_missing = df[~df['x_e_out [-]'].isnull()]
df_missing.count()
code
130025642/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.columns
corr_matrix = df.corr()
df.isnull().sum()
df.isnull().sum()
code
130025642/cell_22
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.columns
corr_matrix = df.corr()
df.isnull().sum()
df.isnull().sum()
df_missing = df[df['x_e_out [-]'].isnull()]
df_non_missing = df[~df['x_e_out [-]'].isnull()]
df_missing.count()
df_test = df_missing.drop('x_e_out [-]', axis=1)
df_train = df_non_missing.drop('id', axis=1)
df_test = df_missing.drop('x_e_out [-]', axis=1)
df_train = df_non_missing.drop('id', axis=1)
df_train.isnull().sum()
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
df = df_train
import re
feature_names = df.columns
pattern = '[\\[\\]<>]'
new_feature_names = []
for name in feature_names:
    new_name = re.sub(pattern, '_', name)
    new_feature_names.append(new_name)
df.columns = new_feature_names
df.columns
code
130025642/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.columns
corr_matrix = df.corr()
plt.figure(figsize=(10, 8))
sns.heatmap(corr_matrix, cmap='coolwarm', annot=True)
plt.title('Correlation Matrix')
plt.show()
code
130025642/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.columns
corr_matrix = df.corr()
df.isnull().sum()
df.describe()
code
130025642/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
df.head()
code
106212685/cell_21
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb['last review'] = pd.to_datetime(abnb['last review'])
abnb['Construction year'] = pd.to_datetime(abnb['Construction year'])
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb['review_rate_number'].unique()
print('\n')
abnb['review_rate_number'].value_counts()
print('\n')
abnb['rating'] = pd.cut(abnb['review_rate_number'], bins=[0, 1, 2, 3, 4, 5], labels=['One-Star', 'Two-Star', 'Three-Star', 'Four-Star', 'Five-Star'], include_lowest=True)
abnb['rating'] = abnb['rating'].cat.add_categories('missing values').fillna('missing values')
abnb[['review_rate_number', 'rating']].head(5)
code
106212685/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
code
106212685/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
code
106212685/cell_25
[ "image_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb['cancellation_policy'].unique()
print('\n')
abnb['cancellation_policy'].value_counts()
code
106212685/cell_34
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb['price'] = abnb['price'].str.replace('$', '').str.replace(' ', '').str.replace(',', '').astype(float)
abnb['price'].head(5)
code
106212685/cell_23
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb['license'].unique()
print('\n')
abnb['license'].value_counts()
code
106212685/cell_33
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb[['price', 'service_fee']].head(5)
code
106212685/cell_44
[ "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from wordcloud import WordCloud
import gc
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb.columns
abnb.columns
'Present memory: {} '.format(abnb.memory_usage().sum())
print()
gc.collect()
code
106212685/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
code
106212685/cell_40
[ "text_html_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
def monthQ(month):
    if 0 < month <= 3:
        return 1
    if 3 < month <= 6:
        return 2
    if 6 < month <= 9:
        return 3
    if 9 < month <= 12:
        return 4
abnb['last_review'].head(3)
abnb['last_reviewed_year'] = abnb['last_review'].dt.year
abnb['last_reviewed_month'] = abnb['last_review'].dt.month
abnb['last_reviewed_day'] = abnb['last_review'].dt.day
print('\n')
abnb['quarterly_review'] = abnb['last_reviewed_month'].map(lambda m: monthQ(m))
print('\n')
abnb[['last_review', 'last_reviewed_year', 'last_reviewed_month', 'last_reviewed_day', 'quarterly_review']].head(5)
code
106212685/cell_29
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb['instant_bookable'].unique()
print('\n')
abnb['instant_bookable'].value_counts()
code
106212685/cell_48
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud
import gc
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb.columns
abnb.columns
'Present memory: {} '.format(abnb.memory_usage().sum())
gc.collect()
abnb.drop(columns=['lat', 'long', 'cancellation_policy', 'room_type', 'license'], inplace=True)
'Current memory usage: {} '.format(abnb.memory_usage().sum())
grpd = abnb.groupby(['host_id'])
grpd
code
106212685/cell_41
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb.columns
code
106212685/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
code
106212685/cell_19
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb.info()
code
106212685/cell_18
[ "text_html_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
code
106212685/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
code