Dataset schema (one record per notebook cell):
- path: string (length 13 to 17), e.g. "2007618/cell_14"
- screenshot_names: sequence of strings (length 1 to 873)
- code: string (length 0 to 40.4k)
- cell_type: string class (1 value: "code")
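The records that follow use this schema; each pairs a notebook cell's source code with the names of its rendered outputs. As a minimal sketch of how such records might be consumed, assuming they are stored as JSON Lines with these four fields (the file name below is hypothetical):

import json

# Minimal sketch: iterate over records with the schema above, assuming a
# JSON Lines layout (one JSON object per line); 'cells.jsonl' is hypothetical.
with open('cells.jsonl', encoding='utf-8') as f:
    for line in f:
        record = json.loads(line)
        path = record['path']                     # e.g. '2007618/cell_14'
        screenshots = record['screenshot_names']  # names of rendered outputs
        source = record['code']                   # raw source of the cell
        assert record['cell_type'] == 'code'      # the only class in this split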
2007618/cell_14
[ "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np

# newdf3..newdf8 and State_house are DataFrames built in earlier cells of the
# notebook that are not included in this record; the plotting helpers below
# were stripped down to stubs in this dump.
def priceOverTime(data, label):
    """Plot price over time"""

priceOverTime(newdf3, 'California')
priceOverTime(newdf4, 'Colorado')
priceOverTime(newdf5, 'Michigan')

def priceOverTime2(data, label):
    pass

priceOverTime2(newdf6, 'San Francisco')
priceOverTime2(newdf7, 'Denver')
priceOverTime2(newdf8, 'Detroit')

# Mean single-family home value per state and year, 2010-2017.
State_raw_house = State_house.groupby(['RegionName', State_house.Date.dt.year])['ZHVI_SingleFamilyResidence'].mean().unstack()
State_raw_house.columns.name = None
State_raw_house = State_raw_house.reset_index()
State_raw_house = State_raw_house[['RegionName', 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017]]
State_raw_house = State_raw_house.dropna()

Feature = State_raw_house['RegionName']
weightage = State_raw_house[2010]
total = State_raw_house[2017]
percent = (State_raw_house[2017] - State_raw_house[2010]) / State_raw_house[2010] * 100
mid_pos = (State_raw_house[2010] + State_raw_house[2017]) / 2

weightage = np.array(weightage)
Feature = np.array(Feature)
total = np.array(total)
percent = np.array(percent)
mid_pos = np.array(mid_pos)

# Sort every array by percent change.
idx = percent.argsort()
Feature, total, percent, mid_pos, weightage = [np.take(x, idx) for x in [Feature, total, percent, mid_pos, weightage]]

# One vertical slot per state.
s = 1
size = []
for i, cn in enumerate(weightage):
    s = s + 1
    size.append(s)

fig, ax = plt.subplots(figsize=(13, 13))
ax.scatter(total, size, marker='o', color='lightBlue', s=size, linewidths=10)
ax.scatter(weightage, size, marker='o', color='LightGreen', s=size, linewidths=10)
ax.set_xlabel('Home Value')
ax.set_ylabel('States')
ax.spines['right'].set_visible(False)
ax.grid()
for i, txt in enumerate(Feature):
    ax.annotate(txt, (720000, size[i]), fontsize=12, rotation=0, color='Red')
    ax.annotate('.', xy=(total[i], size[i]), xytext=(weightage[i], size[i]), arrowprops=dict(facecolor='LightGreen', shrink=0.06))
for i, pct in enumerate(percent):
    ax.annotate(str(pct)[0:4], (mid_pos[i], size[i]), fontsize=12, rotation=0, color='Brown')
ax.annotate('2010 Home Value', (300000, 26), fontsize=14, rotation=0, color='Green')
ax.annotate('2017 Home Value', (300000, 25), fontsize=14, rotation=0, color='Blue')
ax.annotate('w/ Percent Change', (300000, 24), fontsize=14, rotation=0, color='Brown')
code
2007618/cell_5
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import seaborn as sns

def plotDistribution(data, metric):
    """Plot distributions"""
    sns.set_style('whitegrid')
    distributionTwo = sns.FacetGrid(data, hue='RegionName', aspect=2.5)
    distributionTwo.map(sns.kdeplot, metric, shade=True)
    distributionTwo.set(xlim=(100000, 550000))
    distributionTwo.add_legend()
    distributionTwo.set_axis_labels(str(metric), 'Proportion')
    distributionTwo.fig.suptitle(str(metric) + ' vs Region (2016)')

# newdf2 is a DataFrame built in an earlier cell of the notebook.
plotDistribution(newdf2, 'MedianListingPrice_SingleFamilyResidence')
plotDistribution(newdf2, 'MedianSoldPrice_AllHomes')
code
74050915/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/water-potability/water_potability.csv')
fig, ax = plt.subplots(figsize=(12, 6))
sns.heatmap(df.isnull(), ax=ax)
ax.set_title('Null values')
# df_notnull_col and col are never defined in this cell, so the next line
# raises a NameError (hence the stderr output recorded for this cell).
df.loc[df_notnull_col.index, col].unique()
code
74050915/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/water-potability/water_potability.csv')
df.describe()
code
74050915/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/water-potability/water_potability.csv')
df.head(5)
code
74050915/cell_1
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
import pandas as pd
import os
import seaborn as sns
from scipy import stats

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74050915/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/water-potability/water_potability.csv')
fig, ax = plt.subplots(figsize=(12, 6))
sns.heatmap(df.isnull(), ax=ax)
ax.set_title('Null values')
df['Potability'].value_counts().plot(kind='pie', autopct='%1.1f%%', radius=1.5, textprops={'fontsize': 16})
code
74050915/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/water-potability/water_potability.csv')
df.info()
code
74050915/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/water-potability/water_potability.csv')
fig, ax = plt.subplots(figsize=(12, 6))
sns.heatmap(df.isnull(), ax=ax)
ax.set_title('Null values')
code
105207156/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))

x_predictor_col = ['Baggage Handling', 'Departure and Arrival Time Convenience', 'In-flight Wifi Service', 'Ease of Online Booking', 'In-flight Entertainment', 'Check-in Service', 'Online Boarding', 'Gate Location']

def create_plot_pivot(df, x_column):
    _df_plot = df.groupby([x_column, 'Satisfaction']).size().reset_index().pivot(columns='Satisfaction', index=x_column, values=0)
    return _df_plot

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4):
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[i])
    plt.xlabel(x_predictor_col[i])
    axe[i].set_ylabel('Count of Respondants')
fig.show()

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4, 8):
    j = i - 4
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[j])
    plt.xlabel(x_predictor_col[i])
    axe[j].set_ylabel('Count of Respondants')
fig.show()

data.isnull().sum()
plt.figure(figsize=(20, 15))
sns.heatmap(data.corr(), annot=True, cmap='YlGnBu')
code
105207156/cell_13
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))
code
105207156/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 6))
data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]
plt.pie(data_pie, labels=labels, explode=explode, autopct='%1.2f%%', shadow=True, colors=['#256D85', '#3BACB6'])
plt.legend()
plt.show()
code
105207156/cell_25
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))

x_predictor_col = ['Baggage Handling', 'Departure and Arrival Time Convenience', 'In-flight Wifi Service', 'Ease of Online Booking', 'In-flight Entertainment', 'Check-in Service', 'Online Boarding', 'Gate Location']

def create_plot_pivot(df, x_column):
    _df_plot = df.groupby([x_column, 'Satisfaction']).size().reset_index().pivot(columns='Satisfaction', index=x_column, values=0)
    return _df_plot

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4):
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[i])
    plt.xlabel(x_predictor_col[i])
    axe[i].set_ylabel('Count of Respondants')
fig.show()

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4, 8):
    j = i - 4
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[j])
    plt.xlabel(x_predictor_col[i])
    axe[j].set_ylabel('Count of Respondants')
fig.show()

data.isnull().sum()
data = data.dropna(subset=['Arrival Delay'])
sns.heatmap(data.isnull())
code
105207156/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
code
105207156/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))

data.isnull().sum()
code
105207156/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape
data['ID'].duplicated().sum()
code
105207156/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))

x_predictor_col = ['Baggage Handling', 'Departure and Arrival Time Convenience', 'In-flight Wifi Service', 'Ease of Online Booking', 'In-flight Entertainment', 'Check-in Service', 'Online Boarding', 'Gate Location']

def create_plot_pivot(df, x_column):
    _df_plot = df.groupby([x_column, 'Satisfaction']).size().reset_index().pivot(columns='Satisfaction', index=x_column, values=0)
    return _df_plot

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4):
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[i])
    plt.xlabel(x_predictor_col[i])
    axe[i].set_ylabel('Count of Respondants')
fig.show()

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4, 8):
    j = i - 4
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[j])
    plt.xlabel(x_predictor_col[i])
    axe[j].set_ylabel('Count of Respondants')
fig.show()

data.isnull().sum()
data = data.dropna(subset=['Arrival Delay'])
data.info()
code
105207156/cell_2
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.head(10)
code
105207156/cell_11
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()
code
105207156/cell_19
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))

sns.heatmap(data.isnull())
code
105207156/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
        file = os.path.join(dirname, filename)
code
105207156/cell_18
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

x_predictor_col = ['Baggage Handling', 'Departure and Arrival Time Convenience', 'In-flight Wifi Service', 'Ease of Online Booking', 'In-flight Entertainment', 'Check-in Service', 'Online Boarding', 'Gate Location']

def create_plot_pivot(df, x_column):
    _df_plot = df.groupby([x_column, 'Satisfaction']).size().reset_index().pivot(columns='Satisfaction', index=x_column, values=0)
    return _df_plot

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4):
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[i])
    plt.xlabel(x_predictor_col[i])
    axe[i].set_ylabel('Count of Respondants')
fig.show()

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4, 8):
    j = i - 4
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[j])
    plt.xlabel(x_predictor_col[i])
    axe[j].set_ylabel('Count of Respondants')
fig.show()
code
105207156/cell_32
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))

x_predictor_col = ['Baggage Handling', 'Departure and Arrival Time Convenience', 'In-flight Wifi Service', 'Ease of Online Booking', 'In-flight Entertainment', 'Check-in Service', 'Online Boarding', 'Gate Location']

def create_plot_pivot(df, x_column):
    _df_plot = df.groupby([x_column, 'Satisfaction']).size().reset_index().pivot(columns='Satisfaction', index=x_column, values=0)
    return _df_plot

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4):
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[i])
    plt.xlabel(x_predictor_col[i])
    axe[i].set_ylabel('Count of Respondants')
fig.show()

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4, 8):
    j = i - 4
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[j])
    plt.xlabel(x_predictor_col[i])
    axe[j].set_ylabel('Count of Respondants')
fig.show()

data.isnull().sum()
data = data.dropna(subset=['Arrival Delay'])
data.shape
code
105207156/cell_8
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape
data['Satisfaction'].value_counts()
code
105207156/cell_3
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.tail()
code
105207156/cell_17
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

x_predictor_col = ['Baggage Handling', 'Departure and Arrival Time Convenience', 'In-flight Wifi Service', 'Ease of Online Booking', 'In-flight Entertainment', 'Check-in Service', 'Online Boarding', 'Gate Location']

def create_plot_pivot(df, x_column):
    _df_plot = df.groupby([x_column, 'Satisfaction']).size().reset_index().pivot(columns='Satisfaction', index=x_column, values=0)
    return _df_plot

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4):
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[i])
    plt.xlabel(x_predictor_col[i])
    axe[i].set_ylabel('Count of Respondants')
fig.show()
code
105207156/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))

x_predictor_col = ['Baggage Handling', 'Departure and Arrival Time Convenience', 'In-flight Wifi Service', 'Ease of Online Booking', 'In-flight Entertainment', 'Check-in Service', 'Online Boarding', 'Gate Location']

def create_plot_pivot(df, x_column):
    _df_plot = df.groupby([x_column, 'Satisfaction']).size().reset_index().pivot(columns='Satisfaction', index=x_column, values=0)
    return _df_plot

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4):
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[i])
    plt.xlabel(x_predictor_col[i])
    axe[i].set_ylabel('Count of Respondants')
fig.show()

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4, 8):
    j = i - 4
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[j])
    plt.xlabel(x_predictor_col[i])
    axe[j].set_ylabel('Count of Respondants')
fig.show()

data.isnull().sum()
data = data.dropna(subset=['Arrival Delay'])
sns.catplot(x='Satisfaction', y='Departure Delay', data=data, palette='cubehelix')
code
105207156/cell_24
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))

x_predictor_col = ['Baggage Handling', 'Departure and Arrival Time Convenience', 'In-flight Wifi Service', 'Ease of Online Booking', 'In-flight Entertainment', 'Check-in Service', 'Online Boarding', 'Gate Location']

def create_plot_pivot(df, x_column):
    _df_plot = df.groupby([x_column, 'Satisfaction']).size().reset_index().pivot(columns='Satisfaction', index=x_column, values=0)
    return _df_plot

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4):
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[i])
    plt.xlabel(x_predictor_col[i])
    axe[i].set_ylabel('Count of Respondants')
fig.show()

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4, 8):
    j = i - 4
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[j])
    plt.xlabel(x_predictor_col[i])
    axe[j].set_ylabel('Count of Respondants')
fig.show()

data.isnull().sum()
data = data.dropna(subset=['Arrival Delay'])
data['Arrival Delay'].isnull().sum()
code
105207156/cell_14
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))

sns.kdeplot(data[data['Satisfaction'] == 0]['Age'], shade=True, color='b')
sns.kdeplot(data[data['Satisfaction'] == 1]['Age'], shade=True, color='r')
code
105207156/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()

g = sns.countplot(x='Flight Distance', data=data, lw=0, color='red')
_xticklabels = g.get_xticklabels()
for ind, label in enumerate(_xticklabels):
    if int(label.get_text()) % 200 == 0:
        label.set_visible(True)
    else:
        label.set_visible(False)
g.set_xticklabels(_xticklabels, rotation=45)
g.set(ylim=(0, 550))

x_predictor_col = ['Baggage Handling', 'Departure and Arrival Time Convenience', 'In-flight Wifi Service', 'Ease of Online Booking', 'In-flight Entertainment', 'Check-in Service', 'Online Boarding', 'Gate Location']

def create_plot_pivot(df, x_column):
    _df_plot = df.groupby([x_column, 'Satisfaction']).size().reset_index().pivot(columns='Satisfaction', index=x_column, values=0)
    return _df_plot

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4):
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[i])
    plt.xlabel(x_predictor_col[i])
    axe[i].set_ylabel('Count of Respondants')
fig.show()

fig, ax = plt.subplots(2, 2, figsize=(20, 12))
axe = ax.ravel()
for i in range(4, 8):
    j = i - 4
    create_plot_pivot(data, x_predictor_col[i]).plot(kind='bar', stacked=True, ax=axe[j])
    plt.xlabel(x_predictor_col[i])
    axe[j].set_ylabel('Count of Respondants')
fig.show()

data.isnull().sum()
data['Arrival Delay'].describe()
code
105207156/cell_10
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape
data['Gender'].value_counts()
code
105207156/cell_12
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape

data_pie = [73452, 56428]
labels = ['1', '0']
explode = [0.1, 0]

fig, ax = plt.subplots(1, 2)
sns.histplot(x='Gender', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[0])
sns.histplot(x='Customer Type', data=data, stat='density', shrink=0.9, color='steelblue', ax=ax[1])
fig.show()

fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(20, 20))
sns.histplot(x='Type of Travel', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[0])
sns.histplot(x='Class', data=data, stat='density', shrink=0.8, color='steelblue', ax=ax[1])
fig.show()
code
105207156/cell_5
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

sns.set(rc={'figure.figsize': (11.7, 8.27)})

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        file = os.path.join(dirname, filename)

data = pd.read_csv(file)
data.columns
data.shape
code
73080128/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

df = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', lines=True)
dfv2 = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df = pd.concat([df, dfv2])

X = df.headline.values
y = df.is_sarcastic.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)

vocab_size = 10000
max_length = 32
embedding_dim = 16
oov_token = '<oov>'
padding_type = 'post'
trunc_type = 'post'

tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_token)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(X_train)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(X_test)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
(training_padded.shape, y_train.shape, testing_padded.shape, y_test.shape)

model = tf.keras.models.Sequential([
    layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    layers.Flatten(),
    layers.Dense(32, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(1, activation='sigmoid'),
], name='sarcasm-detection-model')
model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
model.summary()

callbacks = [EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10, restore_best_weights=True)]
history = model.fit(training_padded, y_train, batch_size=256, epochs=1000, validation_split=0.1, callbacks=callbacks)
code
73080128/cell_9
[ "image_output_1.png" ]
import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', lines=True)
dfv2 = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df = pd.concat([df, dfv2])

X = df.headline.values
y = df.is_sarcastic.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
code
73080128/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

df = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', lines=True)
dfv2 = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df = pd.concat([df, dfv2])

X = df.headline.values
y = df.is_sarcastic.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)

vocab_size = 10000
max_length = 32
embedding_dim = 16
oov_token = '<oov>'
padding_type = 'post'
trunc_type = 'post'

tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_token)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(X_train)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(X_test)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
(training_padded.shape, y_train.shape, testing_padded.shape, y_test.shape)

model = tf.keras.models.Sequential([
    layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    layers.Flatten(),
    layers.Dense(32, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(1, activation='sigmoid'),
], name='sarcasm-detection-model')
model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
model.summary()

callbacks = [EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10, restore_best_weights=True)]
history = model.fit(training_padded, y_train, batch_size=256, epochs=1000, validation_split=0.1, callbacks=callbacks)

epochs = history.epoch
plt.plot(epochs, history.history['accuracy'], 'g', label='Training Accuracy')
plt.plot(epochs, history.history['val_accuracy'], 'b', label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
code
73080128/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', lines=True)
dfv2 = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df = pd.concat([df, dfv2])
df.head()
code
73080128/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

df = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', lines=True)
dfv2 = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df = pd.concat([df, dfv2])

X = df.headline.values
y = df.is_sarcastic.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)

vocab_size = 10000
max_length = 32
embedding_dim = 16
oov_token = '<oov>'
padding_type = 'post'
trunc_type = 'post'

tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_token)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(X_train)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(X_test)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
(training_padded.shape, y_train.shape, testing_padded.shape, y_test.shape)

model = tf.keras.models.Sequential([
    layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    layers.Flatten(),
    layers.Dense(32, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(1, activation='sigmoid'),
], name='sarcasm-detection-model')
model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
model.summary()

callbacks = [EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10, restore_best_weights=True)]
history = model.fit(training_padded, y_train, batch_size=256, epochs=1000, validation_split=0.1, callbacks=callbacks)

model.evaluate(testing_padded, y_test)
code
73080128/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', lines=True)
dfv2 = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df = pd.concat([df, dfv2])
df['is_sarcastic'].value_counts()
code
73080128/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

df = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', lines=True)
dfv2 = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df = pd.concat([df, dfv2])

X = df.headline.values
y = df.is_sarcastic.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)

vocab_size = 10000
max_length = 32
embedding_dim = 16
oov_token = '<oov>'
padding_type = 'post'
trunc_type = 'post'

tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_token)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(X_train)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(X_test)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
(training_padded.shape, y_train.shape, testing_padded.shape, y_test.shape)

model = tf.keras.models.Sequential([
    layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    layers.Flatten(),
    layers.Dense(32, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(1, activation='sigmoid'),
], name='sarcasm-detection-model')
model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
model.summary()

callbacks = [EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10, restore_best_weights=True)]
history = model.fit(training_padded, y_train, batch_size=256, epochs=1000, validation_split=0.1, callbacks=callbacks)

epochs = history.epoch
plt.plot(epochs, history.history['loss'], 'g', label='Training Loss')
plt.plot(epochs, history.history['val_loss'], 'b', label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
code
73080128/cell_7
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', lines=True)
dfv2 = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df = pd.concat([df, dfv2])
sns.set_style('whitegrid')
sns.countplot(x='is_sarcastic', data=df)
code
73080128/cell_17
[ "text_plain_output_1.png" ]
import tensorflow as tf
from tensorflow.keras import layers

vocab_size = 10000
max_length = 32
embedding_dim = 16
oov_token = '<oov>'
padding_type = 'post'
trunc_type = 'post'

model = tf.keras.models.Sequential([
    layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    layers.Flatten(),
    layers.Dense(32, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(1, activation='sigmoid'),
], name='sarcasm-detection-model')
model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics=['accuracy'])
model.summary()
code
73080128/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

df = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', lines=True)
dfv2 = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df = pd.concat([df, dfv2])

X = df.headline.values
y = df.is_sarcastic.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)

vocab_size = 10000
max_length = 32
embedding_dim = 16
oov_token = '<oov>'
padding_type = 'post'
trunc_type = 'post'

tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_token)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(X_train)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(X_test)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
(training_padded.shape, y_train.shape, testing_padded.shape, y_test.shape)
code
73080128/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', lines=True)
dfv2 = pd.read_json('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df = pd.concat([df, dfv2])
df.info()
code
128019578/cell_1
[ "text_plain_output_1.png" ]
!pip install torchsummary
code
34136064/cell_33
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, precision_score, plot_confusion_matrix
from sklearn.metrics import precision_recall_curve, plot_precision_recall_curve

# train_x/train_y, test_x/test_y and val_x/val_y are produced by an earlier
# cell of the notebook that is not included in this record.
best_score = -1
best_estimators = 0
for i in range(10, 250):
    model = RandomForestClassifier(n_estimators=i, random_state=0)
    model.fit(train_x, train_y)
    pred = model.predict(test_x)
    score = accuracy_score(pred, test_y)
    if score > best_score:
        best_score = score
        best_estimators = i

model = RandomForestClassifier(n_estimators=best_estimators, random_state=0)
model.fit(train_x, train_y)
pred = model.predict(test_x)
score = accuracy_score(pred, test_y)

y_pred_prob = model.predict_proba(val_x)[:, 1]
precision, recall, thresholds = precision_recall_curve(val_y, y_pred_prob)
plt.plot(recall, precision, label='Random Forest')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.plot([0, 1], [0.68837209, 0.68837209], label='Baseline')
plt.legend()
plt.show()
code
34136064/cell_40
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, precision_score, plot_confusion_matrix
from sklearn.metrics import precision_recall_curve, plot_precision_recall_curve
from sklearn.preprocessing import LabelEncoder

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)

# One-hot encode low-cardinality categorical columns (except the target).
unique_vals = data.nunique()
col_log = data.columns
for i in range(0, len(unique_vals)):
    coln = str(col_log[i])
    if int(unique_vals[i]) < 5 and coln != 'status':
        data = pd.concat([data.drop(coln, axis=1), pd.get_dummies(data[coln], prefix=coln)], axis=1)

data_y = pd.DataFrame(data['status'])
data_x = data.drop('status', axis=1)
status_encoder = LabelEncoder()
data_y = status_encoder.fit_transform(data_y)

# train_x/train_y, test_x/test_y and val_x/val_y come from an earlier
# cell of the notebook that is not included in this record.
best_score = -1
best_estimators = 0
for i in range(10, 250):
    model = RandomForestClassifier(n_estimators=i, random_state=0)
    model.fit(train_x, train_y)
    pred = model.predict(test_x)
    score = accuracy_score(pred, test_y)
    if score > best_score:
        best_score = score
        best_estimators = i

model = RandomForestClassifier(n_estimators=best_estimators, random_state=0)
model.fit(train_x, train_y)
pred = model.predict(test_x)
score = accuracy_score(pred, test_y)

y_pred_prob = model.predict_proba(val_x)[:, 1]
precision, recall, thresholds = precision_recall_curve(val_y, y_pred_prob)
df = pd.DataFrame(data={'Precision': precision[:-1], 'Recall': recall[:-1], 'Thresholds': thresholds})
df
targets = df.loc[(df['Precision'] >= 1) & (df['Thresholds'] != 1)]
targets

# Pick the threshold with the best test-set precision among perfect-precision
# candidates from the validation set.
best = -1
thresh_best = -1
y_test_prob = model.predict_proba(test_x)[:, 1]
for target in targets.to_numpy():
    true_prediction = (y_test_prob > target[2]).astype(int)
    score = precision_score(test_y, true_prediction)
    if score > best:
        best = score
        thresh_best = target[2]

ypred = (model.predict_proba(test_x)[:, 1] > thresh_best).astype(int)
score = accuracy_score(ypred, test_y)
ypred = (model.predict_proba(val_x)[:, 1] > thresh_best).astype(int)
score = accuracy_score(ypred, val_y)
print('Test accuracy with threshold: %f' % (score * 100))
print('True Negatives: %d, False Positives: %d, False Negatives: %d, True Positives: %d' % tuple(confusion_matrix(val_y, ypred).ravel()))
code
34136064/cell_39
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, precision_score, plot_confusion_matrix
from sklearn.metrics import precision_recall_curve, plot_precision_recall_curve
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
unique_vals = data.nunique()
col_log = data.columns
for i in range(0, len(unique_vals)):
    coln = str(col_log[i])
    if int(unique_vals[i]) < 5 and coln != 'status':
        data = pd.concat([data.drop(coln, axis=1), pd.get_dummies(data[coln], prefix=coln)], axis=1)
data_y = pd.DataFrame(data['status'])
data_x = data.drop('status', axis=1)
status_encoder = LabelEncoder()
data_y = status_encoder.fit_transform(data_y)
best_score = -1
best_estimators = 0
for i in range(10, 250):
    model = RandomForestClassifier(n_estimators=i, random_state=0)
    model.fit(train_x, train_y)
    pred = model.predict(test_x)
    score = accuracy_score(pred, test_y)
    if score > best_score:
        best_score = score
        best_estimators = i
model = RandomForestClassifier(n_estimators=best_estimators, random_state=0)
model.fit(train_x, train_y)
pred = model.predict(test_x)
score = accuracy_score(pred, test_y)
y_pred_prob = model.predict_proba(val_x)[:, 1]
precision, recall, thresholds = precision_recall_curve(val_y, y_pred_prob)
df = pd.DataFrame(data={'Precision': precision[:-1], 'Recall': recall[:-1], 'Thresholds': thresholds})
df
targets = df.loc[(df['Precision'] >= 1) & (df['Thresholds'] != 1)]
targets
best = -1
thresh_best = -1
y_test_prob = model.predict_proba(test_x)[:, 1]
for target in targets.to_numpy():
    true_prediction = (y_test_prob > target[2]).astype(int)
    score = precision_score(test_y, true_prediction)
    if score > best:
        best = score
        thresh_best = target[2]
ypred = (model.predict_proba(test_x)[:, 1] > thresh_best).astype(int)
score = accuracy_score(ypred, test_y)
print('Test accuracy with threshold: %f' % (score * 100))
print('True Negatives: %d, False Positives: %d, False Negatives: %d, True Positives: %d' % tuple(confusion_matrix(test_y, ypred).ravel()))
code
34136064/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
code
34136064/cell_28
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

best_score = -1
best_estimators = 0
for i in range(10, 250):
    model = RandomForestClassifier(n_estimators=i, random_state=0)
    model.fit(train_x, train_y)
    pred = model.predict(test_x)
    score = accuracy_score(pred, test_y)
    if score > best_score:
        best_score = score
        best_estimators = i
model = RandomForestClassifier(n_estimators=best_estimators, random_state=0)
model.fit(train_x, train_y)
pred = model.predict(test_x)
score = accuracy_score(pred, test_y)
print('Test Accuracy: %f' % (score * 100))
code
34136064/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
code
34136064/cell_15
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
unique_vals = data.nunique()
col_log = data.columns
for i in range(0, len(unique_vals)):
    coln = str(col_log[i])
    if int(unique_vals[i]) < 5 and coln != 'status':
        data = pd.concat([data.drop(coln, axis=1), pd.get_dummies(data[coln], prefix=coln)], axis=1)
data_y = pd.DataFrame(data['status'])
data_x = data.drop('status', axis=1)
status_encoder = LabelEncoder()
data_y = status_encoder.fit_transform(data_y)
code
34136064/cell_38
[ "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, precision_score, plot_confusion_matrix
from sklearn.metrics import precision_recall_curve, plot_precision_recall_curve
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
unique_vals = data.nunique()
col_log = data.columns
for i in range(0, len(unique_vals)):
    coln = str(col_log[i])
    if int(unique_vals[i]) < 5 and coln != 'status':
        data = pd.concat([data.drop(coln, axis=1), pd.get_dummies(data[coln], prefix=coln)], axis=1)
data_y = pd.DataFrame(data['status'])
data_x = data.drop('status', axis=1)
status_encoder = LabelEncoder()
data_y = status_encoder.fit_transform(data_y)
best_score = -1
best_estimators = 0
for i in range(10, 250):
    model = RandomForestClassifier(n_estimators=i, random_state=0)
    model.fit(train_x, train_y)
    pred = model.predict(test_x)
    score = accuracy_score(pred, test_y)
    if score > best_score:
        best_score = score
        best_estimators = i
model = RandomForestClassifier(n_estimators=best_estimators, random_state=0)
model.fit(train_x, train_y)
pred = model.predict(test_x)
score = accuracy_score(pred, test_y)
y_pred_prob = model.predict_proba(val_x)[:, 1]
precision, recall, thresholds = precision_recall_curve(val_y, y_pred_prob)
df = pd.DataFrame(data={'Precision': precision[:-1], 'Recall': recall[:-1], 'Thresholds': thresholds})
df
targets = df.loc[(df['Precision'] >= 1) & (df['Thresholds'] != 1)]
targets
best = -1
thresh_best = -1
y_test_prob = model.predict_proba(test_x)[:, 1]
for target in targets.to_numpy():
    true_prediction = (y_test_prob > target[2]).astype(int)
    score = precision_score(test_y, true_prediction)
    if score > best:
        best = score
        thresh_best = target[2]
    print('Score for threshold %f: %f' % (target[2], score * 100))
print('Best precision score of %f achieved with threshold %f.' % (best, thresh_best))
code
34136064/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
code
34136064/cell_17
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
unique_vals = data.nunique()
col_log = data.columns
for i in range(0, len(unique_vals)):
    coln = str(col_log[i])
    if int(unique_vals[i]) < 5 and coln != 'status':
        data = pd.concat([data.drop(coln, axis=1), pd.get_dummies(data[coln], prefix=coln)], axis=1)
data_y = pd.DataFrame(data['status'])
data_x = data.drop('status', axis=1)
status_encoder = LabelEncoder()
data_y = status_encoder.fit_transform(data_y)
print('Guessing always placed accuracy: %f' % ((data['status'] == 'Placed').sum() / data['status'].count() * 100))
code
34136064/cell_35
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
unique_vals = data.nunique()
col_log = data.columns
for i in range(0, len(unique_vals)):
    coln = str(col_log[i])
    if int(unique_vals[i]) < 5 and coln != 'status':
        data = pd.concat([data.drop(coln, axis=1), pd.get_dummies(data[coln], prefix=coln)], axis=1)
data_y = pd.DataFrame(data['status'])
data_x = data.drop('status', axis=1)
status_encoder = LabelEncoder()
data_y = status_encoder.fit_transform(data_y)
# precision, recall and thresholds are assumed to come from the precision_recall_curve call in an earlier cell
df = pd.DataFrame(data={'Precision': precision[:-1], 'Recall': recall[:-1], 'Thresholds': thresholds})
df
code
34136064/cell_31
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, precision_score, plot_confusion_matrix
import matplotlib.pyplot as plt

best_score = -1
best_estimators = 0
for i in range(10, 250):
    model = RandomForestClassifier(n_estimators=i, random_state=0)
    model.fit(train_x, train_y)
    pred = model.predict(test_x)
    score = accuracy_score(pred, test_y)
    if score > best_score:
        best_score = score
        best_estimators = i
model = RandomForestClassifier(n_estimators=best_estimators, random_state=0)
model.fit(train_x, train_y)
pred = model.predict(test_x)
score = accuracy_score(pred, test_y)
print('True Negatives: %d, False Positives: %d, False Negatives: %d, True Positives: %d' % tuple(confusion_matrix(test_y, pred).ravel()))
print('Precision Score: %f' % (precision_score(test_y, pred) * 100))
plot_confusion_matrix(model, test_x, test_y, cmap=plt.cm.Reds)
plt.title('Confusion Matrix')
plt.show()
code
34136064/cell_24
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

best_score = -1
best_estimators = 0
for i in range(10, 250):
    model = RandomForestClassifier(n_estimators=i, random_state=0)
    model.fit(train_x, train_y)
    pred = model.predict(test_x)
    score = accuracy_score(pred, test_y)
    if score > best_score:
        best_score = score
        best_estimators = i
print('The best number of estimators was %d with accuracy score %f' % (best_estimators, best_score * 100))
code
34136064/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
unique_vals = data.nunique()
col_log = data.columns
for i in range(0, len(unique_vals)):
    coln = str(col_log[i])
    if int(unique_vals[i]) < 5 and coln != 'status':
        data = pd.concat([data.drop(coln, axis=1), pd.get_dummies(data[coln], prefix=coln)], axis=1)
data
code
34136064/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
code
34136064/cell_27
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

best_score = -1
best_estimators = 0
for i in range(10, 250):
    model = RandomForestClassifier(n_estimators=i, random_state=0)
    model.fit(train_x, train_y)
    pred = model.predict(test_x)
    score = accuracy_score(pred, test_y)
    if score > best_score:
        best_score = score
        best_estimators = i
model = RandomForestClassifier(n_estimators=best_estimators, random_state=0)
model.fit(train_x, train_y)
code
34136064/cell_37
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
unique_vals = data.nunique()
col_log = data.columns
for i in range(0, len(unique_vals)):
    coln = str(col_log[i])
    if int(unique_vals[i]) < 5 and coln != 'status':
        data = pd.concat([data.drop(coln, axis=1), pd.get_dummies(data[coln], prefix=coln)], axis=1)
data_y = pd.DataFrame(data['status'])
data_x = data.drop('status', axis=1)
status_encoder = LabelEncoder()
data_y = status_encoder.fit_transform(data_y)
# precision, recall and thresholds are assumed to come from the precision_recall_curve call in an earlier cell
df = pd.DataFrame(data={'Precision': precision[:-1], 'Recall': recall[:-1], 'Thresholds': thresholds})
df
targets = df.loc[(df['Precision'] >= 1) & (df['Thresholds'] != 1)]
targets
code
34136064/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
data.drop(['salary', 'sl_no'], axis=1, inplace=True)
data.isna().sum()
data.nunique()
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
data
code
34136064/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
data
data.isna().sum()
code
50236508/cell_4
[ "text_plain_output_1.png" ]
def binary_search_recursive(array, element, start, end):
    if start > end:
        return -1
    mid = (start + end) // 2
    if element == array[mid]:
        return mid
    if element < array[mid]:
        return binary_search_recursive(array, element, start, mid - 1)
    else:
        return binary_search_recursive(array, element, mid + 1, end)

element = 35
array = list(range(1, 1000))
n = 1000
print('Searching for {}'.format(element))
# end is the last valid index, so pass len(array) - 1 rather than len(array)
print('Index of {}: {}'.format(element, binary_search_recursive(array, element, 0, len(array) - 1)))
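# (added sanity check, not original notebook output) the search above uses a closed
# interval: start and end are both valid indices, hence the len(array) - 1 initial call.
assert binary_search_recursive([10, 20, 30, 40, 50], 50, 0, 4) == 4
assert binary_search_recursive([10, 20, 30, 40, 50], 45, 0, 4) == -1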
code
50236508/cell_6
[ "text_plain_output_1.png" ]
def binary_search_recursive(array, element, start, end):
    if start > end:
        return -1
    mid = (start + end) // 2
    if element == array[mid]:
        return mid
    if element < array[mid]:
        return binary_search_recursive(array, element, start, mid - 1)
    else:
        return binary_search_recursive(array, element, mid + 1, end)

element = 35
array = list(range(1, 1000))
n = 1000

def binary_search_recursive(array, element, start, end):
    if start > end:
        return -1
    mid = (start + end) // 2
    if element == array[mid]:
        return mid
    if element < array[mid]:
        return binary_search_recursive(array, element, start, mid - 1)
    else:
        return binary_search_recursive(array, element, mid + 1, end)

element = 50
array = [10, 20, 30, 40, 50, 60, 70]
print('Number to search for: {}'.format(element))
# end is the last valid index, so pass len(array) - 1 rather than len(array)
print('Index of {}: {}'.format(element, binary_search_recursive(array, element, 0, len(array) - 1)))
code
50236508/cell_2
[ "text_plain_output_1.png" ]
# print the primes between 1 and 1000 by trial division
# (the divisor range starts at 2; the original range(1000, num) was empty and printed every number)
for num in range(1, 1001):
    if num > 0:
        for i in range(2, num):
            if num % i == 0:
                break
        else:
            print(num)
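# note: the `else` above is attached to the `for`, not the `if` -- it runs only when
# the inner loop finishes without hitting `break`, i.e. when no divisor was found.
# A minimal illustration of the same for/else idiom (added example, not notebook output):
for candidate in (7, 9):
    for d in range(2, candidate):
        if candidate % d == 0:
            break
    else:
        print(candidate, 'is prime')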
code
50236508/cell_7
[ "text_plain_output_1.png" ]
def binary_search_recursive(array, element, start, end):
    if start > end:
        return -1
    mid = (start + end) // 2
    if element == array[mid]:
        return mid
    if element < array[mid]:
        return binary_search_recursive(array, element, start, mid - 1)
    else:
        return binary_search_recursive(array, element, mid + 1, end)

element = 35
array = list(range(1, 1000))
n = 1000

def binary_search_recursive(array, element, start, end):
    if start > end:
        return -1
    mid = (start + end) // 2
    if element == array[mid]:
        return mid
    if element < array[mid]:
        return binary_search_recursive(array, element, start, mid - 1)
    else:
        return binary_search_recursive(array, element, mid + 1, end)

element = 50
array = [10, 20, 30, 40, 50, 60, 70]

def selectionSort(array, size):
    for step in range(size):
        min_idx = step
        for i in range(step + 1, size):
            if array[i] < array[min_idx]:
                min_idx = i
        array[step], array[min_idx] = (array[min_idx], array[step])

data = [10, 5, 30, 15, 50, 6, 25]
size = len(data)
selectionSort(data, size)
print('Selection Sort result:')
print(data)
code
50236508/cell_8
[ "text_plain_output_1.png" ]
def binary_search_recursive(array, element, start, end):
    if start > end:
        return -1
    mid = (start + end) // 2
    if element == array[mid]:
        return mid
    if element < array[mid]:
        return binary_search_recursive(array, element, start, mid - 1)
    else:
        return binary_search_recursive(array, element, mid + 1, end)

element = 35
array = list(range(1, 1000))
n = 1000

def binary_search_recursive(array, element, start, end):
    if start > end:
        return -1
    mid = (start + end) // 2
    if element == array[mid]:
        return mid
    if element < array[mid]:
        return binary_search_recursive(array, element, start, mid - 1)
    else:
        return binary_search_recursive(array, element, mid + 1, end)

element = 50
array = [10, 20, 30, 40, 50, 60, 70]

def selectionSort(array, size):
    for step in range(size):
        min_idx = step
        for i in range(step + 1, size):
            if array[i] < array[min_idx]:
                min_idx = i
        array[step], array[min_idx] = (array[min_idx], array[step])

data = [10, 5, 30, 15, 50, 6, 25]
size = len(data)
selectionSort(data, size)

def insertionSort(array):
    for step in range(1, len(array)):
        key = array[step]
        j = step - 1
        while j >= 0 and key < array[j]:
            array[j + 1] = array[j]
            j = j - 1
        array[j + 1] = key

data = [10, 5, 30, 15, 50, 6, 25]
insertionSort(data)
print('Insertion Sort result:')
print(data)
code
50236508/cell_5
[ "text_plain_output_1.png" ]
def sequentialSearch(x, array):
    # scan array from the front, counting comparisons in the global `iterations`
    position = 0
    global iterations
    iterations = 0
    while position < len(array):
        iterations += 1
        if x == array[position]:
            return position
        position += 1
    return -1

if __name__ == '__main__':
    List = ['10', '20', '30', '40', '50', '60', '70']
    Target = '50'
    answer = sequentialSearch(Target, List)
    if answer != -1:
        print('Target found at index:', answer, 'after', iterations, 'iterations')
    else:
        print('Target not found')
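# (added note) sequential search inspects up to len(array) elements per lookup (O(n));
# the recursive binary search in the earlier cells halves the range on each call
# (O(log n)), but it requires the input to be sorted first.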
code
74052566/cell_42
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_validate
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
(df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop('GarageYrBlt', axis=1, inplace=True)
numerical_features.remove('GarageYrBlt')
imput_mean = SimpleImputer(missing_values=np.NaN, strategy='mean')
imput_with_zero = SimpleImputer(strategy='constant', fill_value=0)
df_train['LotFrontage'] = imput_mean.fit_transform(df_train[['LotFrontage']]).astype('int')
df_test['LotFrontage'] = imput_mean.transform(df_test[['LotFrontage']]).astype('int')
df_train['MasVnrArea'] = imput_with_zero.fit_transform(df_train[['MasVnrArea']]).astype('int')
# fill test-set MasVnrArea with the zero-fill imputer fitted on the training column
df_test['MasVnrArea'] = imput_with_zero.transform(df_test[['MasVnrArea']]).astype('int')
categorical_features_to_investigate = [col for col in categorical_features if df_train[col].nunique() <= 6]
categorical_features_to_delete = [col for col in categorical_features if col not in categorical_features_to_investigate]
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop(categorical_features_to_delete, axis=1, inplace=True)
categorical_missing_columns = list(df_train[categorical_features_to_investigate].columns[df_train[categorical_features_to_investigate].isnull().any()])
(df_train[categorical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
ordinal_categories = ['Street', 'LotShape', 'LandContour', 'LandSlope', 'ExterQual', 'CentralAir', 'PavedDrive']
nominal_categories = ['MSZoning', 'Utilities', 'LotConfig', 'BldgType', 'RoofStyle', 'KitchenQual', 'Foundation', 'Heating', 'SaleCondition', 'HeatingQC', 'ExterCond']
X = df_train.drop(['SalePrice'], axis=1)
y = df_train['SalePrice']
ordinal_encoder = OrdinalEncoder()
one_hot = OneHotEncoder(handle_unknown='ignore')
imput_mean = SimpleImputer(strategy='mean')
imput_with_zero = SimpleImputer(strategy='constant', fill_value=0)
preprocessor = ColumnTransformer(transformers=[
    ('ordinal', ordinal_encoder, ordinal_categories),
    ('one hot', one_hot, nominal_categories),
    ('mean', imput_mean, ['LotFrontage']),
    ('zero', imput_with_zero, ['MasVnrArea'])])
model_random_regressor = Pipeline(steps=[('preprocessor', preprocessor), ('model', RandomForestRegressor(random_state=0))])
model_xgb = Pipeline(steps=[('preprocessor', preprocessor), ('model', XGBRegressor(random_state=0))])
model_linear = Pipeline(steps=[('preprocessor', preprocessor), ('model', LinearRegression())])
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
rf_model_result = cross_validate(model_random_regressor, X, y, cv=kfold, scoring='neg_root_mean_squared_error')
xgb_result = cross_validate(model_xgb, X, y, cv=kfold, scoring='neg_root_mean_squared_error')
linear_result = cross_validate(model_linear, X, y, cv=kfold, scoring='neg_root_mean_squared_error')
print(f"The RMSE of Random Forest model was: {-rf_model_result['test_score'].mean()}")
print(f"The RMSE of XGB model was: {-xgb_result['test_score'].mean()}")
print(f"The RMSE of linear model was: {-linear_result['test_score'].mean()}")
code
74052566/cell_21
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') (df_train.shape, df_test.shape) numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']] categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object'] numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()]) (df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False) data_frames = [df_train, df_test] for df in data_frames: df.drop('GarageYrBlt', axis=1, inplace=True) numerical_features.remove('GarageYrBlt') categorical_features_to_investigate = [col for col in categorical_features if df_train[col].nunique() <= 6] categorical_features_to_delete = [col for col in categorical_features if col not in categorical_features_to_investigate] data_frames = [df_train, df_test] for df in data_frames: df.drop(categorical_features_to_delete, axis=1, inplace=True) categorical_missing_columns = list(df_train[categorical_features_to_investigate].columns[df_train[categorical_features_to_investigate].isnull().any()]) (df_train[categorical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
code
74052566/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') (df_train.shape, df_test.shape) numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']] categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object'] numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()]) (df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
code
74052566/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') (df_train.shape, df_test.shape) df_train.info()
code
74052566/cell_23
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') (df_train.shape, df_test.shape) numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']] categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object'] numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()]) (df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False) data_frames = [df_train, df_test] for df in data_frames: df.drop('GarageYrBlt', axis=1, inplace=True) numerical_features.remove('GarageYrBlt') categorical_features_to_investigate = [col for col in categorical_features if df_train[col].nunique() <= 6] categorical_features_to_delete = [col for col in categorical_features if col not in categorical_features_to_investigate] data_frames = [df_train, df_test] for df in data_frames: df.drop(categorical_features_to_delete, axis=1, inplace=True) categorical_missing_columns = list(df_train[categorical_features_to_investigate].columns[df_train[categorical_features_to_investigate].isnull().any()]) (df_train[categorical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False) for df in data_frames: df.drop(categorical_missing_columns, axis=1, inplace=True) print(df.shape)
code
74052566/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') (df_train.shape, df_test.shape) numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']] categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object'] numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()]) (df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False) data_frames = [df_train, df_test] for df in data_frames: df.drop('GarageYrBlt', axis=1, inplace=True) numerical_features.remove('GarageYrBlt') categorical_features_to_investigate = [col for col in categorical_features if df_train[col].nunique() <= 6] categorical_features_to_delete = [col for col in categorical_features if col not in categorical_features_to_investigate] data_frames = [df_train, df_test] for df in data_frames: df.drop(categorical_features_to_delete, axis=1, inplace=True) print(f'Number of features that remains: {len(categorical_features_to_investigate)}')
code
74052566/cell_6
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') (df_train.shape, df_test.shape) numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']] categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object'] print(f'len of numerical features: {len(numerical_features)}') print(f'len of categorical features: {len(categorical_features)}')
code
74052566/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') (df_train.shape, df_test.shape) df_test.isnull().sum()
code
74052566/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') (df_train.shape, df_test.shape) numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']] categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object'] numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()]) (df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False) data_frames = [df_train, df_test] for df in data_frames: df.drop('GarageYrBlt', axis=1, inplace=True) numerical_features.remove('GarageYrBlt') categorical_features_to_investigate = [col for col in categorical_features if df_train[col].nunique() <= 6] categorical_features_to_delete = [col for col in categorical_features if col not in categorical_features_to_investigate] data_frames = [df_train, df_test] for df in data_frames: df.drop(categorical_features_to_delete, axis=1, inplace=True) categorical_missing_columns = list(df_train[categorical_features_to_investigate].columns[df_train[categorical_features_to_investigate].isnull().any()]) (df_train[categorical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False) categorical_features_to_investigate = [col for col in categorical_features_to_investigate if col not in categorical_missing_columns] len(categorical_features) for col in categorical_features_to_investigate: print(f'{col} \n{df_train[col].unique()}')
code
74052566/cell_48
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import mutual_info_regression
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
(df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)

def plot_count(feature):
    pass

data_frames = [df_train, df_test]
for df in data_frames:
    df.drop('GarageYrBlt', axis=1, inplace=True)
numerical_features.remove('GarageYrBlt')
imput_mean = SimpleImputer(missing_values=np.NaN, strategy='mean')
imput_with_zero = SimpleImputer(strategy='constant', fill_value=0)
df_train['LotFrontage'] = imput_mean.fit_transform(df_train[['LotFrontage']]).astype('int')
df_test['LotFrontage'] = imput_mean.transform(df_test[['LotFrontage']]).astype('int')
df_train['MasVnrArea'] = imput_with_zero.fit_transform(df_train[['MasVnrArea']]).astype('int')
# fill test-set MasVnrArea with the zero-fill imputer fitted on the training column
df_test['MasVnrArea'] = imput_with_zero.transform(df_test[['MasVnrArea']]).astype('int')
categorical_features_to_investigate = [col for col in categorical_features if df_train[col].nunique() <= 6]
categorical_features_to_delete = [col for col in categorical_features if col not in categorical_features_to_investigate]
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop(categorical_features_to_delete, axis=1, inplace=True)
categorical_missing_columns = list(df_train[categorical_features_to_investigate].columns[df_train[categorical_features_to_investigate].isnull().any()])
(df_train[categorical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
df_test.isnull().sum()
ordinal_var_dict = {
    'Street': ['Grvl', 'Pave'],
    'LotShape': ['IR3', 'IR2', 'IR1', 'Reg'],
    'LandContour': ['Low', 'HLS', 'Bnk', 'Lvl'],
    'LandSlope': ['Sev', 'Mod', 'Gtl'],
    'ExterQual': ['Fa', 'TA', 'Gd', 'Ex'],
    'CentralAir': ['N', 'Y'],
    'PavedDrive': ['N', 'P', 'Y']}
for var in ordinal_var_dict:
    ordered_var = pd.api.types.CategoricalDtype(categories=ordinal_var_dict[var], ordered=True)
    df_train[var] = df_train[var].astype(ordered_var)
    df_test[var] = df_test[var].astype(ordered_var)
X = df_train.drop(['SalePrice'], axis=1)
y = df_train['SalePrice']
for colname in X.select_dtypes(include=['object', 'category']):
    X[colname], _ = X[colname].factorize()
discrete_features = X.dtypes == int
for colname in df_test.select_dtypes(include=['object', 'category']):
    df_test[colname], _ = df_test[colname].factorize()
mi_scores = mutual_info_regression(X, y)
mi_scores = pd.Series(mi_scores, name='Mi scores', index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)

def plot_mi_scores(scores):
    scores = scores.sort_values(ascending=True)
    width = np.arange(len(scores))
    ticks = list(scores.index)
    plt.barh(width, scores)
    plt.yticks(width, ticks)
    plt.title('Mutual Information Scores')

plt.figure(dpi=100, figsize=(12, 5))
plot_mi_scores(mi_scores[:20])
code
74052566/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
(df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
df_train[numerical_missing_columns].describe()
code
74052566/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74052566/cell_49
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import mutual_info_regression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
(df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop('GarageYrBlt', axis=1, inplace=True)
numerical_features.remove('GarageYrBlt')
categorical_features_to_investigate = [col for col in categorical_features if df_train[col].nunique() <= 6]
categorical_features_to_delete = [col for col in categorical_features if col not in categorical_features_to_investigate]
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop(categorical_features_to_delete, axis=1, inplace=True)
categorical_missing_columns = list(df_train[categorical_features_to_investigate].columns[df_train[categorical_features_to_investigate].isnull().any()])
(df_train[categorical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
df_test.isnull().sum()
ordinal_var_dict = {
    'Street': ['Grvl', 'Pave'],
    'LotShape': ['IR3', 'IR2', 'IR1', 'Reg'],
    'LandContour': ['Low', 'HLS', 'Bnk', 'Lvl'],
    'LandSlope': ['Sev', 'Mod', 'Gtl'],
    'ExterQual': ['Fa', 'TA', 'Gd', 'Ex'],
    'CentralAir': ['N', 'Y'],
    'PavedDrive': ['N', 'P', 'Y']}
for var in ordinal_var_dict:
    ordered_var = pd.api.types.CategoricalDtype(categories=ordinal_var_dict[var], ordered=True)
    df_train[var] = df_train[var].astype(ordered_var)
    df_test[var] = df_test[var].astype(ordered_var)
X = df_train.drop(['SalePrice'], axis=1)
y = df_train['SalePrice']
for colname in X.select_dtypes(include=['object', 'category']):
    X[colname], _ = X[colname].factorize()
discrete_features = X.dtypes == int
for colname in df_test.select_dtypes(include=['object', 'category']):
    df_test[colname], _ = df_test[colname].factorize()
mi_scores = mutual_info_regression(X, y)
mi_scores = pd.Series(mi_scores, name='Mi scores', index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
selected_mi = mi_scores[mi_scores >= 0.2].index
X = X[selected_mi]
X
code
74052566/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
(df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
df_test[['LotFrontage', 'MasVnrArea']].isnull().sum()
df_train[['LotFrontage', 'MasVnrArea']].isnull().sum()
code
74052566/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
df_test['KitchenQual'].value_counts()
code
74052566/cell_8
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
print(numerical_missing_columns)
code
74052566/cell_47
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import mutual_info_regression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
(df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop('GarageYrBlt', axis=1, inplace=True)
numerical_features.remove('GarageYrBlt')
categorical_features_to_investigate = [col for col in categorical_features if df_train[col].nunique() <= 6]
categorical_features_to_delete = [col for col in categorical_features if col not in categorical_features_to_investigate]
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop(categorical_features_to_delete, axis=1, inplace=True)
categorical_missing_columns = list(df_train[categorical_features_to_investigate].columns[df_train[categorical_features_to_investigate].isnull().any()])
(df_train[categorical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
df_test.isnull().sum()
ordinal_var_dict = {
    'Street': ['Grvl', 'Pave'],
    'LotShape': ['IR3', 'IR2', 'IR1', 'Reg'],
    'LandContour': ['Low', 'HLS', 'Bnk', 'Lvl'],
    'LandSlope': ['Sev', 'Mod', 'Gtl'],
    'ExterQual': ['Fa', 'TA', 'Gd', 'Ex'],
    'CentralAir': ['N', 'Y'],
    'PavedDrive': ['N', 'P', 'Y']}
for var in ordinal_var_dict:
    ordered_var = pd.api.types.CategoricalDtype(categories=ordinal_var_dict[var], ordered=True)
    df_train[var] = df_train[var].astype(ordered_var)
    df_test[var] = df_test[var].astype(ordered_var)
X = df_train.drop(['SalePrice'], axis=1)
y = df_train['SalePrice']
for colname in X.select_dtypes(include=['object', 'category']):
    X[colname], _ = X[colname].factorize()
discrete_features = X.dtypes == int
for colname in df_test.select_dtypes(include=['object', 'category']):
    df_test[colname], _ = df_test[colname].factorize()
mi_scores = mutual_info_regression(X, y)
mi_scores = pd.Series(mi_scores, name='Mi scores', index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
mi_scores[:15]
code
74052566/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
code
74052566/cell_24
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
(df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop('GarageYrBlt', axis=1, inplace=True)
numerical_features.remove('GarageYrBlt')
categorical_features_to_investigate = [col for col in categorical_features if df_train[col].nunique() <= 6]
categorical_features_to_delete = [col for col in categorical_features if col not in categorical_features_to_investigate]
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop(categorical_features_to_delete, axis=1, inplace=True)
categorical_missing_columns = list(df_train[categorical_features_to_investigate].columns[df_train[categorical_features_to_investigate].isnull().any()])
(df_train[categorical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
categorical_features_to_investigate = [col for col in categorical_features_to_investigate if col not in categorical_missing_columns]
len(categorical_features)
code
74052566/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
(df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
garage_features = df_train[['YearBuilt', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageCars', 'GarageArea', 'GarageQual', 'GarageCond']]
garage_features[garage_features['GarageYrBlt'].isnull()].describe()
code
74052566/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
(df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop('GarageYrBlt', axis=1, inplace=True)
numerical_features.remove('GarageYrBlt')
categorical_features_to_investigate = [col for col in categorical_features if df_train[col].nunique() <= 6]
categorical_features_to_delete = [col for col in categorical_features if col not in categorical_features_to_investigate]
data_frames = [df_train, df_test]
for df in data_frames:
    df.drop(categorical_features_to_delete, axis=1, inplace=True)
categorical_missing_columns = list(df_train[categorical_features_to_investigate].columns[df_train[categorical_features_to_investigate].isnull().any()])
(df_train[categorical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)
list_of_suspicious = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
for col in list_of_suspicious:
    print(f'{col} \n{df_train[col].unique()}')
    print(f'{col} \n{df_test[col].unique()}')
    print('\n')
code
74052566/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
(df_train.shape, df_test.shape)
numerical_features = [cols for cols in df_train if df_train[cols].dtype in ['int', 'float']]
categorical_features = [cols for cols in df_train if df_train[cols].dtype == 'object']
numerical_missing_columns = list(df_train[numerical_features].columns[df_train[numerical_features].isna().any()])
(df_train[numerical_missing_columns].isnull().sum() / df_train.shape[0] * 100).sort_values(ascending=False)

def plot_count(feature):
    plt.hist(feature, color='g')

plt.figure(figsize=(16, 4))
for count, value in enumerate(numerical_missing_columns):
    plt.subplot(1, 3, count + 1)
    plt.title(f'{value} distribution')
    plot_count(df_train[value])
code
104130018/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
data['Purchase'].skew()
code
104130018/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
code
104130018/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
code
104130018/cell_25
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
data.groupby('Gender')['Purchase'].mean()
data['Gender'].value_counts(normalize=True) * 100
code
104130018/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
code
104130018/cell_34
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
data.groupby('Gender')['Purchase'].mean()
data.groupby('Marital_Status')['Purchase'].mean()
plt.figure(figsize=(12, 8))
sns.countplot(data['Occupation'])
plt.show()
code
104130018/cell_23
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
sns.countplot(data['Gender'])
plt.show()
code
104130018/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
plt.figure(figsize=(12, 8))
sns.boxplot(x='Age', y='Purchase', data=data, hue='Gender', palette='Set3')
plt.show()
code