path (string, 13–17 chars) | screenshot_names (sequence, 1–873 items) | code (string, 0–40.4k chars) | cell_type (1 class) |
---|---|---|---|
90150781/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.info() | code |
90150781/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.columns
df.airline.value_counts()
plt.figure(figsize=(10, 8))
plt1 = df.airline.value_counts().plot(kind='bar')
plt.title('Airline histogram', fontsize=20)
plt1.set(xlabel='airline', ylabel='Frequency of airline')
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.title('Source histogram')
plt1 = df['source_city'].value_counts().plot(kind='bar')
plt1.set(xlabel='Source city', ylabel='Frequency of source city')
plt.subplot(1, 2, 2)
plt.title('Destination histogram')
plt1 = df['destination_city'].value_counts().plot(kind='bar')
plt1.set(xlabel='Destination city', ylabel='Frequency of destination city')
plt.show()
df.departure_time.value_counts()
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.title('Departure time histogram')
plt1 = df.departure_time.value_counts().plot(kind='bar')
plt1.set(xlabel='Departure time', ylabel='Frequency of Departure time')
plt.subplot(1, 2, 2)
plt.title('Arrival time histogram')
plt1 = df.arrival_time.value_counts().plot(kind='bar')
plt1.set(xlabel='Arrival time', ylabel='Frequency of Arrival time')
plt.show() | code |
90150781/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df['class'].value_counts() | code |
90150781/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.columns
df.airline.value_counts()
plt.figure(figsize=(10, 8))
plt1 = df.airline.value_counts().plot(kind='bar')
plt.title('Airline histogram', fontsize=20)
plt1.set(xlabel='airline', ylabel='Frequency of airline') | code |
90150781/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.columns
df.airline.value_counts()
plt.figure(figsize=(10, 8))
plt1 = df.airline.value_counts().plot(kind='bar')
plt.title('Airline histogram', fontsize=20)
plt1.set(xlabel='airline', ylabel='Frequency of airline')
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.title('Source histogram')
plt1 = df['source_city'].value_counts().plot(kind='bar')
plt1.set(xlabel='Source city', ylabel='Frequency of source city')
plt.subplot(1, 2, 2)
plt.title('Destination histogram')
plt1 = df['destination_city'].value_counts().plot(kind='bar')
plt1.set(xlabel='Destination city', ylabel='Frequency of destination city')
plt.show() | code |
90150781/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.columns
df.airline.value_counts()
plt.figure(figsize=(10, 8))
plt1 = df.airline.value_counts().plot(kind='bar')
plt.title('Airline histogram', fontsize=20)
plt1.set(xlabel='airline', ylabel='Frequency of airline')
df.departure_time.value_counts() | code |
90150781/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.columns
df.airline.value_counts()
plt.figure(figsize=(10, 8))
plt1 = df.airline.value_counts().plot(kind='bar')
plt.title('Airline histogram', fontsize=20)
plt1.set(xlabel='airline', ylabel='Frequency of airline')
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.title('Source histogram')
plt1 = df['source_city'].value_counts().plot(kind='bar')
plt1.set(xlabel='Source city', ylabel='Frequency of source city')
plt.subplot(1, 2, 2)
plt.title('Destination histogram')
plt1 = df['destination_city'].value_counts().plot(kind='bar')
plt1.set(xlabel='Destination city', ylabel='Frequency of destination city')
plt.show()
df.departure_time.value_counts()
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.title('Departure time histogram')
plt1 = df.departure_time.value_counts().plot(kind='bar')
plt1.set(xlabel='Departure time', ylabel='Frequency of Departure time')
plt.subplot(1, 2, 2)
plt.title('Arrival time histogram')
plt1 = df.arrival_time.value_counts().plot(kind='bar')
plt1.set(xlabel='Arrival time', ylabel='Frequency of Arrival time')
plt.show()
df.stops.value_counts()
df.columns | code |
90150781/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.describe() | code |
90150781/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.columns | code |
90150781/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df1 = pd.read_csv('/kaggle/input/flight-price-prediction/business.csv')
df2 = pd.read_csv('/kaggle/input/flight-price-prediction/economy.csv')
print(df1.shape)
print(df2.shape) | code |
105201476/cell_11 | [
"text_plain_output_1.png"
] | from fastai.tabular.all import df_shrink
from time import sleep
import gc
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
files_static = [f'/kaggle/input/cccscicandmal2020/StaticAnalysis/{f}' for f in os.listdir('/kaggle/input/cccscicandmal2020/StaticAnalysis') if f.endswith('.csv') and (not f.endswith('Riskware.csv'))]
for f in files_static:
df_static = pd.read_csv(f, sep=',', encoding='utf-8')
df_static = df_shrink(df_static)
    # sort by count so nans.iloc[0] is the column with the most missing values
    nans = df_static.isna().sum().sort_values(ascending=False)
    if nans.iloc[0] > 0:
        df_static = df_static.dropna(axis=0)
label = f.split('/')[-1].split('.')[0]
if 'Ben' in label:
df_static['Label'] = 'Benign'
else:
df_static['Label'] = label
dupli = df_static.duplicated().sum()
df_static.drop_duplicates(inplace=True)
df_static.reset_index(inplace=True, drop=True)
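    # give every file identical column names (F0..F9503 + 'Label') so the parquet parts concatenate cleanly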
df_static.columns = [f'F{i}' for i in range(9504)] + ['Label']
df_static.to_parquet(f'static-{label}.parquet')
df_static.drop(df_static.index[:], inplace=True)
del df_static, nans, dupli
sleep(5)
gc.collect()
files_static = [f'/kaggle/working/{f}' for f in os.listdir('/kaggle/working') if f.endswith('.parquet')]
df_static = pd.concat(objs=[pd.read_parquet(f) for f in files_static], copy=False, ignore_index=True)
df_static.to_parquet('cicandmal2020-static.parquet')
df_static.shape | code |
105201476/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
if 'StaticAnalysis' in dirname:
print(os.path.join(dirname, filename)) | code |
105201476/cell_7 | [
"text_plain_output_1.png"
] | from fastai.tabular.all import df_shrink
from time import sleep
import gc
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
files_static = [f'/kaggle/input/cccscicandmal2020/StaticAnalysis/{f}' for f in os.listdir('/kaggle/input/cccscicandmal2020/StaticAnalysis') if f.endswith('.csv') and (not f.endswith('Riskware.csv'))]
for f in files_static:
print(f)
df_static = pd.read_csv(f, sep=',', encoding='utf-8')
df_static = df_shrink(df_static)
    # sort by count so nans.iloc[0] is the column with the most missing values
    nans = df_static.isna().sum().sort_values(ascending=False)
    if nans.iloc[0] > 0:
        print(f'Found N/A values in one or more columns of {f}, DROPPING')
        df_static = df_static.dropna(axis=0)
label = f.split('/')[-1].split('.')[0]
if 'Ben' in label:
df_static['Label'] = 'Benign'
else:
df_static['Label'] = label
dupli = df_static.duplicated().sum()
if dupli > 0:
print(f, dupli, 'fully duplicate rows to remove')
df_static.drop_duplicates(inplace=True)
df_static.reset_index(inplace=True, drop=True)
df_static.columns = [f'F{i}' for i in range(9504)] + ['Label']
print(df_static.Label.value_counts())
df_static.to_parquet(f'static-{label}.parquet')
df_static.drop(df_static.index[:], inplace=True)
del df_static, nans, dupli
sleep(5)
gc.collect() | code |
105201476/cell_5 | [
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | !ls -lath /kaggle/input/cccscicandmal2020/StaticAnalysis | code |
329772/cell_6 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def cleanResults(raceColumns, dfResultsTemp, appendScore):
    for raceCol in raceColumns:
        # normalize sailor names: split CamelCase, title-case, strip bracketed sail codes
        dfResultsTemp.index = dfResultsTemp.index.str.replace(r'(\w)([A-Z])', r'\1 \2', regex=True)
        dfResultsTemp.index = dfResultsTemp.index.str.title()
        dfResultsTemp.index = dfResultsTemp.index.str.replace(r'\([A-Z\ 0-9]*\)', '', regex=True)
        dfResultsTemp.index = dfResultsTemp.index.str.strip()
        dfResultsTemp.index = dfResultsTemp.index.str.replace('Riccardo Andrea Leccese', 'Rikki Leccese')
        # a bare DNF scores one worse than the size of the global dfResults table; other scoring codes are stripped
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].astype(str)
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace('^DNF$', str(len(dfResults) + 1), regex=True)
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace(r'\(|\)|UFD|DNF|RET|SCP|RDG|RCT|DCT|DNS-[0-9]*|DNC-[0-9]*|OCS-[0-9]*|[0-9\.]*DNC|-|\/', '', regex=True)
        dfResultsTemp[raceCol] = pd.to_numeric(dfResultsTemp[raceCol])
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol] + appendScore
    return dfResultsTemp
def mergeResults(raceColumns, raceName, dfResultsTemp, dfResults):
for raceCol in raceColumns:
raceIndex = raceName + '-' + raceCol
dfResultsTemp[raceIndex] = dfResultsTemp[raceCol]
del dfResultsTemp[raceCol]
dfResults = pd.merge(dfResults, dfResultsTemp[[raceIndex]], left_index=True, right_index=True, how='outer')
return dfResults
dfResults = pd.DataFrame()
raceName = '20160323-LaVentana-HydrofoilProTour'
raceColumns = ['Q2', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6']
dfResultsTempGold = pd.read_csv('../input/' + raceName + '-Gold.csv')
dfResultsTempGold = dfResultsTempGold.set_index(dfResultsTempGold['Name'] + ' ' + dfResultsTempGold['LastName'])
dfResultsTempGold = cleanResults(raceColumns, dfResultsTempGold, 0)
dfResultsTempSilver = pd.read_csv('../input/' + raceName + '-Silver.csv')
dfResultsTempSilver = dfResultsTempSilver.set_index(dfResultsTempSilver['Name'] + ' ' + dfResultsTempSilver['LastName'])
dfResultsTempSilver = cleanResults(raceColumns, dfResultsTempSilver, len(dfResultsTempGold))
dfResultsTemp = pd.concat([dfResultsTempGold, dfResultsTempSilver])  # DataFrame.append was removed in pandas 2.0
dfResults = mergeResults(raceColumns, raceName, dfResultsTemp, dfResults) | code |
329772/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def cleanResults(raceColumns, dfResultsTemp, appendScore):
    for raceCol in raceColumns:
        # normalize sailor names: split CamelCase, title-case, strip bracketed sail codes
        dfResultsTemp.index = dfResultsTemp.index.str.replace(r'(\w)([A-Z])', r'\1 \2', regex=True)
        dfResultsTemp.index = dfResultsTemp.index.str.title()
        dfResultsTemp.index = dfResultsTemp.index.str.replace(r'\([A-Z\ 0-9]*\)', '', regex=True)
        dfResultsTemp.index = dfResultsTemp.index.str.strip()
        dfResultsTemp.index = dfResultsTemp.index.str.replace('Riccardo Andrea Leccese', 'Rikki Leccese')
        # a bare DNF scores one worse than the size of the global dfResults table; other scoring codes are stripped
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].astype(str)
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace('^DNF$', str(len(dfResults) + 1), regex=True)
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace(r'\(|\)|UFD|DNF|RET|SCP|RDG|RCT|DCT|DNS-[0-9]*|DNC-[0-9]*|OCS-[0-9]*|[0-9\.]*DNC|-|\/', '', regex=True)
        dfResultsTemp[raceCol] = pd.to_numeric(dfResultsTemp[raceCol])
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol] + appendScore
    return dfResultsTemp
def mergeResults(raceColumns, raceName, dfResultsTemp, dfResults):
for raceCol in raceColumns:
raceIndex = raceName + '-' + raceCol
dfResultsTemp[raceIndex] = dfResultsTemp[raceCol]
del dfResultsTemp[raceCol]
dfResults = pd.merge(dfResults, dfResultsTemp[[raceIndex]], left_index=True, right_index=True, how='outer')
return dfResults
dfResults = pd.DataFrame()
raceName = '20160323-LaVentana-HydrofoilProTour'
raceColumns = ['Q2', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6']
dfResultsTempGold = pd.read_csv('../input/' + raceName + '-Gold.csv')
dfResultsTempGold = dfResultsTempGold.set_index(dfResultsTempGold['Name'] + ' ' + dfResultsTempGold['LastName'])
dfResultsTempGold = cleanResults(raceColumns, dfResultsTempGold, 0)
dfResultsTempSilver = pd.read_csv('../input/' + raceName + '-Silver.csv')
dfResultsTempSilver = dfResultsTempSilver.set_index(dfResultsTempSilver['Name'] + ' ' + dfResultsTempSilver['LastName'])
dfResultsTempSilver = cleanResults(raceColumns, dfResultsTempSilver, len(dfResultsTempGold))
dfResultsTemp = pd.concat([dfResultsTempGold, dfResultsTempSilver])
dfResults = mergeResults(raceColumns, raceName, dfResultsTemp, dfResults)
raceName = '20160717-Gizzeria-IKAGoldCup'
raceColumns = ['CF 2', 'F 1', 'F 2', 'F 3', 'F 4', 'F 5', 'F 6', 'F 7', 'F 8', 'F 9', 'F 10']
dfResultsTempGold = pd.read_csv('../input/' + raceName + '-Gold.csv')
dfResultsTempGold = dfResultsTempGold.set_index(dfResultsTempGold['Name'])
dfResultsTempGold = cleanResults(raceColumns, dfResultsTempGold, 0)
raceColumns = ['CF 2', 'F 1', 'F 2', 'F 3', 'F 4', 'F 5', 'F 6', 'F 8']
dfResultsTempSilver = pd.read_csv('../input/' + raceName + '-Silver.csv')
dfResultsTempSilver = dfResultsTempSilver.set_index(dfResultsTempSilver['Name'])
dfResultsTempSilver = cleanResults(raceColumns, dfResultsTempSilver, len(dfResultsTempGold))
raceColumns = ['CF 2', 'F 1', 'F 2', 'F 3', 'F 4', 'F 5', 'F 6']
dfResultsTemp = pd.concat([dfResultsTempGold, dfResultsTempSilver])
dfResultsTempBronze = pd.read_csv('../input/' + raceName + '-Bronze.csv', encoding='ISO-8859-1')
dfResultsTempBronze = dfResultsTempBronze.set_index(dfResultsTempBronze['Name'])
dfResultsTempBronze = cleanResults(raceColumns, dfResultsTempBronze, len(dfResultsTemp))
dfResultsTemp = pd.concat([dfResultsTemp, dfResultsTempBronze])
dfResults = mergeResults(raceColumns, raceName, dfResultsTemp, dfResults) | code |
329772/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def cleanResults(raceColumns, dfResultsTemp, appendScore):
    for raceCol in raceColumns:
        # normalize sailor names: split CamelCase, title-case, strip bracketed sail codes
        dfResultsTemp.index = dfResultsTemp.index.str.replace(r'(\w)([A-Z])', r'\1 \2', regex=True)
        dfResultsTemp.index = dfResultsTemp.index.str.title()
        dfResultsTemp.index = dfResultsTemp.index.str.replace(r'\([A-Z\ 0-9]*\)', '', regex=True)
        dfResultsTemp.index = dfResultsTemp.index.str.strip()
        dfResultsTemp.index = dfResultsTemp.index.str.replace('Riccardo Andrea Leccese', 'Rikki Leccese')
        # a bare DNF scores one worse than the size of the global dfResults table; other scoring codes are stripped
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].astype(str)
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace('^DNF$', str(len(dfResults) + 1), regex=True)
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace(r'\(|\)|UFD|DNF|RET|SCP|RDG|RCT|DCT|DNS-[0-9]*|DNC-[0-9]*|OCS-[0-9]*|[0-9\.]*DNC|-|\/', '', regex=True)
        dfResultsTemp[raceCol] = pd.to_numeric(dfResultsTemp[raceCol])
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol] + appendScore
    return dfResultsTemp
def mergeResults(raceColumns, raceName, dfResultsTemp, dfResults):
for raceCol in raceColumns:
raceIndex = raceName + '-' + raceCol
dfResultsTemp[raceIndex] = dfResultsTemp[raceCol]
del dfResultsTemp[raceCol]
dfResults = pd.merge(dfResults, dfResultsTemp[[raceIndex]], left_index=True, right_index=True, how='outer')
return dfResults
dfResults = pd.DataFrame()
raceName = '20160323-LaVentana-HydrofoilProTour'
raceColumns = ['Q2', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6']
dfResultsTempGold = pd.read_csv('../input/' + raceName + '-Gold.csv')
dfResultsTempGold = dfResultsTempGold.set_index(dfResultsTempGold['Name'] + ' ' + dfResultsTempGold['LastName'])
dfResultsTempGold = cleanResults(raceColumns, dfResultsTempGold, 0)
dfResultsTempSilver = pd.read_csv('../input/' + raceName + '-Silver.csv')
dfResultsTempSilver = dfResultsTempSilver.set_index(dfResultsTempSilver['Name'] + ' ' + dfResultsTempSilver['LastName'])
dfResultsTempSilver = cleanResults(raceColumns, dfResultsTempSilver, len(dfResultsTempGold))
dfResultsTemp = pd.concat([dfResultsTempGold, dfResultsTempSilver])
dfResults = mergeResults(raceColumns, raceName, dfResultsTemp, dfResults)
raceName = '20160717-Gizzeria-IKAGoldCup'
raceColumns = ['CF 2', 'F 1', 'F 2', 'F 3', 'F 4', 'F 5', 'F 6', 'F 7', 'F 8', 'F 9', 'F 10']
dfResultsTempGold = pd.read_csv('../input/' + raceName + '-Gold.csv')
dfResultsTempGold = dfResultsTempGold.set_index(dfResultsTempGold['Name'])
dfResultsTempGold = cleanResults(raceColumns, dfResultsTempGold, 0)
raceColumns = ['CF 2', 'F 1', 'F 2', 'F 3', 'F 4', 'F 5', 'F 6', 'F 8']
dfResultsTempSilver = pd.read_csv('../input/' + raceName + '-Silver.csv')
dfResultsTempSilver = dfResultsTempSilver.set_index(dfResultsTempSilver['Name'])
dfResultsTempSilver = cleanResults(raceColumns, dfResultsTempSilver, len(dfResultsTempGold))
raceColumns = ['CF 2', 'F 1', 'F 2', 'F 3', 'F 4', 'F 5', 'F 6']
dfResultsTemp = pd.concat([dfResultsTempGold, dfResultsTempSilver])
dfResultsTempBronze = pd.read_csv('../input/' + raceName + '-Bronze.csv', encoding='ISO-8859-1')
dfResultsTempBronze = dfResultsTempBronze.set_index(dfResultsTempBronze['Name'])
dfResultsTempBronze = cleanResults(raceColumns, dfResultsTempBronze, len(dfResultsTemp))
dfResultsTemp = pd.concat([dfResultsTemp, dfResultsTempBronze])
dfResults = mergeResults(raceColumns, raceName, dfResultsTemp, dfResults)
raceName = '20160807-SanFracisco-HydrofoilProTour'
dfResultsTemp = pd.read_csv('../input/' + raceName + '.csv')
dfResultsTemp = dfResultsTemp.set_index(dfResultsTemp['Name'])
raceColumns = ['R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R9', 'R10', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16']
dfResultsTemp = cleanResults(raceColumns, dfResultsTemp, 0)
dfResults = mergeResults(raceColumns, raceName, dfResultsTemp, dfResults) | code |
329772/cell_5 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def cleanResults(raceColumns, dfResultsTemp, appendScore):
    for raceCol in raceColumns:
        # normalize sailor names: split CamelCase, title-case, strip bracketed sail codes
        dfResultsTemp.index = dfResultsTemp.index.str.replace(r'(\w)([A-Z])', r'\1 \2', regex=True)
        dfResultsTemp.index = dfResultsTemp.index.str.title()
        dfResultsTemp.index = dfResultsTemp.index.str.replace(r'\([A-Z\ 0-9]*\)', '', regex=True)
        dfResultsTemp.index = dfResultsTemp.index.str.strip()
        dfResultsTemp.index = dfResultsTemp.index.str.replace('Riccardo Andrea Leccese', 'Rikki Leccese')
        # a bare DNF scores one worse than the size of the global dfResults table; other scoring codes are stripped
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].astype(str)
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace('^DNF$', str(len(dfResults) + 1), regex=True)
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace(r'\(|\)|UFD|DNF|RET|SCP|RDG|RCT|DCT|DNS-[0-9]*|DNC-[0-9]*|OCS-[0-9]*|[0-9\.]*DNC|-|\/', '', regex=True)
        dfResultsTemp[raceCol] = pd.to_numeric(dfResultsTemp[raceCol])
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol] + appendScore
    return dfResultsTemp
def mergeResults(raceColumns, raceName, dfResultsTemp, dfResults):
for raceCol in raceColumns:
raceIndex = raceName + '-' + raceCol
dfResultsTemp[raceIndex] = dfResultsTemp[raceCol]
del dfResultsTemp[raceCol]
dfResults = pd.merge(dfResults, dfResultsTemp[[raceIndex]], left_index=True, right_index=True, how='outer')
return dfResults
dfResults = pd.DataFrame() | code |
33098146/cell_4 | [
"text_html_output_1.png"
] | import os
import pandas as pd
base_path = '/kaggle'
if os.path.exists(base_path):
input_path = os.path.join(base_path, 'input', 'nlp-getting-started')
output_path = os.path.join(base_path, 'working')
else:
base_path = 'data'
input_path = base_path
output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.head() | code |
33098146/cell_1 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
# Upgrade packages to work with the new pandas version
!pip install --upgrade pandas-profiling
!pip install --upgrade hypertools
!pip install --upgrade pandas | code |
90109387/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
train_data.describe() | code |
90109387/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
train_data['target'].value_counts().plot(kind='bar', color='red') | code |
90109387/cell_34 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
corr_matrix = train_data.corr()
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
num_columns = [col for col in train_data.columns if train_data[col].dtype in numerics and col != 'target']
num_columns
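# anything whose dtype is not in the numeric list above is treated as a categorical column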
cat_columns = [col for col in train_data.columns if train_data[col].dtype not in numerics]
cat_columns
for i in range(train_data[num_columns].shape[1]):
plt.figure()
plt.hist(train_data[num_columns].iloc[:, i])
plt.xlabel(train_data[num_columns].columns[i])
plt.ylabel('frequency') | code |
90109387/cell_44 | [
"image_output_11.png",
"image_output_17.png",
"image_output_14.png",
"image_output_13.png",
"image_output_5.png",
"image_output_18.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_16.png",
"image_output_6.png",
"image_output_12.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_15.png",
"image_output_9.png",
"image_output_19.png"
] | from sklearn.decomposition import PCA
import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
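# cont1 and cont2 are highly correlated, so a single principal component should
# capture most of their joint variance; explained_variance_ratio_ (printed below) quantifies that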
pca = PCA(n_components=1)
cont1_2 = pca.fit_transform(all_data[['cont1', 'cont2']])
print(pca.explained_variance_ratio_)
cont1_2 | code |
90109387/cell_55 | [
"text_html_output_1.png"
] | from sklearn.decomposition import PCA
import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
pca = PCA(n_components=1)
cont1_2 = pca.fit_transform(all_data[['cont1', 'cont2']])
cont1_2
all_data['cont1_2'] = cont1_2
all_data.drop('cont1', axis=1, inplace=True)
all_data.drop('cont2', axis=1, inplace=True)
pca1 = PCA(n_components=1)
cont0_10 = pca1.fit_transform(all_data[['cont0', 'cont10']])
cont0_10
all_data['cont0_10'] = cont0_10
all_data.drop('cont10', axis=1, inplace=True)
all_data.drop('cont0', axis=1, inplace=True)
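# keep the engineered components with the remaining continuous features and leave 'target' as the last column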
all_data = all_data[['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9', 'cat10', 'cat11', 'cat12', 'cat13', 'cat14', 'cat15', 'cat16', 'cat17', 'cat18', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont1_2', 'cont0_10', 'target']]
train_data = all_data.iloc[0:300000, :]
test_data = all_data.iloc[300000:, :].drop(['target'], axis=1)
all_data.head() | code |
90109387/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
test_data | code |
90109387/cell_26 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
corr_matrix = train_data.corr()
plt.figure(figsize=(10, 10))
sns.heatmap(corr_matrix, xticklabels=corr_matrix.columns.values, yticklabels=corr_matrix.columns.values, annot=True) | code |
90109387/cell_41 | [
"image_output_11.png",
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
corr_matrix = train_data.corr()
corr_matrix[corr_matrix > 0.8][corr_matrix != 1].fillna('OK') | code |
90109387/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
train_data.shape | code |
90109387/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
len(train_data['cat10'].unique()) | code |
90109387/cell_50 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
corr_matrix = train_data.corr()
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
num_columns = [col for col in train_data.columns if train_data[col].dtype in numerics and col != 'target']
num_columns
cat_columns = [col for col in train_data.columns if train_data[col].dtype not in numerics]
cat_columns
# thanks to @ANDRESHG
num_rows, num_cols = (len(num_columns), 2)
f, axes = plt.subplots(nrows=num_rows, ncols=num_cols, figsize=(15, 15))
f.suptitle('Distribution of Features', fontsize=16)
for index, column in enumerate(num_columns):
    i, j = (index // num_cols, index % num_cols)
    # seaborn removed kdeplot's 'shade' argument; 'fill' is the current equivalent
    sns.kdeplot(train_data.loc[train_data['target'] == 0, column], color='r', fill=True, ax=axes[index, 0])
    sns.kdeplot(train_data.loc[train_data['target'] == 1, column], color='g', fill=True, ax=axes[index, 0])
    sns.histplot(data=train_data, x=column, hue='target', kde=False, palette='Paired_r', bins=10, ax=axes[index, 1], multiple='stack')
pca = PCA(n_components=1)
cont1_2 = pca.fit_transform(all_data[['cont1', 'cont2']])
cont1_2
all_data['cont1_2'] = cont1_2
all_data.drop('cont1', axis=1, inplace=True)
all_data.drop('cont2', axis=1, inplace=True)
pca1 = PCA(n_components=1)
cont0_10 = pca1.fit_transform(all_data[['cont0', 'cont10']])
cont0_10
all_data['cont0_10'] = cont0_10
all_data.drop('cont10', axis=1, inplace=True)
all_data.drop('cont0', axis=1, inplace=True)
all_data = all_data[['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9', 'cat10', 'cat11', 'cat12', 'cat13', 'cat14', 'cat15', 'cat16', 'cat17', 'cat18', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont1_2', 'cont0_10', 'target']]
train_data = all_data.iloc[0:300000, :]
test_data = all_data.iloc[300000:, :].drop(['target'], axis=1)
train_data.head() | code |
90109387/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data | code |
90109387/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
corr_matrix = train_data.corr()
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
num_columns = [col for col in train_data.columns if train_data[col].dtype in numerics and col != 'target']
num_columns
cat_columns = [col for col in train_data.columns if train_data[col].dtype not in numerics]
cat_columns | code |
90109387/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum() | code |
90109387/cell_38 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
corr_matrix = train_data.corr()
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
num_columns = [col for col in train_data.columns if train_data[col].dtype in numerics and col != 'target']
num_columns
cat_columns = [col for col in train_data.columns if train_data[col].dtype not in numerics]
cat_columns
num_rows, num_cols = (len(num_columns), 2)
f, axes = plt.subplots(nrows=num_rows, ncols=num_cols, figsize=(15, 15))
f.suptitle('Distribution of Features', fontsize=16)
for index, column in enumerate(num_columns):
    i, j = (index // num_cols, index % num_cols)
    sns.kdeplot(train_data.loc[train_data['target'] == 0, column], color='r', fill=True, ax=axes[index, 0])
    sns.kdeplot(train_data.loc[train_data['target'] == 1, column], color='g', fill=True, ax=axes[index, 0])
    sns.histplot(data=train_data, x=column, hue='target', kde=False, palette='Paired_r', bins=10, ax=axes[index, 1], multiple='stack') | code |
90109387/cell_3 | [
"text_plain_output_1.png"
] | import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt | code |
90109387/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
for i in range(19):  # the dataset has categorical columns cat0 through cat18
    print('category{}'.format(i), train_data['cat{}'.format(i)].unique(), '\n')
90109387/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
corr_matrix = train_data.corr()
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
num_columns = [col for col in train_data.columns if train_data[col].dtype in numerics and col != 'target']
num_columns | code |
90109387/cell_46 | [
"image_output_1.png"
] | from sklearn.decomposition import PCA
import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
pca = PCA(n_components=1)
cont1_2 = pca.fit_transform(all_data[['cont1', 'cont2']])
cont1_2
all_data['cont1_2'] = cont1_2
all_data.drop('cont1', axis=1, inplace=True)
all_data.drop('cont2', axis=1, inplace=True)
pca1 = PCA(n_components=1)
cont0_10 = pca1.fit_transform(all_data[['cont0', 'cont10']])
print(pca1.explained_variance_ratio_)
cont0_10 | code |
90109387/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.info() | code |
90109387/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
train_data['target'].value_counts() | code |
90109387/cell_53 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
corr_matrix = train_data.corr()
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
num_columns = [col for col in train_data.columns if train_data[col].dtype in numerics and col != 'target']
num_columns
cat_columns = [col for col in train_data.columns if train_data[col].dtype not in numerics]
cat_columns
# thanks to @ANDRESHG
num_rows, num_cols = (len(num_columns), 2)
f, axes = plt.subplots(nrows=num_rows, ncols=num_cols, figsize=(15, 15))
f.suptitle('Distribution of Features', fontsize=16)
for index, column in enumerate(num_columns):
    i, j = (index // num_cols, index % num_cols)
    sns.kdeplot(train_data.loc[train_data['target'] == 0, column], color='r', fill=True, ax=axes[index, 0])
    sns.kdeplot(train_data.loc[train_data['target'] == 1, column], color='g', fill=True, ax=axes[index, 0])
    sns.histplot(data=train_data, x=column, hue='target', kde=False, palette='Paired_r', bins=10, ax=axes[index, 1], multiple='stack')
pca = PCA(n_components=1)
cont1_2 = pca.fit_transform(all_data[['cont1', 'cont2']])
cont1_2
all_data['cont1_2'] = cont1_2
all_data.drop('cont1', axis=1, inplace=True)
all_data.drop('cont2', axis=1, inplace=True)
pca1 = PCA(n_components=1)
cont0_10 = pca1.fit_transform(all_data[['cont0', 'cont10']])
cont0_10
all_data['cont0_10'] = cont0_10
all_data.drop('cont10', axis=1, inplace=True)
all_data.drop('cont0', axis=1, inplace=True)
all_data = all_data[['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9', 'cat10', 'cat11', 'cat12', 'cat13', 'cat14', 'cat15', 'cat16', 'cat17', 'cat18', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont1_2', 'cont0_10', 'target']]
train_data = all_data.iloc[0:300000, :]
test_data = all_data.iloc[300000:, :].drop(['target'], axis=1)
corr_matrix_new = train_data.corr()
corr_matrix_new[corr_matrix_new > 0.8][corr_matrix_new != 1].fillna('OK') | code |
90109387/cell_27 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
corr_matrix = train_data.corr()
correlation_with_target = corr_matrix['target']
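# rank features by the absolute strength of their correlation with the target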
correlation_with_target.abs().sort_values(ascending=False) | code |
90109387/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
train_data.shape
train_data.head() | code |
90109387/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
train_data | code |
90109387/cell_36 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/tabular-playground-series-mar-2021/train.csv')
test_data = pd.read_csv('../input/tabular-playground-series-mar-2021/test.csv')
all_data = pd.concat([train_data, test_data])
all_data
train_data.shape
all_data.drop('id', axis=1, inplace=True)
train_data.drop('id', axis=1, inplace=True)
test_data.drop('id', axis=1, inplace=True)
train_data.isnull().sum()
corr_matrix = train_data.corr()
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
num_columns = [col for col in train_data.columns if train_data[col].dtype in numerics and col != 'target']
num_columns
cat_columns = [col for col in train_data.columns if train_data[col].dtype not in numerics]
cat_columns
for i in range(train_data[cat_columns].shape[1]):
plt.figure()
plt.hist(train_data[cat_columns].iloc[:, i])
plt.xlabel(train_data[cat_columns].columns[i])
plt.ylabel('frequency') | code |
1004405/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from nltk.corpus import stopwords
import nltk
import re
import re
import nltk
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
english_stemmer = nltk.stem.SnowballStemmer('english')
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
def description_to_wordlist(description, remove_stopwords=True):
    # keep letters only, lower-case, tokenize, optionally drop stopwords, then stem
    description_text = re.sub('[^a-zA-Z]', ' ', description)
    words = description_text.lower().split()
    if remove_stopwords:
        stops = set(stopwords.words('english'))
        words = [w for w in words if not w in stops]
    b = []
    stemmer = english_stemmer
    for word in words:
        b.append(stemmer.stem(word))
    return b
description_low = []
for description in X_train_low['description']:
    description_low.append(' '.join(description_to_wordlist(description)))
description_med = []
for description in X_train_med['description']:
    description_med.append(' '.join(description_to_wordlist(description)))
description_high = []
for description in X_train_high['description']:
    description_high.append(' '.join(description_to_wordlist(description))) | code |
1004405/cell_13 | [
"text_html_output_1.png"
] | from sklearn import preprocessing
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(df['building_id']) | code |
1004405/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
df.columns
num_feats = ['bathrooms', 'bedrooms', 'latitude', 'longitude', 'price', 'num_photos', 'num_features', 'num_description_words', 'created_year', 'created_month', 'created_day', 'building_id', 'price_per_bedroom', 'price_per_bathroom']
X = df[num_feats]
y = df['interest_level']
X.head() | code |
1004405/cell_30 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
clf = RandomForestClassifier(n_estimators=1500)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import BaggingClassifier
b1 = BaggingClassifier(n_estimators=2000)
b1.fit(X_train, y_train)
y_val_pred = b1.predict_proba(X_val)
log_loss(y_val, y_val_pred) | code |
1004405/cell_33 | [
"text_plain_output_1.png"
] | from sklearn import svm
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
from sklearn.neighbors import KNeighborsClassifier
clf = RandomForestClassifier(n_estimators=1500)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import BaggingClassifier
b1 = BaggingClassifier(n_estimators=2000)
b1.fit(X_train, y_train)
y_val_pred = b1.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
y_val_pred = gbc.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn import svm
clf = svm.SVC(probability=True)  # probability=True is required for predict_proba
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier(n_neighbors=3)
neigh.fit(X_train, y_train)
y_val_pred = neigh.predict_proba(X_val)
log_loss(y_val, y_val_pred) | code |
1004405/cell_6 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
print(df.shape) | code |
1004405/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
1004405/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
df['street_address'].value_counts().plot(kind='hist', bins=50) | code |
1004405/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
df.head() | code |
1004405/cell_32 | [
"text_plain_output_1.png"
] | from sklearn import svm
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
clf = RandomForestClassifier(n_estimators=1500)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import BaggingClassifier
b1 = BaggingClassifier(n_estimators=2000)
b1.fit(X_train, y_train)
y_val_pred = b1.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
y_val_pred = gbc.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn import svm
clf = svm.SVC(probability=True)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred) | code |
1004405/cell_28 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
clf = RandomForestClassifier(n_estimators=1500)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred) | code |
1004405/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
print(df.shape) | code |
1004405/cell_15 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(df['building_id'])
df['building_id'] = le.fit_transform(df['building_id'])
df['building_id'].head() | code |
1004405/cell_35 | [
"text_plain_output_1.png"
] | from sklearn import svm
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
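# engineer count features from the list/text columns and calendar parts from 'created'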
df['num_photos'] = df['photos'].apply(len)
df['num_features'] = df['features'].apply(len)
df['num_description_words'] = df['description'].apply(lambda x: len(x.split(' ')))
df['created'] = pd.to_datetime(df['created'])
df['created_year'] = df['created'].dt.year
df['created_month'] = df['created'].dt.month
df['created_day'] = df['created'].dt.day
# note: as written, these ratios are bedrooms (bathrooms) per unit price, the inverse of what the names suggest
df['price_per_bedroom'] = df['bedrooms'] / df['price']
df['price_per_bathroom'] = df['bathrooms'] / df['price']
df.columns
num_feats = ['bathrooms', 'bedrooms', 'latitude', 'longitude', 'price', 'num_photos', 'num_features', 'num_description_words', 'created_year', 'created_month', 'created_day', 'building_id', 'price_per_bedroom', 'price_per_bathroom']
X = df[num_feats]
y = df['interest_level']
clf = RandomForestClassifier(n_estimators=1500)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import BaggingClassifier
b1 = BaggingClassifier(n_estimators=2000)
b1.fit(X_train, y_train)
y_val_pred = b1.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
y_val_pred = gbc.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn import svm
clf = svm.SVC(probability=True)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
df = pd.read_json(open('../input/test.json', 'r'))
print(df.shape)
df['num_photos'] = df['photos'].apply(len)
df['num_features'] = df['features'].apply(len)
df['num_description_words'] = df['description'].apply(lambda x: len(x.split(' ')))
df['created'] = pd.to_datetime(df['created'])
df['created_year'] = df['created'].dt.year
df['created_month'] = df['created'].dt.month
df['created_day'] = df['created'].dt.day
X = df[num_feats]
y = clf.predict_proba(X) | code |
1004405/cell_31 | [
"text_html_output_1.png"
] | from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
clf = RandomForestClassifier(n_estimators=1500)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import BaggingClassifier
b1 = BaggingClassifier(n_estimators=2000)
b1.fit(X_train, y_train)
y_val_pred = b1.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
y_val_pred = gbc.predict_proba(X_val)
log_loss(y_val, y_val_pred) | code |
1004405/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
df.columns | code |
1004405/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
print(df['building_id'].value_counts().nlargest(50)) | code |
1004405/cell_37 | [
"text_plain_output_1.png"
] | from sklearn import svm
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
df['num_photos'] = df['photos'].apply(len)
df['num_features'] = df['features'].apply(len)
df['num_description_words'] = df['description'].apply(lambda x: len(x.split(' ')))
df['created'] = pd.to_datetime(df['created'])
df['created_year'] = df['created'].dt.year
df['created_month'] = df['created'].dt.month
df['created_day'] = df['created'].dt.day
df['price_per_bedroom'] = df['bedrooms'] / df['price']
df['price_per_bathroom'] = df['bathrooms'] / df['price']
df.columns
num_feats = ['bathrooms', 'bedrooms', 'latitude', 'longitude', 'price', 'num_photos', 'num_features', 'num_description_words', 'created_year', 'created_month', 'created_day', 'building_id', 'price_per_bedroom', 'price_per_bathroom']
X = df[num_feats]
y = df['interest_level']
clf = RandomForestClassifier(n_estimators=1500)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import BaggingClassifier
b1 = BaggingClassifier(n_estimators=2000)
b1.fit(X_train, y_train)
y_val_pred = b1.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
y_val_pred = gbc.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn import svm
clf = svm.SVC(probability=True)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
df = pd.read_json(open('../input/test.json', 'r'))
df['num_photos'] = df['photos'].apply(len)
df['num_features'] = df['features'].apply(len)
df['num_description_words'] = df['description'].apply(lambda x: len(x.split(' ')))
df['created'] = pd.to_datetime(df['created'])
df['created_year'] = df['created'].dt.year
df['created_month'] = df['created'].dt.month
df['created_day'] = df['created'].dt.day
X = df[num_feats]
y = clf.predict_proba(X)
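# map each interest level to its column index in predict_proba's output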
labels2idx = {label: i for i, label in enumerate(clf.classes_)}
labels2idx
sub = pd.DataFrame()
sub['listing_id'] = df['listing_id']
for label in ['high', 'medium', 'low']:
sub[label] = y[:, labels2idx[label]]
sub.to_csv('submission_rf.csv', index=False) | code |
1004405/cell_36 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn import svm
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_json(open('../input/train.json', 'r'))
df['num_photos'] = df['photos'].apply(len)
df['num_features'] = df['features'].apply(len)
df['num_description_words'] = df['description'].apply(lambda x: len(x.split(' ')))
df['created'] = pd.to_datetime(df['created'])
df['created_year'] = df['created'].dt.year
df['created_month'] = df['created'].dt.month
df['created_day'] = df['created'].dt.day
df['price_per_bedroom'] = df['bedrooms'] / df['price']
df['price_per_bathroom'] = df['bathrooms'] / df['price']
df.columns
num_feats = ['bathrooms', 'bedrooms', 'latitude', 'longitude', 'price', 'num_photos', 'num_features', 'num_description_words', 'created_year', 'created_month', 'created_day', 'building_id', 'price_per_bedroom', 'price_per_bathroom']
X = df[num_feats]
y = df['interest_level']
clf = RandomForestClassifier(n_estimators=1500)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import BaggingClassifier
b1 = BaggingClassifier(n_estimators=2000)
b1.fit(X_train, y_train)
y_val_pred = b1.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
y_val_pred = gbc.predict_proba(X_val)
log_loss(y_val, y_val_pred)
from sklearn import svm
clf = svm.SVC(probability=True)
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
df = pd.read_json(open('../input/test.json', 'r'))
df['num_photos'] = df['photos'].apply(len)
df['num_features'] = df['features'].apply(len)
df['num_description_words'] = df['description'].apply(lambda x: len(x.split(' ')))
df['created'] = pd.to_datetime(df['created'])
df['created_year'] = df['created'].dt.year
df['created_month'] = df['created'].dt.month
df['created_day'] = df['created'].dt.day
X = df[num_feats]
y = clf.predict_proba(X)
labels2idx = {label: i for i, label in enumerate(clf.classes_)}
labels2idx | code |
128027861/cell_42 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size': 14})
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
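# one-hot encode the categorical columns; handle_unknown='ignore' makes transform
# silently encode unseen categories as all zeros instead of raising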
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scale only the feature columns; the binary label must stay 0/1 for the classifiers and metrics below
features = df_final.drop('label', axis=1)
scaled_df = pd.DataFrame(scaler.fit_transform(features), columns=features.columns)
scaled_df['label'] = df_final['label'].values
corr = scaled_df.corr()
from sklearn.model_selection import train_test_split
X = scaled_df.iloc[:, :-1]
y = scaled_df['label']
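# hold out 20% of the rows for testing; random_state=42 makes the shuffle reproducible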
X_train, X_test, y_train, y_test = train_test_split(X.to_numpy(), y.to_numpy(), test_size=0.2, random_state=42)
"""
Helper functions that fit a model and predict whether a customer will complete
the booking, plus metric wrappers to evaluate the predictions.
"""
def model_fit_predict(model, X, y, X_predict):
model.fit(X, y)
return model.predict(X_predict)
def acc_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def pre_score(y_true, y_pred):
return precision_score(y_true, y_pred)
def f_score(y_true, y_pred):
return f1_score(y_true, y_pred)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
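# depth capped at 50 and min_samples_split=5 lightly regularize each tree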
y_pred_train = model_fit_predict(clf_rf, X_train, y_train, X_train)
set(y_pred_train)
f1 = round(f1_score(y_train, y_pred_train), 2)
acc = round(accuracy_score(y_train, y_pred_train), 2)
pre = round(precision_score(y_train, y_pred_train), 2)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_test = model_fit_predict(clf_rf, X_train, y_train, X_test)
f1 = round(f1_score(y_test, y_pred_test), 2)
acc = round(accuracy_score(y_test, y_pred_test), 2)
pre = round(precision_score(y_test, y_pred_test), 2)
sorted_idx = clf_rf.feature_importances_.argsort()
plt.barh(scaled_df.iloc[:, :-1].columns[sorted_idx], clf_rf.feature_importances_[sorted_idx])
scaled_df.label.value_counts()
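# the labels are imbalanced; downsample the majority class (label 0) to 8000 rows,
# concatenate with the positives, then shuffle the combined frame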
scaled_df_0 = scaled_df[scaled_df.label == 0].sample(n=8000)
scaled_df_new = pd.concat([scaled_df[scaled_df.label == 1], scaled_df_0], ignore_index=True)
scaled_df_new = scaled_df_new.sample(frac=1).reset_index(drop=True)
scaled_df_new | code |
128027861/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
df_final | code |
128027861/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from yellowbrick.classifier import ConfusionMatrix
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size': 14})
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scale only the feature columns; the binary label must stay 0/1 for the classifiers and metrics below
features = df_final.drop('label', axis=1)
scaled_df = pd.DataFrame(scaler.fit_transform(features), columns=features.columns)
scaled_df['label'] = df_final['label'].values
corr = scaled_df.corr()
from sklearn.model_selection import train_test_split
X = scaled_df.iloc[:, :-1]
y = scaled_df['label']
X_train, X_test, y_train, y_test = train_test_split(X.to_numpy(), y.to_numpy(), test_size=0.2, random_state=42)
"""
Helper functions that fit a model and predict whether a customer will complete
the booking, plus metric wrappers to evaluate the predictions.
"""
def model_fit_predict(model, X, y, X_predict):
model.fit(X, y)
return model.predict(X_predict)
def acc_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def pre_score(y_true, y_pred):
return precision_score(y_true, y_pred)
def f_score(y_true, y_pred):
return f1_score(y_true, y_pred)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_train = model_fit_predict(clf_rf, X_train, y_train, X_train)
set(y_pred_train)
f1 = round(f1_score(y_train, y_pred_train), 2)
acc = round(accuracy_score(y_train, y_pred_train), 2)
pre = round(precision_score(y_train, y_pred_train), 2)
cm = ConfusionMatrix(clf_rf, classes=[0, 1])
cm.fit(X_train, y_train)
cm.score(X_train, y_train)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_test = model_fit_predict(clf_rf, X_train, y_train, X_test)
f1 = round(f1_score(y_test, y_pred_test), 2)
acc = round(accuracy_score(y_test, y_pred_test), 2)
pre = round(precision_score(y_test, y_pred_test), 2)
cm = ConfusionMatrix(clf_rf, classes=[0, 1])
cm.fit(X_train, y_train)
cm.score(X_test, y_test) | code |
128027861/cell_33 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size': 14})
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scale only the feature columns; the binary label must stay 0/1 for the classifiers and metrics below
features = df_final.drop('label', axis=1)
scaled_df = pd.DataFrame(scaler.fit_transform(features), columns=features.columns)
scaled_df['label'] = df_final['label'].values
corr = scaled_df.corr()
from sklearn.model_selection import train_test_split
X = scaled_df.iloc[:, :-1]
y = scaled_df['label']
X_train, X_test, y_train, y_test = train_test_split(X.to_numpy(), y.to_numpy(), test_size=0.2, random_state=42)
"""
Helper functions that fit a model and predict whether a customer will complete
the booking, plus metric wrappers to evaluate the predictions.
"""
def model_fit_predict(model, X, y, X_predict):
model.fit(X, y)
return model.predict(X_predict)
def acc_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def pre_score(y_true, y_pred):
return precision_score(y_true, y_pred)
def f_score(y_true, y_pred):
return f1_score(y_true, y_pred)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_train = model_fit_predict(clf_rf, X_train, y_train, X_train)
set(y_pred_train)
f1 = round(f1_score(y_train, y_pred_train), 2)
acc = round(accuracy_score(y_train, y_pred_train), 2)
pre = round(precision_score(y_train, y_pred_train), 2)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_test = model_fit_predict(clf_rf, X_train, y_train, X_test)
f1 = round(f1_score(y_test, y_pred_test), 2)
acc = round(accuracy_score(y_test, y_pred_test), 2)
pre = round(precision_score(y_test, y_pred_test), 2)
print(f'Accuracy, precision and f1-score for test data are {acc}, {pre} and {f1} respectively')
128027861/cell_20 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size': 14})
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scale only the feature columns; the binary label must stay 0/1 for the classifiers and metrics below
features = df_final.drop('label', axis=1)
scaled_df = pd.DataFrame(scaler.fit_transform(features), columns=features.columns)
scaled_df['label'] = df_final['label'].values
corr = scaled_df.corr()
plt.figure(figsize=(10, 7))
sns.heatmap(corr) | code |
128027861/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size': 14})
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scale only the feature columns; the binary label must stay 0/1 for the classifiers and metrics below
features = df_final.drop('label', axis=1)
scaled_df = pd.DataFrame(scaler.fit_transform(features), columns=features.columns)
scaled_df['label'] = df_final['label'].values
corr = scaled_df.corr()
from sklearn.model_selection import train_test_split
X = scaled_df.iloc[:, :-1]
y = scaled_df['label']
X_train, X_test, y_train, y_test = train_test_split(X.to_numpy(), y.to_numpy(), test_size=0.2, random_state=42)
"""
Helper functions that fit a model and predict whether a customer will complete
the booking, plus metric wrappers to evaluate the predictions.
"""
def model_fit_predict(model, X, y, X_predict):
model.fit(X, y)
return model.predict(X_predict)
def acc_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def pre_score(y_true, y_pred):
return precision_score(y_true, y_pred)
def f_score(y_true, y_pred):
return f1_score(y_true, y_pred)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_train = model_fit_predict(clf_rf, X_train, y_train, X_train)
set(y_pred_train)
f1 = round(f1_score(y_train, y_pred_train), 2)
acc = round(accuracy_score(y_train, y_pred_train), 2)
pre = round(precision_score(y_train, y_pred_train), 2)
print(f'Accuracy, precision and f1-score for training data are {acc}, {pre} and {f1} respectively') | code |
128027861/cell_2 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size': 14}) | code |
128027861/cell_45 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size': 14})
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scale only the feature columns; the binary label must stay 0/1 for the classifiers and metrics below
features = df_final.drop('label', axis=1)
scaled_df = pd.DataFrame(scaler.fit_transform(features), columns=features.columns)
scaled_df['label'] = df_final['label'].values
corr = scaled_df.corr()
from sklearn.model_selection import train_test_split
X = scaled_df.iloc[:, :-1]
y = scaled_df['label']
X_train, X_test, y_train, y_test = train_test_split(X.to_numpy(), y.to_numpy(), test_size=0.2, random_state=42)
"""
Helper functions that fit a model and predict whether a customer will complete
the booking, plus metric wrappers to evaluate the predictions.
"""
def model_fit_predict(model, X, y, X_predict):
model.fit(X, y)
return model.predict(X_predict)
def acc_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def pre_score(y_true, y_pred):
return precision_score(y_true, y_pred)
def f_score(y_true, y_pred):
return f1_score(y_true, y_pred)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_train = model_fit_predict(clf_rf, X_train, y_train, X_train)
set(y_pred_train)
f1 = round(f1_score(y_train, y_pred_train), 2)
acc = round(accuracy_score(y_train, y_pred_train), 2)
pre = round(precision_score(y_train, y_pred_train), 2)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_test = model_fit_predict(clf_rf, X_train, y_train, X_test)
f1 = round(f1_score(y_test, y_pred_test), 2)
acc = round(accuracy_score(y_test, y_pred_test), 2)
pre = round(precision_score(y_test, y_pred_test), 2)
sorted_idx = clf_rf.feature_importances_.argsort()
plt.barh(scaled_df.iloc[:, :-1].columns[sorted_idx], clf_rf.feature_importances_[sorted_idx])
clf_rf = RandomForestClassifier(n_estimators=50, max_depth=50, min_samples_split=5, random_state=0)
y_pred_test = model_fit_predict(clf_rf, X_train, y_train, X_test)
f1 = round(f1_score(y_test, y_pred_test), 2)
acc = round(accuracy_score(y_test, y_pred_test), 2)
pre = round(precision_score(y_test, y_pred_test), 2)
recall = round(recall_score(y_test, y_pred_test), 2)
specificity = round(recall_score(y_test, y_pred_test, pos_label=0), 2)
print(f'Accuracy, precision, recall, specificity and f1-score for test data are {acc}, {pre}, {recall}, {specificity} and {f1} respectively')
128027861/cell_18 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scale only the feature columns; the binary label must stay 0/1 for the classifiers and metrics below
features = df_final.drop('label', axis=1)
scaled_df = pd.DataFrame(scaler.fit_transform(features), columns=features.columns)
scaled_df['label'] = df_final['label'].values
scaled_df | code |
128027861/cell_38 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size': 14})
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scale only the feature columns; the binary label must stay 0/1 for the classifiers and metrics below
features = df_final.drop('label', axis=1)
scaled_df = pd.DataFrame(scaler.fit_transform(features), columns=features.columns)
scaled_df['label'] = df_final['label'].values
corr = scaled_df.corr()
from sklearn.model_selection import train_test_split
X = scaled_df.iloc[:, :-1]
y = scaled_df['label']
X_train, X_test, y_train, y_test = train_test_split(X.to_numpy(), y.to_numpy(), test_size=0.2, random_state=42)
"""
Helper functions that fit a model and predict whether a customer will complete
the booking, plus metric wrappers to evaluate the predictions.
"""
def model_fit_predict(model, X, y, X_predict):
model.fit(X, y)
return model.predict(X_predict)
def acc_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def pre_score(y_true, y_pred):
return precision_score(y_true, y_pred)
def f_score(y_true, y_pred):
return f1_score(y_true, y_pred)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_train = model_fit_predict(clf_rf, X_train, y_train, X_train)
set(y_pred_train)
f1 = round(f1_score(y_train, y_pred_train), 2)
acc = round(accuracy_score(y_train, y_pred_train), 2)
pre = round(precision_score(y_train, y_pred_train), 2)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_test = model_fit_predict(clf_rf, X_train, y_train, X_test)
f1 = round(f1_score(y_test, y_pred_test), 2)
acc = round(accuracy_score(y_test, y_pred_test), 2)
pre = round(precision_score(y_test, y_pred_test), 2)
sorted_idx = clf_rf.feature_importances_.argsort()
plt.barh(scaled_df.iloc[:, :-1].columns[sorted_idx], clf_rf.feature_importances_[sorted_idx])
scaled_df.label.value_counts() | code |
128027861/cell_35 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size': 14})
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scale only the feature columns; the binary label must stay 0/1 for the classifiers and metrics below
features = df_final.drop('label', axis=1)
scaled_df = pd.DataFrame(scaler.fit_transform(features), columns=features.columns)
scaled_df['label'] = df_final['label'].values
corr = scaled_df.corr()
from sklearn.model_selection import train_test_split
X = scaled_df.iloc[:, :-1]
y = scaled_df['label']
X_train, X_test, y_train, y_test = train_test_split(X.to_numpy(), y.to_numpy(), test_size=0.2, random_state=42)
"""
Helper functions that fit a model and predict whether a customer will complete
the booking, plus metric wrappers to evaluate the predictions.
"""
def model_fit_predict(model, X, y, X_predict):
model.fit(X, y)
return model.predict(X_predict)
def acc_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def pre_score(y_true, y_pred):
return precision_score(y_true, y_pred)
def f_score(y_true, y_pred):
return f1_score(y_true, y_pred)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_train = model_fit_predict(clf_rf, X_train, y_train, X_train)
set(y_pred_train)
f1 = round(f1_score(y_train, y_pred_train), 2)
acc = round(accuracy_score(y_train, y_pred_train), 2)
pre = round(precision_score(y_train, y_pred_train), 2)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_test = model_fit_predict(clf_rf, X_train, y_train, X_test)
f1 = round(f1_score(y_test, y_pred_test), 2)
acc = round(accuracy_score(y_test, y_pred_test), 2)
pre = round(precision_score(y_test, y_pred_test), 2)
plt.figure(figsize=(10, 5))
sorted_idx = clf_rf.feature_importances_.argsort()
plt.barh(scaled_df.iloc[:, :-1].columns[sorted_idx], clf_rf.feature_importances_[sorted_idx])
plt.xlabel('Random Forest Feature Importance') | code |
128027861/cell_31 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from yellowbrick.classifier import ConfusionMatrix
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
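# note: X_train / y_train are not defined in this cell; they come from the
# train/test split shown in the other cells of this notebook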
cm = ConfusionMatrix(clf_rf, classes=[0, 1])
cm.fit(X_train, y_train)
cm.score(X_train, y_train) | code |
128027861/cell_46 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from yellowbrick.classifier import ConfusionMatrix
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size': 14})
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df_final = df
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore')
encoder_df = pd.DataFrame(encoder.fit_transform(df[['sales_channel']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'Internet', 1: 'Mobile'})
df_final = df_final.join(encoder_df)
encoder_df = pd.DataFrame(encoder.fit_transform(df[['trip_type']]).toarray())
encoder_df = encoder_df.rename(columns={0: 'RoundTRip', 1: 'OneWayTrip', 2: 'CircleTrip'})
df_final = df_final.join(encoder_df)
df_final.drop(['sales_channel', 'trip_type', 'booking_origin', 'route'], axis=1, inplace=True)
df_final = df_final.drop('booking_complete', axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scale only the feature columns; the binary label must stay 0/1 for the classifiers and metrics below
features = df_final.drop('label', axis=1)
scaled_df = pd.DataFrame(scaler.fit_transform(features), columns=features.columns)
scaled_df['label'] = df_final['label'].values
corr = scaled_df.corr()
from sklearn.model_selection import train_test_split
X = scaled_df.iloc[:, :-1]
y = scaled_df['label']
X_train, X_test, y_train, y_test = train_test_split(X.to_numpy(), y.to_numpy(), test_size=0.2, random_state=42)
"""
Helper functions that fit a model and predict whether a customer will complete
the booking, plus metric wrappers to evaluate the predictions.
"""
def model_fit_predict(model, X, y, X_predict):
model.fit(X, y)
return model.predict(X_predict)
def acc_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def pre_score(y_true, y_pred):
return precision_score(y_true, y_pred)
def f_score(y_true, y_pred):
return f1_score(y_true, y_pred)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_train = model_fit_predict(clf_rf, X_train, y_train, X_train)
set(y_pred_train)
f1 = round(f1_score(y_train, y_pred_train), 2)
acc = round(accuracy_score(y_train, y_pred_train), 2)
pre = round(precision_score(y_train, y_pred_train), 2)
cm = ConfusionMatrix(clf_rf, classes=[0, 1])
cm.fit(X_train, y_train)
cm.score(X_train, y_train)
clf_rf = RandomForestClassifier(max_depth=50, min_samples_split=5, random_state=0)
y_pred_test = model_fit_predict(clf_rf, X_train, y_train, X_test)
f1 = round(f1_score(y_test, y_pred_test), 2)
acc = round(accuracy_score(y_test, y_pred_test), 2)
pre = round(precision_score(y_test, y_pred_test), 2)
cm = ConfusionMatrix(clf_rf, classes=[0, 1])
cm.fit(X_train, y_train)
cm.score(X_test, y_test)
sorted_idx = clf_rf.feature_importances_.argsort()
plt.barh(scaled_df.iloc[:, :-1].columns[sorted_idx], clf_rf.feature_importances_[sorted_idx])
clf_rf = RandomForestClassifier(n_estimators=50, max_depth=50, min_samples_split=5, random_state=0)
cm = ConfusionMatrix(clf_rf, classes=[0, 1])
cm.fit(X_train, y_train)
cm.score(X_test, y_test) | code |
128027861/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/airways-customer-data/filtered_customer_booking.csv', index_col=0)
df = df.reset_index(drop=True)
df | code |
17109112/cell_21 | [
"text_plain_output_1.png"
] | from torchvision import models, transforms, datasets
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
import torchvision
torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
data_dir = '../input/dogscats/dogscats/dogscats/'
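# per-channel mean/std from ImageNet; the pretrained VGG expects inputs normalized with these statistics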
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
dset_classes = dsets['valid'].classes
dset_classes
load_train = torch.utils.data.DataLoader(dsets['train'], batch_size=64, shuffle=True, num_workers=6)
load_test = torch.utils.data.DataLoader(dsets['valid'], batch_size=5, shuffle=True, num_workers=6)
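# inputs_try / labels_try are a sample batch captured from load_test in another cell of this notebook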
inputs_try.shape
def imshow(inp, title=None):
    # CHW tensor -> HWC image, then undo the ImageNet normalization
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = np.clip(std * inp + mean, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)
out = torchvision.utils.make_grid(inputs_try)
inputs, classes = next(iter(load_train))
n_images = 8
out = torchvision.utils.make_grid(inputs[0:n_images])
inputs, classes = next(iter(load_test))
n_images = 8
out = torchvision.utils.make_grid(inputs[0:n_images])
imshow(out, title=[dset_classes[x] for x in classes[0:n_images]]) | code |
17109112/cell_4 | [
"image_output_1.png"
] | import sys
import sys
sys.version | code |
17109112/cell_34 | [
"text_plain_output_1.png"
] | from torchvision import models, transforms, datasets
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
import torchvision
torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
data_dir = '../input/dogscats/dogscats/dogscats/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
dset_classes = dsets['valid'].classes
dset_classes
load_train = torch.utils.data.DataLoader(dsets['train'], batch_size=64, shuffle=True, num_workers=6)
load_test = torch.utils.data.DataLoader(dsets['valid'], batch_size=5, shuffle=True, num_workers=6)
inputs_try.shape
def imshow(inp, title=None):
    # CHW tensor -> HWC image, then undo the ImageNet normalization
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = np.clip(std * inp + mean, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)
out = torchvision.utils.make_grid(inputs_try)
inputs, classes = next(iter(load_train))
n_images = 8
out = torchvision.utils.make_grid(inputs[0:n_images])
inputs, classes = next(iter(load_test))
n_images = 8
out = torchvision.utils.make_grid(inputs[0:n_images])
model_vgg = models.vgg16(pretrained=True)
inputs_try, labels_try = (inputs_try.to(device), labels_try.to(device))
model_vgg = model_vgg.to(device)
out = torchvision.utils.make_grid(inputs_try.data.cpu())
imshow(out, title=[dset_classes[x] for x in labels_try.data.cpu()]) | code |
17109112/cell_23 | [
"text_plain_output_1.png"
] | from torchvision import models, transforms, datasets
model_vgg = models.vgg16(pretrained=True) | code |
17109112/cell_33 | [
"text_plain_output_1.png"
] | import json
import json
fpath = '../input/imagenet-class-index/imagenet_class_index.json'
with open(fpath) as f:
class_dict = json.load(f)
dic_imagenet = [class_dict[str(i)][1] for i in range(len(class_dict))]
print([dic_imagenet[i] for i in preds_try.data]) | code |
17109112/cell_20 | [
"text_plain_output_1.png"
] | from torchvision import models, transforms, datasets
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
import torchvision
torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
data_dir = '../input/dogscats/dogscats/dogscats/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
dset_classes = dsets['valid'].classes
dset_classes
load_train = torch.utils.data.DataLoader(dsets['train'], batch_size=64, shuffle=True, num_workers=6)
inputs_try.shape
def imshow(inp, title=None):
    # CHW tensor -> HWC image, then undo the ImageNet normalization
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = np.clip(std * inp + mean, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)
out = torchvision.utils.make_grid(inputs_try)
inputs, classes = next(iter(load_train))
n_images = 8
out = torchvision.utils.make_grid(inputs[0:n_images])
imshow(out, title=[dset_classes[x] for x in classes[0:n_images]]) | code |
17109112/cell_29 | [
"image_output_1.png"
] | from torchvision import models, transforms, datasets
import torch
torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
inputs_try.shape
model_vgg = models.vgg16(pretrained=True)
inputs_try, labels_try = (inputs_try.to(device), labels_try.to(device))
model_vgg = model_vgg.to(device)
outputs_try = model_vgg(inputs_try)
outputs_try.shape | code |
17109112/cell_39 | [
"text_plain_output_1.png"
] | from torchvision import models, transforms, datasets
import os
import torch
import torch.nn as nn
torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
data_dir = '../input/dogscats/dogscats/dogscats/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
load_train = torch.utils.data.DataLoader(dsets['train'], batch_size=64, shuffle=True, num_workers=6)
load_test = torch.utils.data.DataLoader(dsets['valid'], batch_size=5, shuffle=True, num_workers=6)
inputs_try.shape
model_vgg = models.vgg16(pretrained=True)
inputs_try, labels_try = (inputs_try.to(device), labels_try.to(device))
model_vgg = model_vgg.to(device)
outputs_try = model_vgg(inputs_try)
outputs_try.shape
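# squash the 1000 raw ImageNet logits into probabilities; torch.sum(probs, 1)
# below is a sanity check that each row sums to 1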
m_softm = nn.Softmax(dim=1)
probs = m_softm(outputs_try)
vals_try, preds_try = torch.max(probs, dim=1)
torch.sum(probs, 1)
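# transfer learning: freeze every pretrained weight, then swap in a fresh
# 2-class head; the LogSoftmax output pairs with a negative log-likelihood loss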
for param in model_vgg.parameters():
param.requires_grad = False
model_vgg.classifier._modules['6'] = nn.Linear(4096, 2)
model_vgg.classifier._modules['7'] = torch.nn.LogSoftmax(dim=1)
print(model_vgg.classifier) | code |
17109112/cell_48 | [
"text_plain_output_1.png"
] | predictions, all_proba, all_classes = test_model(model_vgg, load_test, size=dset_sizes['valid']) | code |
17109112/cell_11 | [
"text_plain_output_1.png"
] | from torchvision import models, transforms, datasets
import os
data_dir = '../input/dogscats/dogscats/dogscats/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
dset_sizes = {x: len(dsets[x]) for x in ['train', 'valid']}
dset_sizes | code |
17109112/cell_19 | [
"text_plain_output_1.png"
] | from torchvision import models, transforms, datasets
import matplotlib.pyplot as plt
import numpy as np
import os
import torchvision
data_dir = '../input/dogscats/dogscats/dogscats/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
dset_classes = dsets['valid'].classes
dset_classes
inputs_try.shape
def imshow(inp, title=None):
    # CHW tensor -> HWC image, then undo the ImageNet normalization
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = np.clip(std * inp + mean, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)
out = torchvision.utils.make_grid(inputs_try)
imshow(out, title=[dset_classes[x] for x in labels_try]) | code |
17109112/cell_7 | [
"text_plain_output_1.png"
] | import os
data_dir = '../input/dogscats/dogscats/dogscats/'
print(os.listdir('../input/dogscats/dogscats/dogscats/')) | code |
17109112/cell_49 | [
"text_plain_output_1.png"
] | from torchvision import models, transforms, datasets
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
import torch.nn as nn
import torchvision
torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
data_dir = '../input/dogscats/dogscats/dogscats/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
dset_classes = dsets['valid'].classes
dset_classes
load_train = torch.utils.data.DataLoader(dsets['train'], batch_size=64, shuffle=True, num_workers=6)
load_test = torch.utils.data.DataLoader(dsets['valid'], batch_size=5, shuffle=True, num_workers=6)
inputs_try.shape
def imshow(inp, title=None):
    # CHW tensor -> HWC image, then undo the ImageNet normalization
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = np.clip(std * inp + mean, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)
out = torchvision.utils.make_grid(inputs_try)
inputs, classes = next(iter(load_train))
n_images = 8
out = torchvision.utils.make_grid(inputs[0:n_images])
inputs, classes = next(iter(load_test))
n_images = 8
out = torchvision.utils.make_grid(inputs[0:n_images])
model_vgg = models.vgg16(pretrained=True)
inputs_try, labels_try = (inputs_try.to(device), labels_try.to(device))
model_vgg = model_vgg.to(device)
outputs_try = model_vgg(inputs_try)
outputs_try.shape
m_softm = nn.Softmax(dim=1)
probs = m_softm(outputs_try)
vals_try, preds_try = torch.max(probs, dim=1)
torch.sum(probs, 1)
out = torchvision.utils.make_grid(inputs_try.data.cpu())
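# transfer learning: freeze every pretrained weight, then swap in a fresh
# 2-class head; LogSoftmax pairs with the NLLLoss criterion defined below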
for param in model_vgg.parameters():
param.requires_grad = False
model_vgg.classifier._modules['6'] = nn.Linear(4096, 2)
model_vgg.classifier._modules['7'] = torch.nn.LogSoftmax(dim=1)
model_vgg = model_vgg.to(device)
criterion = nn.NLLLoss()
lr = 0.001
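# pass only the new final layer's parameters to SGD; the frozen backbone stays untouched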
optimizer_vgg = torch.optim.SGD(model_vgg.classifier[6].parameters(), lr=lr)
def train_model(model, dataloader, size, epochs=1, optimizer=None):
model.train()
for epoch in range(epochs):
running_loss = 0.0
running_corrects = 0
for inputs, classes in dataloader:
inputs = inputs.to(device)
classes = classes.to(device)
outputs = model(inputs)
loss = criterion(outputs, classes)
optimizer.zero_grad()
loss.backward()
optimizer.step()
_, preds = torch.max(outputs.data, 1)
running_loss += loss.data.item()
running_corrects += torch.sum(preds == classes.data)
        epoch_loss = running_loss / size
        epoch_acc = running_corrects.data.item() / size
        print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
def test_model(model, dataloader, size):
model.eval()
predictions = np.zeros(size)
all_classes = np.zeros(size)
all_proba = np.zeros((size, 2))
i = 0
running_loss = 0.0
running_corrects = 0
for inputs, classes in dataloader:
inputs = inputs.to(device)
classes = classes.to(device)
outputs = model(inputs)
loss = criterion(outputs, classes)
_, preds = torch.max(outputs.data, 1)
running_loss += loss.data.item()
running_corrects += torch.sum(preds == classes.data)
predictions[i:i + len(classes)] = preds.to('cpu').numpy()
all_classes[i:i + len(classes)] = classes.to('cpu').numpy()
all_proba[i:i + len(classes)] = outputs.data.to('cpu').numpy()
i += len(classes)
    epoch_loss = running_loss / size
    epoch_acc = running_corrects.data.item() / size
    print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
    return (predictions, all_proba, all_classes)
inputs, classes = next(iter(load_test))
out = torchvision.utils.make_grid(inputs[0:n_images])
imshow(out, title=[dset_classes[x] for x in classes[0:n_images]]) | code |
17109112/cell_32 | [
"text_plain_output_1.png"
] | vals_try | code |
17109112/cell_28 | [
"image_output_1.png"
] | from torchvision import models, transforms, datasets
import torch
torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
inputs_try.shape
model_vgg = models.vgg16(pretrained=True)
inputs_try, labels_try = (inputs_try.to(device), labels_try.to(device))
model_vgg = model_vgg.to(device)
outputs_try = model_vgg(inputs_try)
outputs_try | code |
17109112/cell_15 | [
"text_plain_output_1.png"
] | from torchvision import models, transforms, datasets
import os
import torch
torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
data_dir = '../input/dogscats/dogscats/dogscats/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
load_train = torch.utils.data.DataLoader(dsets['train'], batch_size=64, shuffle=True, num_workers=6)
load_test = torch.utils.data.DataLoader(dsets['valid'], batch_size=5, shuffle=True, num_workers=6)
count = 1
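# walk the loader once and keep the first batch as a sample batch
# (next(iter(load_test)) would be a simpler way to grab it)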
for data in load_test:
print(count, end=',')
if count == 1:
inputs_try, labels_try = data
count += 1 | code |
17109112/cell_16 | [
"text_plain_output_1.png"
] | labels_try | code |
17109112/cell_3 | [
"image_output_1.png"
] | import torch
torch.__version__ | code |
17109112/cell_17 | [
"text_plain_output_1.png"
] | inputs_try.shape | code |
17109112/cell_31 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from torchvision import models, transforms, datasets
import os
import torch
import torch.nn as nn
torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
data_dir = '../input/dogscats/dogscats/dogscats/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
load_train = torch.utils.data.DataLoader(dsets['train'], batch_size=64, shuffle=True, num_workers=6)
load_test = torch.utils.data.DataLoader(dsets['valid'], batch_size=5, shuffle=True, num_workers=6)
inputs_try.shape
model_vgg = models.vgg16(pretrained=True)
inputs_try, labels_try = (inputs_try.to(device), labels_try.to(device))
model_vgg = model_vgg.to(device)
outputs_try = model_vgg(inputs_try)
outputs_try.shape
m_softm = nn.Softmax(dim=1)
probs = m_softm(outputs_try)
vals_try, preds_try = torch.max(probs, dim=1)
torch.sum(probs, 1) | code |
17109112/cell_46 | [
"text_plain_output_1.png"
] | train_model(model_vgg, load_train, size=dset_sizes['train'], epochs=2, optimizer=optimizer_vgg) | code |