path (stringlengths 13 to 17) | screenshot_names (sequencelengths 1 to 873) | code (stringlengths 0 to 40.4k) | cell_type (stringclasses: 1 value) |
---|---|---|---|
32065703/cell_8 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_df = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_df = metadata_df.fillna(0)
metadata_df_wt_abs = metadata_df[metadata_df['abstract'] != 0]
metadata_df_wt_abs.shape
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in metadata_df_wt_abs['abstract']:
temp = word_tokenize(word.lower())
for txt in temp:
if txt not in stop_words:
key_words.append(txt)
freq_all = nltk.FreqDist(key_words)
freq_all.plot(25, cumulative=False) | code |
32065703/cell_14 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_df = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_df = metadata_df.fillna(0)
metadata_df_wt_abs = metadata_df[metadata_df['abstract'] != 0]
metadata_df_wt_abs.shape
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in metadata_df_wt_abs['abstract']:
temp = word_tokenize(word.lower())
for txt in temp:
if txt not in stop_words:
key_words.append(txt)
def transformations(sentences):
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in sentences.split():
temp = word_tokenize(word.lower())
for txt in temp:
txt = lemmatizer.lemmatize(txt)
if txt not in stop_words:
key_words.append(txt)
return key_words
Label_df = pd.DataFrame(columns=['Task_text'], data=['What has been published about medical care?', ' What has been published concerning surge capacity and nursing homes?', 'What has been published concerning efforts to inform allocation of scarce resources?', 'What do we know about personal protective equipment?', 'What has been published concerning alternative methods to advise on disease management?', 'What has been published concerning processes of care?', 'What do we know about the clinical characterization and management of the virus?', 'Resources to support skilled nursing facilities and long term care facilities.', 'Mobilization of surge medical staff to address shortages in overwhelmed communities Age-adjusted mortality data for Acute Respiratory Distress Syndrome (ARDS) with/without other organ failure – particularly for viral etiologies', 'Extracorporeal membrane oxygenation (ECMO) outcomes data of COVID-19 patients Outcomes data for COVID-19 after mechanical ventilation adjusted for age.', 'Knowledge of the frequency, manifestations, and course of extrapulmonary manifestations of COVID-19, including, but not limited to, possible cardiomyopathy and cardiac arrest.', 'Application of regulatory standards (e.g., EUA, CLIA) and ability to adapt care to crisis standards of care level.', 'Approaches for encouraging and facilitating the production of elastomeric respirators, which can save thousands of N95 masks. Best telemedicine practices, barriers and faciitators, and specific actions to remove/expand them within and across state boundaries. Guidance on the simple things people can do at home to take care of sick people and manage disease. Oral medications that might potentially work.', 'Use of AI in real-time health care delivery to evaluate interventions, risk factors, and outcomes in a way that could not be done manually. Best practices and critical challenges and innovative solutions and technologies in hospital flow and organization, workforce protection, workforce allocation, community-based support resources, payment, and supply chain management to enhance capacity, efficiency, and outcomes. Efforts to define the natural history of disease to inform clinical care, public health interventions, infection prevention control, transmission, and clinical trials Efforts to develop a core clinical outcome set to maximize usability of data across a range of trials Efforts to determine adjunctive and supportive interventions that can improve the clinical outcomes of infected patients (e.g. steroids, high flow oxygen)'])
Label_df['Bag_of_words'] = Label_df['Task_text'].apply(lambda x: transformations(x))
Label_df
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_with_pid = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_with_pid.drop_duplicates(['abstract'], inplace=True)
metadata_with_pid.dropna(subset=['abstract'], inplace=True)
metadata_with_pid.drop(columns=['WHO #Covidence', 'journal', 'authors', 'full_text_file', 'license'])
metadata_with_pid.shape | code |
32065703/cell_22 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_df = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_df = metadata_df.fillna(0)
metadata_df_wt_abs = metadata_df[metadata_df['abstract'] != 0]
metadata_df_wt_abs.shape
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in metadata_df_wt_abs['abstract']:
temp = word_tokenize(word.lower())
for txt in temp:
if txt not in stop_words:
key_words.append(txt)
def transformations(sentences):
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in sentences.split():
temp = word_tokenize(word.lower())
for txt in temp:
txt = lemmatizer.lemmatize(txt)
if txt not in stop_words:
key_words.append(txt)
return key_words
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
Label_df = pd.DataFrame(columns=['Task_text'], data=['What has been published about medical care?', ' What has been published concerning surge capacity and nursing homes?', 'What has been published concerning efforts to inform allocation of scarce resources?', 'What do we know about personal protective equipment?', 'What has been published concerning alternative methods to advise on disease management?', 'What has been published concerning processes of care?', 'What do we know about the clinical characterization and management of the virus?', 'Resources to support skilled nursing facilities and long term care facilities.', 'Mobilization of surge medical staff to address shortages in overwhelmed communities Age-adjusted mortality data for Acute Respiratory Distress Syndrome (ARDS) with/without other organ failure – particularly for viral etiologies', 'Extracorporeal membrane oxygenation (ECMO) outcomes data of COVID-19 patients Outcomes data for COVID-19 after mechanical ventilation adjusted for age.', 'Knowledge of the frequency, manifestations, and course of extrapulmonary manifestations of COVID-19, including, but not limited to, possible cardiomyopathy and cardiac arrest.', 'Application of regulatory standards (e.g., EUA, CLIA) and ability to adapt care to crisis standards of care level.', 'Approaches for encouraging and facilitating the production of elastomeric respirators, which can save thousands of N95 masks. Best telemedicine practices, barriers and faciitators, and specific actions to remove/expand them within and across state boundaries. Guidance on the simple things people can do at home to take care of sick people and manage disease. Oral medications that might potentially work.', 'Use of AI in real-time health care delivery to evaluate interventions, risk factors, and outcomes in a way that could not be done manually. Best practices and critical challenges and innovative solutions and technologies in hospital flow and organization, workforce protection, workforce allocation, community-based support resources, payment, and supply chain management to enhance capacity, efficiency, and outcomes. Efforts to define the natural history of disease to inform clinical care, public health interventions, infection prevention control, transmission, and clinical trials Efforts to develop a core clinical outcome set to maximize usability of data across a range of trials Efforts to determine adjunctive and supportive interventions that can improve the clinical outcomes of infected patients (e.g. steroids, high flow oxygen)'])
Label_df['Bag_of_words'] = Label_df['Task_text'].apply(lambda x: transformations(x))
Label_df
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_with_pid = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_with_pid.drop_duplicates(['abstract'], inplace=True)
metadata_with_pid.dropna(subset=['abstract'], inplace=True)
metadata_with_pid.drop(columns=['WHO #Covidence', 'journal', 'authors', 'full_text_file', 'license'])
metadata_with_pid.shape
for pid in range(metadata_with_pid.shape[0]):
try:
if metadata_with_pid.loc[pid, 'sha'] != None:
metadata_with_pid.loc[pid, 'paper_id'] = metadata_with_pid.loc[pid, 'sha']
elif metadata_with_pid.loc[pid, 'pmcid'] != None:
metadata_with_pid.loc[pid, 'paper_id'] = metadata_with_pid.loc[pid, 'pmcid']
except:
metadata_with_pid.loc[pid, 'paper_id'] = ''
metadata_with_pid
metadata_with_pid.dropna(subset=['sha', 'pmcid'], how='all')
metadata_with_pid[:200]
dict_ = {'paper_id': [], 'doi': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'journal': [], 'abstract_summary': []}
for idx, entry in enumerate(all_json):
try:
content = FileReader(entry)
except Exception as e:
continue
meta_data = metadata_with_pid.loc[metadata_with_pid['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['abstract'].append(content.abstract)
dict_['paper_id'].append(content.paper_id)
dict_['body_text'].append(content.body_text)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = metadata_with_pid.loc[metadata_with_pid['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append(get_breaks('. '.join(authors), 40))
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
dict_['doi'].append(meta_data['doi'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'doi', 'abstract', 'body_text', 'authors', 'title', 'journal', 'abstract_summary'])
df_covid = pd.read_csv('/kaggle/input/cosine-df/cosine_df.csv', index_col=0)
sort_by_q1 = df_covid.sort_values('Q1cosine_similarity', ascending=False)
sort_by_q2 = df_covid.sort_values('Q2cosine_similarity', ascending=False)
sort_by_q2.loc[:, ['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'abstract_summary', 'Q2cosine_similarity']].head(n=10) | code |
32065703/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_df = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_df = metadata_df.fillna(0)
metadata_df_wt_abs = metadata_df[metadata_df['abstract'] != 0]
metadata_df_wt_abs.shape | code |
104116798/cell_21 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
f.readline()
f = open('../input/poetry/Kanye_West.txt', encoding='utf-8-sig')
f.close()
f.read(8)
f.read(4)
f.seek(0) | code |
104116798/cell_25 | [
"text_plain_output_1.png"
] | help(open) | code |
104116798/cell_4 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read() | code |
104116798/cell_34 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
f.readline()
f = open('../input/poetry/Kanye_West.txt', encoding='utf-8-sig')
f.close()
f.read(8)
f.read(4)
f.seek(0)
f.close()
f = open('text.txt', mode='w')
f.write('THIS IS MY FIRST LINE')
f.close()
f = open('text.txt')
f.close()
f = open('text.txt', mode='w')
f.write('This is my second line') | code |
104116798/cell_44 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
f.readline()
f = open('../input/poetry/Kanye_West.txt', encoding='utf-8-sig')
f.close()
f.read(8)
f.read(4)
f.seek(0)
f.close()
f = open('text.txt', mode='w')
f.write('THIS IS MY FIRST LINE')
f.close()
f = open('text.txt')
f.close()
f = open('text.txt', mode='w')
f.write('This is my second line')
f.close()
f = open('text.txt')
f.close()
f = open('text.txt', mode='a')
f.write('Great')
f.close()
f = open('text.txt')
print(f.read()) | code |
104116798/cell_20 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
f.readline()
f = open('../input/poetry/Kanye_West.txt', encoding='utf-8-sig')
f.close()
f.read(8)
f.read(4) | code |
104116798/cell_6 | [
"text_plain_output_1.png"
] | print('My name is Foofoo', end=', ')
print('We are learning Python') | code |
104116798/cell_48 | [
"text_plain_output_1.png"
] | img = open('../input/cifar10-pngs-in-folders/cifar10/test/airplane/0001.png', mode='rb')
img.readline() | code |
104116798/cell_41 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
f.readline()
f = open('../input/poetry/Kanye_West.txt', encoding='utf-8-sig')
f.close()
f.read(8)
f.read(4)
f.seek(0)
f.close()
f = open('text.txt', mode='w')
f.write('THIS IS MY FIRST LINE')
f.close()
f = open('text.txt')
f.close()
f = open('text.txt', mode='w')
f.write('This is my second line')
f.close()
f = open('text.txt')
f.close()
f = open('text.txt', mode='a')
f.write('Great') | code |
104116798/cell_19 | [
"text_plain_output_1.png"
] | help(open) | code |
104116798/cell_52 | [
"text_plain_output_1.png"
] | img = open('../input/cifar10-pngs-in-folders/cifar10/test/airplane/0001.png', mode='rb')
img.readline()
store = img.read()
a = open('airplane.png', mode='wb')
a.write(store) | code |
104116798/cell_7 | [
"text_plain_output_1.png"
] | help(print) | code |
104116798/cell_18 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
f.readline()
f = open('../input/poetry/Kanye_West.txt', encoding='utf-8-sig')
f.close()
f.read(8) | code |
104116798/cell_28 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
f.readline()
f = open('../input/poetry/Kanye_West.txt', encoding='utf-8-sig')
f.close()
f.read(8)
f.read(4)
f.seek(0)
f.close()
f = open('text.txt', mode='w')
f.write('THIS IS MY FIRST LINE') | code |
104116798/cell_31 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
f.readline()
f = open('../input/poetry/Kanye_West.txt', encoding='utf-8-sig')
f.close()
f.read(8)
f.read(4)
f.seek(0)
f.close()
f = open('text.txt', mode='w')
f.write('THIS IS MY FIRST LINE')
f.close()
f = open('text.txt')
print(f.read()) | code |
104116798/cell_14 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
f.readline() | code |
104116798/cell_10 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
print(f.read()) | code |
104116798/cell_37 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
f.readline()
f = open('../input/poetry/Kanye_West.txt', encoding='utf-8-sig')
f.close()
f.read(8)
f.read(4)
f.seek(0)
f.close()
f = open('text.txt', mode='w')
f.write('THIS IS MY FIRST LINE')
f.close()
f = open('text.txt')
f.close()
f = open('text.txt', mode='w')
f.write('This is my second line')
f.close()
f = open('text.txt')
print(f.read()) | code |
104116798/cell_12 | [
"text_plain_output_1.png"
] | f = open('../input/poetry/Kanye_West.txt')
f.read()
f.close()
for i in f:
print(i, end='\n') | code |
130027580/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/best-movies-on-netflix/100 Best Movies on Netflix.csv')
df.tail() | code |
130027580/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/best-movies-on-netflix/100 Best Movies on Netflix.csv')
df.isnull().sum() | code |
130027580/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns | code |
130027580/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/best-movies-on-netflix/100 Best Movies on Netflix.csv')
df.isnull().sum()
df.duplicated().sum() | code |
130027580/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/best-movies-on-netflix/100 Best Movies on Netflix.csv')
df.isnull().sum()
df.duplicated().sum()
df.describe() | code |
130027580/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/best-movies-on-netflix/100 Best Movies on Netflix.csv')
df.head() | code |
130027580/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/best-movies-on-netflix/100 Best Movies on Netflix.csv')
df.isnull().sum()
df.duplicated().sum()
df[['Movie Title', 'Score']].sort_values('Score', ascending=False).head(10) | code |
130027580/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/best-movies-on-netflix/100 Best Movies on Netflix.csv')
df.isnull().sum()
df.duplicated().sum()
df[['Movie Title', 'Rank']].sort_values('Rank').head(10) | code |
130027580/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/best-movies-on-netflix/100 Best Movies on Netflix.csv')
df.info() | code |
2028522/cell_4 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def sigmoid(z):
return 1.0 / (1 + np.exp(-z))
def z(theta, x):
assert theta.shape[1] == 1
assert theta.shape[0] == x.shape[1]
return np.dot(x, theta)
def hypothesis(theta, x):
return sigmoid(z(theta, x))
def cost(theta, x, y):
assert x.shape[1] == theta.shape[0]
assert x.shape[0] == y.shape[0]
assert y.shape[1] == 1
assert theta.shape[1] == 1
h = hypothesis(theta, x)
one_case = np.matmul(-y.T, np.log(h))
zero_case = np.matmul(-(1 - y).T, np.log(1 - h))
return (one_case + zero_case) / len(x)
def gradient_descent(theta, x, y, learning_rate, regularization=0):
regularization = theta * regularization
error = hypothesis(theta, x) - y
n = learning_rate / len(x) * (np.matmul(x.T, error) + regularization)
return theta - n
def minimize(theta, x, y, iterations, learning_rate, regularization=0):
costs = []
for _ in range(iterations):
theta = gradient_descent(theta, x, y, learning_rate, regularization)
costs.append(cost(theta, x, y)[0][0])
return (theta, costs)
mushroom_data = pd.read_csv('../input/mushrooms.csv').dropna()
mushroom_x = pd.get_dummies(mushroom_data.drop('class', axis=1))
mushroom_x['bias'] = 1
mushroom_x = mushroom_x.values
mushroom_y = (np.atleast_2d(mushroom_data['class']).T == 'p').astype(int)
x_train, x_test, y_train, y_test = train_test_split(mushroom_x, mushroom_y, train_size=0.85, test_size=0.15)
candidate = np.atleast_2d([np.random.uniform(-1, 1, 118)]).T
theta, costs = minimize(candidate, x_train, y_train, 1200, 1.2, 0.5)
plt.plot(range(len(costs)), costs)
plt.show()
print(costs[-1])
predictions = x_test.dot(theta) > 0
len(list(filter(lambda x: x[0] == x[1], np.dstack((predictions, y_test))[:, 0]))) / len(predictions) | code |
1005077/cell_4 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
hr.head() | code |
1005077/cell_20 | [
"text_html_output_1.png"
] | from sklearn import neighbors, svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import cross_val_score, train_test_split, learning_curve
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
hr.sales = hr.sales.astype('category').cat.codes
hr.salary = hr.salary.astype('category').cat.codes
def predict_left(df, clf, test_size=0.2):
X = df.drop(['left'], 1)
y = df.left
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
clf.fit(X_train, y_train)
train_pred = clf.predict(X_train)
test_pred = clf.predict(X_test)
classifiers = [RandomForestClassifier(n_jobs=-1), RandomForestClassifier(criterion='entropy', n_jobs=-1), svm.SVC(), LogisticRegressionCV(n_jobs=-1), AdaBoostClassifier(), GradientBoostingClassifier(), neighbors.KNeighborsClassifier(n_jobs=-1), MultinomialNB(class_prior=[76.19, 23.81])]
np.random.seed(0)
for i, clf in enumerate(classifiers):
predict_left(hr, clf, test_size=0.4)
def cross_val_left(hr, clf, cv_folds=CV_FOLDS, drop=['left']):
X = hr.drop(drop, 1)
y = hr.left
scores = cross_val_score(clf, X, y, cv=cv_folds, n_jobs=-1)
clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
X = hr.drop(['left'], 1)
y = hr.left
train_sizes, train_scores, valid_scores = learning_curve(clf, X, y, train_sizes=np.linspace(0.1, 1.0, 10), cv=5, n_jobs=-1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(valid_scores, axis=1)
test_scores_std = np.std(valid_scores, axis=1)
train_sizes = np.linspace(0.1, 1.0, 10)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
train_scores_mean = np.mean(train_scores, axis=1)[3:]
train_scores_std = np.std(train_scores, axis=1)[3:]
test_scores_mean = np.mean(valid_scores, axis=1)[3:]
test_scores_std = np.std(valid_scores, axis=1)[3:]
train_sizes = np.linspace(0.1, 1.0, 10)[3:]
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label='Training score')
plt.plot(train_sizes, test_scores_mean, 'o-', color='g', label='Cross-validation score')
plt.xlabel('Training examples')
plt.ylabel('Score')
plt.grid()
plt.legend(loc='best')
plt.show() | code |
1005077/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
print('Percent who left: {:.2f}'.format(np.sum(hr.left) / len(hr.left) * 100)) | code |
1005077/cell_26 | [
"image_output_1.png"
] | from sklearn import neighbors, svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import cross_val_score, train_test_split, learning_curve
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
hr.sales = hr.sales.astype('category').cat.codes
hr.salary = hr.salary.astype('category').cat.codes
def predict_left(df, clf, test_size=0.2):
X = df.drop(['left'], 1)
y = df.left
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
clf.fit(X_train, y_train)
train_pred = clf.predict(X_train)
test_pred = clf.predict(X_test)
classifiers = [RandomForestClassifier(n_jobs=-1), RandomForestClassifier(criterion='entropy', n_jobs=-1), svm.SVC(), LogisticRegressionCV(n_jobs=-1), AdaBoostClassifier(), GradientBoostingClassifier(), neighbors.KNeighborsClassifier(n_jobs=-1), MultinomialNB(class_prior=[76.19, 23.81])]
np.random.seed(0)
for i, clf in enumerate(classifiers):
predict_left(hr, clf, test_size=0.4)
def cross_val_left(hr, clf, cv_folds=CV_FOLDS, drop=['left']):
X = hr.drop(drop, 1)
y = hr.left
scores = cross_val_score(clf, X, y, cv=cv_folds, n_jobs=-1)
clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
X = hr.drop(['left'], 1)
y = hr.left
train_sizes, train_scores, valid_scores = learning_curve(clf, X, y, train_sizes=np.linspace(0.1, 1.0, 10), cv=5, n_jobs=-1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(valid_scores, axis=1)
test_scores_std = np.std(valid_scores, axis=1)
train_sizes = np.linspace(0.1, 1.0, 10)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
train_scores_mean = np.mean(train_scores, axis=1)[3:]
train_scores_std = np.std(train_scores, axis=1)[3:]
test_scores_mean = np.mean(valid_scores, axis=1)[3:]
test_scores_std = np.std(valid_scores, axis=1)[3:]
train_sizes = np.linspace(0.1, 1.0, 10)[3:]
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
np.random.seed(0)
clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
X = hr.drop(['left'], 1)
y = hr.left
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf.fit(X_train, y_train)
clf.feature_importances_
hr.corr()['left'] | code |
1005077/cell_18 | [
"text_plain_output_1.png"
] | from sklearn import neighbors, svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import cross_val_score, train_test_split, learning_curve
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
hr.sales = hr.sales.astype('category').cat.codes
hr.salary = hr.salary.astype('category').cat.codes
def predict_left(df, clf, test_size=0.2):
X = df.drop(['left'], 1)
y = df.left
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
clf.fit(X_train, y_train)
train_pred = clf.predict(X_train)
test_pred = clf.predict(X_test)
classifiers = [RandomForestClassifier(n_jobs=-1), RandomForestClassifier(criterion='entropy', n_jobs=-1), svm.SVC(), LogisticRegressionCV(n_jobs=-1), AdaBoostClassifier(), GradientBoostingClassifier(), neighbors.KNeighborsClassifier(n_jobs=-1), MultinomialNB(class_prior=[76.19, 23.81])]
np.random.seed(0)
for i, clf in enumerate(classifiers):
predict_left(hr, clf, test_size=0.4)
def cross_val_left(hr, clf, cv_folds=CV_FOLDS, drop=['left']):
X = hr.drop(drop, 1)
y = hr.left
scores = cross_val_score(clf, X, y, cv=cv_folds, n_jobs=-1)
clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
X = hr.drop(['left'], 1)
y = hr.left
train_sizes, train_scores, valid_scores = learning_curve(clf, X, y, train_sizes=np.linspace(0.1, 1.0, 10), cv=5, n_jobs=-1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(valid_scores, axis=1)
test_scores_std = np.std(valid_scores, axis=1)
train_sizes = np.linspace(0.1, 1.0, 10)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label='Training score')
plt.plot(train_sizes, test_scores_mean, 'o-', color='g', label='Cross-validation score')
plt.xlabel('Training examples')
plt.ylabel('Score')
plt.grid()
plt.legend(loc='best')
plt.show() | code |
1005077/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
hr.sales = hr.sales.astype('category').cat.codes
hr.salary = hr.salary.astype('category').cat.codes
hr[['sales', 'salary']].head() | code |
1005077/cell_15 | [
"text_html_output_1.png"
] | from sklearn import neighbors, svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import cross_val_score, train_test_split, learning_curve
from sklearn.naive_bayes import MultinomialNB
import numpy as np
import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
hr.sales = hr.sales.astype('category').cat.codes
hr.salary = hr.salary.astype('category').cat.codes
def predict_left(df, clf, test_size=0.2):
X = df.drop(['left'], 1)
y = df.left
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
clf.fit(X_train, y_train)
train_pred = clf.predict(X_train)
test_pred = clf.predict(X_test)
classifiers = [RandomForestClassifier(n_jobs=-1), RandomForestClassifier(criterion='entropy', n_jobs=-1), svm.SVC(), LogisticRegressionCV(n_jobs=-1), AdaBoostClassifier(), GradientBoostingClassifier(), neighbors.KNeighborsClassifier(n_jobs=-1), MultinomialNB(class_prior=[76.19, 23.81])]
np.random.seed(0)
for i, clf in enumerate(classifiers):
predict_left(hr, clf, test_size=0.4)
def cross_val_left(hr, clf, cv_folds=CV_FOLDS, drop=['left']):
X = hr.drop(drop, 1)
y = hr.left
scores = cross_val_score(clf, X, y, cv=cv_folds, n_jobs=-1)
classifiers = [RandomForestClassifier(n_estimators=500, n_jobs=-1), RandomForestClassifier(n_estimators=500, criterion='entropy', n_jobs=-1)]
for i, clf in enumerate(classifiers):
print('Classifier ', i)
cross_val_left(hr, clf) | code |
1005077/cell_24 | [
"text_plain_output_1.png"
] | from sklearn import neighbors, svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import cross_val_score, train_test_split, learning_curve
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
hr.sales = hr.sales.astype('category').cat.codes
hr.salary = hr.salary.astype('category').cat.codes
def predict_left(df, clf, test_size=0.2):
X = df.drop(['left'], 1)
y = df.left
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
clf.fit(X_train, y_train)
train_pred = clf.predict(X_train)
test_pred = clf.predict(X_test)
classifiers = [RandomForestClassifier(n_jobs=-1), RandomForestClassifier(criterion='entropy', n_jobs=-1), svm.SVC(), LogisticRegressionCV(n_jobs=-1), AdaBoostClassifier(), GradientBoostingClassifier(), neighbors.KNeighborsClassifier(n_jobs=-1), MultinomialNB(class_prior=[76.19, 23.81])]
np.random.seed(0)
for i, clf in enumerate(classifiers):
predict_left(hr, clf, test_size=0.4)
def cross_val_left(hr, clf, cv_folds=CV_FOLDS, drop=['left']):
X = hr.drop(drop, 1)
y = hr.left
scores = cross_val_score(clf, X, y, cv=cv_folds, n_jobs=-1)
classifiers = [RandomForestClassifier(n_estimators=500, n_jobs=-1), RandomForestClassifier(n_estimators=500, criterion='entropy', n_jobs=-1)]
for i, clf in enumerate(classifiers):
cross_val_left(hr, clf)
clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
X = hr.drop(['left'], 1)
y = hr.left
train_sizes, train_scores, valid_scores = learning_curve(clf, X, y, train_sizes=np.linspace(0.1, 1.0, 10), cv=5, n_jobs=-1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(valid_scores, axis=1)
test_scores_std = np.std(valid_scores, axis=1)
train_sizes = np.linspace(0.1, 1.0, 10)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
train_scores_mean = np.mean(train_scores, axis=1)[3:]
train_scores_std = np.std(train_scores, axis=1)[3:]
test_scores_mean = np.mean(valid_scores, axis=1)[3:]
test_scores_std = np.std(valid_scores, axis=1)[3:]
train_sizes = np.linspace(0.1, 1.0, 10)[3:]
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
np.random.seed(0)
clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
X = hr.drop(['left'], 1)
y = hr.left
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf.fit(X_train, y_train)
clf.feature_importances_
drop = ['left', 'promotion_last_5years', 'Work_accident', 'sales', 'salary']
for i, clf in enumerate(classifiers):
print('Classifier ', i)
cross_val_left(hr, clf, drop=drop) | code |
1005077/cell_22 | [
"text_plain_output_1.png"
] | from sklearn import neighbors, svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import cross_val_score, train_test_split, learning_curve
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
hr.sales = hr.sales.astype('category').cat.codes
hr.salary = hr.salary.astype('category').cat.codes
def predict_left(df, clf, test_size=0.2):
X = df.drop(['left'], 1)
y = df.left
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
clf.fit(X_train, y_train)
train_pred = clf.predict(X_train)
test_pred = clf.predict(X_test)
classifiers = [RandomForestClassifier(n_jobs=-1), RandomForestClassifier(criterion='entropy', n_jobs=-1), svm.SVC(), LogisticRegressionCV(n_jobs=-1), AdaBoostClassifier(), GradientBoostingClassifier(), neighbors.KNeighborsClassifier(n_jobs=-1), MultinomialNB(class_prior=[76.19, 23.81])]
np.random.seed(0)
for i, clf in enumerate(classifiers):
predict_left(hr, clf, test_size=0.4)
def cross_val_left(hr, clf, cv_folds=CV_FOLDS, drop=['left']):
X = hr.drop(drop, 1)
y = hr.left
scores = cross_val_score(clf, X, y, cv=cv_folds, n_jobs=-1)
clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
X = hr.drop(['left'], 1)
y = hr.left
train_sizes, train_scores, valid_scores = learning_curve(clf, X, y, train_sizes=np.linspace(0.1, 1.0, 10), cv=5, n_jobs=-1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(valid_scores, axis=1)
test_scores_std = np.std(valid_scores, axis=1)
train_sizes = np.linspace(0.1, 1.0, 10)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
train_scores_mean = np.mean(train_scores, axis=1)[3:]
train_scores_std = np.std(train_scores, axis=1)[3:]
test_scores_mean = np.mean(valid_scores, axis=1)[3:]
test_scores_std = np.std(valid_scores, axis=1)[3:]
train_sizes = np.linspace(0.1, 1.0, 10)[3:]
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
np.random.seed(0)
clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
X = hr.drop(['left'], 1)
y = hr.left
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf.fit(X_train, y_train)
print(X.columns)
clf.feature_importances_ | code |
1005077/cell_12 | [
"text_html_output_1.png"
] | from sklearn import neighbors, svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import cross_val_score, train_test_split, learning_curve
from sklearn.naive_bayes import MultinomialNB
import numpy as np
import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
hr.sales = hr.sales.astype('category').cat.codes
hr.salary = hr.salary.astype('category').cat.codes
def predict_left(df, clf, test_size=0.2):
X = df.drop(['left'], 1)
y = df.left
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
clf.fit(X_train, y_train)
train_pred = clf.predict(X_train)
test_pred = clf.predict(X_test)
classifiers = [RandomForestClassifier(n_jobs=-1), RandomForestClassifier(criterion='entropy', n_jobs=-1), svm.SVC(), LogisticRegressionCV(n_jobs=-1), AdaBoostClassifier(), GradientBoostingClassifier(), neighbors.KNeighborsClassifier(n_jobs=-1), MultinomialNB(class_prior=[76.19, 23.81])]
np.random.seed(0)
for i, clf in enumerate(classifiers):
print('Classifier ', i)
predict_left(hr, clf, test_size=0.4) | code |
1005077/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
hr = pd.read_csv(DIR_DATA + '/HR_comma_sep.csv')
hr.describe() | code |
16123553/cell_42 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
from scipy.stats import chi2_contingency
class ChiSquare:
def __init__(self, df):
self.df = df
self.p = None
self.chi2 = None
self.dof = None
self.dfObserved = None
self.dfExpected = None
def _print_chisquare_result(self, colX, alpha):
result = ''
if self.p < alpha:
result = '{0} is IMPORTANT for Prediction'.format(colX)
else:
result = '{0} is NOT an important predictor. (Discard {0} from model)'.format(colX)
def TestIndependence(self, colX, colY, alpha=0.05):
X = self.df[colX].astype(str)
Y = self.df[colY].astype(str)
self.dfObserved = pd.crosstab(Y, X)
chi2, p, dof, expected = ss.chi2_contingency(self.dfObserved.values)
self.p = p
self.chi2 = chi2
self.dof = dof
self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index=self.dfObserved.index)
X = df.drop(['Claim'], axis=1)
ct = ChiSquare(df)
for c in X.columns:
ct.TestIndependence(c, 'Claim') | code |
16123553/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df['Duration'].describe() | code |
16123553/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum() | code |
16123553/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df['Gender'].isnull().sum() | code |
16123553/cell_56 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
from scipy.stats import chi2_contingency
class ChiSquare:
def __init__(self, df):
self.df = df
self.p = None
self.chi2 = None
self.dof = None
self.dfObserved = None
self.dfExpected = None
def _print_chisquare_result(self, colX, alpha):
result = ''
if self.p < alpha:
result = '{0} is IMPORTANT for Prediction'.format(colX)
else:
result = '{0} is NOT an important predictor. (Discard {0} from model)'.format(colX)
def TestIndependence(self, colX, colY, alpha=0.05):
X = self.df[colX].astype(str)
Y = self.df[colY].astype(str)
self.dfObserved = pd.crosstab(Y, X)
chi2, p, dof, expected = ss.chi2_contingency(self.dfObserved.values)
self.p = p
self.chi2 = chi2
self.dof = dof
self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index=self.dfObserved.index)
X = df.drop(['Claim'], axis=1)
ct = ChiSquare(df)
for c in X.columns:
ct.TestIndependence(c, 'Claim')
df.drop(columns=['Distribution Channel', 'Agency Type'], axis=1, inplace=True)
y = df['Claim']
x = df
x.drop(columns='Claim', axis=1, inplace=True)
x_dummy = pd.get_dummies(x, columns=['Agency', 'Gender', 'Product Name', 'Destination'], drop_first=True)
lr = LogisticRegression()
rfe = RFE(estimator=lr, n_features_to_select=10, verbose=3)
rfe.fit(x_dummy, y)
rfe_df1 = rfe.fit_transform(x_dummy, y)
lr_model = lr.fit(X_train, y_train)
lr_pred = lr.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, lr_pred)) | code |
16123553/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import missingno
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
missingno.matrix(df) | code |
16123553/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.head(5) | code |
16123553/cell_54 | [
"text_html_output_1.png"
] | from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
from scipy.stats import chi2_contingency
class ChiSquare:
def __init__(self, df):
self.df = df
self.p = None
self.chi2 = None
self.dof = None
self.dfObserved = None
self.dfExpected = None
def _print_chisquare_result(self, colX, alpha):
result = ''
if self.p < alpha:
result = '{0} is IMPORTANT for Prediction'.format(colX)
else:
result = '{0} is NOT an important predictor. (Discard {0} from model)'.format(colX)
def TestIndependence(self, colX, colY, alpha=0.05):
X = self.df[colX].astype(str)
Y = self.df[colY].astype(str)
self.dfObserved = pd.crosstab(Y, X)
chi2, p, dof, expected = ss.chi2_contingency(self.dfObserved.values)
self.p = p
self.chi2 = chi2
self.dof = dof
self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index=self.dfObserved.index)
X = df.drop(['Claim'], axis=1)
ct = ChiSquare(df)
for c in X.columns:
ct.TestIndependence(c, 'Claim')
df.drop(columns=['Distribution Channel', 'Agency Type'], axis=1, inplace=True)
y = df['Claim']
x = df
x.drop(columns='Claim', axis=1, inplace=True)
x_dummy = pd.get_dummies(x, columns=['Agency', 'Gender', 'Product Name', 'Destination'], drop_first=True)
lr = LogisticRegression()
rfe = RFE(estimator=lr, n_features_to_select=10, verbose=3)
rfe.fit(x_dummy, y)
rfe_df1 = rfe.fit_transform(x_dummy, y)
lr_model = lr.fit(X_train, y_train) | code |
16123553/cell_60 | [
"text_plain_output_1.png"
] | from sklearn.metrics import classification_report
from sklearn.metrics import classification_report
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVC
lsvc = LinearSVC()
svc_model = lsvc.fit(X_train, y_train)
lsvc_pred = lsvc.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, lsvc_pred)) | code |
16123553/cell_50 | [
"text_plain_output_1.png"
] | from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
from scipy.stats import chi2_contingency
class ChiSquare:
def __init__(self, df):
self.df = df
self.p = None
self.chi2 = None
self.dof = None
self.dfObserved = None
self.dfExpected = None
def _print_chisquare_result(self, colX, alpha):
result = ''
if self.p < alpha:
result = '{0} is IMPORTANT for Prediction'.format(colX)
else:
result = '{0} is NOT an important predictor. (Discard {0} from model)'.format(colX)
def TestIndependence(self, colX, colY, alpha=0.05):
X = self.df[colX].astype(str)
Y = self.df[colY].astype(str)
self.dfObserved = pd.crosstab(Y, X)
chi2, p, dof, expected = ss.chi2_contingency(self.dfObserved.values)
self.p = p
self.chi2 = chi2
self.dof = dof
self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index=self.dfObserved.index)
X = df.drop(['Claim'], axis=1)
ct = ChiSquare(df)
for c in X.columns:
ct.TestIndependence(c, 'Claim')
df.drop(columns=['Distribution Channel', 'Agency Type'], axis=1, inplace=True)
y = df['Claim']
x = df
x.drop(columns='Claim', axis=1, inplace=True)
x_dummy = pd.get_dummies(x, columns=['Agency', 'Gender', 'Product Name', 'Destination'], drop_first=True)
lr = LogisticRegression()
rfe = RFE(estimator=lr, n_features_to_select=10, verbose=3)
rfe.fit(x_dummy, y)
rfe_df1 = rfe.fit_transform(x_dummy, y)
print('Features sorted by their rank:')
print(sorted(zip(map(lambda x: round(x, 4), rfe.ranking_), x_dummy.columns))) | code |
16123553/cell_52 | [
"application_vnd.jupyter.stderr_output_116.png",
"application_vnd.jupyter.stderr_output_74.png",
"application_vnd.jupyter.stderr_output_268.png",
"application_vnd.jupyter.stderr_output_362.png",
"text_plain_output_673.png",
"text_plain_output_445.png",
"text_plain_output_201.png",
"text_plain_output_261.png",
"text_plain_output_565.png",
"application_vnd.jupyter.stderr_output_566.png",
"application_vnd.jupyter.stderr_output_578.png",
"application_vnd.jupyter.stderr_output_516.png",
"application_vnd.jupyter.stderr_output_672.png",
"text_plain_output_521.png",
"text_plain_output_205.png",
"application_vnd.jupyter.stderr_output_732.png",
"application_vnd.jupyter.stderr_output_222.png",
"application_vnd.jupyter.stderr_output_626.png",
"application_vnd.jupyter.stderr_output_96.png",
"text_plain_output_693.png",
"application_vnd.jupyter.stderr_output_642.png",
"application_vnd.jupyter.stderr_output_640.png",
"text_plain_output_511.png",
"text_plain_output_271.png",
"text_plain_output_475.png",
"application_vnd.jupyter.stderr_output_296.png",
"text_plain_output_455.png",
"text_plain_output_223.png",
"application_vnd.jupyter.stderr_output_110.png",
"text_plain_output_715.png",
"text_plain_output_579.png",
"text_plain_output_629.png",
"text_plain_output_287.png",
"application_vnd.jupyter.stderr_output_112.png",
"text_plain_output_181.png",
"text_plain_output_137.png",
"application_vnd.jupyter.stderr_output_400.png",
"application_vnd.jupyter.stderr_output_212.png",
"application_vnd.jupyter.stderr_output_700.png",
"application_vnd.jupyter.stderr_output_458.png",
"application_vnd.jupyter.stderr_output_634.png",
"text_plain_output_139.png",
"application_vnd.jupyter.stderr_output_420.png",
"text_plain_output_35.png",
"text_plain_output_697.png",
"text_plain_output_501.png",
"text_plain_output_593.png",
"application_vnd.jupyter.stderr_output_24.png",
"application_vnd.jupyter.stderr_output_354.png",
"text_plain_output_685.png",
"application_vnd.jupyter.stderr_output_16.png",
"application_vnd.jupyter.stderr_output_274.png",
"application_vnd.jupyter.stderr_output_610.png",
"application_vnd.jupyter.stderr_output_632.png",
"application_vnd.jupyter.stderr_output_368.png",
"text_plain_output_449.png",
"text_plain_output_117.png",
"application_vnd.jupyter.stderr_output_474.png",
"application_vnd.jupyter.stderr_output_258.png",
"text_plain_output_367.png",
"application_vnd.jupyter.stderr_output_668.png",
"application_vnd.jupyter.stderr_output_622.png",
"text_plain_output_395.png",
"application_vnd.jupyter.stderr_output_286.png",
"application_vnd.jupyter.stderr_output_426.png",
"text_plain_output_617.png",
"application_vnd.jupyter.stderr_output_152.png",
"application_vnd.jupyter.stderr_output_156.png",
"text_plain_output_307.png",
"application_vnd.jupyter.stderr_output_522.png",
"application_vnd.jupyter.stderr_output_710.png",
"application_vnd.jupyter.stderr_output_684.png",
"application_vnd.jupyter.stderr_output_70.png",
"application_vnd.jupyter.stderr_output_310.png",
"application_vnd.jupyter.stderr_output_554.png",
"application_vnd.jupyter.stderr_output_204.png",
"text_plain_output_399.png",
"application_vnd.jupyter.stderr_output_284.png",
"text_plain_output_671.png",
"application_vnd.jupyter.stderr_output_124.png",
"text_plain_output_195.png",
"application_vnd.jupyter.stderr_output_498.png",
"text_plain_output_471.png",
"text_plain_output_219.png",
"application_vnd.jupyter.stderr_output_52.png",
"text_plain_output_485.png",
"text_plain_output_237.png",
"text_plain_output_43.png",
"application_vnd.jupyter.stderr_output_172.png",
"text_plain_output_187.png",
"text_plain_output_309.png",
"application_vnd.jupyter.stderr_output_512.png",
"text_plain_output_143.png",
"application_vnd.jupyter.stderr_output_348.png",
"text_plain_output_37.png",
"application_vnd.jupyter.stderr_output_32.png",
"application_vnd.jupyter.stderr_output_246.png",
"application_vnd.jupyter.stderr_output_704.png",
"application_vnd.jupyter.stderr_output_502.png",
"application_vnd.jupyter.stderr_output_722.png",
"application_vnd.jupyter.stderr_output_176.png",
"application_vnd.jupyter.stderr_output_356.png",
"text_plain_output_477.png",
"text_plain_output_627.png",
"application_vnd.jupyter.stderr_output_506.png",
"text_plain_output_613.png",
"text_plain_output_147.png",
"text_plain_output_443.png",
"text_plain_output_327.png",
"application_vnd.jupyter.stderr_output_346.png",
"text_plain_output_79.png",
"text_plain_output_331.png",
"application_vnd.jupyter.stderr_output_382.png",
"application_vnd.jupyter.stderr_output_170.png",
"application_vnd.jupyter.stderr_output_132.png",
"text_plain_output_5.png",
"text_plain_output_75.png",
"application_vnd.jupyter.stderr_output_692.png",
"application_vnd.jupyter.stderr_output_540.png",
"application_vnd.jupyter.stderr_output_48.png",
"application_vnd.jupyter.stderr_output_236.png",
"application_vnd.jupyter.stderr_output_418.png",
"application_vnd.jupyter.stderr_output_636.png",
"text_plain_output_167.png",
"application_vnd.jupyter.stderr_output_550.png",
"text_plain_output_213.png",
"text_plain_output_73.png",
"text_plain_output_687.png",
"application_vnd.jupyter.stderr_output_378.png",
"application_vnd.jupyter.stderr_output_432.png",
"text_plain_output_321.png",
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_472.png",
"text_plain_output_115.png",
"application_vnd.jupyter.stderr_output_504.png",
"text_plain_output_407.png",
"application_vnd.jupyter.stderr_output_552.png",
"application_vnd.jupyter.stderr_output_694.png",
"text_plain_output_355.png",
"text_plain_output_15.png",
"application_vnd.jupyter.stderr_output_618.png",
"text_plain_output_133.png",
"application_vnd.jupyter.stderr_output_392.png",
"application_vnd.jupyter.stderr_output_690.png",
"text_plain_output_651.png",
"application_vnd.jupyter.stderr_output_666.png",
"application_vnd.jupyter.stderr_output_414.png",
"application_vnd.jupyter.stderr_output_436.png",
"application_vnd.jupyter.stderr_output_608.png",
"text_plain_output_437.png",
"application_vnd.jupyter.stderr_output_146.png",
"text_plain_output_699.png",
"text_plain_output_387.png",
"text_plain_output_555.png",
"application_vnd.jupyter.stderr_output_324.png",
"application_vnd.jupyter.stderr_output_528.png",
"application_vnd.jupyter.stderr_output_360.png",
"application_vnd.jupyter.stderr_output_484.png",
"application_vnd.jupyter.stderr_output_674.png",
"text_plain_output_375.png",
"text_plain_output_659.png",
"text_plain_output_515.png",
"text_plain_output_157.png",
"application_vnd.jupyter.stderr_output_190.png",
"application_vnd.jupyter.stderr_output_380.png",
"application_vnd.jupyter.stderr_output_270.png",
"text_plain_output_317.png",
"text_plain_output_251.png",
"application_vnd.jupyter.stderr_output_344.png",
"application_vnd.jupyter.stderr_output_18.png",
"text_plain_output_423.png",
"application_vnd.jupyter.stderr_output_86.png",
"text_plain_output_9.png",
"application_vnd.jupyter.stderr_output_334.png",
"application_vnd.jupyter.stderr_output_526.png",
"text_plain_output_633.png",
"application_vnd.jupyter.stderr_output_38.png",
"application_vnd.jupyter.stderr_output_482.png",
"application_vnd.jupyter.stderr_output_568.png",
"text_plain_output_325.png",
"application_vnd.jupyter.stderr_output_240.png",
"text_plain_output_203.png",
"text_plain_output_505.png",
"application_vnd.jupyter.stderr_output_272.png",
"application_vnd.jupyter.stderr_output_88.png",
"text_plain_output_603.png",
"text_plain_output_655.png",
"text_plain_output_119.png",
"text_plain_output_373.png",
"application_vnd.jupyter.stderr_output_148.png",
"application_vnd.jupyter.stderr_output_520.png",
"text_plain_output_551.png",
"text_plain_output_583.png",
"application_vnd.jupyter.stderr_output_58.png",
"application_vnd.jupyter.stderr_output_638.png",
"application_vnd.jupyter.stderr_output_66.png",
"text_plain_output_131.png",
"text_plain_output_343.png",
"application_vnd.jupyter.stderr_output_724.png",
"application_vnd.jupyter.stderr_output_718.png",
"text_plain_output_123.png",
"application_vnd.jupyter.stderr_output_68.png",
"text_plain_output_31.png",
"application_vnd.jupyter.stderr_output_106.png",
"text_plain_output_379.png",
"application_vnd.jupyter.stderr_output_224.png",
"text_plain_output_281.png",
"text_plain_output_639.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_557.png",
"application_vnd.jupyter.stderr_output_26.png",
"application_vnd.jupyter.stderr_output_178.png",
"text_plain_output_273.png",
"application_vnd.jupyter.stderr_output_322.png",
"text_plain_output_263.png",
"text_plain_output_229.png",
"application_vnd.jupyter.stderr_output_384.png",
"text_plain_output_111.png",
"application_vnd.jupyter.stderr_output_406.png",
"application_vnd.jupyter.stderr_output_620.png",
"application_vnd.jupyter.stderr_output_238.png",
"application_vnd.jupyter.stderr_output_564.png",
"text_plain_output_669.png",
"text_plain_output_461.png",
"application_vnd.jupyter.stderr_output_650.png",
"application_vnd.jupyter.stderr_output_450.png",
"application_vnd.jupyter.stderr_output_524.png",
"text_plain_output_589.png",
"text_plain_output_101.png",
"application_vnd.jupyter.stderr_output_490.png",
"text_plain_output_169.png",
"text_plain_output_531.png",
"text_plain_output_161.png",
"text_plain_output_489.png",
"application_vnd.jupyter.stderr_output_136.png",
"text_plain_output_305.png",
"text_plain_output_275.png",
"application_vnd.jupyter.stderr_output_6.png",
"text_plain_output_725.png",
"text_plain_output_301.png",
"application_vnd.jupyter.stderr_output_422.png",
"application_vnd.jupyter.stderr_output_162.png",
"application_vnd.jupyter.stderr_output_376.png",
"application_vnd.jupyter.stderr_output_676.png",
"application_vnd.jupyter.stderr_output_232.png",
"text_plain_output_691.png",
"application_vnd.jupyter.stderr_output_260.png",
"text_plain_output_467.png",
"text_plain_output_221.png",
"application_vnd.jupyter.stderr_output_576.png",
"application_vnd.jupyter.stderr_output_134.png",
"text_plain_output_155.png",
"application_vnd.jupyter.stderr_output_194.png",
"text_plain_output_65.png",
"text_plain_output_419.png",
"application_vnd.jupyter.stderr_output_302.png",
"text_plain_output_215.png",
"application_vnd.jupyter.stderr_output_664.png",
"application_vnd.jupyter.stderr_output_546.png",
"text_plain_output_189.png",
"text_plain_output_415.png",
"text_plain_output_637.png",
"application_vnd.jupyter.stderr_output_476.png",
"text_plain_output_13.png",
"application_vnd.jupyter.stderr_output_478.png",
"application_vnd.jupyter.stderr_output_656.png",
"text_plain_output_107.png",
"application_vnd.jupyter.stderr_output_336.png",
"application_vnd.jupyter.stderr_output_402.png",
"application_vnd.jupyter.stderr_output_542.png",
"text_plain_output_567.png",
"application_vnd.jupyter.stderr_output_518.png",
"text_plain_output_695.png",
"application_vnd.jupyter.stderr_output_316.png",
"application_vnd.jupyter.stderr_output_468.png",
"application_vnd.jupyter.stderr_output_662.png",
"text_plain_output_417.png",
"text_plain_output_707.png",
"text_plain_output_545.png",
"application_vnd.jupyter.stderr_output_714.png",
"text_plain_output_393.png",
"application_vnd.jupyter.stderr_output_570.png",
"application_vnd.jupyter.stderr_output_404.png",
"text_plain_output_243.png",
"application_vnd.jupyter.stderr_output_330.png",
"text_plain_output_611.png",
"application_vnd.jupyter.stderr_output_366.png",
"application_vnd.jupyter.stderr_output_278.png",
"text_plain_output_45.png",
"text_plain_output_599.png",
"application_vnd.jupyter.stderr_output_716.png",
"text_plain_output_665.png",
"application_vnd.jupyter.stderr_output_174.png",
"text_plain_output_257.png",
"text_plain_output_405.png",
"text_plain_output_353.png",
"application_vnd.jupyter.stderr_output_454.png",
"text_plain_output_277.png",
"text_plain_output_457.png",
"application_vnd.jupyter.stderr_output_510.png",
"application_vnd.jupyter.stderr_output_12.png",
"text_plain_output_361.png",
"text_plain_output_171.png",
"application_vnd.jupyter.stderr_output_720.png",
"application_vnd.jupyter.stderr_output_574.png",
"text_plain_output_561.png",
"text_plain_output_431.png",
"application_vnd.jupyter.stderr_output_644.png",
"application_vnd.jupyter.stderr_output_342.png",
"text_plain_output_159.png",
"text_plain_output_713.png",
"text_plain_output_29.png",
"text_plain_output_359.png",
"text_plain_output_529.png",
"text_plain_output_347.png",
"application_vnd.jupyter.stderr_output_82.png",
"application_vnd.jupyter.stderr_output_288.png",
"text_plain_output_129.png",
"application_vnd.jupyter.stderr_output_358.png",
"application_vnd.jupyter.stderr_output_398.png",
"application_vnd.jupyter.stderr_output_388.png",
"text_plain_output_349.png",
"application_vnd.jupyter.stderr_output_332.png",
"application_vnd.jupyter.stderr_output_72.png",
"text_plain_output_483.png",
"text_plain_output_363.png",
"text_plain_output_289.png",
"application_vnd.jupyter.stderr_output_290.png",
"application_vnd.jupyter.stderr_output_586.png",
"text_plain_output_255.png",
"application_vnd.jupyter.stderr_output_8.png",
"text_plain_output_329.png",
"text_plain_output_49.png",
"application_vnd.jupyter.stderr_output_308.png",
"text_plain_output_63.png",
"application_vnd.jupyter.stderr_output_394.png",
"application_vnd.jupyter.stderr_output_580.png",
"text_plain_output_27.png",
"application_vnd.jupyter.stderr_output_496.png",
"text_plain_output_177.png",
"text_plain_output_607.png",
"application_vnd.jupyter.stderr_output_306.png",
"application_vnd.jupyter.stderr_output_604.png",
"application_vnd.jupyter.stderr_output_424.png",
"application_vnd.jupyter.stderr_output_534.png",
"text_plain_output_681.png",
"text_plain_output_333.png",
"text_plain_output_581.png",
"application_vnd.jupyter.stderr_output_592.png",
"application_vnd.jupyter.stderr_output_80.png",
"text_plain_output_269.png",
"application_vnd.jupyter.stderr_output_300.png",
"text_plain_output_503.png",
"text_plain_output_735.png",
"text_plain_output_153.png",
"text_plain_output_57.png",
"application_vnd.jupyter.stderr_output_600.png",
"application_vnd.jupyter.stderr_output_728.png",
"text_plain_output_469.png",
"application_vnd.jupyter.stderr_output_10.png",
"application_vnd.jupyter.stderr_output_396.png",
"text_plain_output_357.png",
"text_plain_output_21.png",
"application_vnd.jupyter.stderr_output_464.png",
"application_vnd.jupyter.stderr_output_220.png",
"text_plain_output_47.png",
"text_plain_output_623.png",
"application_vnd.jupyter.stderr_output_98.png",
"text_plain_output_121.png",
"text_plain_output_25.png",
"text_plain_output_523.png",
"text_plain_output_401.png",
"text_plain_output_77.png",
"text_plain_output_421.png",
"application_vnd.jupyter.stderr_output_34.png",
"text_plain_output_535.png",
"text_plain_output_527.png",
"text_plain_output_183.png",
"application_vnd.jupyter.stderr_output_536.png",
"text_plain_output_149.png",
"text_plain_output_383.png",
"text_plain_output_207.png",
"application_vnd.jupyter.stderr_output_444.png",
"application_vnd.jupyter.stderr_output_90.png",
"text_plain_output_391.png",
"application_vnd.jupyter.stderr_output_538.png",
"application_vnd.jupyter.stderr_output_352.png",
"text_plain_output_413.png",
"text_plain_output_709.png",
"application_vnd.jupyter.stderr_output_584.png",
"application_vnd.jupyter.stderr_output_144.png",
"application_vnd.jupyter.stderr_output_140.png",
"text_plain_output_663.png",
"text_plain_output_87.png",
"text_plain_output_3.png",
"text_plain_output_217.png",
"text_plain_output_657.png",
"text_plain_output_427.png",
"application_vnd.jupyter.stderr_output_214.png",
"application_vnd.jupyter.stderr_output_44.png",
"text_plain_output_141.png",
"application_vnd.jupyter.stderr_output_590.png",
"text_plain_output_225.png",
"text_plain_output_701.png",
"text_plain_output_191.png",
"text_plain_output_609.png",
"application_vnd.jupyter.stderr_output_320.png",
"application_vnd.jupyter.stderr_output_544.png",
"text_plain_output_259.png",
"application_vnd.jupyter.stderr_output_440.png",
"text_plain_output_447.png",
"application_vnd.jupyter.stderr_output_160.png",
"text_plain_output_283.png",
"text_plain_output_495.png",
"text_plain_output_247.png",
"application_vnd.jupyter.stderr_output_42.png",
"text_plain_output_113.png",
"text_plain_output_371.png",
"application_vnd.jupyter.stderr_output_602.png",
"application_vnd.jupyter.stderr_output_298.png",
"application_vnd.jupyter.stderr_output_598.png",
"application_vnd.jupyter.stderr_output_192.png",
"text_plain_output_479.png",
"application_vnd.jupyter.stderr_output_678.png",
"application_vnd.jupyter.stderr_output_702.png",
"text_plain_output_81.png",
"text_plain_output_69.png",
"application_vnd.jupyter.stderr_output_670.png",
"application_vnd.jupyter.stderr_output_84.png",
"text_plain_output_667.png",
"application_vnd.jupyter.stderr_output_180.png",
"text_plain_output_175.png",
"text_plain_output_165.png",
"text_plain_output_145.png",
"application_vnd.jupyter.stderr_output_230.png",
"text_plain_output_125.png",
"application_vnd.jupyter.stderr_output_428.png",
"application_vnd.jupyter.stderr_output_314.png",
"application_vnd.jupyter.stderr_output_120.png",
"text_plain_output_487.png",
"text_plain_output_595.png",
"text_plain_output_643.png",
"text_plain_output_575.png",
"application_vnd.jupyter.stderr_output_558.png",
"text_plain_output_197.png",
"application_vnd.jupyter.stderr_output_60.png",
"application_vnd.jupyter.stderr_output_648.png",
"application_vnd.jupyter.stderr_output_216.png",
"text_plain_output_315.png",
"text_plain_output_429.png",
"application_vnd.jupyter.stderr_output_372.png",
"application_vnd.jupyter.stderr_output_202.png",
"text_plain_output_517.png",
"text_plain_output_433.png",
"text_plain_output_7.png",
"application_vnd.jupyter.stderr_output_184.png",
"application_vnd.jupyter.stderr_output_594.png",
"text_plain_output_513.png",
"application_vnd.jupyter.stderr_output_390.png",
"application_vnd.jupyter.stderr_output_596.png",
"text_plain_output_645.png",
"text_plain_output_411.png",
"text_plain_output_91.png",
"application_vnd.jupyter.stderr_output_688.png",
"text_plain_output_245.png",
"application_vnd.jupyter.stderr_output_660.png",
"text_plain_output_497.png",
"application_vnd.jupyter.stderr_output_514.png",
"application_vnd.jupyter.stderr_output_30.png",
"text_plain_output_265.png",
"application_vnd.jupyter.stderr_output_416.png",
"application_vnd.jupyter.stderr_output_108.png",
"application_vnd.jupyter.stderr_output_62.png",
"text_plain_output_435.png",
"text_plain_output_689.png",
"application_vnd.jupyter.stderr_output_328.png",
"text_plain_output_59.png",
"text_plain_output_409.png",
"text_plain_output_103.png",
"text_plain_output_71.png",
"application_vnd.jupyter.stderr_output_470.png",
"text_plain_output_539.png",
"application_vnd.jupyter.stderr_output_250.png",
"application_vnd.jupyter.stderr_output_686.png",
"text_plain_output_211.png",
"application_vnd.jupyter.stderr_output_242.png",
"application_vnd.jupyter.stderr_output_654.png",
"application_vnd.jupyter.stderr_output_294.png",
"text_plain_output_601.png",
"application_vnd.jupyter.stderr_output_588.png",
"text_plain_output_541.png",
"application_vnd.jupyter.stderr_output_612.png",
"application_vnd.jupyter.stderr_output_130.png",
"application_vnd.jupyter.stderr_output_28.png",
"application_vnd.jupyter.stderr_output_364.png",
"application_vnd.jupyter.stderr_output_448.png",
"application_vnd.jupyter.stderr_output_658.png",
"application_vnd.jupyter.stderr_output_680.png",
"text_plain_output_653.png",
"text_plain_output_543.png",
"text_plain_output_451.png",
"application_vnd.jupyter.stderr_output_256.png",
"text_plain_output_109.png",
"application_vnd.jupyter.stderr_output_46.png",
"text_plain_output_459.png",
"text_plain_output_615.png",
"text_plain_output_41.png",
"application_vnd.jupyter.stderr_output_206.png",
"application_vnd.jupyter.stderr_output_456.png",
"text_plain_output_253.png",
"application_vnd.jupyter.stderr_output_234.png",
"application_vnd.jupyter.stderr_output_734.png",
"application_vnd.jupyter.stderr_output_312.png",
"text_plain_output_723.png",
"application_vnd.jupyter.stderr_output_682.png",
"application_vnd.jupyter.stderr_output_630.png",
"text_plain_output_291.png",
"application_vnd.jupyter.stderr_output_616.png",
"application_vnd.jupyter.stderr_output_606.png",
"application_vnd.jupyter.stderr_output_708.png",
"text_plain_output_241.png",
"text_plain_output_231.png",
"text_plain_output_533.png",
"text_plain_output_345.png",
"text_plain_output_649.png",
"application_vnd.jupyter.stderr_output_252.png",
"application_vnd.jupyter.stderr_output_64.png",
"application_vnd.jupyter.stderr_output_76.png",
"text_plain_output_209.png",
"text_plain_output_185.png",
"application_vnd.jupyter.stderr_output_262.png",
"text_plain_output_85.png",
"text_plain_output_605.png",
"text_plain_output_549.png",
"text_plain_output_67.png",
"text_plain_output_573.png",
"text_plain_output_297.png",
"text_plain_output_53.png",
"text_plain_output_313.png",
"application_vnd.jupyter.stderr_output_480.png",
"application_vnd.jupyter.stderr_output_572.png",
"application_vnd.jupyter.stderr_output_386.png",
"application_vnd.jupyter.stderr_output_20.png",
"text_plain_output_635.png",
"text_plain_output_703.png",
"text_plain_output_711.png",
"text_plain_output_193.png",
"text_plain_output_441.png",
"text_plain_output_403.png",
"application_vnd.jupyter.stderr_output_338.png",
"application_vnd.jupyter.stderr_output_126.png",
"application_vnd.jupyter.stderr_output_560.png",
"text_plain_output_23.png",
"application_vnd.jupyter.stderr_output_218.png",
"application_vnd.jupyter.stderr_output_446.png",
"application_vnd.jupyter.stderr_output_494.png",
"text_plain_output_173.png",
"application_vnd.jupyter.stderr_output_36.png",
"text_plain_output_683.png",
"application_vnd.jupyter.stderr_output_100.png",
"text_plain_output_235.png",
"application_vnd.jupyter.stderr_output_430.png",
"application_vnd.jupyter.stderr_output_266.png",
"text_plain_output_151.png",
"text_plain_output_89.png",
"application_vnd.jupyter.stderr_output_22.png",
"text_plain_output_299.png",
"text_plain_output_51.png",
"text_plain_output_677.png",
"application_vnd.jupyter.stderr_output_166.png",
"application_vnd.jupyter.stderr_output_508.png",
"text_plain_output_525.png",
"application_vnd.jupyter.stderr_output_318.png",
"text_plain_output_731.png",
"text_plain_output_705.png",
"application_vnd.jupyter.stderr_output_292.png",
"application_vnd.jupyter.stderr_output_726.png",
"text_plain_output_99.png",
"text_plain_output_381.png",
"text_plain_output_571.png",
"text_plain_output_163.png",
"text_plain_output_179.png",
"text_plain_output_537.png",
"application_vnd.jupyter.stderr_output_408.png",
"application_vnd.jupyter.stderr_output_374.png",
"text_plain_output_569.png",
"text_plain_output_239.png",
"application_vnd.jupyter.stderr_output_186.png",
"application_vnd.jupyter.stderr_output_168.png",
"text_plain_output_127.png",
"text_plain_output_559.png",
"text_plain_output_311.png",
"text_plain_output_719.png",
"text_plain_output_295.png",
"text_plain_output_279.png",
"text_plain_output_507.png",
"application_vnd.jupyter.stderr_output_56.png",
"application_vnd.jupyter.stderr_output_452.png",
"text_plain_output_509.png",
"application_vnd.jupyter.stderr_output_104.png",
"text_plain_output_337.png",
"application_vnd.jupyter.stderr_output_196.png",
"text_plain_output_499.png",
"application_vnd.jupyter.stderr_output_50.png",
"text_plain_output_563.png",
"application_vnd.jupyter.stderr_output_736.png",
"application_vnd.jupyter.stderr_output_114.png",
"text_plain_output_97.png",
"text_plain_output_729.png",
"application_vnd.jupyter.stderr_output_492.png",
"text_plain_output_717.png",
"text_plain_output_227.png",
"application_vnd.jupyter.stderr_output_226.png",
"text_plain_output_453.png",
"text_plain_output_1.png",
"text_plain_output_33.png",
"application_vnd.jupyter.stderr_output_128.png",
"application_vnd.jupyter.stderr_output_150.png",
"text_plain_output_631.png",
"text_plain_output_39.png",
"application_vnd.jupyter.stderr_output_556.png",
"text_plain_output_335.png",
"application_vnd.jupyter.stderr_output_142.png",
"application_vnd.jupyter.stderr_output_326.png",
"text_plain_output_233.png",
"text_plain_output_473.png",
"application_vnd.jupyter.stderr_output_304.png",
"text_plain_output_385.png",
"text_plain_output_55.png",
"text_plain_output_293.png",
"text_plain_output_199.png",
"application_vnd.jupyter.stderr_output_530.png",
"text_plain_output_463.png",
"text_plain_output_319.png",
"application_vnd.jupyter.stderr_output_138.png",
"application_vnd.jupyter.stderr_output_412.png",
"application_vnd.jupyter.stderr_output_548.png",
"text_plain_output_93.png",
"application_vnd.jupyter.stderr_output_200.png",
"text_plain_output_19.png",
"text_plain_output_439.png",
"text_plain_output_341.png",
"application_vnd.jupyter.stderr_output_280.png",
"text_plain_output_105.png",
"text_plain_output_465.png",
"text_plain_output_491.png",
"text_plain_output_679.png",
"text_plain_output_641.png",
"text_plain_output_249.png",
"application_vnd.jupyter.stderr_output_122.png",
"application_vnd.jupyter.stderr_output_488.png",
"application_vnd.jupyter.stderr_output_624.png",
"application_vnd.jupyter.stderr_output_94.png",
"text_plain_output_619.png",
"application_vnd.jupyter.stderr_output_282.png",
"application_vnd.jupyter.stderr_output_730.png",
"text_plain_output_17.png",
"text_plain_output_323.png",
"application_vnd.jupyter.stderr_output_462.png",
"application_vnd.jupyter.stderr_output_652.png",
"application_vnd.jupyter.stderr_output_182.png",
"application_vnd.jupyter.stderr_output_158.png",
"text_plain_output_597.png",
"application_vnd.jupyter.stderr_output_78.png",
"text_plain_output_11.png",
"application_vnd.jupyter.stderr_output_698.png",
"application_vnd.jupyter.stderr_output_370.png",
"text_plain_output_481.png",
"application_vnd.jupyter.stderr_output_276.png",
"application_vnd.jupyter.stderr_output_188.png",
"application_vnd.jupyter.stderr_output_696.png",
"application_vnd.jupyter.stderr_output_14.png",
"text_plain_output_267.png",
"application_vnd.jupyter.stderr_output_562.png",
"text_plain_output_553.png",
"text_plain_output_425.png",
"text_plain_output_591.png",
"application_vnd.jupyter.stderr_output_706.png",
"text_plain_output_625.png",
"application_vnd.jupyter.stderr_output_350.png",
"text_plain_output_577.png",
"application_vnd.jupyter.stderr_output_54.png",
"application_vnd.jupyter.stderr_output_118.png",
"application_vnd.jupyter.stderr_output_154.png",
"text_plain_output_727.png",
"application_vnd.jupyter.stderr_output_438.png",
"application_vnd.jupyter.stderr_output_442.png",
"application_vnd.jupyter.stderr_output_198.png",
"text_plain_output_519.png",
"text_plain_output_733.png",
"text_plain_output_721.png",
"application_vnd.jupyter.stderr_output_712.png",
"text_plain_output_303.png",
"text_plain_output_621.png",
"text_plain_output_377.png",
"application_vnd.jupyter.stderr_output_460.png",
"text_plain_output_95.png",
"text_plain_output_339.png",
"application_vnd.jupyter.stderr_output_228.png",
"application_vnd.jupyter.stderr_output_614.png",
"application_vnd.jupyter.stderr_output_254.png",
"text_plain_output_547.png",
"text_plain_output_369.png",
"application_vnd.jupyter.stderr_output_582.png",
"application_vnd.jupyter.stderr_output_628.png",
"text_plain_output_587.png",
"application_vnd.jupyter.stderr_output_466.png",
"application_vnd.jupyter.stderr_output_340.png",
"text_plain_output_365.png",
"application_vnd.jupyter.stderr_output_208.png",
"text_plain_output_61.png",
"text_plain_output_585.png",
"text_plain_output_83.png",
"application_vnd.jupyter.stderr_output_248.png",
"text_plain_output_647.png",
"application_vnd.jupyter.stderr_output_210.png",
"application_vnd.jupyter.stderr_output_92.png",
"application_vnd.jupyter.stderr_output_164.png",
"application_vnd.jupyter.stderr_output_102.png",
"text_plain_output_397.png",
"text_plain_output_661.png",
"text_plain_output_389.png",
"application_vnd.jupyter.stderr_output_410.png",
"text_plain_output_351.png",
"application_vnd.jupyter.stderr_output_40.png",
"application_vnd.jupyter.stderr_output_532.png",
"application_vnd.jupyter.stderr_output_244.png",
"text_plain_output_135.png",
"text_plain_output_285.png",
"application_vnd.jupyter.stderr_output_264.png",
"application_vnd.jupyter.stderr_output_486.png",
"text_plain_output_675.png",
"application_vnd.jupyter.stderr_output_646.png",
"text_plain_output_493.png",
"application_vnd.jupyter.stderr_output_434.png"
] | from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
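# negative durations are invalid; overwrite them with a fixed fallback value (49.317, presumably a typical trip duration)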
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
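# bias-corrected Cramér's V quantifies the association between two categorical variables on a 0-1 scale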
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
from scipy.stats import chi2_contingency
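# chi-square test of independence between each categorical feature and the 'Claim' target;
# features whose p-value exceeds alpha are treated as unimportant predictors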
class ChiSquare:
def __init__(self, df):
self.df = df
self.p = None
self.chi2 = None
self.dof = None
self.dfObserved = None
self.dfExpected = None
def _print_chisquare_result(self, colX, alpha):
result = ''
if self.p < alpha:
result = '{0} is IMPORTANT for Prediction'.format(colX)
else:
result = '{0} is NOT an important predictor. (Discard {0} from model)'.format(colX)
def TestIndependence(self, colX, colY, alpha=0.05):
X = self.df[colX].astype(str)
Y = self.df[colY].astype(str)
self.dfObserved = pd.crosstab(Y, X)
chi2, p, dof, expected = ss.chi2_contingency(self.dfObserved.values)
self.p = p
self.chi2 = chi2
self.dof = dof
self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index=self.dfObserved.index)
X = df.drop(['Claim'], axis=1)
ct = ChiSquare(df)
for c in X.columns:
ct.TestIndependence(c, 'Claim')
df.drop(columns=['Distribution Channel', 'Agency Type'], axis=1, inplace=True)
y = df['Claim']
x = df
x.drop(columns='Claim', axis=1, inplace=True)
x_dummy = pd.get_dummies(x, columns=['Agency', 'Gender', 'Product Name', 'Destination'], drop_first=True)
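# recursive feature elimination (RFE) with logistic regression ranks the dummy features and keeps the 10 strongest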
lr = LogisticRegression()
rfe = RFE(estimator=lr, n_features_to_select=10, verbose=3)
rfe.fit(x_dummy, y)
rfe_df1 = rfe.fit_transform(x_dummy, y)
X = x_dummy[['Agency_EPX', 'Agency_TST', 'Gender_Not Specified', 'Product Name_2 way Comprehensive Plan', 'Product Name_24 Protect', 'Product Name_Basic Plan', 'Product Name_Comprehensive Plan', 'Product Name_Premier Plan', 'Product Name_Travel Cruise Protect', 'Product Name_Value Plan']]
X.head(5) | code |
16123553/cell_64 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report
rf = RandomForestClassifier(n_estimators=100)
rf_model = rf.fit(X_train, y_train)
rf_pred = rf.predict(X_test)
print(classification_report(y_test, rf_pred)) | code |
16123553/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import missingno
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import RFE
import scipy.stats as ss
import os
print(os.listdir('../input')) | code |
16123553/cell_45 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
from scipy.stats import chi2_contingency
class ChiSquare:
def __init__(self, df):
self.df = df
self.p = None
self.chi2 = None
self.dof = None
self.dfObserved = None
self.dfExpected = None
def _print_chisquare_result(self, colX, alpha):
result = ''
if self.p < alpha:
result = '{0} is IMPORTANT for Prediction'.format(colX)
else:
result = '{0} is NOT an important predictor. (Discard {0} from model)'.format(colX)
def TestIndependence(self, colX, colY, alpha=0.05):
X = self.df[colX].astype(str)
Y = self.df[colY].astype(str)
self.dfObserved = pd.crosstab(Y, X)
chi2, p, dof, expected = ss.chi2_contingency(self.dfObserved.values)
self.p = p
self.chi2 = chi2
self.dof = dof
self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index=self.dfObserved.index)
X = df.drop(['Claim'], axis=1)
ct = ChiSquare(df)
for c in X.columns:
ct.TestIndependence(c, 'Claim')
df.drop(columns=['Distribution Channel', 'Agency Type'], axis=1, inplace=True)
df.info() | code |
16123553/cell_49 | [
"text_plain_output_1.png"
] | from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
from scipy.stats import chi2_contingency
class ChiSquare:
def __init__(self, df):
self.df = df
self.p = None
self.chi2 = None
self.dof = None
self.dfObserved = None
self.dfExpected = None
def _print_chisquare_result(self, colX, alpha):
result = ''
if self.p < alpha:
result = '{0} is IMPORTANT for Prediction'.format(colX)
else:
result = '{0} is NOT an important predictor. (Discard {0} from model)'.format(colX)
def TestIndependence(self, colX, colY, alpha=0.05):
X = self.df[colX].astype(str)
Y = self.df[colY].astype(str)
self.dfObserved = pd.crosstab(Y, X)
chi2, p, dof, expected = ss.chi2_contingency(self.dfObserved.values)
self.p = p
self.chi2 = chi2
self.dof = dof
self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index=self.dfObserved.index)
X = df.drop(['Claim'], axis=1)
ct = ChiSquare(df)
for c in X.columns:
ct.TestIndependence(c, 'Claim')
df.drop(columns=['Distribution Channel', 'Agency Type'], axis=1, inplace=True)
y = df['Claim']
x = df
x.drop(columns='Claim', axis=1, inplace=True)
x_dummy = pd.get_dummies(x, columns=['Agency', 'Gender', 'Product Name', 'Destination'], drop_first=True)
lr = LogisticRegression()
rfe = RFE(estimator=lr, n_features_to_select=10, verbose=3)
rfe.fit(x_dummy, y)
rfe_df1 = rfe.fit_transform(x_dummy, y) | code |
16123553/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
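# plot the distribution of every numeric column to inspect skew and outliers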
for i, col in enumerate(df_numerical.columns):
plt.figure(i)
sns.distplot(df_numerical[col]) | code |
16123553/cell_32 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
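# heatmap of pairwise Cramér's V between the categorical columns (values near 1 mean strong association)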
plt.figure(figsize=(10, 7))
sns.heatmap(cramers, annot=True)
plt.show() | code |
16123553/cell_28 | [
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
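# count policies where the commission paid exceeds the net sales amount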
df6 = df['Net Sales'] < df['Commision (in value)']
df6.sum() | code |
16123553/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df_numerical.info() | code |
16123553/cell_38 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
test = [(df[df['Gender'] == 'Not Specified']['Claim'].value_counts() / len(df[df['Gender'] == 'Not Specified']['Claim']))[1], (df[df['Gender'] == 'M']['Claim'].value_counts() / len(df[df['Gender'] == 'M']['Claim']))[1], (df[df['Gender'] == 'F']['Claim'].value_counts() / len(df[df['Gender'] == 'F']['Claim']))[1]]
test
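# claim rates (share of class 1) per gender group, annotated on the three count plots below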
fig, axes = plt.subplots(1, 3, figsize=(24, 9))
sns.countplot(df[df['Gender'] == 'Not Specified']['Claim'], ax=axes[0])
axes[0].set(title='Distribution of claims for null gender')
axes[0].text(x=1, y=30000, s=f'% of 1 class: {round(test[0], 2)}', fontsize=16, weight='bold', ha='center', va='bottom', color='navy')
sns.countplot(df[df['Gender'] == 'M']['Claim'], ax=axes[1])
axes[1].set(title='Distribution of claims for Male')
axes[1].text(x=1, y=6000, s=f'% of 1 class: {round(test[1], 2)}', fontsize=16, weight='bold', ha='center', va='bottom', color='navy')
sns.countplot(df[df['Gender'] == 'F']['Claim'], ax=axes[2])
axes[2].set(title='Distribution of claims for Female')
axes[2].text(x=1, y=6000, s=f'% of 1 class: {round(test[2], 2)}', fontsize=16, weight='bold', ha='center', va='bottom', color='navy')
plt.show()
sns.countplot(df['Claim']) | code |
16123553/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.info() | code |
16123553/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
test = [(df[df['Gender'] == 'Not Specified']['Claim'].value_counts() / len(df[df['Gender'] == 'Not Specified']['Claim']))[1], (df[df['Gender'] == 'M']['Claim'].value_counts() / len(df[df['Gender'] == 'M']['Claim']))[1], (df[df['Gender'] == 'F']['Claim'].value_counts() / len(df[df['Gender'] == 'F']['Claim']))[1]]
test | code |
16123553/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
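# count records with a negative (invalid) trip duration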
df10 = df['Duration'] < 0
df10.sum() | code |
16123553/cell_53 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
from scipy.stats import chi2_contingency
class ChiSquare:
def __init__(self, df):
self.df = df
self.p = None
self.chi2 = None
self.dof = None
self.dfObserved = None
self.dfExpected = None
def _print_chisquare_result(self, colX, alpha):
result = ''
if self.p < alpha:
result = '{0} is IMPORTANT for Prediction'.format(colX)
else:
result = '{0} is NOT an important predictor. (Discard {0} from model)'.format(colX)
def TestIndependence(self, colX, colY, alpha=0.05):
X = self.df[colX].astype(str)
Y = self.df[colY].astype(str)
self.dfObserved = pd.crosstab(Y, X)
chi2, p, dof, expected = ss.chi2_contingency(self.dfObserved.values)
self.p = p
self.chi2 = chi2
self.dof = dof
self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index=self.dfObserved.index)
X = df.drop(['Claim'], axis=1)
ct = ChiSquare(df)
for c in X.columns:
ct.TestIndependence(c, 'Claim')
df.drop(columns=['Distribution Channel', 'Agency Type'], axis=1, inplace=True)
y = df['Claim']
x = df
x.drop(columns='Claim', axis=1, inplace=True)
x_dummy = pd.get_dummies(x, columns=['Agency', 'Gender', 'Product Name', 'Destination'], drop_first=True)
lr = LogisticRegression()
rfe = RFE(estimator=lr, n_features_to_select=10, verbose=3)
rfe.fit(x_dummy, y)
rfe_df1 = rfe.fit_transform(x_dummy, y)
X = x_dummy[['Agency_EPX', 'Agency_TST', 'Gender_Not Specified', 'Product Name_2 way Comprehensive Plan', 'Product Name_24 Protect', 'Product Name_Basic Plan', 'Product Name_Comprehensive Plan', 'Product Name_Premier Plan', 'Product Name_Travel Cruise Protect', 'Product Name_Value Plan']]
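# SMOTE oversamples the minority (claim) class so both classes are balanced before the train/test split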
from imblearn.over_sampling import SMOTE
smote = SMOTE(random_state=7)
X_ov, y_ov = smote.fit_resample(X, y)
X_train, X_test, y_train, y_test = train_test_split(X_ov, y_ov, train_size=0.7, random_state=7)
pd.value_counts(y_train) | code |
16123553/cell_36 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as ss
import seaborn as sns
df = pd.read_csv('../input/travel insurance.csv')
df1 = df
df.fillna('Not Specified', inplace=True)
df.isnull().sum()
df_numerical = df._get_numeric_data()
df.loc[df['Duration'] < 0, 'Duration'] = 49.317
df.loc[df['Net Sales'] == 0.0, 'Commision (in value)'] = 0
def cramers_v(x, y):
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - (k - 1) * (r - 1) / (n - 1))
rcorr = r - (r - 1) ** 2 / (n - 1)
kcorr = k - (k - 1) ** 2 / (n - 1)
return np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1))
categorical = ['Agency', 'Agency Type', 'Distribution Channel', 'Product Name', 'Destination', 'Gender', 'Claim']
cramers = pd.DataFrame({i: [cramers_v(df[i], df[j]) for j in categorical] for i in categorical})
cramers['column'] = [i for i in categorical if i not in ['memberid']]
cramers.set_index('column', inplace=True)
test = [(df[df['Gender'] == 'Not Specified']['Claim'].value_counts() / len(df[df['Gender'] == 'Not Specified']['Claim']))[1], (df[df['Gender'] == 'M']['Claim'].value_counts() / len(df[df['Gender'] == 'M']['Claim']))[1], (df[df['Gender'] == 'F']['Claim'].value_counts() / len(df[df['Gender'] == 'F']['Claim']))[1]]
test
fig, axes = plt.subplots(1, 3, figsize=(24, 9))
sns.countplot(df[df['Gender'] == 'Not Specified']['Claim'], ax=axes[0])
axes[0].set(title='Distribution of claims for null gender')
axes[0].text(x=1, y=30000, s=f'% of 1 class: {round(test[0], 2)}', fontsize=16, weight='bold', ha='center', va='bottom', color='navy')
sns.countplot(df[df['Gender'] == 'M']['Claim'], ax=axes[1])
axes[1].set(title='Distribution of claims for Male')
axes[1].text(x=1, y=6000, s=f'% of 1 class: {round(test[1], 2)}', fontsize=16, weight='bold', ha='center', va='bottom', color='navy')
sns.countplot(df[df['Gender'] == 'F']['Claim'], ax=axes[2])
axes[2].set(title='Distribution of claims for Female')
axes[2].text(x=1, y=6000, s=f'% of 1 class: {round(test[2], 2)}', fontsize=16, weight='bold', ha='center', va='bottom', color='navy')
plt.show() | code |
89129153/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt # pyplot plotting tool
import pandas as pd # data processing
import seaborn as sns # seaborn plotting tool
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
corr = winedf.corr()
X = winedf.iloc[:, :-1]
y = winedf['quality']
print(X.shape)
print(y.shape) | code |
89129153/cell_25 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # pyplot plotting tool
import pandas as pd # data processing
import seaborn as sns # seaborn plotting tool
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
corr = winedf.corr()
X = winedf.iloc[:, :-1]
y = winedf['quality']
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=21, test_size=0.3)
scaler = StandardScaler().fit(X_train)
standardized_X = scaler.transform(X_train)
standardized_X_test = scaler.transform(X_test)
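# the scaler is fitted on the training split only and then applied to both splits to avoid data leakage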
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(standardized_X, y_train) | code |
89129153/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
dtree = DecisionTreeRegressor(random_state=22)
dtree.fit(X_train, y_train)
print('Making predictions for the following 5 wines:')
print(X_test.head())
print('The predictions are')
print(dtree.predict(X_test.head())) | code |
89129153/cell_44 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt # pyplot plotting tool
import pandas as pd # data processing
import seaborn as sns # seaborn plotting tool
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
corr = winedf.corr()
X = winedf.iloc[:, :-1]
y = winedf['quality']
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=21, test_size=0.3)
scaler = StandardScaler().fit(X_train)
standardized_X = scaler.transform(X_train)
standardized_X_test = scaler.transform(X_test)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(standardized_X, y_train)
y_pred = lr.predict(standardized_X_test)
from sklearn.tree import DecisionTreeRegressor
dtree = DecisionTreeRegressor(random_state=22)
dtree.fit(X_train, y_train)
def get_mae(max_leaf_nodes, X_train, X_test, y_train, y_test):
model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
return mae
for max_leaf_nodes in [5, 10, 25, 50, 250, 500, 1000, 5000]:
mae = get_mae(max_leaf_nodes, X_train, X_test, y_train, y_test)
model_1 = RandomForestRegressor(n_estimators=50, random_state=0)
model_2 = RandomForestRegressor(n_estimators=100, random_state=0)
model_3 = RandomForestRegressor(n_estimators=100, criterion='mae', random_state=0)
model_4 = RandomForestRegressor(n_estimators=200, min_samples_split=20, random_state=0)
model_5 = RandomForestRegressor(n_estimators=100, max_depth=7, random_state=0)
models = [model_1, model_2, model_3, model_4, model_5]
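# compare five random-forest configurations by mean absolute error on the held-out test split
# (newer scikit-learn releases spell criterion='mae' as criterion='absolute_error')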
def score_model(model, X_t=X_train, X_v=X_test, y_t=y_train, y_v=y_test):
model.fit(X_t, y_t)
preds = model.predict(X_v)
return mean_absolute_error(y_v, preds)
for i in range(0, len(models)):
mae = score_model(models[i])
print('Model %d MAE: %f' % (i + 1, mae)) | code |
89129153/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.head() | code |
89129153/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # pyplot plotting tool
import numpy as np # numeric python
import pandas as pd # data processing
import seaborn as sns # seaborn plotting tool
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
corr = winedf.corr()
X = winedf.iloc[:, :-1]
y = winedf['quality']
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=21, test_size=0.3)
scaler = StandardScaler().fit(X_train)
standardized_X = scaler.transform(X_train)
standardized_X_test = scaler.transform(X_test)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(standardized_X, y_train)
y_pred = lr.predict(standardized_X_test)
from sklearn.model_selection import cross_val_score
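# 10-fold cross-validation on the standardized training data; each score is the fold's R^2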
cv_results = cross_val_score(lr, standardized_X, y_train, cv=10)
print('Cross validation results: ', cv_results)
print('Mean cross validation result: ', np.mean(cv_results)) | code |
89129153/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # pyplot plotting tool
import pandas as pd # data processing
import seaborn as sns # seaborn plotting tool
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
corr = winedf.corr()
X = winedf.iloc[:, :-1]
y = winedf['quality']
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=21, test_size=0.3)
scaler = StandardScaler().fit(X_train)
standardized_X = scaler.transform(X_train)
standardized_X_test = scaler.transform(X_test)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(standardized_X, y_train)
print('R^2 is: ', lr.score(standardized_X, y_train))
print('The coefficients are: ', lr.coef_) | code |
89129153/cell_41 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt # pyplot plotting tool
import pandas as pd # data processing
import seaborn as sns # seaborn plotting tool
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
corr = winedf.corr()
X = winedf.iloc[:, :-1]
y = winedf['quality']
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=21, test_size=0.3)
scaler = StandardScaler().fit(X_train)
standardized_X = scaler.transform(X_train)
standardized_X_test = scaler.transform(X_test)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(standardized_X, y_train)
y_pred = lr.predict(standardized_X_test)
from sklearn.tree import DecisionTreeRegressor
dtree = DecisionTreeRegressor(random_state=22)
dtree.fit(X_train, y_train)
def get_mae(max_leaf_nodes, X_train, X_test, y_train, y_test):
model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
return mae
for max_leaf_nodes in [5, 10, 25, 50, 250, 500, 1000, 5000]:
mae = get_mae(max_leaf_nodes, X_train, X_test, y_train, y_test)
from sklearn.ensemble import RandomForestRegressor
rforest = RandomForestRegressor(random_state=1)
rforest.fit(X_train, y_train)
y_pred = rforest.predict(X_test)
print(mean_absolute_error(y_test, y_pred)) | code |
89129153/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89129153/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt # pyplot plotting tool
import pandas as pd # data processing
import seaborn as sns # seaborn plotting tool
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
corr = winedf.corr()
plt.figure(figsize=(9, 8))
sns.heatmap(corr, cmap='YlGnBu', annot=True)
plt.show() | code |
89129153/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
dtree = DecisionTreeRegressor(random_state=22)
dtree.fit(X_train, y_train) | code |
89129153/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
print('winedf shape: ', winedf.shape, '\n')
print('winedf information:')
print(winedf.info()) | code |
89129153/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
winedf.iloc[:, 1:11].hist(figsize=(20, 10), bins=20, edgecolor='black', color='lightgreen') | code |
89129153/cell_38 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt # pyplot plotting tool
import pandas as pd # data processing
import seaborn as sns # seaborn plotting tool
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
corr = winedf.corr()
X = winedf.iloc[:, :-1]
y = winedf['quality']
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=21, test_size=0.3)
scaler = StandardScaler().fit(X_train)
standardized_X = scaler.transform(X_train)
standardized_X_test = scaler.transform(X_test)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(standardized_X, y_train)
y_pred = lr.predict(standardized_X_test)
from sklearn.tree import DecisionTreeRegressor
dtree = DecisionTreeRegressor(random_state=22)
dtree.fit(X_train, y_train)
def get_mae(max_leaf_nodes, X_train, X_test, y_train, y_test):
model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
return mae
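# sweep max_leaf_nodes to see how tree complexity trades off against validation MAE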
for max_leaf_nodes in [5, 10, 25, 50, 250, 500, 1000, 5000]:
mae = get_mae(max_leaf_nodes, X_train, X_test, y_train, y_test)
print('Max leaf nodes: %d \t\t Mean Absolute Error: %.5f' % (max_leaf_nodes, mae)) | code |
89129153/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
winedf['quality'].hist(align='right', bins=range(3, 9), edgecolor='black', grid=False) | code |
89129153/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
print(winedf.describe(), '\n')
print('The median wine quality is: ', winedf['quality'].median()) | code |
89129153/cell_27 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # pyplot plotting tool
import pandas as pd # data processing
import seaborn as sns # seaborn plotting tool
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts()
corr = winedf.corr()
X = winedf.iloc[:, :-1]
y = winedf['quality']
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=21, test_size=0.3)
scaler = StandardScaler().fit(X_train)
standardized_X = scaler.transform(X_train)
standardized_X_test = scaler.transform(X_test)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(standardized_X, y_train)
y_pred = lr.predict(standardized_X_test)
print('R^2 for test set: ', lr.score(standardized_X_test, y_test)) | code |
89129153/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing
winedf = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
winedf.quality.value_counts() | code |
89129153/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
dtree = DecisionTreeRegressor(random_state=22)
dtree.fit(X_train, y_train)
from sklearn.metrics import mean_absolute_error
predicted_wine_quality = dtree.predict(X_test)
mean_absolute_error(y_test, predicted_wine_quality) | code |
72111473/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import numpy as np
import cv2
import pydicom
import matplotlib
import matplotlib.animation  # needed so matplotlib.animation.FuncAnimation below resolves
matplotlib.rcParams['animation.html'] = 'jshtml'
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from tqdm import tqdm
def load_voxel(study_id, scan_type='FLAIR', split='train', sz=256):
assert sz in (64, 128, 256)
data_root = Path(f'../input/rsna-miccai-voxel-{sz}-dataset')
npy_path = Path(data_root).joinpath('voxel', split, study_id, f'{scan_type}.npy')
voxel = np.load(str(npy_path))
return voxel
def show_animation(images: list):
'''
Displays an animation from the list of images.
set: matplotlib.rcParams['animation.html'] = 'jshtml'
'''
fig = plt.figure(figsize=(6, 6))
plt.axis('off')
im = plt.imshow(images[0], cmap='gray')
def animate_func(i):
im.set_array(images[i])
return [im]
    return matplotlib.animation.FuncAnimation(fig, animate_func, frames=len(images), interval=20)
voxel = load_voxel('00000', scan_type='FLAIR', split='train', sz=256)  # assumed example study id; the original cell referenced voxel without defining it
flair_animation = show_animation(voxel)
flair_animation | code |
72111473/cell_2 | [
"text_html_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
root_dir = '../input/rsna-miccai-voxel-64-dataset/'
df = pd.read_csv('../input/training-labels/train_labels.csv')
sns.countplot(data=df, x='MGMT_value')
def full_ids(data):
    # Zero-pad the numeric BraTS21ID to the 5-character folder name used on disk
    zeros = 5 - len(str(data))
    if zeros > 0:
        prefix = ''.join(['0' for i in range(zeros)])
        return prefix + str(data)
    return str(data)
df['BraTS21ID_full'] = df['BraTS21ID'].apply(full_ids)
df['flair'] = df['BraTS21ID_full'].apply(lambda file_id: root_dir + 'train/' + file_id + '/FLAIR/')
df['t1w'] = df['BraTS21ID_full'].apply(lambda file_id: root_dir + 'train/' + file_id + '/T1w/')
df['t1wce'] = df['BraTS21ID_full'].apply(lambda file_id: root_dir + 'train/' + file_id + '/T1wCE/')
df['t2w'] = df['BraTS21ID_full'].apply(lambda file_id: root_dir + 'train/' + file_id + '/T2w/')
df
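# Drop the excluded case ids and reset the index before concatenating the orientation columns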
to_exclude = [109, 123, 709]
df = df[~df['BraTS21ID'].isin(to_exclude)]
df = df.reset_index(drop=True)
df2 = pd.read_csv('../input/exportdataframe/export_dataframe.csv')
df = pd.concat([df, df2[['flair_axis', 't1w_axis', 't1wce_axis', 't2w_axis']]], axis=1, join='inner')
df | code |
72111473/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
root_dir = '../input/rsna-miccai-voxel-64-dataset/'
df = pd.read_csv('../input/training-labels/train_labels.csv')
def full_ids(data):
    # Zero-pad the numeric BraTS21ID to the 5-character folder name used on disk
    zeros = 5 - len(str(data))
    if zeros > 0:
        prefix = ''.join(['0' for i in range(zeros)])
        return prefix + str(data)
    return str(data)
df['BraTS21ID_full'] = df['BraTS21ID'].apply(full_ids)
df['flair'] = df['BraTS21ID_full'].apply(lambda file_id: root_dir + 'train/' + file_id + '/FLAIR/')
df['t1w'] = df['BraTS21ID_full'].apply(lambda file_id: root_dir + 'train/' + file_id + '/T1w/')
df['t1wce'] = df['BraTS21ID_full'].apply(lambda file_id: root_dir + 'train/' + file_id + '/T1wCE/')
df['t2w'] = df['BraTS21ID_full'].apply(lambda file_id: root_dir + 'train/' + file_id + '/T2w/')
df
to_exclude = [109, 123, 709]
df = df[~df['BraTS21ID'].isin(to_exclude)]
df = df.reset_index(drop=True)
df2 = pd.read_csv('../input/exportdataframe/export_dataframe.csv')
df = pd.concat([df, df2[['flair_axis', 't1w_axis', 't1wce_axis', 't2w_axis']]], axis=1, join='inner')
df
new_dataframe = df.loc[df['t1w_axis'] == 'axial']
new_dataframe = new_dataframe[['MGMT_value', 't1w', 't1w_axis']]
new_dataframe | code |
17109150/cell_13 | [
"text_plain_output_1.png"
] | from PIL import Image
from io import BytesIO
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
train_on_gpu = torch.cuda.is_available()
labels = pd.read_csv('../input/train.csv')
data_dir = '../input'
train_dir = data_dir + '/train'
test_dir = data_dir + '/test'
pil2tensor = transforms.ToTensor()
tensor2pil = transforms.ToPILImage()
pil_image = Image.open(train_dir + '/0a750c2e8.jpg')
rgb_image = pil2tensor(pil_image)
def plot_image(tensor):
pass
from io import BytesIO
import IPython.display
r_image = rgb_image[0]
g_image = rgb_image[1]
b_image = rgb_image[2]
def show_grayscale_image(tensor):
f = BytesIO()
a = np.uint8(tensor.mul(255).numpy())
Image.fromarray(a).save(f, 'png')
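# ImageNet channel means/stds for normalization; the training pipeline adds light augmentation, the test pipeline is deterministic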
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_data = transforms.Compose([transforms.Resize(256), transforms.RandomResizedCrop(224), transforms.RandomRotation(25), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0), transforms.RandomAffine(degrees=4, translate=None, scale=None, shear=None, resample=False, fillcolor=0), transforms.ToTensor(), transforms.Normalize(mean, std)])
test_data = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean, std)])
def encode_labels(y):
values = np.array(y)
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(values)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
y = onehot_encoded
return (y, label_encoder)
y, label_encoder = encode_labels(labels['Id'])
import os
from torch.utils.data import Dataset
num_classes = len(labels['Id'].unique())
class WhaleTailDataset(Dataset):
    # Minimal dataset wrapper: training items return (image, one-hot label); test items also return the file name
    def __init__(self, image_folder, data_type='train', df=None, transform=None, y=None):
        self.image_folder = image_folder
        self.imgs_list = [img for img in os.listdir(image_folder)]
        self.data_type = data_type
        self.transform = transform
        self.y = y
        if self.data_type == 'train':
            self.df = df.values
    def __len__(self):
        return len(self.imgs_list)
    def __getitem__(self, idx):
        if self.data_type == 'train':
            img_name = os.path.join(self.image_folder, self.df[idx][0])
            label = self.y[idx]
        elif self.data_type == 'test':
            img_name = os.path.join(self.image_folder, self.imgs_list[idx])
            label = np.zeros((num_classes,))
        img = Image.open(img_name).convert('RGB')
        img = self.transform(img)
        if self.data_type == 'train':
            return (img, label)
        elif self.data_type == 'test':
            return (img, label, self.imgs_list[idx])
image_datasets = dict()
image_datasets['train'] = WhaleTailDataset(image_folder=train_dir, data_type='train', df=labels, transform=train_data, y=y)
image_datasets['test'] = WhaleTailDataset(image_folder=test_dir, data_type='test', transform=test_data)
print('Number of training images: ', len(image_datasets['train']))
print('Number of test images: ', len(image_datasets['test'])) | code |
17109150/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from io import BytesIO
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
train_on_gpu = torch.cuda.is_available()
labels = pd.read_csv('../input/train.csv')
data_dir = '../input'
train_dir = data_dir + '/train'
test_dir = data_dir + '/test'
pil2tensor = transforms.ToTensor()
tensor2pil = transforms.ToPILImage()
pil_image = Image.open(train_dir + '/0a750c2e8.jpg')
rgb_image = pil2tensor(pil_image)
def plot_image(tensor):
pass
from io import BytesIO
import IPython.display
r_image = rgb_image[0]
g_image = rgb_image[1]
b_image = rgb_image[2]
def show_grayscale_image(tensor):
f = BytesIO()
a = np.uint8(tensor.mul(255).numpy())
Image.fromarray(a).save(f, 'png')
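# Encode whale Ids: LabelEncoder maps each Id string to an integer, OneHotEncoder expands it to an indicator vector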
def encode_labels(y):
values = np.array(y)
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(values)
print(values)
print(integer_encoded)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
print(integer_encoded)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
print(len(integer_encoded))
print(onehot_encoded)
print(len(onehot_encoded[0]))
y = onehot_encoded
return (y, label_encoder)
y, label_encoder = encode_labels(labels['Id']) | code |
17109150/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = pd.read_csv('../input/train.csv')
num_classes = len(labels['Id'].unique())
print(num_classes) | code |
17109150/cell_6 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
data_dir = '../input'
train_dir = data_dir + '/train'
test_dir = data_dir + '/test'
pil2tensor = transforms.ToTensor()
tensor2pil = transforms.ToPILImage()
pil_image = Image.open(train_dir + '/0a750c2e8.jpg')
rgb_image = pil2tensor(pil_image)
def plot_image(tensor):
plt.figure()
plt.imshow(tensor.numpy().transpose(1, 2, 0))
plt.show()
plot_image(rgb_image)
print('Image type: ' + str(rgb_image.type()))
print('Image size: ' + str(rgb_image.size())) | code |
17109150/cell_2 | [
"text_plain_output_1.png"
] | import torch
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...') | code |
17109150/cell_11 | [
"text_plain_output_1.png"
] | from PIL import Image
from io import BytesIO
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
train_on_gpu = torch.cuda.is_available()
labels = pd.read_csv('../input/train.csv')
data_dir = '../input'
train_dir = data_dir + '/train'
test_dir = data_dir + '/test'
pil2tensor = transforms.ToTensor()
tensor2pil = transforms.ToPILImage()
pil_image = Image.open(train_dir + '/0a750c2e8.jpg')
rgb_image = pil2tensor(pil_image)
def plot_image(tensor):
pass
from io import BytesIO
import IPython.display
r_image = rgb_image[0]
g_image = rgb_image[1]
b_image = rgb_image[2]
def show_grayscale_image(tensor):
f = BytesIO()
a = np.uint8(tensor.mul(255).numpy())
Image.fromarray(a).save(f, 'png')
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_data = transforms.Compose([transforms.Resize(256), transforms.RandomResizedCrop(224), transforms.RandomRotation(25), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0), transforms.RandomAffine(degrees=4, translate=None, scale=None, shear=None, resample=False, fillcolor=0), transforms.ToTensor(), transforms.Normalize(mean, std)])
test_data = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean, std)])
def encode_labels(y):
values = np.array(y)
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(values)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
y = onehot_encoded
return (y, label_encoder)
y, label_encoder = encode_labels(labels['Id'])
import os
from torch.utils.data import Dataset
num_classes = len(labels['Id'].unique())
class WhaleTailDataset(Dataset):
    # Minimal dataset wrapper: training items return (image, one-hot label); test items also return the file name
    def __init__(self, image_folder, data_type='train', df=None, transform=None, y=None):
        self.image_folder = image_folder
        self.imgs_list = [img for img in os.listdir(image_folder)]
        self.data_type = data_type
        self.transform = transform
        self.y = y
        if self.data_type == 'train':
            self.df = df.values
    def __len__(self):
        return len(self.imgs_list)
    def __getitem__(self, idx):
        if self.data_type == 'train':
            img_name = os.path.join(self.image_folder, self.df[idx][0])
            label = self.y[idx]
        elif self.data_type == 'test':
            img_name = os.path.join(self.image_folder, self.imgs_list[idx])
            label = np.zeros((num_classes,))
        img = Image.open(img_name).convert('RGB')
        img = self.transform(img)
        if self.data_type == 'train':
            return (img, label)
        elif self.data_type == 'test':
            return (img, label, self.imgs_list[idx])
image_datasets = dict()
image_datasets['train'] = WhaleTailDataset(image_folder=train_dir, data_type='train', df=labels, transform=train_data, y=y)
image_datasets['test'] = WhaleTailDataset(image_folder=test_dir, data_type='test', transform=test_data) | code |
17109150/cell_19 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from PIL import Image
from io import BytesIO
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm  # progress bars for the training and inference loops below
train_on_gpu = torch.cuda.is_available()
labels = pd.read_csv('../input/train.csv')
num_classes = len(labels['Id'].unique())
data_dir = '../input'
train_dir = data_dir + '/train'
test_dir = data_dir + '/test'
pil2tensor = transforms.ToTensor()
tensor2pil = transforms.ToPILImage()
pil_image = Image.open(train_dir + '/0a750c2e8.jpg')
rgb_image = pil2tensor(pil_image)
def plot_image(tensor):
pass
from io import BytesIO
import IPython.display
r_image = rgb_image[0]
g_image = rgb_image[1]
b_image = rgb_image[2]
def show_grayscale_image(tensor):
f = BytesIO()
a = np.uint8(tensor.mul(255).numpy())
Image.fromarray(a).save(f, 'png')
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_data = transforms.Compose([transforms.Resize(256), transforms.RandomResizedCrop(224), transforms.RandomRotation(25), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0), transforms.RandomAffine(degrees=4, translate=None, scale=None, shear=None, resample=False, fillcolor=0), transforms.ToTensor(), transforms.Normalize(mean, std)])
test_data = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean, std)])
def encode_labels(y):
values = np.array(y)
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(values)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
y = onehot_encoded
return (y, label_encoder)
y, label_encoder = encode_labels(labels['Id'])
class WhaleTailDataset(Dataset):
def __init__(self, image_folder, data_type='train', df=None, transform=None, y=None):
self.image_folder = image_folder
self.imgs_list = [img for img in os.listdir(image_folder)]
self.data_type = data_type
self.transform = transform
self.y = y
if self.data_type == 'train':
self.df = df.values
def __len__(self):
return len(self.imgs_list)
def __getitem__(self, idx):
if self.data_type == 'train':
img_name = os.path.join(self.image_folder, self.df[idx][0])
label = self.y[idx]
elif self.data_type == 'test':
img_name = os.path.join(self.image_folder, self.imgs_list[idx])
label = np.zeros((num_classes,))
img = Image.open(img_name).convert('RGB')
img = self.transform(img)
if self.data_type == 'train':
return (img, label)
elif self.data_type == 'test':
return (img, label, self.imgs_list[idx])
image_datasets = dict()
image_datasets['train'] = WhaleTailDataset(image_folder=train_dir, data_type='train', df=labels, transform=train_data, y=y)
image_datasets['test'] = WhaleTailDataset(image_folder=test_dir, data_type='test', transform=test_data)
train_size = 512
test_size = 32
num_workers = 0
dataloaders = dict()
dataloaders['train'] = torch.utils.data.DataLoader(image_datasets['train'], batch_size=train_size, num_workers=num_workers)
dataloaders['test'] = torch.utils.data.DataLoader(image_datasets['test'], batch_size=test_size, num_workers=num_workers)
model = models.resnet152(pretrained=True)
for param in model.parameters():
param.requires_grad = False
classifier = nn.Sequential(nn.Linear(2048, 1024), nn.ReLU(), nn.Dropout(0.5), nn.Linear(1024, 512), nn.ReLU(), nn.Dropout(0.5), nn.Linear(512, 5005), nn.LogSoftmax(dim=1))
model.fc = classifier
from torch.optim import lr_scheduler
num_epochs = 6
learning_rate = 0.001
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.fc.parameters(), lr=learning_rate)
scheduler = lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)
import matplotlib.pyplot as plt
model = model.cuda()
train_loss = []
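# Train only the new classifier head; the frozen ResNet-152 backbone serves as a fixed feature extractor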
for epoch in range(1, num_epochs + 1):
for batch_i, (data, target) in tqdm(enumerate(dataloaders['train']), total=len(dataloaders['train'])):
data, target = (data.cuda(), target.cuda())
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target.float())
train_loss.append(loss.item())
loss.backward()
optimizer.step()
scheduler.step()
sub = pd.read_csv('../input/sample_submission.csv')
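# Inference: for each test image take the five highest-scoring classes and map them back to whale Ids for the submission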
model.eval()
for data, target, name in tqdm(dataloaders['test']):
data = data.cuda()
output = model(data)
output = output.cpu().detach().numpy()
for i, (e, n) in enumerate(list(zip(output, name))):
sub.loc[sub['Image'] == n, 'Id'] = ' '.join(label_encoder.inverse_transform(e.argsort()[-5:][::-1]))
sub.to_csv('submission.csv', index=False) | code |
17109150/cell_7 | [
"application_vnd.jupyter.stderr_output_9.png",
"application_vnd.jupyter.stderr_output_7.png",
"application_vnd.jupyter.stderr_output_11.png",
"text_plain_output_4.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_plain_output_8.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png",
"text_plain_output_12.png"
] | from PIL import Image
from io import BytesIO
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import torch
train_on_gpu = torch.cuda.is_available()
data_dir = '../input'
train_dir = data_dir + '/train'
test_dir = data_dir + '/test'
pil2tensor = transforms.ToTensor()
tensor2pil = transforms.ToPILImage()
pil_image = Image.open(train_dir + '/0a750c2e8.jpg')
rgb_image = pil2tensor(pil_image)
def plot_image(tensor):
pass
from io import BytesIO
import IPython.display
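# Split the RGB tensor into its three channel planes for side-by-side grayscale display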
r_image = rgb_image[0]
g_image = rgb_image[1]
b_image = rgb_image[2]
def show_grayscale_image(tensor):
f = BytesIO()
a = np.uint8(tensor.mul(255).numpy())
Image.fromarray(a).save(f, 'png')
IPython.display.display(IPython.display.Image(data=f.getvalue()))
show_grayscale_image(torch.cat((r_image, g_image, b_image), 1)) | code |
17109150/cell_18 | [
"image_output_1.png"
] | from PIL import Image
from io import BytesIO
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm  # progress bar for the training loop below
train_on_gpu = torch.cuda.is_available()
labels = pd.read_csv('../input/train.csv')
num_classes = len(labels['Id'].unique())
data_dir = '../input'
train_dir = data_dir + '/train'
test_dir = data_dir + '/test'
pil2tensor = transforms.ToTensor()
tensor2pil = transforms.ToPILImage()
pil_image = Image.open(train_dir + '/0a750c2e8.jpg')
rgb_image = pil2tensor(pil_image)
def plot_image(tensor):
pass
from io import BytesIO
import IPython.display
r_image = rgb_image[0]
g_image = rgb_image[1]
b_image = rgb_image[2]
def show_grayscale_image(tensor):
f = BytesIO()
a = np.uint8(tensor.mul(255).numpy())
Image.fromarray(a).save(f, 'png')
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_data = transforms.Compose([transforms.Resize(256), transforms.RandomResizedCrop(224), transforms.RandomRotation(25), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0), transforms.RandomAffine(degrees=4, translate=None, scale=None, shear=None, resample=False, fillcolor=0), transforms.ToTensor(), transforms.Normalize(mean, std)])
test_data = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean, std)])
def encode_labels(y):
values = np.array(y)
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(values)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
y = onehot_encoded
return (y, label_encoder)
y, label_encoder = encode_labels(labels['Id'])
class WhaleTailDataset(Dataset):
def __init__(self, image_folder, data_type='train', df=None, transform=None, y=None):
self.image_folder = image_folder
self.imgs_list = [img for img in os.listdir(image_folder)]
self.data_type = data_type
self.transform = transform
self.y = y
if self.data_type == 'train':
self.df = df.values
def __len__(self):
return len(self.imgs_list)
def __getitem__(self, idx):
if self.data_type == 'train':
img_name = os.path.join(self.image_folder, self.df[idx][0])
label = self.y[idx]
elif self.data_type == 'test':
img_name = os.path.join(self.image_folder, self.imgs_list[idx])
label = np.zeros((num_classes,))
img = Image.open(img_name).convert('RGB')
img = self.transform(img)
if self.data_type == 'train':
return (img, label)
elif self.data_type == 'test':
return (img, label, self.imgs_list[idx])
image_datasets = dict()
image_datasets['train'] = WhaleTailDataset(image_folder=train_dir, data_type='train', df=labels, transform=train_data, y=y)
image_datasets['test'] = WhaleTailDataset(image_folder=test_dir, data_type='test', transform=test_data)
train_size = 512
test_size = 32
num_workers = 0
dataloaders = dict()
dataloaders['train'] = torch.utils.data.DataLoader(image_datasets['train'], batch_size=train_size, num_workers=num_workers)
dataloaders['test'] = torch.utils.data.DataLoader(image_datasets['test'], batch_size=test_size, num_workers=num_workers)
model = models.resnet152(pretrained=True)
for param in model.parameters():
param.requires_grad = False
classifier = nn.Sequential(nn.Linear(2048, 1024), nn.ReLU(), nn.Dropout(0.5), nn.Linear(1024, 512), nn.ReLU(), nn.Dropout(0.5), nn.Linear(512, 5005), nn.LogSoftmax(dim=1))
model.fc = classifier
from torch.optim import lr_scheduler
num_epochs = 6
learning_rate = 0.001
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.fc.parameters(), lr=learning_rate)
scheduler = lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)
import matplotlib.pyplot as plt
model = model.cuda()
train_loss = []
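# Track per-batch loss so the learning curve can be plotted after training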
for epoch in range(1, num_epochs + 1):
for batch_i, (data, target) in tqdm(enumerate(dataloaders['train']), total=len(dataloaders['train'])):
data, target = (data.cuda(), target.cuda())
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target.float())
train_loss.append(loss.item())
loss.backward()
optimizer.step()
scheduler.step()
print(f'Epoch - {epoch} // Training Loss: {np.mean(train_loss):.4f}')
print(train_loss)
plt.figure()
plt.plot(train_loss)
plt.show() | code |