path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
17115723/cell_4 | [
"text_plain_output_1.png"
] | from time import time
import cv2 as cv
import imageio as io
import numpy as np # linear algebra
import os
list_train_img = []
a = 0
timea = time()
print('Converting training images to a numpy array...')
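# Read each training image, downscale to 640x640 (INTER_AREA is suited to shrinking), and buffer it in a list.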
for im in os.listdir('../input/train_images'):
    uri = '../input/train_images/' + im
    image = io.imread(uri)
    image = cv.resize(image, (640, 640), interpolation=cv.INTER_AREA)
    list_train_img.append(image)
    a += 1
    if a % 500 == 0:
        print(f'\t{a} images from the training set added to a numpy array')
print('All images from the training set converted to a numpy array!')
train_im = np.asarray(list_train_img)
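# Stacking the list yields shape (n_images, 640, 640, channels); the list is deleted below to free the duplicate copy.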
print(train_im.shape, '\n')
del list_train_img
list_test_img = []
b = 0
print('Converting test images to a numpy array...')
for im_test in os.listdir('../input/test_images'):
    uri = '../input/test_images/' + im_test
    image = io.imread(uri)
    image = cv.resize(image, (640, 640), interpolation=cv.INTER_AREA)
    list_test_img.append(image)
    b += 1
    if b % 500 == 0:
        print(f'\t{b} images from the test set added to a numpy array')
print('All images from the test set converted to a numpy array!\n')
test_im = np.asarray(list_test_img)
print(test_im.shape, '\n')
del list_test_img
timeb = time()
print(f'It took {(timeb - timea) / 60} minutes to complete the conversion') | code |
17115723/cell_2 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
from time import time
import numpy as np
import pandas as pd
import imageio as io
import cv2 as cv
import matplotlib.pyplot as plt
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
print('Setup complete!') | code |
17115723/cell_3 | [
"text_plain_output_1.png"
] | import os
print('Number of images in the training set:', len(os.listdir('../input/train_images')))
print('Number of images in the test set:', len(os.listdir('../input/test_images'))) | code |
32069310/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.shape[0]
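# IP (innings pitched) = IPouts / 3, since each inning consists of three recorded outs.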
pitching['IP'] = (pitching['IPouts'] / 3).round(2)
del pitching['IPouts']
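# BB9 = walks allowed per nine innings; SOtoBB = strikeout-to-walk ratio.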
pitching['BB9'] = (pitching['BB'] * 9 / pitching['IP']).round(2)
pitching['SOtoBB'] = (pitching['SO'] / pitching['BB']).round(2)
pitching.head() | code |
32069310/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching.head() | code |
32069310/cell_34 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.shape[0]
pitching['IP'] = (pitching['IPouts'] / 3).round(2)
del pitching['IPouts']
pitching['BB9'] = (pitching['BB'] * 9 / pitching['IP']).round(2)
pitching['SOtoBB'] = (pitching['SO'] / pitching['BB']).round(2)
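# idxmax/idxmin return the index label (playerID) of the highest/lowest single-season SO total.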
pitching.SO.idxmax()
pitching.SO.idxmin() | code |
32069310/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.shape[0]
pitching['IP'] = (pitching['IPouts'] / 3).round(2)
del pitching['IPouts']
pitching['BB9'] = (pitching['BB'] * 9 / pitching['IP']).round(2)
pitching['SOtoBB'] = (pitching['SO'] / pitching['BB']).round(2)
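# Average innings pitched per start, by season: total IP divided by total games started.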
(pitching.groupby('yearID').IP.sum() / pitching.groupby('yearID').GS.sum()).plot.bar() | code |
32069310/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching.head() | code |
32069310/cell_29 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.shape[0]
pitching['IP'] = (pitching['IPouts'] / 3).round(2)
del pitching['IPouts']
pitching['BB9'] = (pitching['BB'] * 9 / pitching['IP']).round(2)
pitching['SOtoBB'] = (pitching['SO'] / pitching['BB']).round(2)
pitching.groupby('yearID').SO9.mean().plot.bar() | code |
32069310/cell_26 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.shape[0]
pitching['IP'] = (pitching['IPouts'] / 3).round(2)
del pitching['IPouts']
pitching['BB9'] = (pitching['BB'] * 9 / pitching['IP']).round(2)
pitching['SOtoBB'] = (pitching['SO'] / pitching['BB']).round(2)
pitching.groupby('yearID').size().plot.bar() | code |
32069310/cell_2 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
32069310/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.shape[0]
pitching['IP'] = (pitching['IPouts'] / 3).round(2)
del pitching['IPouts']
pitching['BB9'] = (pitching['BB'] * 9 / pitching['IP']).round(2)
pitching.head() | code |
32069310/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.shape[0]
pitching['IP'] = (pitching['IPouts'] / 3).round(2)
del pitching['IPouts']
pitching['BB9'] = (pitching['BB'] * 9 / pitching['IP']).round(2)
pitching['SOtoBB'] = (pitching['SO'] / pitching['BB']).round(2)
pitching.SO.idxmax() | code |
32069310/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.head(7) | code |
32069310/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.shape[0]
pitching['IP'] = (pitching['IPouts'] / 3).round(2)
del pitching['IPouts']
pitching.head() | code |
32069310/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.tail(6) | code |
32069310/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pitching = pd.read_csv('/kaggle/input/baseball-databank/Pitching.csv')
pitching = pitching[(pitching['yearID'] >= 1990) & (pitching['GS'] >= 26) & (pitching['G'] == pitching['GS'])].iloc[:, :20]
pitching = pitching.drop(['stint', 'W', 'L', 'CG', 'SHO', 'SV'], axis=1)
pitching = pitching.set_index('playerID')
pitching.shape[0] | code |
105179030/cell_9 | [
"text_plain_output_1.png"
i = 0
while i <= 10:
    i = i + 1
j = 0
for i in range(1, 21):
    j = j + i
print(j) | code |
105179030/cell_6 | [
"text_plain_output_1.png"
i = 0
while i <= 10:
    i = i + 1
student = ['adnan', 'saad', 'zaheeb']
for i in student:
    print(i) | code |
105179030/cell_7 | [
"text_plain_output_1.png"
i = 0
while i <= 10:
    i = i + 1
a = range(10)
for i in a:
    if i % 2 == 1:
        print(i) | code |
105179030/cell_3 | [
"text_plain_output_1.png"
i = 0
while i <= 10:
    if i % 2 == 0:
        print(i)
    i = i + 1 | code |
105179030/cell_5 | [
"text_plain_output_1.png"
i = 0
while i <= 10:
    i = i + 1
name = 'Adnan'
for i in name:
    print(i) | code |
105174093/cell_13 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
import os
kaggle_survey_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv')
kaggle_survey_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
kaggle_survey_2021 = pd.read_csv('../input/kaggle-survey-2021/kaggle_survey_2021_responses.csv')
indo_survey_2019 = kaggle_survey_2019[kaggle_survey_2019['Q3'] == 'Indonesia']
indo_survey_2020 = kaggle_survey_2020[kaggle_survey_2020['Q3'] == 'Indonesia']
indo_survey_2021 = kaggle_survey_2021[kaggle_survey_2021['Q3'] == 'Indonesia']
# kaggle_survey_2019 = kaggle_survey_2019[kaggle_survey_2019['Q3']=='Indonesia'].drop\
# (kaggle_survey_2019.columns[0], axis=1)
# kaggle_survey_2019.head()
# question = pd.read_csv('../input/kaggle-survey-2019/questions_only.csv')
# question.columns
# question_list = question.loc[0].tolist()
# question_list
f, ax = plt.subplots(nrows=1, ncols=3, figsize=(15,4))
barcolors = ['darkseagreen', 'peru', 'navy']
barstyle = {"edgecolor":"black", "linewidth":0.5}
indo_survey_2019['Q2'].value_counts().plot.bar(ax=ax[0], color=barcolors[0])
ax[0].set_title("2019")
indo_survey_2020['Q2'].value_counts().plot.bar(ax=ax[1], color=barcolors[1])
ax[1].set_title("2020")
indo_survey_2021['Q2'].value_counts().plot.bar(ax=ax[2], color=barcolors[2])
ax[2].set_title("2021")
plt.show()
def roleDanSize(df):
    newdf = df.groupby(['Q4', 'Q5']).size()
    newdf = newdf.to_frame(name='size').reset_index()
    newdf['role'] = newdf['Q4'] + '-' + newdf['Q5']
    newdf = newdf.drop(columns=['Q4', 'Q5']).sort_values(by=['size'])
    return newdf
data_2019 = roleDanSize(indo_survey_2019)
plt.barh(data_2019['role'], data_2019['size'])
data_2020 = roleDanSize(indo_survey_2020)
plt.barh(data_2020['role'], data_2020['size'])
data_2021 = roleDanSize(indo_survey_2021)
plt.barh(data_2021['role'], data_2021['size'])
def getting_values(rng):
    """
    Helper that yields the value_counts() of each column
    Q9_Part_1 through Q9_Part_8 as a dictionary.
    """
    for i in range(1, rng):
        yield indo_survey_2019['Q9_Part_' + str(i)].value_counts().to_dict()
values = []
keys = []
for i in getting_values(9):
    values.append(list(i.values()))
    keys.append(list(i.keys()))
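# Flatten the per-question lists of counts/labels so all Q9 options share one pie chart.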
values = [j for sub in values for j in sub]
keys = [j for sub in keys for j in sub]
mypie = plt.pie(values, startangle=0, autopct='%1.0f%%', pctdistance=0.9, radius=1.2)
plt.title('Important part of your role at work', weight='bold', size=12)
plt.legend(mypie[0], keys, bbox_to_anchor=(1, 1))
plt.show() | code |
105174093/cell_4 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
import os
kaggle_survey_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv')
kaggle_survey_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
kaggle_survey_2021 = pd.read_csv('../input/kaggle-survey-2021/kaggle_survey_2021_responses.csv')
indo_survey_2019 = kaggle_survey_2019[kaggle_survey_2019['Q3'] == 'Indonesia']
indo_survey_2020 = kaggle_survey_2020[kaggle_survey_2020['Q3'] == 'Indonesia']
indo_survey_2021 = kaggle_survey_2021[kaggle_survey_2021['Q3'] == 'Indonesia']
f, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 4))
barcolors = ['darkseagreen', 'peru', 'navy']
barstyle = {'edgecolor': 'black', 'linewidth': 0.5}
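# Value counts of Q2 (respondent gender) for each survey year, plotted side by side.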
indo_survey_2019['Q2'].value_counts().plot.bar(ax=ax[0], color=barcolors[0])
ax[0].set_title('2019')
indo_survey_2020['Q2'].value_counts().plot.bar(ax=ax[1], color=barcolors[1])
ax[1].set_title('2020')
indo_survey_2021['Q2'].value_counts().plot.bar(ax=ax[2], color=barcolors[2])
ax[2].set_title('2021')
plt.show() | code |
105174093/cell_6 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
import os
kaggle_survey_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv')
kaggle_survey_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
kaggle_survey_2021 = pd.read_csv('../input/kaggle-survey-2021/kaggle_survey_2021_responses.csv')
indo_survey_2019 = kaggle_survey_2019[kaggle_survey_2019['Q3'] == 'Indonesia']
indo_survey_2020 = kaggle_survey_2020[kaggle_survey_2020['Q3'] == 'Indonesia']
indo_survey_2021 = kaggle_survey_2021[kaggle_survey_2021['Q3'] == 'Indonesia']
# kaggle_survey_2019 = kaggle_survey_2019[kaggle_survey_2019['Q3']=='Indonesia'].drop\
# (kaggle_survey_2019.columns[0], axis=1)
# kaggle_survey_2019.head()
# question = pd.read_csv('../input/kaggle-survey-2019/questions_only.csv')
# question.columns
# question_list = question.loc[0].tolist()
# question_list
f, ax = plt.subplots(nrows=1, ncols=3, figsize=(15,4))
barcolors = ['darkseagreen', 'peru', 'navy']
barstyle = {"edgecolor":"black", "linewidth":0.5}
indo_survey_2019['Q2'].value_counts().plot.bar(ax=ax[0], color=barcolors[0])
ax[0].set_title("2019")
indo_survey_2020['Q2'].value_counts().plot.bar(ax=ax[1], color=barcolors[1])
ax[1].set_title("2020")
indo_survey_2021['Q2'].value_counts().plot.bar(ax=ax[2], color=barcolors[2])
ax[2].set_title("2021")
plt.show()
def roleDanSize(df):
    newdf = df.groupby(['Q4', 'Q5']).size()
    newdf = newdf.to_frame(name='size').reset_index()
    newdf['role'] = newdf['Q4'] + '-' + newdf['Q5']
    newdf = newdf.drop(columns=['Q4', 'Q5']).sort_values(by=['size'])
    return newdf
data_2019 = roleDanSize(indo_survey_2019)
plt.figure(figsize=(10, 15))
plt.barh(data_2019['role'], data_2019['size'])
plt.title('Counts of highest-education and occupation pairs, 2019')
plt.ylabel('Education-Occupation')
plt.xlabel('Count')
plt.show() | code |
105174093/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
import os
kaggle_survey_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv')
kaggle_survey_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
kaggle_survey_2021 = pd.read_csv('../input/kaggle-survey-2021/kaggle_survey_2021_responses.csv')
indo_survey_2019 = kaggle_survey_2019[kaggle_survey_2019['Q3'] == 'Indonesia']
indo_survey_2020 = kaggle_survey_2020[kaggle_survey_2020['Q3'] == 'Indonesia']
indo_survey_2021 = kaggle_survey_2021[kaggle_survey_2021['Q3'] == 'Indonesia']
# kaggle_survey_2019 = kaggle_survey_2019[kaggle_survey_2019['Q3']=='Indonesia'].drop\
# (kaggle_survey_2019.columns[0], axis=1)
# kaggle_survey_2019.head()
# question = pd.read_csv('../input/kaggle-survey-2019/questions_only.csv')
# question.columns
# question_list = question.loc[0].tolist()
# question_list
f, ax = plt.subplots(nrows=1, ncols=3, figsize=(15,4))
barcolors = ['darkseagreen', 'peru', 'navy']
barstyle = {"edgecolor":"black", "linewidth":0.5}
indo_survey_2019['Q2'].value_counts().plot.bar(ax=ax[0], color=barcolors[0])
ax[0].set_title("2019")
indo_survey_2020['Q2'].value_counts().plot.bar(ax=ax[1], color=barcolors[1])
ax[1].set_title("2020")
indo_survey_2021['Q2'].value_counts().plot.bar(ax=ax[2], color=barcolors[2])
ax[2].set_title("2021")
plt.show()
def roleDanSize(df):
    newdf = df.groupby(['Q4', 'Q5']).size()
    newdf = newdf.to_frame(name='size').reset_index()
    newdf['role'] = newdf['Q4'] + '-' + newdf['Q5']
    newdf = newdf.drop(columns=['Q4', 'Q5']).sort_values(by=['size'])
    return newdf
data_2019 = roleDanSize(indo_survey_2019)
plt.barh(data_2019['role'], data_2019['size'])
data_2020 = roleDanSize(indo_survey_2020)
plt.figure(figsize=(10, 17))
plt.barh(data_2020['role'], data_2020['size'])
plt.title('Counts of highest-education and occupation pairs, 2020')
plt.ylabel('Education-Occupation')
plt.xlabel('Count')
plt.show() | code |
105174093/cell_10 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
import os
kaggle_survey_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv')
kaggle_survey_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
kaggle_survey_2021 = pd.read_csv('../input/kaggle-survey-2021/kaggle_survey_2021_responses.csv')
indo_survey_2019 = kaggle_survey_2019[kaggle_survey_2019['Q3'] == 'Indonesia']
indo_survey_2020 = kaggle_survey_2020[kaggle_survey_2020['Q3'] == 'Indonesia']
indo_survey_2021 = kaggle_survey_2021[kaggle_survey_2021['Q3'] == 'Indonesia']
# kaggle_survey_2019 = kaggle_survey_2019[kaggle_survey_2019['Q3']=='Indonesia'].drop\
# (kaggle_survey_2019.columns[0], axis=1)
# kaggle_survey_2019.head()
# question = pd.read_csv('../input/kaggle-survey-2019/questions_only.csv')
# question.columns
# question_list = question.loc[0].tolist()
# question_list
f, ax = plt.subplots(nrows=1, ncols=3, figsize=(15,4))
barcolors = ['darkseagreen', 'peru', 'navy']
barstyle = {"edgecolor":"black", "linewidth":0.5}
indo_survey_2019['Q2'].value_counts().plot.bar(ax=ax[0], color=barcolors[0])
ax[0].set_title("2019")
indo_survey_2020['Q2'].value_counts().plot.bar(ax=ax[1], color=barcolors[1])
ax[1].set_title("2020")
indo_survey_2021['Q2'].value_counts().plot.bar(ax=ax[2], color=barcolors[2])
ax[2].set_title("2021")
plt.show()
def roleDanSize(df):
    newdf = df.groupby(['Q4', 'Q5']).size()
    newdf = newdf.to_frame(name='size').reset_index()
    newdf['role'] = newdf['Q4'] + '-' + newdf['Q5']
    newdf = newdf.drop(columns=['Q4', 'Q5']).sort_values(by=['size'])
    return newdf
data_2019 = roleDanSize(indo_survey_2019)
plt.barh(data_2019['role'], data_2019['size'])
data_2020 = roleDanSize(indo_survey_2020)
plt.barh(data_2020['role'], data_2020['size'])
data_2021 = roleDanSize(indo_survey_2021)
plt.figure(figsize=(10, 17))
plt.barh(data_2021['role'], data_2021['size'])
plt.title('Counts of highest-education and occupation pairs, 2021')
plt.ylabel('Education-Occupation')
plt.xlabel('Count')
plt.show() | code |
2013637/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
train.head() | code |
2013637/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.tree import DecisionTreeClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
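# Preprocessing: drop the free-text Name column, one-hot encode categoricals, then mean-impute NaNs.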
all_data = all_data.drop(['Name'], axis=1)
all_data = pd.get_dummies(all_data)
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
y_train = train.Survived
X_test = all_data[train.shape[0]:]
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(random_state=0)
tree.fit(X_train, y_train)
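# Training accuracy of an unpruned tree is optimistic; it can fit the training set almost perfectly.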
print(tree.score(X_train, y_train))
decision_tree_predicts = tree.predict(X_test) | code |
2013637/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data = all_data.drop(['Name'], axis=1)
all_data = pd.get_dummies(all_data)
all_data.head() | code |
17096261/cell_1 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import torchvision.transforms as transforms
import os
import random
df = pd.read_csv('../input/train.csv')
exps = df['experiment'].unique()
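# Experiment names look like 'HEPG2-01'; the prefix before '-' is the cell line.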
exps = [exp.split('-')[0] for exp in exps]
exp_series = pd.Series(exps)
cell_lines = exp_series.unique()
print('four cell lines are: ', cell_lines) | code |
17096261/cell_7 | [
"image_output_1.png"
] | from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import torchvision.transforms as transforms
import os
import random
df = pd.read_csv('../input/train.csv')
exps = df['experiment'].unique()
exps = [exp.split('-')[0] for exp in exps]
exp_series = pd.Series(exps)
cell_lines = exp_series.unique()
df = pd.read_csv('../input/train.csv')
df['cell_line'], _ = df['experiment'].str.split('-').str
types_select = [1,2,3,4,5]
fig, axes = plt.subplots(figsize=(25, 25), nrows=len(types_select), ncols=5)
for i, sirna in enumerate(types_select):
    sub_df = df[df['cell_line'] == 'HEPG2']
    # filter with sub_df's own mask instead of df's to avoid pandas' boolean-reindexing warning
    sub_df = sub_df[sub_df['sirna'] == sirna]
    sub_df_records = sub_df.to_records()
    np.random.shuffle(sub_df_records)
    axes[i][0].set_ylabel('Type ' + str(sirna))
    for j in range(5):
        exp = sub_df_records[j]['experiment']
        plate = sub_df_records[j]['plate']
        well = sub_df_records[j]['well']
        path = os.path.join('../input/train', exp, 'Plate' + str(plate), well + '_' + 's2' + '_' + 'w3' + '.png')
        img = Image.open(path)
        img = transforms.Resize(224)(img)
        axes[i][j].imshow(img)
        axes[i][j].set_title(sub_df_records[j]['id_code'])
df = pd.read_csv('../input/train.csv')
incomplete_list = []
df['cell_line'], _ = df['experiment'].str.split('-').str
cell_types = ['HEPG2', 'HUVEC', 'RPE', 'U2OS']
for i in range(1, max(df['sirna']) + 1):
    sub_df = df[df['sirna'] == i]
    # Bug fix: check the subset's cell lines, not the full frame's (which always contains all four)
    if len(sub_df['cell_line'].unique()) < 4:
        incomplete_list.append(i)
import matplotlib.pyplot as plt
df = pd.read_csv('../input/train.csv')
incomplete_list = []
df['cell_line'], _ = df['experiment'].str.split('-').str
cell_types = ['HEPG2', 'HUVEC', 'RPE', 'U2OS']
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
for i, cell_type in enumerate(cell_types):
    sub_df = df[df['cell_line'] == cell_type]
    axes[i // 2, i % 2].hist(sub_df['sirna'].tolist(), bins=1108)
    axes[i // 2, i % 2].set_title(cell_type) | code |
17096261/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import torchvision.transforms as transforms
import os
import random
df = pd.read_csv('../input/train.csv')
exps = df['experiment'].unique()
exps = [exp.split('-')[0] for exp in exps]
exp_series = pd.Series(exps)
cell_lines = exp_series.unique()
df = pd.read_csv('../input/train.csv')
df['cell_line'], _ = df['experiment'].str.split('-').str
types_select = [1, 2, 3, 4, 5]
fig, axes = plt.subplots(figsize=(25, 25), nrows=len(types_select), ncols=5)
for i, sirna in enumerate(types_select):
    sub_df = df[df['cell_line'] == 'HEPG2']
    # filter with sub_df's own mask instead of df's to avoid pandas' boolean-reindexing warning
    sub_df = sub_df[sub_df['sirna'] == sirna]
    sub_df_records = sub_df.to_records()
    np.random.shuffle(sub_df_records)
    axes[i][0].set_ylabel('Type ' + str(sirna))
    for j in range(5):
        exp = sub_df_records[j]['experiment']
        plate = sub_df_records[j]['plate']
        well = sub_df_records[j]['well']
        path = os.path.join('../input/train', exp, 'Plate' + str(plate), well + '_' + 's2' + '_' + 'w3' + '.png')
        img = Image.open(path)
        img = transforms.Resize(224)(img)
        axes[i][j].imshow(img)
        axes[i][j].set_title(sub_df_records[j]['id_code']) | code |
17096261/cell_5 | [
"text_plain_output_1.png"
] | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import torchvision.transforms as transforms
import os
import random
df = pd.read_csv('../input/train.csv')
exps = df['experiment'].unique()
exps = [exp.split('-')[0] for exp in exps]
exp_series = pd.Series(exps)
cell_lines = exp_series.unique()
df = pd.read_csv('../input/train.csv')
df['cell_line'], _ = df['experiment'].str.split('-').str
types_select = [1,2,3,4,5]
fig, axes = plt.subplots(figsize=(25, 25), nrows=len(types_select), ncols=5)
for i, sirna in enumerate(types_select):
    sub_df = df[df['cell_line'] == 'HEPG2']
    # filter with sub_df's own mask instead of df's to avoid pandas' boolean-reindexing warning
    sub_df = sub_df[sub_df['sirna'] == sirna]
    sub_df_records = sub_df.to_records()
    np.random.shuffle(sub_df_records)
    axes[i][0].set_ylabel('Type ' + str(sirna))
    for j in range(5):
        exp = sub_df_records[j]['experiment']
        plate = sub_df_records[j]['plate']
        well = sub_df_records[j]['well']
        path = os.path.join('../input/train', exp, 'Plate' + str(plate), well + '_' + 's2' + '_' + 'w3' + '.png')
        img = Image.open(path)
        img = transforms.Resize(224)(img)
        axes[i][j].imshow(img)
        axes[i][j].set_title(sub_df_records[j]['id_code'])
df = pd.read_csv('../input/train.csv')
incomplete_list = []
df['cell_line'], _ = df['experiment'].str.split('-').str
cell_types = ['HEPG2', 'HUVEC', 'RPE', 'U2OS']
for i in range(1, max(df['sirna']) + 1):
    sub_df = df[df['sirna'] == i]
    # Bug fix: check the subset's cell lines, not the full frame's (which always contains all four)
    if len(sub_df['cell_line'].unique()) < 4:
        incomplete_list.append(i)
print('the incomplete list is: ', incomplete_list) | code |
49123700/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
def calnullpercentage(df):
    missing_num = df[df.columns].isna().sum().sort_values(ascending=False)
    missing_perc = (df[df.columns].isna().sum() / len(df) * 100).sort_values(ascending=False)
    missing = pd.concat([missing_num, missing_perc], keys=['Total', 'Percentage'], axis=1)
    missing = missing[missing['Percentage'] > 0]
    return missing
calnullpercentage(telecom) | code |
49123700/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
telecom.head() | code |
49123700/cell_34 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
def calnullpercentage(df):
    missing_num = df[df.columns].isna().sum().sort_values(ascending=False)
    missing_perc = (df[df.columns].isna().sum() / len(df) * 100).sort_values(ascending=False)
    missing = pd.concat([missing_num, missing_perc], keys=['Total', 'Percentage'], axis=1)
    missing = missing[missing['Percentage'] > 0]
    return missing
telecom['tot_rech_amt_data_6'] = telecom['total_rech_data_6'] * telecom['av_rech_amt_data_6']
telecom['tot_rech_amt_data_7'] = telecom['total_rech_data_7'] * telecom['av_rech_amt_data_7']
telecom['tot_amt_6'] = telecom[['total_rech_amt_6', 'tot_rech_amt_data_6']].sum(axis=1)
telecom['tot_amt_7'] = telecom[['total_rech_amt_7', 'tot_rech_amt_data_7']].sum(axis=1)
telecom['avg_amt_6_7'] = telecom[['tot_amt_6', 'tot_amt_7']].mean(axis=1)
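# Keep high-value customers: rows at or above the 70th percentile of average June/July recharge spend.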
telecom = telecom.loc[telecom['avg_amt_6_7'] >= np.percentile(telecom['avg_amt_6_7'], 70)]
telecom.shape
catg = []
for col in telecom.columns:
    # Bug fix: `== 2 | 3` parses as `== (2 | 3)`, i.e. `== 3`; the intent is 2 or 3 unique values
    if len(telecom[col].unique()) in (2, 3):
        catg.append(col)
telecom[catg] = telecom[catg].apply(lambda x: x.astype('object'))
col_tmp = ['total_rech_num_6', 'total_rech_num_7', 'total_rech_num_8', 'total_rech_num_9', 'total_rech_data_6', 'total_rech_data_7', 'total_rech_data_8', 'total_rech_data_9']
telecom[col_tmp] = telecom[col_tmp].apply(lambda x: x.astype('object'))
telecom.drop(['tot_rech_amt_data_6', 'tot_rech_amt_data_7', 'tot_rech_amt_data_8', 'tot_rech_amt_data_9'], inplace=True, axis=1)
telecom.drop(telecom.filter(regex='_9|sep', axis=1).columns, axis=1, inplace=True)
telecom.shape
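# Drop near-constant features: any column whose most frequent value covers more than 95% of rows.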
def redundant_feature(df):
    redundant = []
    for i in df.columns:
        counts = df[i].value_counts()
        count_max = counts.iloc[0]
        if count_max / len(df) * 100 > 95:
            redundant.append(i)
    return redundant
print('Before dropping Redundant features: ', telecom.shape)
redundant_features = redundant_feature(telecom)
telecom = telecom.drop(redundant_features, axis=1)
print('After dropping Redundant features: ', telecom.shape) | code |
49123700/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
def calnullpercentage(df):
    missing_num = df[df.columns].isna().sum().sort_values(ascending=False)
    missing_perc = (df[df.columns].isna().sum() / len(df) * 100).sort_values(ascending=False)
    missing = pd.concat([missing_num, missing_perc], keys=['Total', 'Percentage'], axis=1)
    missing = missing[missing['Percentage'] > 0]
    return missing
telecom['tot_rech_amt_data_6'] = telecom['total_rech_data_6'] * telecom['av_rech_amt_data_6']
telecom['tot_rech_amt_data_7'] = telecom['total_rech_data_7'] * telecom['av_rech_amt_data_7']
telecom['tot_amt_6'] = telecom[['total_rech_amt_6', 'tot_rech_amt_data_6']].sum(axis=1)
telecom['tot_amt_7'] = telecom[['total_rech_amt_7', 'tot_rech_amt_data_7']].sum(axis=1)
telecom['avg_amt_6_7'] = telecom[['tot_amt_6', 'tot_amt_7']].mean(axis=1)
telecom = telecom.loc[telecom['avg_amt_6_7'] >= np.percentile(telecom['avg_amt_6_7'], 70)]
telecom.shape
catg = []
for col in telecom.columns:
    # Bug fix: `== 2 | 3` parses as `== (2 | 3)`, i.e. `== 3`; the intent is 2 or 3 unique values
    if len(telecom[col].unique()) in (2, 3):
        catg.append(col)
telecom[catg] = telecom[catg].apply(lambda x: x.astype('object'))
col_tmp = ['total_rech_num_6', 'total_rech_num_7', 'total_rech_num_8', 'total_rech_num_9', 'total_rech_data_6', 'total_rech_data_7', 'total_rech_data_8', 'total_rech_data_9']
telecom[col_tmp] = telecom[col_tmp].apply(lambda x: x.astype('object'))
telecom.drop(['tot_rech_amt_data_6', 'tot_rech_amt_data_7', 'tot_rech_amt_data_8', 'tot_rech_amt_data_9'], inplace=True, axis=1)
telecom.drop(telecom.filter(regex='_9|sep', axis=1).columns, axis=1, inplace=True)
pd.DataFrame(round(telecom['churn'].value_counts(normalize=True) * 100, 2)) | code |
49123700/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
telecom.describe(percentiles=[0.25, 0.5, 0.75, 0.99]) | code |
49123700/cell_18 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
telecom['tot_rech_amt_data_6'] = telecom['total_rech_data_6'] * telecom['av_rech_amt_data_6']
telecom['tot_rech_amt_data_7'] = telecom['total_rech_data_7'] * telecom['av_rech_amt_data_7']
telecom['tot_amt_6'] = telecom[['total_rech_amt_6', 'tot_rech_amt_data_6']].sum(axis=1)
telecom['tot_amt_7'] = telecom[['total_rech_amt_7', 'tot_rech_amt_data_7']].sum(axis=1)
telecom['avg_amt_6_7'] = telecom[['tot_amt_6', 'tot_amt_7']].mean(axis=1)
telecom = telecom.loc[telecom['avg_amt_6_7'] >= np.percentile(telecom['avg_amt_6_7'], 70)]
telecom.shape | code |
49123700/cell_32 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
telecom['tot_rech_amt_data_6'] = telecom['total_rech_data_6'] * telecom['av_rech_amt_data_6']
telecom['tot_rech_amt_data_7'] = telecom['total_rech_data_7'] * telecom['av_rech_amt_data_7']
telecom['tot_amt_6'] = telecom[['total_rech_amt_6', 'tot_rech_amt_data_6']].sum(axis=1)
telecom['tot_amt_7'] = telecom[['total_rech_amt_7', 'tot_rech_amt_data_7']].sum(axis=1)
telecom['avg_amt_6_7'] = telecom[['tot_amt_6', 'tot_amt_7']].mean(axis=1)
telecom = telecom.loc[telecom['avg_amt_6_7'] >= np.percentile(telecom['avg_amt_6_7'], 70)]
telecom.shape
catg = []
for col in telecom.columns:
    # Bug fix: `== 2 | 3` parses as `== (2 | 3)`, i.e. `== 3`; the intent is 2 or 3 unique values
    if len(telecom[col].unique()) in (2, 3):
        catg.append(col)
telecom[catg] = telecom[catg].apply(lambda x: x.astype('object'))
col_tmp = ['total_rech_num_6', 'total_rech_num_7', 'total_rech_num_8', 'total_rech_num_9', 'total_rech_data_6', 'total_rech_data_7', 'total_rech_data_8', 'total_rech_data_9']
telecom[col_tmp] = telecom[col_tmp].apply(lambda x: x.astype('object'))
telecom.drop(['tot_rech_amt_data_6', 'tot_rech_amt_data_7', 'tot_rech_amt_data_8', 'tot_rech_amt_data_9'], inplace=True, axis=1)
telecom.drop(telecom.filter(regex='_9|sep', axis=1).columns, axis=1, inplace=True)
telecom.shape | code |
49123700/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
telecom.select_dtypes(include='object').head(3) | code |
49123700/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
def calnullpercentage(df):
    missing_num = df[df.columns].isna().sum().sort_values(ascending=False)
    missing_perc = (df[df.columns].isna().sum() / len(df) * 100).sort_values(ascending=False)
    missing = pd.concat([missing_num, missing_perc], keys=['Total', 'Percentage'], axis=1)
    missing = missing[missing['Percentage'] > 0]
    return missing
print(len(calnullpercentage(telecom))) | code |
49123700/cell_22 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
telecom['tot_rech_amt_data_6'] = telecom['total_rech_data_6'] * telecom['av_rech_amt_data_6']
telecom['tot_rech_amt_data_7'] = telecom['total_rech_data_7'] * telecom['av_rech_amt_data_7']
telecom['tot_amt_6'] = telecom[['total_rech_amt_6', 'tot_rech_amt_data_6']].sum(axis=1)
telecom['tot_amt_7'] = telecom[['total_rech_amt_7', 'tot_rech_amt_data_7']].sum(axis=1)
telecom['avg_amt_6_7'] = telecom[['tot_amt_6', 'tot_amt_7']].mean(axis=1)
telecom = telecom.loc[telecom['avg_amt_6_7'] >= np.percentile(telecom['avg_amt_6_7'], 70)]
telecom.shape
catg = []
for col in telecom.columns:
    # Bug fix: `== 2 | 3` parses as `== (2 | 3)`, i.e. `== 3`; the intent is 2 or 3 unique values
    if len(telecom[col].unique()) in (2, 3):
        catg.append(col)
telecom[catg] = telecom[catg].apply(lambda x: x.astype('object'))
col_tmp = ['total_rech_num_6', 'total_rech_num_7', 'total_rech_num_8', 'total_rech_num_9', 'total_rech_data_6', 'total_rech_data_7', 'total_rech_data_8', 'total_rech_data_9']
telecom[col_tmp] = telecom[col_tmp].apply(lambda x: x.astype('object'))
x = ['tot_amt_8', 'total_rech_amt_8', 'tot_rech_amt_data_8', 'total_rech_data_8', 'av_rech_amt_data_8']
plt.figure(figsize=(8, 5))
sns.heatmap(telecom[x].corr(), annot=True, cmap='viridis_r') | code |
49123700/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
import six
import warnings
import warnings
warnings.filterwarnings('ignore')
import sys, joblib
import six
sys.modules['sklearn.externals.six'] = six
sys.modules['sklearn.externals.joblib'] = joblib
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk', font_scale=0.65, rc={'grid.linewidth': 5})
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 400)
from sklearn.linear_model import LogisticRegression, LinearRegression, LassoCV, Lasso, Ridge, LogisticRegressionCV
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from sklearn.preprocessing import StandardScaler, MinMaxScaler, QuantileTransformer
from scipy.stats import skew
from fancyimpute import IterativeImputer, KNN
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.feature_selection import RFE
import statsmodels.api as sm
telecom = pd.read_csv('../input/telecom-churn-dataset/telecom_churn_data.csv')
print(telecom.shape)
print('\n')
print(telecom.info(verbose=True, null_counts=True)) | code |
72098069/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.metrics import roc_auc_score
from xgboost import XGBClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/dont-overfit-ii/train.csv')
df_test = pd.read_csv('../input/dont-overfit-ii/test.csv')
labels = df_train.columns.drop(['id', 'target'])
target = df_train['target']
ide = df_test['id']
df_test = df_test.drop('id', axis=1)
from xgboost import XGBClassifier
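# Heavily regularized XGBoost (shallow trees, high gamma, L1/L2 penalties) to limit overfitting on this small training set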
m = XGBClassifier(max_depth=2, gamma=11, eta=0.8, reg_alpha=0.7, reg_lambda=0.9, eval_metric=None)
m.fit(x_train, y_train)
pred = m.predict(x_test)
print(df_test.shape)
ou = m.predict(df_test) | code |
72098069/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(x_train, y_train)
preds = model.predict(x_test)
from sklearn.metrics import roc_auc_score
print('auc_train:', roc_auc_score(y_train, model.predict(x_train)))
print('auc_test:', roc_auc_score(y_test, preds)) | code |
72098069/cell_4 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/dont-overfit-ii/train.csv')
df_test = pd.read_csv('../input/dont-overfit-ii/test.csv')
labels = df_train.columns.drop(['id', 'target'])
target = df_train['target']
ide = df_test['id']
df_test = df_test.drop('id', axis=1)
df_train.head() | code |
72098069/cell_6 | [
"text_plain_output_1.png"
] | import missingno as msno
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/dont-overfit-ii/train.csv')
df_test = pd.read_csv('../input/dont-overfit-ii/test.csv')
labels = df_train.columns.drop(['id', 'target'])
target = df_train['target']
ide = df_test['id']
df_test = df_test.drop('id', axis=1)
msno.matrix(df_train, figsize=(20, 7)) | code |
72098069/cell_11 | [
"text_plain_output_1.png"
] | from xgboost import XGBClassifier
from xgboost import XGBClassifier
m = XGBClassifier(max_depth=2, gamma=11, eta=0.8, reg_alpha=0.7, reg_lambda=0.9, eval_metric=None)
m.fit(x_train, y_train)
pred = m.predict(x_test) | code |
72098069/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import missingno as msno
from sklearn.model_selection import train_test_split
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72098069/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/dont-overfit-ii/train.csv')
df_test = pd.read_csv('../input/dont-overfit-ii/test.csv')
labels = df_train.columns.drop(['id', 'target'])
target = df_train['target']
ide = df_test['id']
df_test = df_test.drop('id', axis=1)
df_train.info() | code |
72098069/cell_12 | [
"text_html_output_1.png"
] | from sklearn.metrics import roc_auc_score
from xgboost import XGBClassifier
from xgboost import XGBClassifier
m = XGBClassifier(max_depth=2, gamma=11, eta=0.8, reg_alpha=0.7, reg_lambda=0.9, eval_metric=None)
m.fit(x_train, y_train)
pred = m.predict(x_test)
print('auc_train:', roc_auc_score(y_train, m.predict(x_train)))
print('auc_test:', roc_auc_score(y_test, pred)) | code |
33103605/cell_9 | [
"text_plain_output_1.png"
] | from ktrain import text
import ktrain
import pandas as pd
import pathlib
train_path = '../input/sentimentdatasets/testStackOverFlow.csv'
tr_path = pathlib.Path(train_path)
if tr_path.exists():
train_df = pd.read_csv(train_path, encoding='utf-16', sep=';', header=None)
else:
raise SystemExit('Train path does not exist.')
model_path = '../input/models/model.h5'
mo_path = pathlib.Path(model_path)
if not mo_path.exists():
    raise SystemExit('Model path does not exist.')
data_path = '../input/sentimentdatasets/github_gold.csv'
da_path = pathlib.Path(data_path)
if da_path.exists():
test_df = pd.read_csv(data_path, sep=';', header=0)
else:
raise SystemExit('Data path does not exist.')
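# ktrain preprocessing: tokenize the train/test text for BERT and pad/truncate each sequence to maxlen=500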
(x_train, y_train), (x_test, y_test), preproc = text.texts_from_array(train_df[2], train_df[1], x_test=test_df['Text'], y_test=test_df['Polarity'], maxlen=500, preprocess_mode='bert')
learner = ktrain.get_learner(text.text_classifier('bert', (x_train, y_train), preproc=preproc), train_data=(x_train, y_train), val_data=(x_test, y_test), batch_size=6)
learner.load_model(model_path)
predictor = ktrain.get_predictor(learner.model, preproc)
have_test_dataset = False
data_df = test_df
if have_test_dataset:
data = test_df[2].tolist()
label = test_df[1].tolist()
else:
total_dataset = data_df.shape[0]
test_data = round(total_dataset * 0.3)
data_list = data_df['Text'].tolist()
data = data_list[-test_data - 1:-1]
label_list = data_df['Polarity'].tolist()
label = label_list[-test_data - 1:-1]
print('Showing prediction mistakes on test set!')
i = 0
correct = 0
wrong = 0
total = len(data)
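# Predict each held-out sentence and report the ones the model gets wrong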
for dt in data:
result = predictor.predict(dt)
if result == label[i]:
correct += 1
else:
wrong += 1
print('Wrong result on sentence:\n', dt, '\nExpected: ', label[i], '\nPredicted: ', result)
i += 1 | code |
33103605/cell_4 | [
"text_plain_output_1.png"
] | from ktrain import text
import pandas as pd
import pathlib
train_path = '../input/sentimentdatasets/testStackOverFlow.csv'
tr_path = pathlib.Path(train_path)
if tr_path.exists():
train_df = pd.read_csv(train_path, encoding='utf-16', sep=';', header=None)
else:
raise SystemExit('Train path does not exist.')
model_path = '../input/models/model.h5'
mo_path = pathlib.Path(model_path)
if not mo_path.exists():
    raise SystemExit('Model path does not exist.')
data_path = '../input/sentimentdatasets/github_gold.csv'
da_path = pathlib.Path(data_path)
if da_path.exists():
test_df = pd.read_csv(data_path, sep=';', header=0)
else:
raise SystemExit('Data path does not exist.')
(x_train, y_train), (x_test, y_test), preproc = text.texts_from_array(train_df[2], train_df[1], x_test=test_df['Text'], y_test=test_df['Polarity'], maxlen=500, preprocess_mode='bert') | code |
33103605/cell_6 | [
"text_plain_output_100.png",
"text_plain_output_84.png",
"text_plain_output_56.png",
"text_plain_output_158.png",
"text_plain_output_181.png",
"text_plain_output_137.png",
"text_plain_output_139.png",
"text_plain_output_35.png",
"text_plain_output_130.png",
"text_plain_output_117.png",
"text_plain_output_98.png",
"text_plain_output_43.png",
"text_plain_output_78.png",
"text_plain_output_143.png",
"text_plain_output_106.png",
"text_plain_output_37.png",
"text_plain_output_138.png",
"text_plain_output_172.png",
"text_plain_output_147.png",
"text_plain_output_90.png",
"text_plain_output_79.png",
"text_plain_output_5.png",
"text_plain_output_75.png",
"text_plain_output_48.png",
"text_plain_output_116.png",
"text_plain_output_128.png",
"text_plain_output_30.png",
"text_plain_output_167.png",
"text_plain_output_73.png",
"text_plain_output_126.png",
"text_plain_output_115.png",
"text_plain_output_15.png",
"text_plain_output_133.png",
"text_plain_output_178.png",
"text_plain_output_154.png",
"text_plain_output_114.png",
"text_plain_output_157.png",
"text_plain_output_70.png",
"text_plain_output_9.png",
"text_plain_output_44.png",
"text_plain_output_119.png",
"text_plain_output_86.png",
"text_plain_output_118.png",
"text_plain_output_131.png",
"text_plain_output_40.png",
"text_plain_output_123.png",
"text_plain_output_74.png",
"text_plain_output_31.png",
"text_plain_output_20.png",
"text_plain_output_102.png",
"text_plain_output_111.png",
"text_plain_output_101.png",
"text_plain_output_169.png",
"text_plain_output_144.png",
"text_plain_output_161.png",
"text_plain_output_132.png",
"text_plain_output_60.png",
"text_plain_output_155.png",
"text_plain_output_68.png",
"text_plain_output_4.png",
"text_plain_output_65.png",
"text_plain_output_64.png",
"text_plain_output_13.png",
"text_plain_output_107.png",
"text_plain_output_52.png",
"text_plain_output_66.png",
"text_plain_output_45.png",
"text_plain_output_171.png",
"text_plain_output_14.png",
"text_plain_output_159.png",
"text_plain_output_32.png",
"text_plain_output_88.png",
"text_plain_output_29.png",
"text_plain_output_140.png",
"text_plain_output_129.png",
"text_plain_output_160.png",
"text_plain_output_58.png",
"text_plain_output_49.png",
"text_plain_output_63.png",
"text_plain_output_27.png",
"text_plain_output_177.png",
"text_plain_output_76.png",
"text_plain_output_108.png",
"text_plain_output_54.png",
"text_plain_output_142.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_153.png",
"text_plain_output_170.png",
"text_plain_output_92.png",
"text_plain_output_57.png",
"text_plain_output_120.png",
"text_plain_output_24.png",
"text_plain_output_21.png",
"text_plain_output_104.png",
"text_plain_output_47.png",
"text_plain_output_121.png",
"text_plain_output_25.png",
"text_plain_output_134.png",
"text_plain_output_77.png",
"text_plain_output_18.png",
"text_plain_output_149.png",
"text_plain_output_50.png",
"text_plain_output_36.png",
"text_plain_output_96.png",
"text_plain_output_87.png",
"text_plain_output_3.png",
"text_plain_output_180.png",
"text_plain_output_141.png",
"text_plain_output_112.png",
"text_plain_output_152.png",
"text_plain_output_113.png",
"text_plain_output_22.png",
"text_plain_output_81.png",
"text_plain_output_69.png",
"text_plain_output_175.png",
"text_plain_output_165.png",
"text_plain_output_146.png",
"text_plain_output_145.png",
"text_plain_output_125.png",
"text_plain_output_38.png",
"text_plain_output_7.png",
"text_plain_output_166.png",
"text_plain_output_91.png",
"text_plain_output_16.png",
"text_plain_output_174.png",
"text_plain_output_59.png",
"text_plain_output_103.png",
"text_plain_output_71.png",
"text_plain_output_8.png",
"text_plain_output_122.png",
"text_plain_output_26.png",
"text_plain_output_109.png",
"text_plain_output_41.png",
"text_plain_output_34.png",
"text_plain_output_168.png",
"text_plain_output_85.png",
"text_plain_output_42.png",
"text_plain_output_110.png",
"text_plain_output_67.png",
"text_plain_output_53.png",
"text_plain_output_23.png",
"text_plain_output_173.png",
"text_plain_output_151.png",
"text_plain_output_89.png",
"text_plain_output_51.png",
"text_plain_output_28.png",
"text_plain_output_72.png",
"text_plain_output_99.png",
"text_plain_output_163.png",
"text_plain_output_179.png",
"text_plain_output_162.png",
"text_plain_output_136.png",
"text_plain_output_2.png",
"text_plain_output_127.png",
"text_plain_output_97.png",
"text_plain_output_1.png",
"text_plain_output_33.png",
"text_plain_output_150.png",
"text_plain_output_39.png",
"text_plain_output_176.png",
"text_plain_output_55.png",
"text_plain_output_82.png",
"text_plain_output_93.png",
"text_plain_output_19.png",
"text_plain_output_105.png",
"text_plain_output_80.png",
"text_plain_output_94.png",
"text_plain_output_164.png",
"text_plain_output_124.png",
"text_plain_output_17.png",
"text_plain_output_148.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"text_plain_output_62.png",
"text_plain_output_95.png",
"text_plain_output_156.png",
"text_plain_output_61.png",
"text_plain_output_83.png",
"text_plain_output_135.png",
"text_plain_output_46.png"
] | from ktrain import text
import ktrain
import pandas as pd
import pathlib
train_path = '../input/sentimentdatasets/testStackOverFlow.csv'
tr_path = pathlib.Path(train_path)
if tr_path.exists():
train_df = pd.read_csv(train_path, encoding='utf-16', sep=';', header=None)
else:
raise SystemExit('Train path does not exist.')
model_path = '../input/models/model.h5'
mo_path = pathlib.Path(model_path)
if not mo_path.exists():
    raise SystemExit('Model path does not exist.')
data_path = '../input/sentimentdatasets/github_gold.csv'
da_path = pathlib.Path(data_path)
if da_path.exists():
test_df = pd.read_csv(data_path, sep=';', header=0)
else:
raise SystemExit('Data path does not exist.')
(x_train, y_train), (x_test, y_test), preproc = text.texts_from_array(train_df[2], train_df[1], x_test=test_df['Text'], y_test=test_df['Polarity'], maxlen=500, preprocess_mode='bert')
learner = ktrain.get_learner(text.text_classifier('bert', (x_train, y_train), preproc=preproc), train_data=(x_train, y_train), val_data=(x_test, y_test), batch_size=6)
learner.load_model(model_path)
print('model loaded successfully') | code |
33103605/cell_2 | [
"text_plain_output_1.png"
] | !pip install ktrain
import ktrain
from ktrain import text | code |
33103605/cell_3 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
import pathlib
train_path = '../input/sentimentdatasets/testStackOverFlow.csv'
tr_path = pathlib.Path(train_path)
if tr_path.exists():
train_df = pd.read_csv(train_path, encoding='utf-16', sep=';', header=None)
print('Train path set.')
else:
raise SystemExit('Train path does not exist.')
model_path = '../input/models/model.h5'
mo_path = pathlib.Path(model_path)
if mo_path.exists():
print('Model path set.')
else:
raise SystemExit('Model path does not exist.')
data_path = '../input/sentimentdatasets/github_gold.csv'
da_path = pathlib.Path(data_path)
if da_path.exists():
test_df = pd.read_csv(data_path, sep=';', header=0)
print('Data path set.')
else:
raise SystemExit('Data path does not exist.') | code |
33103605/cell_10 | [
"text_plain_output_1.png"
] | from ktrain import text
import ktrain
import pandas as pd
import pathlib
train_path = '../input/sentimentdatasets/testStackOverFlow.csv'
tr_path = pathlib.Path(train_path)
if tr_path.exists():
train_df = pd.read_csv(train_path, encoding='utf-16', sep=';', header=None)
else:
raise SystemExit('Train path does not exist.')
model_path = '../input/models/model.h5'
mo_path = pathlib.Path(model_path)
if not mo_path.exists():
    raise SystemExit('Model path does not exist.')
data_path = '../input/sentimentdatasets/github_gold.csv'
da_path = pathlib.Path(data_path)
if da_path.exists():
test_df = pd.read_csv(data_path, sep=';', header=0)
else:
raise SystemExit('Data path does not exist.')
(x_train, y_train), (x_test, y_test), preproc = text.texts_from_array(train_df[2], train_df[1], x_test=test_df['Text'], y_test=test_df['Polarity'], maxlen=500, preprocess_mode='bert')
learner = ktrain.get_learner(text.text_classifier('bert', (x_train, y_train), preproc=preproc), train_data=(x_train, y_train), val_data=(x_test, y_test), batch_size=6)
learner.load_model(model_path)
predictor = ktrain.get_predictor(learner.model, preproc)
have_test_dataset = False
data_df = test_df
if have_test_dataset:
data = test_df[2].tolist()
label = test_df[1].tolist()
else:
total_dataset = data_df.shape[0]
test_data = round(total_dataset * 0.3)
data_list = data_df['Text'].tolist()
data = data_list[-test_data - 1:-1]
label_list = data_df['Polarity'].tolist()
label = label_list[-test_data - 1:-1]
i = 0
correct = 0
wrong = 0
total = len(data)
for dt in data:
result = predictor.predict(dt)
if result == label[i]:
correct += 1
else:
wrong += 1
i += 1
print('Correct: ', correct, '/', total, '\nWrong: ', wrong, '/', total) | code |
33103605/cell_5 | [
"text_plain_output_1.png"
] | from ktrain import text
import ktrain
import pandas as pd
import pathlib
train_path = '../input/sentimentdatasets/testStackOverFlow.csv'
tr_path = pathlib.Path(train_path)
if tr_path.exists():
train_df = pd.read_csv(train_path, encoding='utf-16', sep=';', header=None)
else:
raise SystemExit('Train path does not exist.')
model_path = '../input/models/model.h5'
mo_path = pathlib.Path(model_path)
if not mo_path.exists():
    raise SystemExit('Model path does not exist.')
data_path = '../input/sentimentdatasets/github_gold.csv'
da_path = pathlib.Path(data_path)
if da_path.exists():
test_df = pd.read_csv(data_path, sep=';', header=0)
else:
raise SystemExit('Data path does not exist.')
(x_train, y_train), (x_test, y_test), preproc = text.texts_from_array(train_df[2], train_df[1], x_test=test_df['Text'], y_test=test_df['Polarity'], maxlen=500, preprocess_mode='bert')
learner = ktrain.get_learner(text.text_classifier('bert', (x_train, y_train), preproc=preproc), train_data=(x_train, y_train), val_data=(x_test, y_test), batch_size=6) | code |
130011822/cell_13 | [
"text_html_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
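# Bag-of-words features: fit the vocabulary on the training split only, then reuse it to transform the test split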
vectorizer = CountVectorizer()
X_train_vec = vectorizer.fit_transform(X_train)
X_test_vec = vectorizer.transform(X_test)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_vec.toarray())
X_test_scaled = scaler.transform(X_test_vec.toarray())
model = LogisticRegression()
model.fit(X_train_scaled, y_train)  # train on the same scaled features used for scoring and prediction
accuracy = model.score(X_test_scaled, y_test)
X_new_data = 'agenda for next sitting see minutes'
X_new_data_vec = vectorizer.transform([X_new_data])
X_new_data_scaled = scaler.transform(X_new_data_vec.toarray())
y_pred = model.predict(X_new_data_scaled)
print(y_pred) | code |
130011822/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/unlock-the-power-of-english-asl-with-aslg-pc12-c/train.csv')
df.columns
df.head() | code |
130011822/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
vectorizer = CountVectorizer()
X_train_vec = vectorizer.fit_transform(X_train)
X_test_vec = vectorizer.transform(X_test)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_vec.toarray())
X_test_scaled = scaler.transform(X_test_vec.toarray())
model = LogisticRegression()
model.fit(X_train_scaled, y_train)  # train on the same scaled features used downstream
130011822/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
130011822/cell_3 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression | code |
130011822/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
vectorizer = CountVectorizer()
X_train_vec = vectorizer.fit_transform(X_train)
X_test_vec = vectorizer.transform(X_test)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_vec.toarray())
X_test_scaled = scaler.transform(X_test_vec.toarray())
model = LogisticRegression()
model.fit(X_train_scaled, y_train)  # train on the same scaled features used for scoring
accuracy = model.score(X_test_scaled, y_test)
print('Accuracy:', accuracy) | code |
130011822/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/unlock-the-power-of-english-asl-with-aslg-pc12-c/train.csv')
df.columns | code |
122263749/cell_13 | [
"text_plain_output_1.png"
] | from keras.preprocessing import image
from os import listdir
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import cv2
import numpy as np
INIT_LR = 0.001
BS = 32
default_image_size = tuple((72, 72))
image_size = 72
directory_root = '../input/brain-tumor-classification-mri/Training'
width = 256
height = 256
depth = 3
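# Read an image, resize to default_image_size and boost contrast (alpha=1.2); empty array if unreadable, None on error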
def convert_image_to_array(image_dir):
try:
image = cv2.imread(image_dir)
if image is not None:
image = cv2.resize(image, default_image_size)
image = cv2.convertScaleAbs(image, alpha=1.2, beta=0)
return image.astype(np.uint8)
else:
return np.array([])
except Exception as e:
return None
image_list, label_list = ([], [])
try:
root_dir = listdir(directory_root)
brain_folder_list = listdir(f'{directory_root}/')
for brain_folder in brain_folder_list:
brain_image_list = listdir(f'{directory_root}/{brain_folder}/')
for image in brain_image_list[:500]:
image_directory = f'{directory_root}/{brain_folder}/{image}'
if image_directory.endswith('.jpg') == True or image_directory.endswith('.JPG') == True:
image_list.append(convert_image_to_array(image_directory))
label_list.append(brain_folder)
exit
except Exception as e:
    print(f'Error : {e}')
np_image_list = np.array(image_list, dtype=np.uint8)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(label_list)
label_list = y.reshape(len(image_list), 1)
label_list.shape
print('[INFO] Spliting data to train, test')
x_train, x_test, y_train, y_test = train_test_split(np_image_list, label_list, test_size=0.2, random_state=42) | code |
122263749/cell_9 | [
"text_plain_output_1.png"
] | from keras.preprocessing import image
from os import listdir
import cv2
import numpy as np
INIT_LR = 0.001
BS = 32
default_image_size = tuple((72, 72))
image_size = 72
directory_root = '../input/brain-tumor-classification-mri/Training'
width = 256
height = 256
depth = 3
def convert_image_to_array(image_dir):
try:
image = cv2.imread(image_dir)
if image is not None:
image = cv2.resize(image, default_image_size)
image = cv2.convertScaleAbs(image, alpha=1.2, beta=0)
return image.astype(np.uint8)
else:
return np.array([])
except Exception as e:
return None
image_list, label_list = ([], [])
try:
print('[INFO] Loading images ...')
root_dir = listdir(directory_root)
brain_folder_list = listdir(f'{directory_root}/')
print(brain_folder_list)
for brain_folder in brain_folder_list:
print(f'[INFO] Processing {brain_folder} ...')
brain_image_list = listdir(f'{directory_root}/{brain_folder}/')
for image in brain_image_list[:500]:
image_directory = f'{directory_root}/{brain_folder}/{image}'
if image_directory.endswith('.jpg') == True or image_directory.endswith('.JPG') == True:
image_list.append(convert_image_to_array(image_directory))
label_list.append(brain_folder)
exit
print('[INFO] Image loading completed')
except Exception as e:
print(f'Error : {e}') | code |
122263749/cell_20 | [
"text_plain_output_1.png"
] | from PIL import Image
from PIL import Image
from keras.preprocessing import image
from os import listdir
from tensorflow import keras
from tensorflow.keras import layers
import cv2
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 256
num_epochs = 300
image_size = 72
patch_size = 5
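# (72 // 5) ** 2 = 14 ** 2 = 196 patches per image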
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
transformer_units = [projection_dim * 2, projection_dim]
transformer_layers = 8
mlp_head_units = [2048, 1024]
INIT_LR = 0.001
BS = 32
default_image_size = tuple((72, 72))
image_size = 72
directory_root = '../input/brain-tumor-classification-mri/Training'
width = 256
height = 256
depth = 3
def convert_image_to_array(image_dir):
try:
image = cv2.imread(image_dir)
if image is not None:
image = cv2.resize(image, default_image_size)
image = cv2.convertScaleAbs(image, alpha=1.2, beta=0)
return image.astype(np.uint8)
else:
return np.array([])
except Exception as e:
return None
image_list, label_list = ([], [])
try:
root_dir = listdir(directory_root)
brain_folder_list = listdir(f'{directory_root}/')
for brain_folder in brain_folder_list:
brain_image_list = listdir(f'{directory_root}/{brain_folder}/')
for image in brain_image_list[:500]:
image_directory = f'{directory_root}/{brain_folder}/{image}'
if image_directory.endswith('.jpg') == True or image_directory.endswith('.JPG') == True:
image_list.append(convert_image_to_array(image_directory))
label_list.append(brain_folder)
exit
except Exception as e:
    print(f'Error : {e}')
np_image_list = np.array(image_list, dtype=np.uint8)
from PIL import Image
import matplotlib.pyplot as plt
from PIL import Image
import matplotlib.pyplot as plt
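# Augmentation pipeline: normalize and resize, then random horizontal flip, rotation, and zoom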
data_augmentation = keras.Sequential([layers.experimental.preprocessing.Normalization(), layers.experimental.preprocessing.Resizing(image_size, image_size), layers.experimental.preprocessing.RandomFlip('horizontal'), layers.experimental.preprocessing.RandomRotation(factor=0.02), layers.experimental.preprocessing.RandomZoom(height_factor=0.2, width_factor=0.2)], name='data_augmentation')
data_augmentation.layers[0].adapt(x_train)
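# MLP block used inside the Transformer: Dense with GELU activation followed by dropout for each width in hidden_units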
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=tf.nn.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
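# Keras layer that cuts each image into non-overlapping patch_size x patch_size patches and flattens them into vectors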
class Patches(layers.Layer):
def __init__(self, patch_size):
super(Patches, self).__init__()
self.patch_size = patch_size
def call(self, images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(images=images, sizes=[1, self.patch_size, self.patch_size, 1], strides=[1, self.patch_size, self.patch_size, 1], rates=[1, 1, 1, 1], padding='VALID')
patch_dims = patches.shape[-1]
patches = tf.reshape(patches, [batch_size, -1, patch_dims])
return patches
import matplotlib.pyplot as plt
plt.figure(figsize=(4, 4))
image = x_train[np.random.choice(range(x_train.shape[0]))]
print(x_train[0])
plt.imshow(image.astype('uint8'))
plt.axis('off')
resized_image = tf.image.resize(tf.convert_to_tensor([image]), size=(image_size, image_size))
patches = Patches(patch_size)(resized_image)
print(f'Image size: {image_size} X {image_size}')
print(f'Patch size: {patch_size} X {patch_size}')
print(f'Patches per image: {patches.shape[1]}')
print(f'Elements per patch: {patches.shape[-1]}')
n = int(np.sqrt(patches.shape[1]))
plt.figure(figsize=(4, 4))
for i, patch in enumerate(patches[0]):
ax = plt.subplot(n, n, i + 1)
patch_img = tf.reshape(patch, (patch_size, patch_size, 3))
plt.imshow(patch_img.numpy().astype('uint8'))
plt.axis('off') | code |
122263749/cell_11 | [
"text_plain_output_1.png"
] | from keras.preprocessing import image
from os import listdir
from sklearn.preprocessing import LabelEncoder
import cv2
import numpy as np
INIT_LR = 0.001
BS = 32
default_image_size = tuple((72, 72))
image_size = 72
directory_root = '../input/brain-tumor-classification-mri/Training'
width = 256
height = 256
depth = 3
def convert_image_to_array(image_dir):
try:
image = cv2.imread(image_dir)
if image is not None:
image = cv2.resize(image, default_image_size)
image = cv2.convertScaleAbs(image, alpha=1.2, beta=0)
return image.astype(np.uint8)
else:
return np.array([])
except Exception as e:
return None
image_list, label_list = ([], [])
try:
root_dir = listdir(directory_root)
brain_folder_list = listdir(f'{directory_root}/')
for brain_folder in brain_folder_list:
brain_image_list = listdir(f'{directory_root}/{brain_folder}/')
for image in brain_image_list[:500]:
image_directory = f'{directory_root}/{brain_folder}/{image}'
if image_directory.endswith('.jpg') == True or image_directory.endswith('.JPG') == True:
image_list.append(convert_image_to_array(image_directory))
label_list.append(brain_folder)
exit
except Exception as e:
    print(f'Error : {e}')
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(label_list)
print(y)
label_list = y.reshape(len(image_list), 1) | code |
122263749/cell_15 | [
"text_plain_output_1.png"
] | print(f'x_train shape: {x_train.shape} - y_train shape: {y_train.shape}')
print(f'x_test shape: {x_test.shape} - y_test shape: {y_test.shape}') | code |
122263749/cell_16 | [
"image_output_1.png"
] | from PIL import Image
from PIL import Image
from keras.preprocessing import image
from os import listdir
import cv2
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
INIT_LR = 0.001
BS = 32
default_image_size = tuple((72, 72))
image_size = 72
directory_root = '../input/brain-tumor-classification-mri/Training'
width = 256
height = 256
depth = 3
def convert_image_to_array(image_dir):
try:
image = cv2.imread(image_dir)
if image is not None:
image = cv2.resize(image, default_image_size)
image = cv2.convertScaleAbs(image, alpha=1.2, beta=0)
return image.astype(np.uint8)
else:
return np.array([])
except Exception as e:
return None
image_list, label_list = ([], [])
try:
root_dir = listdir(directory_root)
brain_folder_list = listdir(f'{directory_root}/')
for brain_folder in brain_folder_list:
brain_image_list = listdir(f'{directory_root}/{brain_folder}/')
for image in brain_image_list[:500]:
image_directory = f'{directory_root}/{brain_folder}/{image}'
if image_directory.endswith('.jpg') == True or image_directory.endswith('.JPG') == True:
image_list.append(convert_image_to_array(image_directory))
label_list.append(brain_folder)
exit
except Exception as e:
    print(f'Error : {e}')
np_image_list = np.array(image_list, dtype=np.uint8)
from PIL import Image
import matplotlib.pyplot as plt
from PIL import Image
import matplotlib.pyplot as plt
plt.imshow(Image.fromarray(x_train[100].astype(np.uint8)))
plt.show() | code |
122263749/cell_14 | [
"text_plain_output_1.png"
] | from PIL import Image
from keras.preprocessing import image
from os import listdir
import cv2
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
INIT_LR = 0.001
BS = 32
default_image_size = tuple((72, 72))
image_size = 72
directory_root = '../input/brain-tumor-classification-mri/Training'
width = 256
height = 256
depth = 3
def convert_image_to_array(image_dir):
try:
image = cv2.imread(image_dir)
if image is not None:
image = cv2.resize(image, default_image_size)
image = cv2.convertScaleAbs(image, alpha=1.2, beta=0)
return image.astype(np.uint8)
else:
return np.array([])
except Exception as e:
return None
image_list, label_list = ([], [])
try:
root_dir = listdir(directory_root)
brain_folder_list = listdir(f'{directory_root}/')
for brain_folder in brain_folder_list:
brain_image_list = listdir(f'{directory_root}/{brain_folder}/')
for image in brain_image_list[:500]:
image_directory = f'{directory_root}/{brain_folder}/{image}'
if image_directory.endswith('.jpg') == True or image_directory.endswith('.JPG') == True:
image_list.append(convert_image_to_array(image_directory))
label_list.append(brain_folder)
exit
except Exception as e:
    print(f'Error : {e}')
np_image_list = np.array(image_list, dtype=np.uint8)
from PIL import Image
import matplotlib.pyplot as plt
plt.imshow(Image.fromarray(x_train[0].astype(np.uint8)))
plt.show() | code |
122263749/cell_12 | [
"text_plain_output_1.png"
] | from keras.preprocessing import image
from os import listdir
from sklearn.preprocessing import LabelEncoder
import cv2
import numpy as np
INIT_LR = 0.001
BS = 32
default_image_size = tuple((72, 72))
image_size = 72
directory_root = '../input/brain-tumor-classification-mri/Training'
width = 256
height = 256
depth = 3
def convert_image_to_array(image_dir):
try:
image = cv2.imread(image_dir)
if image is not None:
image = cv2.resize(image, default_image_size)
image = cv2.convertScaleAbs(image, alpha=1.2, beta=0)
return image.astype(np.uint8)
else:
return np.array([])
except Exception as e:
return None
image_list, label_list = ([], [])
try:
root_dir = listdir(directory_root)
brain_folder_list = listdir(f'{directory_root}/')
for brain_folder in brain_folder_list:
brain_image_list = listdir(f'{directory_root}/{brain_folder}/')
for image in brain_image_list[:500]:
image_directory = f'{directory_root}/{brain_folder}/{image}'
if image_directory.endswith('.jpg') == True or image_directory.endswith('.JPG') == True:
image_list.append(convert_image_to_array(image_directory))
label_list.append(brain_folder)
exit
except Exception as e:
    print(f'Error : {e}')
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(label_list)
label_list = y.reshape(len(image_list), 1)
label_list.shape | code |
122263749/cell_5 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | pip install -U tensorflow-addons | code |
90118648/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
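# KNNImputer fills missing values (e.g. Age) from the nearest rows; fit on the training predictors only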
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_train.info() | code |
90118648/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_train.describe() | code |
90118648/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
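# Manually ordinal-encode Embarked: S -> 1, C -> 2, everything else (Q or missing) -> 3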
train['Embarked'] = [1 if l == 'S' else 2 if l == 'C' else 3 for l in train['Embarked']]
train['Embarked'].value_counts() | code |
90118648/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_test[['Sex', 'Embarked', 'Pclass', 'Title']] = X_test[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_train_ohe = pd.get_dummies(X_train)
X_test_ohe = pd.get_dummies(X_test)
X_test_ohe.head() | code |
90118648/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.info() | code |
90118648/cell_30 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
from sklearn.preprocessing import PowerTransformer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_test[['Sex', 'Embarked', 'Pclass', 'Title']] = X_test[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_train_ohe = pd.get_dummies(X_train)
X_test_ohe = pd.get_dummies(X_test)
num = ['Age', 'Parch', 'SibSp', 'Fare']
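# PowerTransformer (Yeo-Johnson by default) makes the skewed numeric features more Gaussian-like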
power = PowerTransformer()
train_power = pd.DataFrame(power.fit_transform(X_train[num]), columns=num)
train_power.head() | code |
90118648/cell_33 | [
"text_html_output_1.png"
] | from sklearn.impute import KNNImputer
from sklearn.preprocessing import PowerTransformer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_test[['Sex', 'Embarked', 'Pclass', 'Title']] = X_test[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_train_ohe = pd.get_dummies(X_train)
X_test_ohe = pd.get_dummies(X_test)
num = ['Age', 'Parch', 'SibSp', 'Fare']
power = PowerTransformer()
train_power = pd.DataFrame(power.fit_transform(X_train[num]), columns=num)
test_power = pd.DataFrame(power.transform(X_test[num]), columns=num)
X_train_ohe_power = pd.concat([train_power[num], X_train_ohe.drop(num, axis=1)], axis=1)
X_test_ohe_power = pd.concat([test_power[num], X_test_ohe.drop(num, axis=1)], axis=1)
X_test_ohe_power.head() | code |
90118648/cell_20 | [
"text_html_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_train['Title'] = [s.split(',')[1].split('.')[0].strip() for s in train['Name']]
X_train['Title'] = X_train['Title'].astype('category')
replacements = {'Mlle': 'Miss', 'Mme': 'Mrs', 'Ms': 'Mrs'}
X_train['Title'] = X_train['Title'].replace(replacements)
counts = X_train['Title'].value_counts()
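# Collapse titles appearing fewer than 10 times into a single 'Other' bucket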
mask = X_train['Title'].isin(counts[counts < 10].index)
X_train.loc[mask, 'Title'] = 'Other'
X_train['Title'].value_counts()
X_test['Title'] = [s.split(',')[1].split('.')[0].strip() for s in test['Name']]
X_test['Title'] = X_test['Title'].astype('category')
replacements = {'Mlle': 'Miss', 'Mme': 'Mrs', 'Ms': 'Mrs'}
X_test['Title'] = X_test['Title'].replace(replacements)
counts = X_test['Title'].value_counts()
mask = X_test['Title'].isin(counts[counts < 10].index)
X_test.loc[mask, 'Title'] = 'Other'
X_test['Title'].value_counts() | code |
90118648/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.describe(include='all') | code |
90118648/cell_19 | [
"text_html_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_train['Title'] = [s.split(',')[1].split('.')[0].strip() for s in train['Name']]
X_train['Title'] = X_train['Title'].astype('category')
replacements = {'Mlle': 'Miss', 'Mme': 'Mrs', 'Ms': 'Mrs'}
X_train['Title'] = X_train['Title'].replace(replacements)
counts = X_train['Title'].value_counts()
mask = X_train['Title'].isin(counts[counts < 10].index)
X_train['Title'][mask] = 'Other'
X_train['Title'].value_counts() | code |
90118648/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
90118648/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train['Sex'] = [0 if l == 'male' else 1 for l in train['Sex']]
train['Sex'].value_counts() | code |
90118648/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_test['Embarked'] = ['S' if l == 1 else 'C' if l == 2 else 'Q' for l in X_test['Embarked']]
X_test['Embarked'].value_counts() | code |
90118648/cell_32 | [
"text_html_output_1.png"
] | from sklearn.impute import KNNImputer
from sklearn.preprocessing import PowerTransformer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_test[['Sex', 'Embarked', 'Pclass', 'Title']] = X_test[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_train_ohe = pd.get_dummies(X_train)
X_test_ohe = pd.get_dummies(X_test)
num = ['Age', 'Parch', 'SibSp', 'Fare']
power = PowerTransformer()
train_power = pd.DataFrame(power.fit_transform(X_train[num]), columns=num)
test_power = pd.DataFrame(power.transform(X_test[num]), columns=num)
X_train_ohe_power = pd.concat([train_power[num], X_train_ohe.drop(num, axis=1)], axis=1)
X_train_ohe_power.head() | code |
90118648/cell_28 | [
"text_plain_output_1.png"
] | from category_encoders import TargetEncoder
from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_test[['Sex', 'Embarked', 'Pclass', 'Title']] = X_test[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
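# TargetEncoder replaces each category with a smoothed mean of the target for that category, fit on train only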
encoder = TargetEncoder(return_df=True)
X_train_te = encoder.fit_transform(X_train, y_train)
X_test_te = encoder.transform(X_test)
X_test_te.describe() | code |
90118648/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test['Sex'] = [0 if l == 'male' else 1 for l in test['Sex']]
test['Sex'].value_counts() | code |
90118648/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_train['Sex'] = ['male' if l == 0 else 'female' for l in X_train['Sex']]
X_train['Sex'].value_counts() | code |
90118648/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_test['Sex'] = ['male' if l == 0 else 'female' for l in X_test['Sex']]
X_test['Sex'].value_counts() | code |
90118648/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_train['Embarked'] = ['S' if l == 1 else 'C' if l == 2 else 'Q' for l in X_train['Embarked']]
X_train['Embarked'].value_counts() | code |
90118648/cell_35 | [
"text_html_output_1.png"
] | from category_encoders import TargetEncoder
from sklearn.impute import KNNImputer
from sklearn.preprocessing import PowerTransformer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_test[['Sex', 'Embarked', 'Pclass', 'Title']] = X_test[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_train_ohe = pd.get_dummies(X_train)
X_test_ohe = pd.get_dummies(X_test)
encoder = TargetEncoder(return_df=True)
X_train_te = encoder.fit_transform(X_train, y_train)
num = ['Age', 'Parch', 'SibSp', 'Fare']
power = PowerTransformer()
train_power = pd.DataFrame(power.fit_transform(X_train[num]), columns=num)
test_power = pd.DataFrame(power.transform(X_test[num]), columns=num)
X_train_ohe_power = pd.concat([train_power[num], X_train_ohe.drop(num, axis=1)], axis=1)
X_test_ohe_power = pd.concat([test_power[num], X_test_ohe.drop(num, axis=1)], axis=1)
X_train_te_power = pd.concat([train_power[num], X_train_te.drop(num, axis=1)], axis=1)
X_train_te_power.head() | code |
90118648/cell_31 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
from sklearn.preprocessing import PowerTransformer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_test[['Sex', 'Embarked', 'Pclass', 'Title']] = X_test[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_train_ohe = pd.get_dummies(X_train)
X_test_ohe = pd.get_dummies(X_test)
num = ['Age', 'Parch', 'SibSp', 'Fare']
power = PowerTransformer()
train_power = pd.DataFrame(power.fit_transform(X_train[num]), columns=num)
test_power = pd.DataFrame(power.transform(X_test[num]), columns=num)
test_power.head() | code |
90118648/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_train_ohe = pd.get_dummies(X_train)
X_train_ohe.head() | code |
90118648/cell_14 | [
"text_html_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_test.describe() | code |
90118648/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.impute import KNNImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)
X_test[['Sex', 'Embarked', 'Pclass', 'Title']] = X_test[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_test.info() | code |