path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses: 1 value) |
---|---|---|---|
105191248/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105191248/cell_18 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
youtube = youtube.drop_duplicates(['title', 'trending_date'], keep='first')
youtube[youtube['title'] == '13 Reasons Why: Season 2 | Official Trailer [HD] | Netflix']
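# trending_date is stored as year.day.month, hence the explicit '%y.%d.%m' format below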
youtube['trending_date'] = pd.to_datetime(youtube['trending_date'], format='%y.%d.%m')
youtube['publish_time'] = pd.to_datetime(youtube['publish_time'])
youtube.describe() | code |
105191248/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
youtube['title'].loc[34137] | code |
105191248/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
youtube = youtube.drop_duplicates(['title', 'trending_date'], keep='first')
youtube[youtube['title'] == '13 Reasons Why: Season 2 | Official Trailer [HD] | Netflix'] | code |
105191248/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
youtube.head() | code |
105191248/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
youtube = youtube.drop_duplicates(['title', 'trending_date'], keep='first')
youtube[youtube['title'] == '13 Reasons Why: Season 2 | Official Trailer [HD] | Netflix']
youtube['trending_date'] = pd.to_datetime(youtube['trending_date'], format='%y.%d.%m')
youtube['publish_time'] = pd.to_datetime(youtube['publish_time'])
youtube.info() | code |
105191248/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title') | code |
105191248/cell_10 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index | code |
105191248/cell_12 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91] | code |
105191248/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count | code |
2042995/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date
pd.set_option('display.float_format', lambda x: '%.5f' % x)
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
322963/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
print(check_output(['ls', '../input']).decode('utf8')) | code |
322963/cell_5 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
zika_df = pd.read_csv(os.path.join('..', 'input', 'cdc_zika.csv'), low_memory=False)
keep_rows = pd.notnull(zika_df['report_date'])
zika_df = zika_df[keep_rows]
print('Removed {:d} out of {:d} rows with missing report_date.'.format(len(keep_rows) - sum(keep_rows), len(keep_rows)))
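# report_date uses underscore-separated dates; convert to ISO dashes, parse, and use as a sorted DatetimeIndex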
zika_df.index = pd.to_datetime([d.replace('_', '-') for d in zika_df['report_date']], format='%Y-%m-%d')
zika_df.sort_index(inplace=True)
zika_df.index.rename('report_date', inplace=True)
zika_df.drop('report_date', axis=1, inplace=True) | code |
122264653/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | Person1 = [['Maths', 'Science', 'Entrepreneurship'], 'B', 'Blue', '42.5']
Person1[0][0] | code |
122264653/cell_4 | [
"text_plain_output_1.png"
] | age = {}
type(age)
age = dict()
type(age)
age = {'Ragul': 23, 'Joe': 15, 'Venkat': 32}
type(age) | code |
122264653/cell_2 | [
"text_plain_output_1.png"
] | age = {}
type(age) | code |
122264653/cell_11 | [
"text_plain_output_1.png"
] | marks = {'Ragul': 23, 'Joe': 15, 'Venkat': [34, 44, 56]}
marks['Venkat'] | code |
122264653/cell_8 | [
"text_plain_output_1.png"
] | Person1 = [['Maths', 'Science', 'Entrepreneurship'], 'B', 'Blue', '42.5']
Person1[0] | code |
122264653/cell_15 | [
"text_plain_output_1.png"
] | marks = {'Ragul': 23, 'Joe': 15, 'Venkat': [34, 44, 56]}
marks['Venkat']
marks = {'Ragul': 23, 'Joe': 15, 'Venkat': [34, 44, 56]}
marks['Venkat'][2]
for i in marks:
print(i) | code |
122264653/cell_16 | [
"text_plain_output_1.png"
] | age = {}
type(age)
age = dict()
type(age)
age = {'Ragul': 23, 'Joe': 15, 'Venkat': 32}
type(age)
for i in age:
    print(i, age[i]) | code |
122264653/cell_3 | [
"text_plain_output_1.png"
] | age = {}
type(age)
age = dict()
type(age) | code |
122264653/cell_14 | [
"text_plain_output_1.png"
] | marks = {'Ragul': 23, 'Joe': 15, 'Venkat': [34, 44, 56]}
marks['Venkat']
marks = {'Ragul': 23, 'Joe': 15, 'Venkat': [34, 44, 56]}
marks['Venkat'][2]
marks | code |
122264653/cell_12 | [
"text_plain_output_1.png"
] | marks = {'Ragul': 23, 'Joe': 15, 'Venkat': [34, 44, 56]}
marks['Venkat']
marks = {'Ragul': 23, 'Joe': 15, 'Venkat': [34, 44, 56]}
marks['Venkat'][2] | code |
73071424/cell_21 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
states = {-1: 'Loss', 0: 'Draw', 1: 'Win'}
states
transition = np.array([[0.65, 0.1, 0.25], [0.3, 0.5, 0.2], [0.35, 0.1, 0.55]])
transition
n = 15
br = 0
points = 0
start_state = 0
prev_state = start_state
while n:
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
prev_state = curr_state
n -= 1
br += 1
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
n = 100000
br = n
br_l = 0
br_d = 0
br_w = 0
start_state = 0
prev_state = start_state
while n:
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
if curr_state == -1:
br_l += 1
elif curr_state == 0:
br_d += 1
else:
br_w += 1
prev_state = curr_state
n -= 1
steps = 10 ** 6
start_state = 0
pi = np.array([0, 0, 0])
pi[start_state + 1] += 1
prev_state = start_state
for i in range(steps):
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
pi[curr_state + 1] += 1
prev_state = curr_state
import scipy.linalg
values, left = scipy.linalg.eig(transition, right=False, left=True)  # recompute left eigenvectors (as in cell_12) so this cell runs standalone
pi = left[:, 0]
pi_normalized = [(x / np.sum(pi)).real for x in pi]
steps = 10 ** 6
start_state = 0
pi = transition[start_state + 1]
for i in range(steps):
pi = np.dot(pi, transition)
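# alternatively, raise the transition matrix to a high power: each row then converges to the stationary distribution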
steps = 10
transition_n = transition
for i in range(steps):
transition_n = np.matmul(transition_n, transition)
steps = 1000
transition_n = transition
for i in range(steps):
transition_n = np.matmul(transition_n, transition)
print('Matrix: \n', transition_n, '\n')
print('pi = ', transition_n[1]) | code |
73071424/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
states = {-1: 'Loss', 0: 'Draw', 1: 'Win'}
states
transition = np.array([[0.65, 0.1, 0.25], [0.3, 0.5, 0.2], [0.35, 0.1, 0.55]])
transition
n = 15
br = 0
points = 0
start_state = 0
prev_state = start_state
while n:
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
prev_state = curr_state
n -= 1
br += 1
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
n = 100000
br = n
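# simulate 100,000 matches and record how often each outcome occurs to estimate long-run win/draw/loss frequencies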
br_l = 0
br_d = 0
br_w = 0
start_state = 0
prev_state = start_state
while n:
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
if curr_state == -1:
br_l += 1
elif curr_state == 0:
br_d += 1
else:
br_w += 1
prev_state = curr_state
n -= 1
print('Loss : ', br_l / br)
print('Draw : ', br_d / br)
print('Win : ', br_w / br) | code |
73071424/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
transition = np.array([[0.65, 0.1, 0.25], [0.3, 0.5, 0.2], [0.35, 0.1, 0.55]])
transition | code |
73071424/cell_23 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
states = {-1: 'Loss', 0: 'Draw', 1: 'Win'}
states
transition = np.array([[0.65, 0.1, 0.25], [0.3, 0.5, 0.2], [0.35, 0.1, 0.55]])
transition
n = 15
br = 0
points = 0
start_state = 0
prev_state = start_state
while n:
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
prev_state = curr_state
n -= 1
br += 1
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
n = 100000
br = n
br_l = 0
br_d = 0
br_w = 0
start_state = 0
prev_state = start_state
while n:
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
if curr_state == -1:
br_l += 1
elif curr_state == 0:
br_d += 1
else:
br_w += 1
prev_state = curr_state
n -= 1
steps = 10 ** 6
start_state = 0
pi = np.array([0, 0, 0])
pi[start_state + 1] += 1
prev_state = start_state
for i in range(steps):
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
pi[curr_state + 1] += 1
prev_state = curr_state
import scipy.linalg
values, left = scipy.linalg.eig(transition, right=False, left=True)  # recompute left eigenvectors (as in cell_12) so this cell runs standalone
pi = left[:, 0]
pi_normalized = [(x / np.sum(pi)).real for x in pi]
steps = 10 ** 6
start_state = 0
pi = transition[start_state + 1]
for i in range(steps):
pi = np.dot(pi, transition)
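# probability of observing a whole state sequence: pi[s0] * product of A[s_(t-1)][s_t] over the sequence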
def find_prob(seq, A, pi):
start_state = seq[0]
prob = pi[start_state]
prev_state = start_state
for i in range(1, len(seq)):
curr_state = seq[i]
prob *= A[prev_state][curr_state]
prev_state = curr_state
return prob
print(find_prob([1, 0, -1, -1, -1, 1], transition, pi_normalized)) | code |
73071424/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
states = {-1: 'Loss', 0: 'Draw', 1: 'Win'}
states
transition = np.array([[0.65, 0.1, 0.25], [0.3, 0.5, 0.2], [0.35, 0.1, 0.55]])
transition
n = 15
br = 0
points = 0
start_state = 0
prev_state = start_state
while n:
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
prev_state = curr_state
n -= 1
br += 1
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
n = 100000
br = n
br_l = 0
br_d = 0
br_w = 0
start_state = 0
prev_state = start_state
while n:
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
if curr_state == -1:
br_l += 1
elif curr_state == 0:
br_d += 1
else:
br_w += 1
prev_state = curr_state
n -= 1
steps = 10 ** 6
start_state = 0
pi = np.array([0, 0, 0])
pi[start_state + 1] += 1
prev_state = start_state
for i in range(steps):
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
pi[curr_state + 1] += 1
prev_state = curr_state
import scipy.linalg
values, left = scipy.linalg.eig(transition, right=False, left=True)  # recompute left eigenvectors (as in cell_12) so this cell runs standalone
pi = left[:, 0]
pi_normalized = [(x / np.sum(pi)).real for x in pi]
steps = 10 ** 6
start_state = 0
pi = transition[start_state + 1]
for i in range(steps):
pi = np.dot(pi, transition)
steps = 10
transition_n = transition
for i in range(steps):
transition_n = np.matmul(transition_n, transition)
print('Matrix: \n', transition_n, '\n')
print('pi = ', transition_n[1]) | code |
73071424/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
states = {-1: 'Loss', 0: 'Draw', 1: 'Win'}
states
transition = np.array([[0.65, 0.1, 0.25], [0.3, 0.5, 0.2], [0.35, 0.1, 0.55]])
transition
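# rows/columns follow the order (Loss, Draw, Win); a state s in {-1, 0, 1} maps to row index s + 1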
n = 15
br = 0
points = 0
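# scoring: 3 points for a win, 1 for a draw, 0 for a loss; br counts the simulated matches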
start_state = 0
print(states[start_state], '-->', end=' ')
prev_state = start_state
while n:
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
print(states[curr_state], '-->', end=' ')
prev_state = curr_state
n -= 1
br += 1
print('stop')
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
print('Points: ', points)
print('Success rate: ', points / (3 * br)) | code |
73071424/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
states = {-1: 'Loss', 0: 'Draw', 1: 'Win'}
states
transition = np.array([[0.65, 0.1, 0.25], [0.3, 0.5, 0.2], [0.35, 0.1, 0.55]])
transition
n = 15
br = 0
points = 0
start_state = 0
prev_state = start_state
while n:
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
prev_state = curr_state
n -= 1
br += 1
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
n = 100000
br = n
br_l = 0
br_d = 0
br_w = 0
start_state = 0
prev_state = start_state
while n:
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
if curr_state == -1:
br_l += 1
elif curr_state == 0:
br_d += 1
else:
br_w += 1
prev_state = curr_state
n -= 1
steps = 10 ** 6
start_state = 0
pi = np.array([0, 0, 0])
pi[start_state + 1] += 1
prev_state = start_state
for i in range(steps):
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
pi[curr_state + 1] += 1
prev_state = curr_state
import scipy.linalg
values, left = scipy.linalg.eig(transition, right=False, left=True)  # recompute left eigenvectors (as in cell_12) so this cell runs standalone
pi = left[:, 0]
pi_normalized = [(x / np.sum(pi)).real for x in pi]
print('pi = ', pi_normalized) | code |
73071424/cell_3 | [
"text_plain_output_1.png"
] | states = {-1: 'Loss', 0: 'Draw', 1: 'Win'}
states | code |
73071424/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
states = {-1: 'Loss', 0: 'Draw', 1: 'Win'}
states
transition = np.array([[0.65, 0.1, 0.25], [0.3, 0.5, 0.2], [0.35, 0.1, 0.55]])
transition
n = 15
br = 0
points = 0
start_state = 0
prev_state = start_state
while n:
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
prev_state = curr_state
n -= 1
br += 1
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
n = 100000
br = n
br_l = 0
br_d = 0
br_w = 0
start_state = 0
prev_state = start_state
while n:
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
if curr_state == -1:
br_l += 1
elif curr_state == 0:
br_d += 1
else:
br_w += 1
prev_state = curr_state
n -= 1
steps = 10 ** 6
start_state = 0
pi = np.array([0, 0, 0])
pi[start_state + 1] += 1
prev_state = start_state
for i in range(steps):
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
pi[curr_state + 1] += 1
prev_state = curr_state
import scipy.linalg
values, left = scipy.linalg.eig(transition, right=False, left=True)  # recompute left eigenvectors (as in cell_12) so this cell runs standalone
pi = left[:, 0]
pi_normalized = [(x / np.sum(pi)).real for x in pi]
steps = 10 ** 6
start_state = 0
pi = transition[start_state + 1]
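# power iteration: repeatedly multiplying a row distribution by the transition matrix converges to the stationary distribution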
for i in range(steps):
pi = np.dot(pi, transition)
print('pi = ', pi) | code |
73071424/cell_10 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
states = {-1: 'Loss', 0: 'Draw', 1: 'Win'}
states
transition = np.array([[0.65, 0.1, 0.25], [0.3, 0.5, 0.2], [0.35, 0.1, 0.55]])
transition
n = 15
br = 0
points = 0
start_state = 0
prev_state = start_state
while n:
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
prev_state = curr_state
n -= 1
br += 1
if prev_state == 0:
points += 1
elif prev_state == 1:
points += 3
n = 100000
br = n
br_l = 0
br_d = 0
br_w = 0
start_state = 0
prev_state = start_state
while n:
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
if curr_state == -1:
br_l += 1
elif curr_state == 0:
br_d += 1
else:
br_w += 1
prev_state = curr_state
n -= 1
steps = 10 ** 6
start_state = 0
pi = np.array([0, 0, 0])
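# count visits to each state over a long random walk; normalized counts estimate the stationary distribution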
pi[start_state + 1] += 1
prev_state = start_state
for i in range(steps):
curr_state = np.random.choice([-1, 0, 1], p=transition[prev_state + 1])
pi[curr_state + 1] += 1
prev_state = curr_state
print('pi = ', pi / steps) | code |
73071424/cell_12 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
transition = np.array([[0.65, 0.1, 0.25], [0.3, 0.5, 0.2], [0.35, 0.1, 0.55]])
transition
import scipy.linalg
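# the stationary distribution is the (normalized) left eigenvector of the transition matrix associated with eigenvalue 1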
values, left = scipy.linalg.eig(transition, right=False, left=True)
print('left eigen vectors =\n', left, '\n')
print('eigen values = \n', values) | code |
326282/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from IPython.display import display
import numpy as np
import pandas as pd
import re
def extract_maritial(name):
""" extract the person's title, and bin it to Mr. Miss. and Mrs.
    assuming a Miss, Lady or Countess has more chance to survive than a regular married woman."""
re_maritial = ' ([A-Za-z]+\\.) '
found = re.findall(re_maritial, name)[0]
replace = [['Dr.', 'Sir.'], ['Rev.', 'Sir.'], ['Major.', 'Officer.'], ['Mlle.', 'Miss.'], ['Col.', 'Officer.'], ['Master.', 'Sir.'], ['Jonkheer.', 'Sir.'], ['Sir.', 'Sir.'], ['Don.', 'Sir.'], ['Countess.', 'High.'], ['Capt.', 'Officer.'], ['Ms.', 'High.'], ['Mme.', 'High.'], ['Dona.', 'High.'], ['Lady.', 'High.']]
for i in range(0, len(replace)):
if found == replace[i][0]:
found = replace[i][1]
break
return found
def father(sex, age, parch):
if sex == 'male' and age > 16 and (parch > 0):
return 1
else:
return 0
def mother(sex, age, parch):
if sex == 'female' and age > 16 and (parch > 0):
return 1
else:
return 0
def parent(sex, age, parch):
if mother(sex, age, parch) == 1 or father(sex, age, parch) == 1:
return 1
else:
return 0
def extract_cabin_nr(cabin):
""" Extracts the cabin number. If there no number found, return NaN """
if not pd.isnull(cabin):
cabin = cabin.split(' ')[-1]
re_numb = '[A-Z]([0-9]+)'
try:
number = int(re.findall(re_numb, cabin)[0])
return number
except:
return np.nan
else:
return np.nan
def extract_cabin_letter(cabin):
""" Extracts the cabin letter. If there no letter found, return NaN """
if not pd.isnull(cabin):
cabin = cabin.split(' ')[-1]
re_char = '([A-Z])[0-9]+'
try:
character = re.findall(re_char, cabin)[0]
return character
except:
return np.nan
else:
return np.nan
def expand_sex(sex, age):
""" this expands male/female with kid. Cause below 14 years old, male or female is irrelevant"""
if age < 14:
return 'kid'
else:
return sex
def feat_eng(data):
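    # derive title, cabin letter/number, parent flags, family size, and binned Fare/Age features from the raw columns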
data['Title'] = list(map(extract_maritial, data['Name']))
data['Cabin_char'] = list(map(extract_cabin_letter, data['Cabin']))
data['Cabin_nr'] = list(map(extract_cabin_nr, data['Cabin']))
data['Cabin_nr_odd'] = data.Cabin_nr.apply(lambda x: np.nan if x == np.nan else x % 2)
data['Father'] = list(map(father, data.Sex, data.Age, data.Parch))
data['Mother'] = list(map(mother, data.Sex, data.Age, data.Parch))
data['Parent'] = list(map(parent, data.Sex, data.Age, data.Parch))
data['has_parents_or_kids'] = data.Parch.apply(lambda x: 1 if x > 0 else 0)
data['FamilySize'] = data.SibSp + data.Parch
data['Sex'] = list(map(expand_sex, data['Sex'], data['Age']))
data['FareBin'] = pd.cut(data.Fare, bins=(-1000, 0, 8.67, 16.11, 32, 350, 1000))
data['AgeBin'] = pd.cut(data.Age, bins=(0, 15, 25, 60, 90))
return data
def missing(data):
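    # impute missing Age with the median age of the passenger's Title group, then fill Fare, Embarked and the Cabin-derived fields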
data.loc[data.Age.isnull() & (data.Title == 'Sir.'), 'Age'] = data.loc[data.Title == 'Sir.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Officer.'), 'Age'] = data.loc[data.Title == 'Officer.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Miss.'), 'Age'] = data.loc[data.Title == 'Miss.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'High.'), 'Age'] = data.loc[data.Title == 'High.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Mrs.'), 'Age'] = data.loc[data.Title == 'Mrs.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Mr.'), 'Age'] = data.loc[data.Title == 'Mr.', 'Age'].median()
median_fare = data['Fare'].median()
data['Fare'].fillna(value=median_fare, inplace=True)
mode_embarked = data['Embarked'].mode()[0]
data['Embarked'].fillna(value=mode_embarked, inplace=True)
data['Cabin_char'].fillna(value=-9999, inplace=True)
data['Cabin_nr'].fillna(value=-9999, inplace=True)
data['Cabin_nr_odd'].fillna(value=-9999, inplace=True)
data = data.drop(['Name', 'Cabin', 'Fare', 'Age', 'Ticket'], 1)
return data
train = pd.read_csv('../input/train.csv')
display('Unaltered training set:')
display(train.head(8))
train = feat_eng(train)
display('After feature engineering:')
display(train.head(8))
train = missing(train)
display('After handling missing values:')
display(train.head(8))
train = pd.get_dummies(train, drop_first=True)
display('After handling categorical values:')
display(train.head(8)) | code |
326282/cell_16 | [
"text_plain_output_1.png"
] | from IPython.display import display
from sklearn import cross_validation
from sklearn.feature_selection import RFECV
import numpy as np
import pandas as pd
import re
import xgboost as xgb
def extract_maritial(name):
""" extract the person's title, and bin it to Mr. Miss. and Mrs.
    assuming a Miss, Lady or Countess has more chance to survive than a regular married woman."""
re_maritial = ' ([A-Za-z]+\\.) '
found = re.findall(re_maritial, name)[0]
replace = [['Dr.', 'Sir.'], ['Rev.', 'Sir.'], ['Major.', 'Officer.'], ['Mlle.', 'Miss.'], ['Col.', 'Officer.'], ['Master.', 'Sir.'], ['Jonkheer.', 'Sir.'], ['Sir.', 'Sir.'], ['Don.', 'Sir.'], ['Countess.', 'High.'], ['Capt.', 'Officer.'], ['Ms.', 'High.'], ['Mme.', 'High.'], ['Dona.', 'High.'], ['Lady.', 'High.']]
for i in range(0, len(replace)):
if found == replace[i][0]:
found = replace[i][1]
break
return found
def father(sex, age, parch):
if sex == 'male' and age > 16 and (parch > 0):
return 1
else:
return 0
def mother(sex, age, parch):
if sex == 'female' and age > 16 and (parch > 0):
return 1
else:
return 0
def parent(sex, age, parch):
if mother(sex, age, parch) == 1 or father(sex, age, parch) == 1:
return 1
else:
return 0
def extract_cabin_nr(cabin):
""" Extracts the cabin number. If there no number found, return NaN """
if not pd.isnull(cabin):
cabin = cabin.split(' ')[-1]
re_numb = '[A-Z]([0-9]+)'
try:
number = int(re.findall(re_numb, cabin)[0])
return number
except:
return np.nan
else:
return np.nan
def extract_cabin_letter(cabin):
""" Extracts the cabin letter. If there no letter found, return NaN """
if not pd.isnull(cabin):
cabin = cabin.split(' ')[-1]
re_char = '([A-Z])[0-9]+'
try:
character = re.findall(re_char, cabin)[0]
return character
except:
return np.nan
else:
return np.nan
def expand_sex(sex, age):
""" this expands male/female with kid. Cause below 14 years old, male or female is irrelevant"""
if age < 14:
return 'kid'
else:
return sex
def feat_eng(data):
data['Title'] = list(map(extract_maritial, data['Name']))
data['Cabin_char'] = list(map(extract_cabin_letter, data['Cabin']))
data['Cabin_nr'] = list(map(extract_cabin_nr, data['Cabin']))
data['Cabin_nr_odd'] = data.Cabin_nr.apply(lambda x: np.nan if x == np.nan else x % 2)
data['Father'] = list(map(father, data.Sex, data.Age, data.Parch))
data['Mother'] = list(map(mother, data.Sex, data.Age, data.Parch))
data['Parent'] = list(map(parent, data.Sex, data.Age, data.Parch))
data['has_parents_or_kids'] = data.Parch.apply(lambda x: 1 if x > 0 else 0)
data['FamilySize'] = data.SibSp + data.Parch
data['Sex'] = list(map(expand_sex, data['Sex'], data['Age']))
data['FareBin'] = pd.cut(data.Fare, bins=(-1000, 0, 8.67, 16.11, 32, 350, 1000))
data['AgeBin'] = pd.cut(data.Age, bins=(0, 15, 25, 60, 90))
return data
def missing(data):
data.loc[data.Age.isnull() & (data.Title == 'Sir.'), 'Age'] = data.loc[data.Title == 'Sir.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Officer.'), 'Age'] = data.loc[data.Title == 'Officer.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Miss.'), 'Age'] = data.loc[data.Title == 'Miss.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'High.'), 'Age'] = data.loc[data.Title == 'High.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Mrs.'), 'Age'] = data.loc[data.Title == 'Mrs.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Mr.'), 'Age'] = data.loc[data.Title == 'Mr.', 'Age'].median()
median_fare = data['Fare'].median()
data['Fare'].fillna(value=median_fare, inplace=True)
mode_embarked = data['Embarked'].mode()[0]
data['Embarked'].fillna(value=mode_embarked, inplace=True)
data['Cabin_char'].fillna(value=-9999, inplace=True)
data['Cabin_nr'].fillna(value=-9999, inplace=True)
data['Cabin_nr_odd'].fillna(value=-9999, inplace=True)
data = data.drop(['Name', 'Cabin', 'Fare', 'Age', 'Ticket'], 1)
return data
train = pd.read_csv('../input/train.csv')
train = feat_eng(train)
train = missing(train)
train = pd.get_dummies(train, drop_first=True)
X = np.array(train.drop(['Survived', 'PassengerId'], 1))
training_features = np.array(train.drop(['Survived', 'PassengerId'], 1).columns)
y = np.array(train['Survived'])
clf = xgb.XGBClassifier()
cv = cross_validation.KFold(len(X), n_folds=20, shuffle=True, random_state=1)
scores = cross_validation.cross_val_score(clf, X, y, cv=cv, n_jobs=1, scoring='accuracy')
clf.fit(X, y)
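# recursive feature elimination with cross-validation: find the feature subset that maximizes accuracy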
featselect = RFECV(estimator=clf, cv=cv, scoring='accuracy')
featselect.fit(X, y)
print('features used during training: ')
print(training_features)
print('')
print('features proposed by RFECV: ')
print(training_features[featselect.support_]) | code |
326282/cell_3 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from IPython.display import display
import re
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn import preprocessing
from sklearn import cross_validation
from sklearn.model_selection import KFold
from sklearn.feature_selection import RFECV
from sklearn.grid_search import GridSearchCV | code |
326282/cell_14 | [
"text_html_output_4.png",
"text_plain_output_4.png",
"text_html_output_2.png",
"text_plain_output_3.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"text_html_output_3.png"
] | from IPython.display import display
from sklearn import cross_validation
import numpy as np
import pandas as pd
import re
import xgboost as xgb
def extract_maritial(name):
""" extract the person's title, and bin it to Mr. Miss. and Mrs.
    assuming a Miss, Lady or Countess has more chance to survive than a regular married woman."""
re_maritial = ' ([A-Za-z]+\\.) '
found = re.findall(re_maritial, name)[0]
replace = [['Dr.', 'Sir.'], ['Rev.', 'Sir.'], ['Major.', 'Officer.'], ['Mlle.', 'Miss.'], ['Col.', 'Officer.'], ['Master.', 'Sir.'], ['Jonkheer.', 'Sir.'], ['Sir.', 'Sir.'], ['Don.', 'Sir.'], ['Countess.', 'High.'], ['Capt.', 'Officer.'], ['Ms.', 'High.'], ['Mme.', 'High.'], ['Dona.', 'High.'], ['Lady.', 'High.']]
for i in range(0, len(replace)):
if found == replace[i][0]:
found = replace[i][1]
break
return found
def father(sex, age, parch):
if sex == 'male' and age > 16 and (parch > 0):
return 1
else:
return 0
def mother(sex, age, parch):
if sex == 'female' and age > 16 and (parch > 0):
return 1
else:
return 0
def parent(sex, age, parch):
if mother(sex, age, parch) == 1 or father(sex, age, parch) == 1:
return 1
else:
return 0
def extract_cabin_nr(cabin):
""" Extracts the cabin number. If there no number found, return NaN """
if not pd.isnull(cabin):
cabin = cabin.split(' ')[-1]
re_numb = '[A-Z]([0-9]+)'
try:
number = int(re.findall(re_numb, cabin)[0])
return number
except:
return np.nan
else:
return np.nan
def extract_cabin_letter(cabin):
""" Extracts the cabin letter. If there no letter found, return NaN """
if not pd.isnull(cabin):
cabin = cabin.split(' ')[-1]
re_char = '([A-Z])[0-9]+'
try:
character = re.findall(re_char, cabin)[0]
return character
except:
return np.nan
else:
return np.nan
def expand_sex(sex, age):
""" this expands male/female with kid. Cause below 14 years old, male or female is irrelevant"""
if age < 14:
return 'kid'
else:
return sex
def feat_eng(data):
data['Title'] = list(map(extract_maritial, data['Name']))
data['Cabin_char'] = list(map(extract_cabin_letter, data['Cabin']))
data['Cabin_nr'] = list(map(extract_cabin_nr, data['Cabin']))
data['Cabin_nr_odd'] = data.Cabin_nr.apply(lambda x: np.nan if x == np.nan else x % 2)
data['Father'] = list(map(father, data.Sex, data.Age, data.Parch))
data['Mother'] = list(map(mother, data.Sex, data.Age, data.Parch))
data['Parent'] = list(map(parent, data.Sex, data.Age, data.Parch))
data['has_parents_or_kids'] = data.Parch.apply(lambda x: 1 if x > 0 else 0)
data['FamilySize'] = data.SibSp + data.Parch
data['Sex'] = list(map(expand_sex, data['Sex'], data['Age']))
data['FareBin'] = pd.cut(data.Fare, bins=(-1000, 0, 8.67, 16.11, 32, 350, 1000))
data['AgeBin'] = pd.cut(data.Age, bins=(0, 15, 25, 60, 90))
return data
def missing(data):
data.loc[data.Age.isnull() & (data.Title == 'Sir.'), 'Age'] = data.loc[data.Title == 'Sir.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Officer.'), 'Age'] = data.loc[data.Title == 'Officer.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Miss.'), 'Age'] = data.loc[data.Title == 'Miss.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'High.'), 'Age'] = data.loc[data.Title == 'High.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Mrs.'), 'Age'] = data.loc[data.Title == 'Mrs.', 'Age'].median()
data.loc[data.Age.isnull() & (data.Title == 'Mr.'), 'Age'] = data.loc[data.Title == 'Mr.', 'Age'].median()
median_fare = data['Fare'].median()
data['Fare'].fillna(value=median_fare, inplace=True)
mode_embarked = data['Embarked'].mode()[0]
data['Embarked'].fillna(value=mode_embarked, inplace=True)
data['Cabin_char'].fillna(value=-9999, inplace=True)
data['Cabin_nr'].fillna(value=-9999, inplace=True)
data['Cabin_nr_odd'].fillna(value=-9999, inplace=True)
data = data.drop(['Name', 'Cabin', 'Fare', 'Age', 'Ticket'], 1)
return data
train = pd.read_csv('../input/train.csv')
train = feat_eng(train)
train = missing(train)
train = pd.get_dummies(train, drop_first=True)
X = np.array(train.drop(['Survived', 'PassengerId'], 1))
training_features = np.array(train.drop(['Survived', 'PassengerId'], 1).columns)
y = np.array(train['Survived'])
clf = xgb.XGBClassifier()
cv = cross_validation.KFold(len(X), n_folds=20, shuffle=True, random_state=1)
scores = cross_validation.cross_val_score(clf, X, y, cv=cv, n_jobs=1, scoring='accuracy')
clf.fit(X, y)
print(scores)
print('Accuracy: %.3f stdev: %.2f' % (np.mean(np.abs(scores)), np.std(scores))) | code |
128039843/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.isnull().sum().sum()
fig, ax = plt.subplots(5,5,figsize = (8,8))
for i in range(5):
for j in range(5):
ax[i][j].axis('off')
ax[i][j].imshow(train_data.iloc[[i+(j*5)], 1:].to_numpy().astype(np.uint8).reshape(28, 28), cmap='gray')
plt.show()
#temp = train_data.iloc[:1, 1:].to_numpy().reshape(28,28)
#plt.imshow(temp, cmap='gray')
yX = train_data.copy()
y = yX.pop('label')
X = yX.copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=42)
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
model = GradientBoostingClassifier()
params_dict = {'learning_rate': np.linspace(0.05, 0.3, 6), 'subsample': np.linspace(0.3, 1, 10), 'n_estimators': range(10, 40, 5), 'max_depth': range(2, 20, 2), 'min_samples_split': range(2, 20, 1)}
clf = RandomizedSearchCV(model, params_dict, scoring='accuracy', n_iter=15, verbose=2, cv=3)
search = clf.fit(X_train, y_train)
search.best_params_
learning_rate_params = {'learning_rate': np.linspace(0.08, 0.12, 5), 'subsample': [search.best_params_['subsample']], 'n_estimators': [search.best_params_['n_estimators']], 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
lr_clf = GridSearchCV(model, learning_rate_params, scoring='accuracy', verbose=2, cv=2)
lr_search = lr_clf.fit(X_train, y_train)
lr_search.best_params_
subsample_params = {'learning_rate': [lr_search.best_params_['learning_rate']], 'subsample': np.linspace(0.6, 0.8, 5), 'n_estimators': [search.best_params_['n_estimators']], 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
s_clf = GridSearchCV(model, subsample_params, scoring='accuracy', verbose=2, cv=2)
s_search = s_clf.fit(X_train, y_train)
s_search.best_params_ | code |
128039843/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.isnull().sum().sum() | code |
128039843/cell_23 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.isnull().sum().sum()
fig, ax = plt.subplots(5,5,figsize = (8,8))
for i in range(5):
for j in range(5):
ax[i][j].axis('off')
ax[i][j].imshow(train_data.iloc[[i+(j*5)], 1:].to_numpy().astype(np.uint8).reshape(28, 28), cmap='gray')
plt.show()
#temp = train_data.iloc[:1, 1:].to_numpy().reshape(28,28)
#plt.imshow(temp, cmap='gray')
yX = train_data.copy()
y = yX.pop('label')
X = yX.copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=42)
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
model = GradientBoostingClassifier()
params_dict = {'learning_rate': np.linspace(0.05, 0.3, 6), 'subsample': np.linspace(0.3, 1, 10), 'n_estimators': range(10, 40, 5), 'max_depth': range(2, 20, 2), 'min_samples_split': range(2, 20, 1)}
clf = RandomizedSearchCV(model, params_dict, scoring='accuracy', n_iter=15, verbose=2, cv=3)
search = clf.fit(X_train, y_train)
search.best_params_
learning_rate_params = {'learning_rate': np.linspace(0.08, 0.12, 5), 'subsample': [search.best_params_['subsample']], 'n_estimators': [search.best_params_['n_estimators']], 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
lr_clf = GridSearchCV(model, learning_rate_params, scoring='accuracy', verbose=2, cv=2)
lr_search = lr_clf.fit(X_train, y_train)
lr_search.best_params_
subsample_params = {'learning_rate': [lr_search.best_params_['learning_rate']], 'subsample': np.linspace(0.6, 0.8, 5), 'n_estimators': [search.best_params_['n_estimators']], 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
s_clf = GridSearchCV(model, subsample_params, scoring='accuracy', verbose=2, cv=2)
s_search = s_clf.fit(X_train, y_train)
s_search.best_params_
n_estimators_params = {'learning_rate': [lr_search.best_params_['learning_rate']], 'subsample': [s_search.best_params_['subsample']], 'n_estimators': range(34, 42, 2), 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
n_clf = GridSearchCV(model, n_estimators_params, scoring='accuracy', verbose=2, cv=2)
n_search = n_clf.fit(X_train, y_train)
n_search.best_params_
max_params = {'learning_rate': [lr_search.best_params_['learning_rate']], 'subsample': [s_search.best_params_['subsample']], 'n_estimators': [n_search.best_params_['n_estimators']], 'max_depth': range(6, 10, 1), 'min_samples_split': [search.best_params_['min_samples_split']]}
max_clf = GridSearchCV(model, max_params, scoring='accuracy', verbose=2, cv=2)
max_search = max_clf.fit(X_train, y_train)
max_search.best_params_ | code |
128039843/cell_20 | [
"image_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.isnull().sum().sum()
fig, ax = plt.subplots(5,5,figsize = (8,8))
for i in range(5):
for j in range(5):
ax[i][j].axis('off')
ax[i][j].imshow(train_data.iloc[[i+(j*5)], 1:].to_numpy().astype(np.uint8).reshape(28, 28), cmap='gray')
plt.show()
#temp = train_data.iloc[:1, 1:].to_numpy().reshape(28,28)
#plt.imshow(temp, cmap='gray')
yX = train_data.copy()
y = yX.pop('label')
X = yX.copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=42)
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
model = GradientBoostingClassifier()
params_dict = {'learning_rate': np.linspace(0.05, 0.3, 6), 'subsample': np.linspace(0.3, 1, 10), 'n_estimators': range(10, 40, 5), 'max_depth': range(2, 20, 2), 'min_samples_split': range(2, 20, 1)}
clf = RandomizedSearchCV(model, params_dict, scoring='accuracy', n_iter=15, verbose=2, cv=3)
search = clf.fit(X_train, y_train)
search.best_params_
learning_rate_params = {'learning_rate': np.linspace(0.08, 0.12, 5), 'subsample': [search.best_params_['subsample']], 'n_estimators': [search.best_params_['n_estimators']], 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
lr_clf = GridSearchCV(model, learning_rate_params, scoring='accuracy', verbose=2, cv=2)
lr_search = lr_clf.fit(X_train, y_train)
lr_search.best_params_ | code |
128039843/cell_26 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.isnull().sum().sum()
fig, ax = plt.subplots(5,5,figsize = (8,8))
for i in range(5):
for j in range(5):
ax[i][j].axis('off')
ax[i][j].imshow(train_data.iloc[[i+(j*5)], 1:].to_numpy().astype(np.uint8).reshape(28, 28), cmap='gray')
plt.show()
#temp = train_data.iloc[:1, 1:].to_numpy().reshape(28,28)
#plt.imshow(temp, cmap='gray')
yX = train_data.copy()
y = yX.pop('label')
X = yX.copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=42)
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
yX2 = train_data.copy()
y2 = yX2.pop('label')
X2 = yX2.copy()
X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size=0.4, random_state=42)
(X2_train.shape, X2_test.shape, y2_train.shape, y2_test.shape) | code |
128039843/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.isnull().sum().sum()
fig, ax = plt.subplots(5, 5, figsize=(8, 8))
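# each row of the Kannada-MNIST CSV (after the label column) is a flattened 28x28 grayscale image; reshape it to plot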
for i in range(5):
for j in range(5):
ax[i][j].axis('off')
ax[i][j].imshow(train_data.iloc[[i + j * 5], 1:].to_numpy().astype(np.uint8).reshape(28, 28), cmap='gray')
plt.show() | code |
128039843/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128039843/cell_7 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.head() | code |
128039843/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.isnull().sum().sum()
fig, ax = plt.subplots(5,5,figsize = (8,8))
for i in range(5):
for j in range(5):
ax[i][j].axis('off')
ax[i][j].imshow(train_data.iloc[[i+(j*5)], 1:].to_numpy().astype(np.uint8).reshape(28, 28), cmap='gray')
plt.show()
#temp = train_data.iloc[:1, 1:].to_numpy().reshape(28,28)
#plt.imshow(temp, cmap='gray')
yX = train_data.copy()
y = yX.pop('label')
X = yX.copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=42)
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
model = GradientBoostingClassifier()
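# broad randomized search over the gradient-boosting hyperparameters; other cells then fine-tune one parameter at a time with GridSearchCV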
params_dict = {'learning_rate': np.linspace(0.05, 0.3, 6), 'subsample': np.linspace(0.3, 1, 10), 'n_estimators': range(10, 40, 5), 'max_depth': range(2, 20, 2), 'min_samples_split': range(2, 20, 1)}
clf = RandomizedSearchCV(model, params_dict, scoring='accuracy', n_iter=15, verbose=2, cv=3)
search = clf.fit(X_train, y_train)
search.best_params_ | code |
128039843/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.describe() | code |
128039843/cell_16 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.isnull().sum().sum()
fig, ax = plt.subplots(5,5,figsize = (8,8))
for i in range(5):
for j in range(5):
ax[i][j].axis('off')
ax[i][j].imshow(train_data.iloc[[i+(j*5)], 1:].to_numpy().astype(np.uint8).reshape(28, 28), cmap='gray')
plt.show()
#temp = train_data.iloc[:1, 1:].to_numpy().reshape(28,28)
#plt.imshow(temp, cmap='gray')
yX = train_data.copy()
y = yX.pop('label')
X = yX.copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=42)
(X_train.shape, X_test.shape, y_train.shape, y_test.shape) | code |
128039843/cell_24 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.isnull().sum().sum()
fig, ax = plt.subplots(5,5,figsize = (8,8))
for i in range(5):
for j in range(5):
ax[i][j].axis('off')
ax[i][j].imshow(train_data.iloc[[i+(j*5)], 1:].to_numpy().astype(np.uint8).reshape(28, 28), cmap='gray')
plt.show()
#temp = train_data.iloc[:1, 1:].to_numpy().reshape(28,28)
#plt.imshow(temp, cmap='gray')
yX = train_data.copy()
y = yX.pop('label')
X = yX.copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=42)
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
model = GradientBoostingClassifier()
params_dict = {'learning_rate': np.linspace(0.05, 0.3, 6), 'subsample': np.linspace(0.3, 1, 10), 'n_estimators': range(10, 40, 5), 'max_depth': range(2, 20, 2), 'min_samples_split': range(2, 20, 1)}
clf = RandomizedSearchCV(model, params_dict, scoring='accuracy', n_iter=15, verbose=2, cv=3)
search = clf.fit(X_train, y_train)
search.best_params_
learning_rate_params = {'learning_rate': np.linspace(0.08, 0.12, 5), 'subsample': [search.best_params_['subsample']], 'n_estimators': [search.best_params_['n_estimators']], 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
lr_clf = GridSearchCV(model, learning_rate_params, scoring='accuracy', verbose=2, cv=2)
lr_search = lr_clf.fit(X_train, y_train)
lr_search.best_params_
subsample_params = {'learning_rate': [lr_search.best_params_['learning_rate']], 'subsample': np.linspace(0.6, 0.8, 5), 'n_estimators': [search.best_params_['n_estimators']], 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
s_clf = GridSearchCV(model, subsample_params, scoring='accuracy', verbose=2, cv=2)
s_search = s_clf.fit(X_train, y_train)
s_search.best_params_
n_estimators_params = {'learning_rate': [lr_search.best_params_['learning_rate']], 'subsample': [s_search.best_params_['subsample']], 'n_estimators': range(34, 42, 2), 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
n_clf = GridSearchCV(model, n_estimators_params, scoring='accuracy', verbose=2, cv=2)
n_search = n_clf.fit(X_train, y_train)
n_search.best_params_
max_params = {'learning_rate': [lr_search.best_params_['learning_rate']], 'subsample': [s_search.best_params_['subsample']], 'n_estimators': [n_search.best_params_['n_estimators']], 'max_depth': range(6, 10, 1), 'min_samples_split': [search.best_params_['min_samples_split']]}
max_clf = GridSearchCV(model, max_params, scoring='accuracy', verbose=2, cv=2)
max_search = max_clf.fit(X_train, y_train)
max_search.best_params_
min_params = {'learning_rate': [lr_search.best_params_['learning_rate']], 'subsample': [s_search.best_params_['subsample']], 'n_estimators': [n_search.best_params_['n_estimators']], 'max_depth': [max_search.best_params_['max_depth']], 'min_samples_split': range(17, 21, 1)}
min_clf = GridSearchCV(model, min_params, scoring='accuracy', verbose=2, cv=2)
min_search = min_clf.fit(X_train, y_train)
min_search.best_params_ | code |
128039843/cell_22 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/Kannada-MNIST/train.csv')
train_data.isnull().sum().sum()
fig, ax = plt.subplots(5,5,figsize = (8,8))
for i in range(5):
for j in range(5):
ax[i][j].axis('off')
ax[i][j].imshow(train_data.iloc[[i+(j*5)], 1:].to_numpy().astype(np.uint8).reshape(28, 28), cmap='gray')
plt.show()
#temp = train_data.iloc[:1, 1:].to_numpy().reshape(28,28)
#plt.imshow(temp, cmap='gray')
yX = train_data.copy()
y = yX.pop('label')
X = yX.copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=42)
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
model = GradientBoostingClassifier()
params_dict = {'learning_rate': np.linspace(0.05, 0.3, 6), 'subsample': np.linspace(0.3, 1, 10), 'n_estimators': range(10, 40, 5), 'max_depth': range(2, 20, 2), 'min_samples_split': range(2, 20, 1)}
clf = RandomizedSearchCV(model, params_dict, scoring='accuracy', n_iter=15, verbose=2, cv=3)
search = clf.fit(X_train, y_train)
search.best_params_
learning_rate_params = {'learning_rate': np.linspace(0.08, 0.12, 5), 'subsample': [search.best_params_['subsample']], 'n_estimators': [search.best_params_['n_estimators']], 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
lr_clf = GridSearchCV(model, learning_rate_params, scoring='accuracy', verbose=2, cv=2)
lr_search = lr_clf.fit(X_train, y_train)
lr_search.best_params_
subsample_params = {'learning_rate': [lr_search.best_params_['learning_rate']], 'subsample': np.linspace(0.6, 0.8, 5), 'n_estimators': [search.best_params_['n_estimators']], 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
s_clf = GridSearchCV(model, subsample_params, scoring='accuracy', verbose=2, cv=2)
s_search = s_clf.fit(X_train, y_train)
s_search.best_params_
n_estimators_params = {'learning_rate': [lr_search.best_params_['learning_rate']], 'subsample': [s_search.best_params_['subsample']], 'n_estimators': range(34, 42, 2), 'max_depth': [search.best_params_['max_depth']], 'min_samples_split': [search.best_params_['min_samples_split']]}
n_clf = GridSearchCV(model, n_estimators_params, scoring='accuracy', verbose=2, cv=2)
n_search = n_clf.fit(X_train, y_train)
n_search.best_params_ | code |
128007350/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
corr_matrix = df.corr()
corr_matrix
df['education'].isna().sum()
df['banned'].isna().sum() | code |
128007350/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges | code |
128007350/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
nodes['education'].value_counts() | code |
128007350/cell_29 | [
"text_html_output_1.png"
] | from scipy.stats import chi2
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
corr_matrix = df.corr()
corr_matrix
from scipy.stats import chi2
def chi2_p_return(col1, col2):
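    # build a contingency table for the two columns, compute the chi-square statistic by hand, and return it with the corresponding p-value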
col1_vals = col1.unique()
col2_vals = col2.unique()
contingency_matrix = {}
for i in col1_vals:
row = {}
for j in col2_vals:
count = len(df[(col1 == i) & (col2 == j)])
row[j] = count
contingency_matrix[i] = row
contingency_matrix_df = pd.DataFrame(contingency_matrix)
contingency_matrix_df.fillna(0, inplace=True)
actual = contingency_matrix_df.values
row_sums = actual.sum(axis=1)
col_sums = actual.sum(axis=0)
total = actual.sum()
expected = np.outer(row_sums, col_sums) / total
chi_squared = np.sum((actual - expected) ** 2 / expected)
degree_freedom = (len(contingency_matrix_df) - 1) * (len(contingency_matrix_df.columns) - 1)
p_value_sci = '{:.3e}'.format(1 - chi2.cdf(chi_squared, degree_freedom))
return (chi_squared, p_value_sci)
chi_squared, p_value = chi2_p_return(df['education'], df['banned'])
alpha = 0.01
chi_squared, p_value = chi2_p_return(df['expired_rate'], df['banned'])
print('Chi_squared:', chi_squared, ', p_value:', p_value, '(very close to zero)')
alpha = 0.01
if float(p_value) <= alpha:
print('Dependent (reject H0)')
else:
print('Independent (H0 holds true)') | code |
128007350/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes | code |
128007350/cell_31 | [
"text_plain_output_1.png"
] | from scipy.stats import chi2
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
corr_matrix = df.corr()
corr_matrix
from scipy.stats import chi2
def chi2_p_return(col1, col2):
col1_vals = col1.unique()
col2_vals = col2.unique()
contingency_matrix = {}
for i in col1_vals:
row = {}
for j in col2_vals:
count = len(df[(col1 == i) & (col2 == j)])
row[j] = count
contingency_matrix[i] = row
contingency_matrix_df = pd.DataFrame(contingency_matrix)
contingency_matrix_df.fillna(0, inplace=True)
actual = contingency_matrix_df.values
row_sums = actual.sum(axis=1)
col_sums = actual.sum(axis=0)
total = actual.sum()
expected = np.outer(row_sums, col_sums) / total
chi_squared = np.sum((actual - expected) ** 2 / expected)
degree_freedom = (len(contingency_matrix_df) - 1) * (len(contingency_matrix_df.columns) - 1)
p_value_sci = '{:.3e}'.format(1 - chi2.cdf(chi_squared, degree_freedom))
return (chi_squared, p_value_sci)
chi_squared, p_value = chi2_p_return(df['education'], df['banned'])
alpha = 0.01
chi_squared, p_value = chi2_p_return(df['expired_rate'], df['banned'])
alpha = 0.01
chi_squared, p_value = chi2_p_return(df['approved_rate'], df['english_profile'])
print('Chi_squared:', chi_squared, ', p_value:', p_value, '(very close to zero)')
alpha = 0.01
if float(p_value) <= alpha:
print('Dependent (reject H0)')
else:
print('Independent (H0 holds true)') | code |
128007350/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
corr_matrix = df.corr()
corr_matrix
df.head() | code |
128007350/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
corr_matrix = df.corr()
corr_matrix | code |
128007350/cell_27 | [
"text_html_output_1.png"
] | from scipy.stats import chi2
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
corr_matrix = df.corr()
corr_matrix
from scipy.stats import chi2
def chi2_p_return(col1, col2):
col1_vals = col1.unique()
col2_vals = col2.unique()
contingency_matrix = {}
for i in col1_vals:
row = {}
for j in col2_vals:
count = len(df[(col1 == i) & (col2 == j)])
row[j] = count
contingency_matrix[i] = row
contingency_matrix_df = pd.DataFrame(contingency_matrix)
contingency_matrix_df.fillna(0, inplace=True)
actual = contingency_matrix_df.values
row_sums = actual.sum(axis=1)
col_sums = actual.sum(axis=0)
total = actual.sum()
expected = np.outer(row_sums, col_sums) / total
chi_squared = np.sum((actual - expected) ** 2 / expected)
degree_freedom = (len(contingency_matrix_df) - 1) * (len(contingency_matrix_df.columns) - 1)
p_value_sci = '{:.3e}'.format(1 - chi2.cdf(chi_squared, degree_freedom))
return (chi_squared, p_value_sci)
chi_squared, p_value = chi2_p_return(df['education'], df['banned'])
print('Chi_squared:', chi_squared, ', p_value:', p_value, '(very close to zero)')
alpha = 0.01
if float(p_value) <= alpha:
print('Dependent (reject H0)')
else:
print('Independent (H0 holds true)') | code |
34133139/cell_13 | [
"text_html_output_1.png"
] | from nltk.tokenize import sent_tokenize
import nltk
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
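# note: 'sentences_clean' (used below) is created in another cell by applying a clean_text function to 'sentences'; it is assumed to already exist here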
df1 = df[df['sentences_clean'] != '']
df1 | code |
34133139/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.tokenize import sent_tokenize
import nltk
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
cleanliness = ['satisfactory', 'ample', 'hygienic', 'proper', 'ambience', 'odour', 'dirty', 'clean', 'smell', 'cleanliness']
service = ['desk', 'check in', 'check out', 'reliable', 'fast', 'convenient', 'service', 'hospitality']
location = ['railway', 'view', 'station', 'airport', 'distance', 'far', 'close', 'train', 'metro', 'transport', 'market', 'mall', 'surrounding', 'areas', 'highway', 'traffic', 'out', 'location']
value = ['price', 'amount', 'rate', 'cheap', 'worth', 'low', 'money', 'economical', 'reasonable', 'fee', 'expensive', 'charge', 'value']
room = ['bed', 'bunkbeds', 'toilet', 'bathroom', 'shower', 'dryer', 'fridge', 'space', 'spacious', 'outdated', 'noisy', 'room']
food = ['drink', 'breakfast', 'spicy', 'food', 'tasty', 'tea', 'coffee', 'buffet', 'bar', 'restaurant', 'dinner', 'lunch', 'brunch', 'delicious']
facility = ['pool', 'gym', 'wifi', 'spa', 'internet', 'wireless', 'broken', 'parking', 'ventilation', 'maintained', 'facility', 'lot', 'premises']
staff = ['friendly', 'helpful', 'reliable', 'quick', 'good', 'polite', 'staff']
from nltk.corpus import wordnet
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
text = [t for t in text if len(t) > 0]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
df['sentences_clean'] = df['sentences'].apply(lambda x: clean_text(x))
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
from fuzzywuzzy import fuzz  # assumed import for fuzz.ratio, which is called below but never imported in the original cell
def calculate(word, category):
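    # average fuzz.ratio similarity between each token of a sentence and the given category keyword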
clean = []
for i in range(len(word)):
sum = 0
sum1 = 0
for j in range(len(word[i])):
sum = sum + fuzz.ratio(word[i][j], category)
clean.append(sum / a[i])
return clean
c = []
for k in cleanliness:
c.append(calculate(wordlist, k))
l = []
for k in location:
l.append(calculate(wordlist, k)) | code |
34133139/cell_23 | [
"text_plain_output_1.png"
] | from nltk.tokenize import sent_tokenize
import nltk
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
cleanliness = ['satisfactory', 'ample', 'hygienic', 'proper', 'ambience', 'odour', 'dirty', 'clean', 'smell', 'cleanliness']
service = ['desk', 'check in', 'check out', 'reliable', 'fast', 'convenient', 'service', 'hospitality']
location = ['railway', 'view', 'station', 'airport', 'distance', 'far', 'close', 'train', 'metro', 'transport', 'market', 'mall', 'surrounding', 'areas', 'highway', 'traffic', 'out', 'location']
value = ['price', 'amount', 'rate', 'cheap', 'worth', 'low', 'money', 'economical', 'reasonable', 'fee', 'expensive', 'charge', 'value']
room = ['bed', 'bunkbeds', 'toilet', 'bathroom', 'shower', 'dryer', 'fridge', 'space', 'spacious', 'outdated', 'noisy', 'room']
food = ['drink', 'breakfast', 'spicy', 'food', 'tasty', 'tea', 'coffee', 'buffet', 'bar', 'restaurant', 'dinner', 'lunch', 'brunch', 'delicious']
facility = ['pool', 'gym', 'wifi', 'spa', 'internet', 'wireless', 'broken', 'parking', 'ventilation', 'maintained', 'facility', 'lot', 'premises']
staff = ['friendly', 'helpful', 'reliable', 'quick', 'good', 'polite', 'staff']
from nltk.corpus import wordnet
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
text = [t for t in text if len(t) > 0]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
df['sentences_clean'] = df['sentences'].apply(lambda x: clean_text(x))
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
from fuzzywuzzy import fuzz  # assumed import for fuzz.ratio, missing in the original cell
def calculate(word, category):
clean = []
for i in range(len(word)):
sum = 0
sum1 = 0
for j in range(len(word[i])):
sum = sum + fuzz.ratio(word[i][j], category)
clean.append(sum / a[i])
return clean
c = []
for k in cleanliness:
c.append(calculate(wordlist, k))
s = []
for k in service:
s.append(calculate(wordlist, k)) | code |
34133139/cell_30 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.tokenize import sent_tokenize
from statistics import mean
import nltk
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
cleanliness = ['satisfactory', 'ample', 'hygienic', 'proper', 'ambience', 'odour', 'dirty', 'clean', 'smell', 'cleanliness']
service = ['desk', 'check in', 'check out', 'reliable', 'fast', 'convenient', 'service', 'hospitality']
location = ['railway', 'view', 'station', 'airport', 'distance', 'far', 'close', 'train', 'metro', 'transport', 'market', 'mall', 'surrounding', 'areas', 'highway', 'traffic', 'out', 'location']
value = ['price', 'amount', 'rate', 'cheap', 'worth', 'low', 'money', 'economical', 'reasonable', 'fee', 'expensive', 'charge', 'value']
room = ['bed', 'bunkbeds', 'toilet', 'bathroom', 'shower', 'dryer', 'fridge', 'space', 'spacious', 'outdated', 'noisy', 'room']
food = ['drink', 'breakfast', 'spicy', 'food', 'tasty', 'tea', 'coffee', 'buffet', 'bar', 'restaurant', 'dinner', 'lunch', 'brunch', 'delicious']
facility = ['pool', 'gym', 'wifi', 'spa', 'internet', 'wireless', 'broken', 'parking', 'ventilation', 'maintained', 'facility', 'lot', 'premises']
staff = ['friendly', 'helpful', 'reliable', 'quick', 'good', 'polite', 'staff']
from nltk.corpus import wordnet
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
text = [t for t in text if len(t) > 0]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
df['sentences_clean'] = df['sentences'].apply(lambda x: clean_text(x))
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
np.array(wordlist)
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
from fuzzywuzzy import fuzz  # assumed import for fuzz.ratio, missing in the original cell
def calculate(word, category):
clean = []
for i in range(len(word)):
sum = 0
sum1 = 0
for j in range(len(word[i])):
sum = sum + fuzz.ratio(word[i][j], category)
clean.append(sum / a[i])
return clean
c = []
for k in cleanliness:
c.append(calculate(wordlist, k))
c = np.array(c).transpose()
s = []
for k in service:
s.append(calculate(wordlist, k))
s = np.array(s).transpose()
se = []
for i in s:
se.append(mean(i))
l = []
for k in location:
l.append(calculate(wordlist, k))
l = np.array(l).transpose()
lo = []
for i in l:
lo.append(mean(i))
v = []
for k in value:
v.append(calculate(wordlist, k))
v = np.array(v).transpose()
va = []
for i in v:
va.append(mean(i))
r = []
for k in room:
r.append(calculate(wordlist, k))
r = np.array(r).transpose()
ro = []
for i in r:
ro.append(mean(i))
f = []
for k in food:
f.append(calculate(wordlist, k))
f = np.array(f).transpose()
fo = []
for i in f:
fo.append(mean(i))
fa = []
for k in facility:
fa.append(calculate(wordlist, k))
fa = np.array(fa).transpose()
fac = []
for i in fa:
fac.append(mean(i)) | code |
34133139/cell_20 | [
"text_plain_output_1.png"
] | from nltk.tokenize import sent_tokenize
import nltk
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
cleanliness = ['satisfactory', 'ample', 'hygienic', 'proper', 'ambience', 'odour', 'dirty', 'clean', 'smell', 'cleanliness']
service = ['desk', 'check in', 'check out', 'reliable', 'fast', 'convenient', 'service', 'hospitality']
location = ['railway', 'view', 'station', 'airport', 'distance', 'far', 'close', 'train', 'metro', 'transport', 'market', 'mall', 'surrounding', 'areas', 'highway', 'traffic', 'out', 'location']
value = ['price', 'amount', 'rate', 'cheap', 'worth', 'low', 'money', 'economical', 'reasonable', 'fee', 'expensive', 'charge', 'value']
room = ['bed', 'bunkbeds', 'toilet', 'bathroom', 'shower', 'dryer', 'fridge', 'space', 'spacious', 'outdated', 'noisy', 'room']
food = ['drink', 'breakfast', 'spicy', 'food', 'tasty', 'tea', 'coffee', 'buffet', 'bar', 'restaurant', 'dinner', 'lunch', 'brunch', 'delicious']
facility = ['pool', 'gym', 'wifi', 'spa', 'internet', 'wireless', 'broken', 'parking', 'ventilation', 'maintained', 'facility', 'lot', 'premises']
staff = ['friendly', 'helpful', 'reliable', 'quick', 'good', 'polite', 'staff']
from nltk.corpus import wordnet
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
text = [t for t in text if len(t) > 0]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
df['sentences_clean'] = df['sentences'].apply(lambda x: clean_text(x))
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
from fuzzywuzzy import fuzz  # assumed import for fuzz.ratio, missing in the original cell
def calculate(word, category):
clean = []
for i in range(len(word)):
sum = 0
sum1 = 0
for j in range(len(word[i])):
sum = sum + fuzz.ratio(word[i][j], category)
clean.append(sum / a[i])
return clean
c = []
for k in cleanliness:
c.append(calculate(wordlist, k)) | code |
34133139/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.tokenize import sent_tokenize
import nltk
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
df | code |
34133139/cell_29 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.tokenize import sent_tokenize
from statistics import mean
import nltk
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
cleanliness = ['satisfactory', 'ample', 'hygienic', 'proper', 'ambience', 'odour', 'dirty', 'clean', 'smell', 'cleanliness']
service = ['desk', 'check in', 'check out', 'reliable', 'fast', 'convenient', 'service', 'hospitality']
location = ['railway', 'view', 'station', 'airport', 'distance', 'far', 'close', 'train', 'metro', 'transport', 'market', 'mall', 'surrounding', 'areas', 'highway', 'traffic', 'out', 'location']
value = ['price', 'amount', 'rate', 'cheap', 'worth', 'low', 'money', 'economical', 'reasonable', 'fee', 'expensive', 'charge', 'value']
room = ['bed', 'bunkbeds', 'toilet', 'bathroom', 'shower', 'dryer', 'fridge', 'space', 'spacious', 'outdated', 'noisy', 'room']
food = ['drink', 'breakfast', 'spicy', 'food', 'tasty', 'tea', 'coffee', 'buffet', 'bar', 'restaurant', 'dinner', 'lunch', 'brunch', 'delicious']
facility = ['pool', 'gym', 'wifi', 'spa', 'internet', 'wireless', 'broken', 'parking', 'ventilation', 'maintained', 'facility', 'lot', 'premises']
staff = ['friendly', 'helpful', 'reliable', 'quick', 'good', 'polite', 'staff']
from nltk.corpus import wordnet
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
text = [t for t in text if len(t) > 0]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
df['sentences_clean'] = df['sentences'].apply(lambda x: clean_text(x))
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
np.array(wordlist)
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
from fuzzywuzzy import fuzz  # assumed import for fuzz.ratio, missing in the original cell
def calculate(word, category):
clean = []
for i in range(len(word)):
sum = 0
sum1 = 0
for j in range(len(word[i])):
sum = sum + fuzz.ratio(word[i][j], category)
clean.append(sum / a[i])
return clean
c = []
for k in cleanliness:
c.append(calculate(wordlist, k))
c = np.array(c).transpose()
s = []
for k in service:
s.append(calculate(wordlist, k))
s = np.array(s).transpose()
se = []
for i in s:
se.append(mean(i))
l = []
for k in location:
l.append(calculate(wordlist, k))
l = np.array(l).transpose()
lo = []
for i in l:
lo.append(mean(i))
v = []
for k in value:
v.append(calculate(wordlist, k))
v = np.array(v).transpose()
va = []
for i in v:
va.append(mean(i))
r = []
for k in room:
r.append(calculate(wordlist, k))
r = np.array(r).transpose()
ro = []
for i in r:
ro.append(mean(i))
f = []
for k in food:
f.append(calculate(wordlist, k))
f = np.array(f).transpose()
fo = []
for i in f:
fo.append(mean(i)) | code |
34133139/cell_11 | [
"text_html_output_1.png"
] | from nltk.tokenize import sent_tokenize
import nltk
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
df | code |
34133139/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34133139/cell_18 | [
"text_plain_output_1.png"
] | from nltk.tokenize import sent_tokenize
import nltk
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
a | code |
34133139/cell_32 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.tokenize import sent_tokenize
from statistics import mean
import nltk
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
cleanliness = ['satisfactory', 'ample', 'hygienic', 'proper', 'ambience', 'odour', 'dirty', 'clean', 'smell', 'cleanliness']
service = ['desk', 'check in', 'check out', 'reliable', 'fast', 'convenient', 'service', 'hospitality']
location = ['railway', 'view', 'station', 'airport', 'distance', 'far', 'close', 'train', 'metro', 'transport', 'market', 'mall', 'surrounding', 'areas', 'highway', 'traffic', 'out', 'location']
value = ['price', 'amount', 'rate', 'cheap', 'worth', 'low', 'money', 'economical', 'reasonable', 'fee', 'expensive', 'charge', 'value']
room = ['bed', 'bunkbeds', 'toilet', 'bathroom', 'shower', 'dryer', 'fridge', 'space', 'spacious', 'outdated', 'noisy', 'room']
food = ['drink', 'breakfast', 'spicy', 'food', 'tasty', 'tea', 'coffee', 'buffet', 'bar', 'restaurant', 'dinner', 'lunch', 'brunch', 'delicious']
facility = ['pool', 'gym', 'wifi', 'spa', 'internet', 'wireless', 'broken', 'parking', 'ventilation', 'maintained', 'facility', 'lot', 'premises']
staff = ['friendly', 'helpful', 'reliable', 'quick', 'good', 'polite', 'staff']
from nltk.corpus import wordnet
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
text = [t for t in text if len(t) > 0]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
df['sentences_clean'] = df['sentences'].apply(lambda x: clean_text(x))
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
np.array(wordlist)
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
from fuzzywuzzy import fuzz  # assumed import for fuzz.ratio, missing in the original cell
def calculate(word, category):
clean = []
for i in range(len(word)):
sum = 0
sum1 = 0
for j in range(len(word[i])):
sum = sum + fuzz.ratio(word[i][j], category)
clean.append(sum / a[i])
return clean
c = []
for k in cleanliness:
c.append(calculate(wordlist, k))
c = np.array(c).transpose()
cl = []
for i in c:
cl.append(mean(i))
cl
s = []
for k in service:
s.append(calculate(wordlist, k))
s = np.array(s).transpose()
se = []
for i in s:
se.append(mean(i))
l = []
for k in location:
l.append(calculate(wordlist, k))
l = np.array(l).transpose()
lo = []
for i in l:
lo.append(mean(i))
v = []
for k in value:
v.append(calculate(wordlist, k))
v = np.array(v).transpose()
va = []
for i in v:
va.append(mean(i))
r = []
for k in room:
r.append(calculate(wordlist, k))
r = np.array(r).transpose()
ro = []
for i in r:
ro.append(mean(i))
f = []
for k in food:
f.append(calculate(wordlist, k))
f = np.array(f).transpose()
fo = []
for i in f:
fo.append(mean(i))
fa = []
for k in facility:
fa.append(calculate(wordlist, k))
fa = np.array(fa).transpose()
fac = []
for i in fa:
fac.append(mean(i))
st = []
for k in staff:
st.append(calculate(wordlist, k))
st = np.array(st).transpose()
sta = []
for i in st:
sta.append(mean(i))
cl = np.array(cl)
cl | code |
34133139/cell_28 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.tokenize import sent_tokenize
from statistics import mean
import nltk
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
cleanliness = ['satisfactory', 'ample', 'hygienic', 'proper', 'ambience', 'odour', 'dirty', 'clean', 'smell', 'cleanliness']
service = ['desk', 'check in', 'check out', 'reliable', 'fast', 'convenient', 'service', 'hospitality']
location = ['railway', 'view', 'station', 'airport', 'distance', 'far', 'close', 'train', 'metro', 'transport', 'market', 'mall', 'surrounding', 'areas', 'highway', 'traffic', 'out', 'location']
value = ['price', 'amount', 'rate', 'cheap', 'worth', 'low', 'money', 'economical', 'reasonable', 'fee', 'expensive', 'charge', 'value']
room = ['bed', 'bunkbeds', 'toilet', 'bathroom', 'shower', 'dryer', 'fridge', 'space', 'spacious', 'outdated', 'noisy', 'room']
food = ['drink', 'breakfast', 'spicy', 'food', 'tasty', 'tea', 'coffee', 'buffet', 'bar', 'restaurant', 'dinner', 'lunch', 'brunch', 'delicious']
facility = ['pool', 'gym', 'wifi', 'spa', 'internet', 'wireless', 'broken', 'parking', 'ventilation', 'maintained', 'facility', 'lot', 'premises']
staff = ['friendly', 'helpful', 'reliable', 'quick', 'good', 'polite', 'staff']
from nltk.corpus import wordnet
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
text = [t for t in text if len(t) > 0]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
df['sentences_clean'] = df['sentences'].apply(lambda x: clean_text(x))
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
np.array(wordlist)
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
from fuzzywuzzy import fuzz  # assumed import for fuzz.ratio, missing in the original cell
def calculate(word, category):
clean = []
for i in range(len(word)):
sum = 0
sum1 = 0
for j in range(len(word[i])):
sum = sum + fuzz.ratio(word[i][j], category)
clean.append(sum / a[i])
return clean
c = []
for k in cleanliness:
c.append(calculate(wordlist, k))
c = np.array(c).transpose()
s = []
for k in service:
s.append(calculate(wordlist, k))
s = np.array(s).transpose()
se = []
for i in s:
se.append(mean(i))
l = []
for k in location:
l.append(calculate(wordlist, k))
l = np.array(l).transpose()
lo = []
for i in l:
lo.append(mean(i))
v = []
for k in value:
v.append(calculate(wordlist, k))
v = np.array(v).transpose()
va = []
for i in v:
va.append(mean(i))
r = []
for k in room:
r.append(calculate(wordlist, k))
r = np.array(r).transpose()
ro = []
for i in r:
ro.append(mean(i)) | code |
34133139/cell_15 | [
"text_html_output_1.png"
] | from nltk.tokenize import sent_tokenize
import nltk
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
df1 = df[df['sentences_clean'] != '']
df1['sentences_clean'][5] | code |
34133139/cell_16 | [
"text_plain_output_1.png"
] | from nltk.tokenize import sent_tokenize
import nltk
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
np.array(wordlist) | code |
34133139/cell_31 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.tokenize import sent_tokenize
from statistics import mean
import nltk
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
cleanliness = ['satisfactory', 'ample', 'hygienic', 'proper', 'ambience', 'odour', 'dirty', 'clean', 'smell', 'cleanliness']
service = ['desk', 'check in', 'check out', 'reliable', 'fast', 'convenient', 'service', 'hospitality']
location = ['railway', 'view', 'station', 'airport', 'distance', 'far', 'close', 'train', 'metro', 'transport', 'market', 'mall', 'surrounding', 'areas', 'highway', 'traffic', 'out', 'location']
value = ['price', 'amount', 'rate', 'cheap', 'worth', 'low', 'money', 'economical', 'reasonable', 'fee', 'expensive', 'charge', 'value']
room = ['bed', 'bunkbeds', 'toilet', 'bathroom', 'shower', 'dryer', 'fridge', 'space', 'spacious', 'outdated', 'noisy', 'room']
food = ['drink', 'breakfast', 'spicy', 'food', 'tasty', 'tea', 'coffee', 'buffet', 'bar', 'restaurant', 'dinner', 'lunch', 'brunch', 'delicious']
facility = ['pool', 'gym', 'wifi', 'spa', 'internet', 'wireless', 'broken', 'parking', 'ventilation', 'maintained', 'facility', 'lot', 'premises']
staff = ['friendly', 'helpful', 'reliable', 'quick', 'good', 'polite', 'staff']
from nltk.corpus import wordnet
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
text = [t for t in text if len(t) > 0]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
df['sentences_clean'] = df['sentences'].apply(lambda x: clean_text(x))
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
np.array(wordlist)
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
from fuzzywuzzy import fuzz  # assumed import for fuzz.ratio, missing in the original cell
def calculate(word, category):
clean = []
for i in range(len(word)):
sum = 0
sum1 = 0
for j in range(len(word[i])):
sum = sum + fuzz.ratio(word[i][j], category)
clean.append(sum / a[i])
return clean
c = []
for k in cleanliness:
c.append(calculate(wordlist, k))
c = np.array(c).transpose()
s = []
for k in service:
s.append(calculate(wordlist, k))
s = np.array(s).transpose()
se = []
for i in s:
se.append(mean(i))
l = []
for k in location:
l.append(calculate(wordlist, k))
l = np.array(l).transpose()
lo = []
for i in l:
lo.append(mean(i))
v = []
for k in value:
v.append(calculate(wordlist, k))
v = np.array(v).transpose()
va = []
for i in v:
va.append(mean(i))
r = []
for k in room:
r.append(calculate(wordlist, k))
r = np.array(r).transpose()
ro = []
for i in r:
ro.append(mean(i))
f = []
for k in food:
f.append(calculate(wordlist, k))
f = np.array(f).transpose()
fo = []
for i in f:
fo.append(mean(i))
fa = []
for k in facility:
fa.append(calculate(wordlist, k))
fa = np.array(fa).transpose()
fac = []
for i in fa:
fac.append(mean(i))
st = []
for k in staff:
st.append(calculate(wordlist, k))
st = np.array(st).transpose()
sta = []
for i in st:
sta.append(mean(i)) | code |
34133139/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.tokenize import sent_tokenize
from statistics import mean
import nltk
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
cleanliness = ['satisfactory', 'ample', 'hygienic', 'proper', 'ambience', 'odour', 'dirty', 'clean', 'smell', 'cleanliness']
service = ['desk', 'check in', 'check out', 'reliable', 'fast', 'convenient', 'service', 'hospitality']
location = ['railway', 'view', 'station', 'airport', 'distance', 'far', 'close', 'train', 'metro', 'transport', 'market', 'mall', 'surrounding', 'areas', 'highway', 'traffic', 'out', 'location']
value = ['price', 'amount', 'rate', 'cheap', 'worth', 'low', 'money', 'economical', 'reasonable', 'fee', 'expensive', 'charge', 'value']
room = ['bed', 'bunkbeds', 'toilet', 'bathroom', 'shower', 'dryer', 'fridge', 'space', 'spacious', 'outdated', 'noisy', 'room']
food = ['drink', 'breakfast', 'spicy', 'food', 'tasty', 'tea', 'coffee', 'buffet', 'bar', 'restaurant', 'dinner', 'lunch', 'brunch', 'delicious']
facility = ['pool', 'gym', 'wifi', 'spa', 'internet', 'wireless', 'broken', 'parking', 'ventilation', 'maintained', 'facility', 'lot', 'premises']
staff = ['friendly', 'helpful', 'reliable', 'quick', 'good', 'polite', 'staff']
from nltk.corpus import wordnet
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
text = [t for t in text if len(t) > 0]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
df['sentences_clean'] = df['sentences'].apply(lambda x: clean_text(x))
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
np.array(wordlist)
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
from fuzzywuzzy import fuzz  # assumed import for fuzz.ratio, missing in the original cell
def calculate(word, category):
clean = []
for i in range(len(word)):
sum = 0
sum1 = 0
for j in range(len(word[i])):
sum = sum + fuzz.ratio(word[i][j], category)
clean.append(sum / a[i])
return clean
c = []
for k in cleanliness:
c.append(calculate(wordlist, k))
c = np.array(c).transpose()
cl = []
for i in c:
cl.append(mean(i))
cl | code |
34133139/cell_27 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.tokenize import sent_tokenize
from statistics import mean
import nltk
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
fp = open('/kaggle/input/hotel-text-data/text.txt')
data = fp.read()
sentences = sent_tokenize(data)
df = pd.DataFrame(sentences, columns=['sentences'])
cleanliness = ['satisfactory', 'ample', 'hygienic', 'proper', 'ambience', 'odour', 'dirty', 'clean', 'smell', 'cleanliness']
service = ['desk', 'check in', 'check out', 'reliable', 'fast', 'convenient', 'service', 'hospitality']
location = ['railway', 'view', 'station', 'airport', 'distance', 'far', 'close', 'train', 'metro', 'transport', 'market', 'mall', 'surrounding', 'areas', 'highway', 'traffic', 'out', 'location']
value = ['price', 'amount', 'rate', 'cheap', 'worth', 'low', 'money', 'economical', 'reasonable', 'fee', 'expensive', 'charge', 'value']
room = ['bed', 'bunkbeds', 'toilet', 'bathroom', 'shower', 'dryer', 'fridge', 'space', 'spacious', 'outdated', 'noisy', 'room']
food = ['drink', 'breakfast', 'spicy', 'food', 'tasty', 'tea', 'coffee', 'buffet', 'bar', 'restaurant', 'dinner', 'lunch', 'brunch', 'delicious']
facility = ['pool', 'gym', 'wifi', 'spa', 'internet', 'wireless', 'broken', 'parking', 'ventilation', 'maintained', 'facility', 'lot', 'premises']
staff = ['friendly', 'helpful', 'reliable', 'quick', 'good', 'polite', 'staff']
from nltk.corpus import wordnet
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
text = [t for t in text if len(t) > 0]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
df['sentences_clean'] = df['sentences'].apply(lambda x: clean_text(x))
df1 = df[df['sentences_clean'] != '']
import re
wordlist = []
for i in df1['sentences_clean']:
wordlist.append(re.split('\\s+', i))
np.array(wordlist)
a = []
for i in range(len(wordlist)):
a.append(len(wordlist[i]))
from fuzzywuzzy import fuzz  # assumed import for fuzz.ratio, missing in the original cell
def calculate(word, category):
clean = []
for i in range(len(word)):
sum = 0
sum1 = 0
for j in range(len(word[i])):
sum = sum + fuzz.ratio(word[i][j], category)
clean.append(sum / a[i])
return clean
c = []
for k in cleanliness:
c.append(calculate(wordlist, k))
c = np.array(c).transpose()
s = []
for k in service:
s.append(calculate(wordlist, k))
s = np.array(s).transpose()
se = []
for i in s:
se.append(mean(i))
l = []
for k in location:
l.append(calculate(wordlist, k))
l = np.array(l).transpose()
lo = []
for i in l:
lo.append(mean(i))
v = []
for k in value:
v.append(calculate(wordlist, k))
v = np.array(v).transpose()
va = []
for i in v:
va.append(mean(i)) | code |
50224995/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | train = dt.fread(os.path.join(root_path, 'train.csv')).to_pandas().query('weight > 0').pipe(reduce_mem_usage).reset_index(drop=True)
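# dt (datatable), os, re, root_path and reduce_mem_usage are defined in the notebook's setup cells and are assumed to be in scope here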
train['action'] = (train.resp > 0).astype(int)
resp_cols = [i for i in train.columns if 'resp' in i]
features_names = [i for i in train.columns if 'feature_' in i]
features_index = list(map(lambda x: int(re.sub('feature_', '', x)), features_names))
features_tuples = sorted(list(zip(features_names, features_index)), key=lambda x: x[1])
just_features = [i[0] for i in features_tuples] | code |
50224995/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedKFold
import janestreet
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
params = {'objective': 'binary', 'metrics': ['auc']}
nfolds = 3
kfold = StratifiedKFold(n_splits=nfolds)
lgb_models = list()
import lightgbm as lgb
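# train one LightGBM model per stratified fold and keep every fold's model for test-time averaging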
for k, (train_idx, valid_idx) in enumerate(kfold.split(train.query('date>150')[just_features], train.query('date>150')['action'])):
lgb_train = lgb.Dataset(train.loc[train_idx, just_features], train.loc[train_idx, 'action'])
lgb_valid = lgb.Dataset(train.loc[valid_idx, just_features], train.loc[valid_idx, 'action'])
model = lgb.train(params, lgb_train, valid_sets=[lgb_train, lgb_valid], num_boost_round=10000, verbose_eval=50, early_stopping_rounds=10)
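    # early stopping monitors AUC on the held-out fold; training stops once it fails to improve for 10 rounds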
lgb_models.append(model)
import janestreet
env = janestreet.make_env()
iter_test = env.iter_test()
rcount = 0  # row counter; the original referenced rcount without ever defining or incrementing it
for test_df, sample_prediction_df in iter_test:
    prediction = 0
    for model in lgb_models:
        prediction += model.predict(test_df[just_features])[0]  # 'features' was undefined; just_features (built in the training cell) is assumed here
    prediction /= len(lgb_models)
    sample_prediction_df.action = int(prediction > 0.5)  # cast the boolean directly; a plain bool has no .astype
    env.predict(sample_prediction_df)
    rcount += 1
    if rcount % 1000 == 0:
        print('Processed: {} rows\n'.format(rcount))
print(f'Finished processing {rcount} rows.') | code |
50224995/cell_3 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import random
import numpy as np
import datatable as dt
import pandas as pd
import random
import re
random.seed(28)
import tqdm
import os
import gc
import logging
import optuna
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = [20, 12]
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
input_path = '/kaggle/input/'
root_path = os.path.join(input_path, 'jane-street-market-prediction') | code |
74041737/cell_9 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape | code |
74041737/cell_4 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.head() | code |
74041737/cell_23 | [
"text_plain_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape
terror_df.isnull().sum()
import seaborn as sns
x_year = terror_df['Year'].unique()
y_year = terror_df['Year'].value_counts(dropna=False).sort_index()
plt.xticks(rotation=45)
plt.xticks(rotation=45)
from wordcloud import WordCloud
from scipy import signal
cities = terror_df.state.dropna(False)
wordcloud = WordCloud(background_color='white', width=500, height=250).generate(' '.join(cities))
plt.axis('off')
terror_df['casualities'] = terror_df['Killed'] + terror_df['Wounded']  # assumed definition: the original sorts on 'casualities' without ever creating the column
terror_copy = terror_df.sort_values(by='casualities', ascending=False)[:30]
terror_copy.corr() | code |
74041737/cell_20 | [
"text_plain_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape
terror_df.isnull().sum()
import seaborn as sns
x_year = terror_df['Year'].unique()
y_year = terror_df['Year'].value_counts(dropna=False).sort_index()
plt.xticks(rotation=45)
plt.xticks(rotation=45)
from wordcloud import WordCloud
from scipy import signal
cities = terror_df.state.dropna(False)
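# build a word cloud from the state names; more frequently attacked states appear larger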
plt.subplots(figsize=(10, 10))
wordcloud = WordCloud(background_color='white', width=500, height=250).generate(' '.join(cities))
plt.axis('off')
plt.imshow(wordcloud)
plt.show() | code |
74041737/cell_2 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
74041737/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape
terror_df.isnull().sum()
terror_df.info() | code |
74041737/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape
terror_df.isnull().sum()
import seaborn as sns
x_year = terror_df['Year'].unique()
y_year = terror_df['Year'].value_counts(dropna=False).sort_index()
plt.xticks(rotation=45)
plt.xticks(rotation=45)
pd.crosstab(terror_df.Year, terror_df.Region).plot(kind='area', figsize=(15, 6))
plt.title('Terrorist activities by Region in each year')
plt.ylabel('Number of attacks')
plt.show() | code |
74041737/cell_7 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns | code |
74041737/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape
terror_df.isnull().sum()
import seaborn as sns
x_year = terror_df['Year'].unique()
y_year = terror_df['Year'].value_counts(dropna=False).sort_index()
plt.xticks(rotation=45)
plt.subplots(figsize=(15, 6))
sns.countplot('Year', data=terror_df, palette='RdYlGn_r', edgecolor=sns.color_palette('YlOrBr', 5))
plt.xticks(rotation=45)
plt.title('Count Plot of Number Of Terrorist Activities Each Year')
plt.show() | code |
74041737/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.head() | code |
74041737/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape
terror_df.isnull().sum()
terror_df['Year'].value_counts(dropna=False).sort_index() | code |
74041737/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape
terror_df.isnull().sum()
import seaborn as sns
x_year = terror_df['Year'].unique()
y_year = terror_df['Year'].value_counts(dropna=False).sort_index()
plt.figure(figsize=(15, 10))
plt.title('Attack in Years')
plt.xlabel('Attack Years')
plt.ylabel('Number of attacks each year')
plt.xticks(rotation=45)
sns.barplot(x=x_year, y=y_year, palette='rocket')
plt.show() | code |
74041737/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape
terror_df.isnull().sum()
print('Country with most attacks: ', terror_df['Country'].value_counts().idxmax())
print('City with most attacks: ', terror_df['city'].value_counts().index[1])
print('Region with the most attacks:', terror_df['Region'].value_counts().idxmax())
print('Year with the most attacks:', terror_df['Year'].value_counts().idxmax())
print('Month with the most attacks:', terror_df['Month'].value_counts().idxmax())
print('Group with the most attacks:', terror_df['Group'].value_counts().index[1])
print('Most Attack Types:', terror_df['AttackType'].value_counts().idxmax()) | code |
74041737/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape
terror_df.isnull().sum() | code |
74041737/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list()
terror_df.rename(columns={'iyear': 'Year', 'imonth': 'Month', 'iday': 'Day', 'country_txt': 'Country', 'provstate': 'state', 'region_txt': 'Region', 'attacktype1_txt': 'AttackType', 'target1': 'Target', 'nkill': 'Killed', 'nwound': 'Wounded', 'summary': 'Summary', 'gname': 'Group', 'targtype1_txt': 'Target_type', 'weaptype1_txt': 'Weapon_type', 'motive': 'Motive'}, inplace=True)
terror_df.columns
terror_df = terror_df[['Year', 'Month', 'Day', 'Country', 'state', 'Region', 'city', 'latitude', 'longitude', 'AttackType', 'Killed', 'Wounded', 'Target', 'Summary', 'Group', 'Target_type', 'Weapon_type', 'Motive']]
terror_df.shape
terror_df.isnull().sum()
terror_df.describe(include='all') | code |
74041737/cell_5 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
terror_df = pd.read_csv('/kaggle/input/gtd/globalterrorismdb_0718dist.csv', encoding='latin1')
terror_df.columns.to_list() | code |
72121245/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes | code |
72121245/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols | code |
72121245/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
mean = train['target'].mean()
std = train['target'].std()
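# 3-sigma rule: treat targets more than three standard deviations from the mean as outliers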
cut_off = std * 3
lower, upper = (mean - cut_off, mean + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape
q25, q75 = (np.percentile(train['target'], 25), np.percentile(train['target'], 75))
iqr = q75 - q25
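# Tukey's fences: values beyond 1.5 * IQR outside the quartiles are treated as outliers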
cut_off = iqr * 1.5
lower, upper = (q25 - cut_off, q75 + cut_off)
cut_off = iqr * 1.5
lower, upper = (q25 - cut_off, q75 + cut_off)
outliers = train[(train['target'] > upper) | (train['target'] < lower)]
train.drop(outliers.index.to_list(), inplace=True)
train.shape | code |
72121245/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.dtypes
cat_cols = [col for col in train.columns if train[col].dtype == 'object']
cat_cols
cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')]
cont_cols
train['target'].describe() | code |