path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value)
---|---|---|---|
16161701/cell_13 | [
"text_plain_output_1.png"
] | from collections import Counter
from sklearn.metrics import accuracy_score
import numpy as np
import numpy as np # linear algebra
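# Brute-force k-nearest-neighbours classifier: measure the Euclidean
# distance from the query point to every training row, then take a
# majority vote among the k closest targets.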
def predict(x_train, y_train, x_test, k):
distances = []
targets = []
for i in range(len(x_train)):
distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
distances.append([distance, i])
distances = sorted(distances)
for i in range(k):
index = distances[i][1]
targets.append(y_train.values[index])
return Counter(targets).most_common(1)[0][0]
def train(x_train, y_train):
return
def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
train(x_train, y_train)
for i in range(len(x_test)):
predictions.append(predict(x_train, y_train, x_test.values[i, :], k))
predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
print('accuracy score is :', accuracy * 100, '%') | code |
16161701/cell_20 | [
"text_plain_output_1.png"
] | from collections import Counter
from math import sqrt
from sklearn import neighbors
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
def predict(x_train, y_train, x_test, k):
distances = []
targets = []
for i in range(len(x_train)):
distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
distances.append([distance, i])
distances = sorted(distances)
for i in range(k):
index = distances[i][1]
targets.append(y_train.values[index])
return Counter(targets).most_common(1)[0][0]
def train(x_train, y_train):
return
def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
train(x_train, y_train)
for i in range(len(x_test)):
predictions.append(predict(x_train, y_train, x_test.values[i, :], k))
predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
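# Sweep k from 1 to 20 with sklearn's KNeighborsRegressor, recording the
# test-set RMSE for each value of k.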
rmse_values = []
for k in range(20):
k = k + 1
model = neighbors.KNeighborsRegressor(n_neighbors=k)
model.fit(x_train, y_train)
pred = model.predict(x_test)
error = sqrt(mean_squared_error(y_test, pred))
rmse_values.append(error)
import matplotlib.pyplot as plt
plt.xlabel('Value of K')
plt.ylabel('RMSE')
plt.plot(range(1, 21), rmse_values)
plt.show() | code |
16161701/cell_29 | [
"text_plain_output_1.png"
] | from collections import Counter
from math import sqrt
from sklearn import neighbors
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
def predict(x_train, y_train, x_test, k):
distances = []
targets = []
for i in range(len(x_train)):
distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
distances.append([distance, i])
distances = sorted(distances)
for i in range(k):
index = distances[i][1]
targets.append(y_train.values[index])
return Counter(targets).most_common(1)[0][0]
def train(x_train, y_train):
return
def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
train(x_train, y_train)
for i in range(len(x_test)):
predictions.append(predict(x_train, y_train, x_test.values[i, :], k))
predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
rmse_values = []
for k in range(20):
k = k + 1
model = neighbors.KNeighborsRegressor(n_neighbors=k)
model.fit(x_train, y_train)
pred = model.predict(x_test)
error = sqrt(mean_squared_error(y_test, pred))
rmse_values.append(error)
import matplotlib.pyplot as plt
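# Caution: the next assignment rebinds the name 'predict', shadowing the
# predict() function defined above.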
predict = model.predict(x_test)
import seaborn as sns
plt.scatter(x_test, predict)
plt.xlabel('Sepal Length')
plt.ylabel('Petal Length') | code |
16161701/cell_11 | [
"text_plain_output_1.png"
] | from collections import Counter
from sklearn.metrics import accuracy_score
import numpy as np
import numpy as np # linear algebra
def predict(x_train, y_train, x_test, k):
distances = []
targets = []
for i in range(len(x_train)):
distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
distances.append([distance, i])
distances = sorted(distances)
for i in range(k):
index = distances[i][1]
targets.append(y_train.values[index])
return Counter(targets).most_common(1)[0][0]
def train(x_train, y_train):
return
def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
train(x_train, y_train)
for i in range(len(x_test)):
predictions.append(predict(x_train, y_train, x_test.values[i, :], k))
predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
for i in range(len(x_test)):
print('Flower with sepal length', x_test.iloc[i], ':')
print('belongs to the species', predictions[i]) | code |
16161701/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16161701/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from collections import Counter
from math import sqrt
from sklearn import neighbors
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
import math
import operator
df = pd.read_csv('../input/Iris.csv')
df.shape
from collections import Counter
from sklearn.model_selection import train_test_split
x = df[['SepalLengthCm']]
y = df['Species']
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.33)
len(y_train)
def predict(x_train, y_train, x_test, k):
distances = []
targets = []
for i in range(len(x_train)):
distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
distances.append([distance, i])
distances = sorted(distances)
for i in range(k):
index = distances[i][1]
targets.append(y_train.values[index])
return Counter(targets).most_common(1)[0][0]
def train(x_train, y_train):
return
def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
train(x_train, y_train)
for i in range(len(x_test)):
predictions.append(predict(x_train, y_train, x_test.values[i, :], k))
predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
from sklearn.model_selection import train_test_split
x = df[['SepalLengthCm']]
y = df['PetalLengthCm']
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.33)
len(y_train)
rmse_values = []
for k in range(20):
k = k + 1
model = neighbors.KNeighborsRegressor(n_neighbors=k)
model.fit(x_train, y_train)
pred = model.predict(x_test)
error = sqrt(mean_squared_error(y_test, pred))
rmse_values.append(error)
import matplotlib.pyplot as plt
predict = model.predict(x_test)
import seaborn as sns
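# Plot RMSE against 1/k; larger values of 1/k correspond to smaller
# neighbourhoods (more flexible fits).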
K = []
for x in range(1, 21):
j = 1 / x
K.append(j)
plt.plot(K, rmse_values)
plt.xlabel('1/K')
plt.ylabel('RMSE Values') | code |
16161701/cell_28 | [
"image_output_1.png"
] | from collections import Counter
from math import sqrt
from sklearn import neighbors
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
import numpy as np
import numpy as np # linear algebra
def predict(x_train, y_train, x_test, k):
distances = []
targets = []
for i in range(len(x_train)):
distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
distances.append([distance, i])
distances = sorted(distances)
for i in range(k):
index = distances[i][1]
targets.append(y_train.values[index])
return Counter(targets).most_common(1)[0][0]
def train(x_train, y_train):
return
def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
train(x_train, y_train)
for i in range(len(x_test)):
predictions.append(predict(x_train, y_train, x_test.values[i, :], k))
predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
rmse_values = []
for k in range(20):
k = k + 1
model = neighbors.KNeighborsRegressor(n_neighbors=k)
model.fit(x_train, y_train)
pred = model.predict(x_test)
error = sqrt(mean_squared_error(y_test, pred))
rmse_values.append(error)
predict = model.predict(x_test)
for i in range(len(predict)):
print('For sepal length:', x_test.values[i])
print('The corresponding petal length in centimeters is:', predict[i]) | code |
16161701/cell_15 | [
"text_plain_output_1.png"
] | from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
import math
import operator
df = pd.read_csv('../input/Iris.csv')
df.shape
from collections import Counter
from sklearn.model_selection import train_test_split
x = df[['SepalLengthCm']]
y = df['Species']
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.33)
len(y_train)
def predict(x_train, y_train, x_test, k):
distances = []
targets = []
for i in range(len(x_train)):
distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
distances.append([distance, i])
distances = sorted(distances)
for i in range(k):
index = distances[i][1]
targets.append(y_train.values[index])
return Counter(targets).most_common(1)[0][0]
from sklearn.model_selection import train_test_split
x = df[['SepalLengthCm']]
y = df['PetalLengthCm']
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.33)
len(y_train) | code |
16161701/cell_16 | [
"text_plain_output_1.png"
] | from collections import Counter
from math import sqrt
from sklearn import neighbors
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
import numpy as np
import numpy as np # linear algebra
def predict(x_train, y_train, x_test, k):
distances = []
targets = []
for i in range(len(x_train)):
distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
distances.append([distance, i])
distances = sorted(distances)
for i in range(k):
index = distances[i][1]
targets.append(y_train.values[index])
return Counter(targets).most_common(1)[0][0]
def train(x_train, y_train):
return
def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
train(x_train, y_train)
for i in range(len(x_test)):
predictions.append(predict(x_train, y_train, x_test.values[i, :], k))
predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
rmse_values = []
for k in range(20):
k = k + 1
model = neighbors.KNeighborsRegressor(n_neighbors=k)
model.fit(x_train, y_train)
pred = model.predict(x_test)
error = sqrt(mean_squared_error(y_test, pred))
rmse_values.append(error)
print('RMSE value for k =', k, 'is:', error) | code |
16161701/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
import math
import operator
df = pd.read_csv('../input/Iris.csv')
print(df.head())
df.shape
from collections import Counter | code |
16161701/cell_17 | [
"text_plain_output_1.png"
] | from collections import Counter
from math import sqrt
from sklearn import neighbors
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
import numpy as np
import numpy as np # linear algebra
def predict(x_train, y_train, x_test, k):
distances = []
targets = []
for i in range(len(x_train)):
distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
distances.append([distance, i])
distances = sorted(distances)
for i in range(k):
index = distances[i][1]
targets.append(y_train.values[index])
return Counter(targets).most_common(1)[0][0]
def train(x_train, y_train):
return
def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
train(x_train, y_train)
for i in range(len(x_test)):
predictions.append(predict(x_train, y_train, x_test.values[i, :], k))
predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
rmse_values = []
for k in range(20):
k = k + 1
model = neighbors.KNeighborsRegressor(n_neighbors=k)
model.fit(x_train, y_train)
pred = model.predict(x_test)
error = sqrt(mean_squared_error(y_test, pred))
rmse_values.append(error)
min(rmse_values) | code |
16161701/cell_24 | [
"text_plain_output_1.png"
] | from collections import Counter
from math import sqrt
from sklearn import neighbors
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
import numpy as np
import numpy as np # linear algebra
def predict(x_train, y_train, x_test, k):
distances = []
targets = []
for i in range(len(x_train)):
distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
distances.append([distance, i])
distances = sorted(distances)
for i in range(k):
index = distances[i][1]
targets.append(y_train.values[index])
return Counter(targets).most_common(1)[0][0]
def train(x_train, y_train):
return
def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
train(x_train, y_train)
for i in range(len(x_test)):
predictions.append(predict(x_train, y_train, x_test.values[i, :], k))
predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
rmse_values = []
for k in range(20):
k = k + 1
model = neighbors.KNeighborsRegressor(n_neighbors=k)
model.fit(x_train, y_train)
pred = model.predict(x_test)
error = sqrt(mean_squared_error(y_test, pred))
rmse_values.append(error)
predict = model.predict(x_test)
len(predict) | code |
16161701/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
import math
import operator
df = pd.read_csv('../input/Iris.csv')
df.shape
from collections import Counter
from sklearn.model_selection import train_test_split
x = df[['SepalLengthCm']]
y = df['Species']
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.33)
len(y_train) | code |
74058414/cell_21 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
green_bag = np.array(['Orange'] * 9 + ['Apple'])
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
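# Surprise (self-information) of an outcome: S(p) = log2(1/p) = -log2(p).
# The less likely the outcome, the larger the surprise.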
def surprise(probability):
return np.log2(1 / probability)
P_h = 0.9
P_t = 0.1
S_h = surprise(P_h)
print(f'Surprise of heads is: {round(S_h, 2)}')
S_t = surprise(P_t)
print(f'Surprise of tails is: {round(S_t, 2)}') | code |
74058414/cell_13 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
green_bag = np.array(['Orange'] * 9 + ['Apple'])
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
green_ctr = Counter(green_bag)
P_apple_green = green_ctr['Apple'] / len(green_bag)
print(f'P(Apple from Green Bag) : {round(P_apple_green, 2)}')
P_orange_green = green_ctr['Orange'] / len(green_bag)
print(f'P(Orange from Green Bag): {round(P_orange_green, 2)}') | code |
74058414/cell_25 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
green_bag = np.array(['Orange'] * 9 + ['Apple'])
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
def surprise(probability):
return np.log2(1 / probability)
P_h = 0.9
P_t = 0.1
S_h = surprise(P_h)
S_t = surprise(P_t)
print(f'Surprise by using the definition: {round(surprise(P_h * P_h * P_t), 2)}')
print(f'Surprise by adding up the individual values: {round(S_h + S_h + S_t, 2)} ') | code |
74058414/cell_40 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
green_bag = np.array(['Orange'] * 9 + ['Apple'])
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
def surprise(probability):
return np.log2(1 / probability)
table = pd.DataFrame({'Heads': [0.15, 0.9], 'Tails': [3.32, 0.1]}, index=['S(x)', 'P(x)'])
table
Entropy = table.loc['P(x)', 'Heads'] * table.loc['S(x)', 'Heads'] + table.loc['P(x)', 'Tails'] * table.loc['S(x)', 'Tails']
Entropy
P_e = np.array([0.9, 0.1])
S_e = np.array([0.15, 3.32])
Entropy = np.dot(P_e, S_e)
Entropy
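# Shannon entropy of a label array: H = -sum(p * log2(p)) over the
# empirical class probabilities.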
def entropy(arr):
ent = 0
probs = dict()
ctr = Counter(arr)
for e in ctr:
probs[f'P_{e}'] = ctr[e] / len(arr)
for p in probs:
ent += -1 * probs[p] * np.log2(probs[p])
return round(ent, 2)
def show_entropy(bag_type=None, bag_type_str=None):
    ctr = Counter(bag_type)
    print(f'{bag_type_str} contains: {ctr}')
    print(f'Entropy of {bag_type_str}: {entropy(bag_type)}')
show_entropy(bag_type=white_bag, bag_type_str='White Bag') | code |
74058414/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd
table = pd.DataFrame({'Heads': [0.15, 0.9], 'Tails': [3.32, 0.1]}, index=['S(x)', 'P(x)'])
table
Entropy = table.loc['P(x)', 'Heads'] * table.loc['S(x)', 'Heads'] + table.loc['P(x)', 'Tails'] * table.loc['S(x)', 'Tails']
Entropy | code |
74058414/cell_11 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
green_bag = np.array(['Orange'] * 9 + ['Apple'])
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
red_ctr = Counter(red_bag)
P_apple_red = red_ctr['Apple'] / len(red_bag)
print(f'P(Apple from Red Bag) : {round(P_apple_red, 2)}')
P_orange_red = red_ctr['Orange'] / len(red_bag)
print(f'P(Orange from Red Bag): {round(P_orange_red, 2)}') | code |
74058414/cell_19 | [
"image_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
green_bag = np.array(['Orange'] * 9 + ['Apple'])
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
def surprise(probability):
return np.log2(1 / probability)
plot_func(surprise, tx='Probability', ty='Surprise', title='Surprise vs Probability', min=0, max=1) | code |
74058414/cell_28 | [
"text_html_output_1.png"
] | import pandas as pd
table = pd.DataFrame({'Heads': [0.15, 0.9], 'Tails': [3.32, 0.1]}, index=['S(x)', 'P(x)'])
table | code |
74058414/cell_15 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
green_bag = np.array(['Orange'] * 9 + ['Apple'])
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
white_ctr = Counter(white_bag)
P_apple_white = white_ctr['Apple'] / len(white_bag)
print(f'P(Apple from White Bag) : {round(P_apple_white, 2)}')
P_orange_white = white_ctr['Orange'] / len(white_bag)
print(f'P(Orange from White Bag): {round(P_orange_white, 2)}') | code |
74058414/cell_38 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
green_bag = np.array(['Orange'] * 9 + ['Apple'])
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
def surprise(probability):
return np.log2(1 / probability)
table = pd.DataFrame({'Heads': [0.15, 0.9], 'Tails': [3.32, 0.1]}, index=['S(x)', 'P(x)'])
table
Entropy = table.loc['P(x)', 'Heads'] * table.loc['S(x)', 'Heads'] + table.loc['P(x)', 'Tails'] * table.loc['S(x)', 'Tails']
Entropy
P_e = np.array([0.9, 0.1])
S_e = np.array([0.15, 3.32])
Entropy = np.dot(P_e, S_e)
Entropy
def entropy(arr):
ent = 0
probs = dict()
ctr = Counter(arr)
for e in ctr:
probs[f'P_{e}'] = ctr[e] / len(arr)
for p in probs:
ent += -1 * probs[p] * np.log2(probs[p])
return round(ent, 2)
def show_entropy(bag_type=None, bag_type_str=None):
    ctr = Counter(bag_type)
    print(f'{bag_type_str} contains: {ctr}')
    print(f'Entropy of {bag_type_str}: {entropy(bag_type)}')
show_entropy(bag_type=green_bag, bag_type_str='Green Bag') | code |
74058414/cell_31 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
green_bag = np.array(['Orange'] * 9 + ['Apple'])
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
def surprise(probability):
return np.log2(1 / probability)
table = pd.DataFrame({'Heads': [0.15, 0.9], 'Tails': [3.32, 0.1]}, index=['S(x)', 'P(x)'])
table
Entropy = table.loc['P(x)', 'Heads'] * table.loc['S(x)', 'Heads'] + table.loc['P(x)', 'Tails'] * table.loc['S(x)', 'Tails']
Entropy
P_e = np.array([0.9, 0.1])
S_e = np.array([0.15, 3.32])
Entropy = np.dot(P_e, S_e)
Entropy | code |
74058414/cell_10 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
print(f'Red Bag contains: {Counter(red_bag)}')
green_bag = np.array(['Orange'] * 9 + ['Apple'])
print(f'Green Bag contains: {Counter(green_bag)}')
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
print(f'White Bag contains: {Counter(white_bag)}') | code |
74058414/cell_36 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Code to plot a function. Borrowed from fastai library.
def plot_func(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = np.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
red_bag = np.array(['Apple'] * 4 + ['Orange'])
green_bag = np.array(['Orange'] * 9 + ['Apple'])
white_bag = np.array(['Orange'] * 5 + ['Apple'] * 5)
def surprise(probability):
return np.log2(1 / probability)
table = pd.DataFrame({'Heads': [0.15, 0.9], 'Tails': [3.32, 0.1]}, index=['S(x)', 'P(x)'])
table
Entropy = table.loc['P(x)', 'Heads'] * table.loc['S(x)', 'Heads'] + table.loc['P(x)', 'Tails'] * table.loc['S(x)', 'Tails']
Entropy
P_e = np.array([0.9, 0.1])
S_e = np.array([0.15, 3.32])
Entropy = np.dot(P_e, S_e)
Entropy
def entropy(arr):
ent = 0
probs = dict()
ctr = Counter(arr)
for e in ctr:
probs[f'P_{e}'] = ctr[e] / len(arr)
for p in probs:
ent += -1 * probs[p] * np.log2(probs[p])
return round(ent, 2)
def show_entropy(bag_type=None, bag_type_str=None):
    ctr = Counter(bag_type)
    print(f'{bag_type_str} contains: {ctr}')
    print(f'Entropy of {bag_type_str}: {entropy(bag_type)}')
show_entropy(bag_type=red_bag, bag_type_str='Red Bag') | code |
72081821/cell_25 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y') | code |
72081821/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
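# Strip everything up to and including '=> ', keeping only the Chinese
# column name from each mapping line.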
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin
test_1 = df
test_1['a'] = 100
test_1.head() | code |
72081821/cell_44 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin
midage = df[(df.Age > 10) & (df.Age < 50)]
midage.iloc[99][['Pclass', 'Sex']]
midage.iloc[[99], [2, 4]] | code |
72081821/cell_6 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
os.getcwd() | code |
72081821/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns)) | code |
72081821/cell_39 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin
df[df.Age < 10].head() | code |
72081821/cell_48 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin
midage = df[(df.Age > 10) & (df.Age < 50)]
midage.iloc[99][['Pclass', 'Sex']]
midage.iloc[[99], [2, 4]]
midage.loc[[99], ['Pclass', 'Sex']]
midage.loc[[100, 105, 108], ['Pclass', 'Name', 'Sex']] | code |
72081821/cell_41 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin
midage = df[(df.Age > 10) & (df.Age < 50)]
midage.head() | code |
72081821/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull() | code |
72081821/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72081821/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
df2.head() | code |
72081821/cell_45 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin
midage = df[(df.Age > 10) & (df.Age < 50)]
midage.iloc[99][['Pclass', 'Sex']]
midage.iloc[[99], [2, 4]]
midage.loc[[99], ['Pclass', 'Sex']] | code |
72081821/cell_32 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin | code |
72081821/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.head(10) | code |
72081821/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.tail(15) | code |
72081821/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin
test_1 = df
test_1['a'] = 100
del test_1['a']
test_1.head() | code |
72081821/cell_43 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin
midage = df[(df.Age > 10) & (df.Age < 50)]
midage.iloc[99][['Pclass', 'Sex']] | code |
72081821/cell_31 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df['Cabin'] | code |
72081821/cell_46 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin
midage = df[(df.Age > 10) & (df.Age < 50)]
midage.iloc[99][['Pclass', 'Sex']]
midage.iloc[[99], [2, 4]]
midage.loc[[99], ['Pclass', 'Sex']]
midage | code |
72081821/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b))) | code |
72081821/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.info() | code |
72081821/cell_27 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
df_c.head() | code |
72081821/cell_37 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/train.csv')
chunker = pd.read_csv('../input/titanic/train.csv', chunksize=1000)
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.isnull()
df.to_csv('train_chinese.csv')
a = [1, 2, 3, 4]
b = ['a', 'b', 'c', 'd']
pd.Series(dict(zip(a, b)))
a = ['x', 'y', 'z']
b = [['a', 'b', 'c'], [1, 2, 3], ['o', 'p', 'q']]
pd.DataFrame(dict(zip(a, b))).set_index('y')
df = pd.read_csv('../input/titanic/train.csv')
df_c = pd.read_csv('train_chinese.csv')
(list(df.columns), list(df_c.columns))
df.Cabin
test_1 = df
test_1['a'] = 100
del test_1['a']
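# drop() returns a new DataFrame; without reassignment, test_1 itself is
# left unchanged.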
test_1.drop(['PassengerId', 'Name', 'Age', 'Ticket'], axis=1)
test_1.head() | code |
72081821/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df = pd.read_csv('../input/titanic/train.csv')
column_text = 'PassengerId => 乘客ID\n Survived => 是否幸存\n Pclass => 乘客等级(1/2/3等舱位)\n Name => 乘客姓名\n Sex => 性别\n Age => 年龄\n SibSp => 堂兄弟/妹个数\n Parch => 父母与小孩个数\n Ticket => 船票信息\n Fare => 票价\n Cabin => 客舱\n Embarked => 登船港口'
column_list = [re.sub('.*=> ', '', name) for name in column_text.split('\n')]
df.columns = column_list
df.set_index('乘客ID', inplace=True)
df.head() | code |
72081821/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/titanic/train.csv')
df.head() | code |
2024516/cell_6 | [
"image_output_1.png"
] | from random import sample
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
airvisit = pd.read_csv('../input/air_visit_data.csv')
ids = airvisit.air_store_id.unique()
mindate = airvisit.visit_date.min()
maxdate = airvisit.visit_date.max()
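# Build a complete (date, store) grid so that days with no recorded
# visits show up as NaN after the left join below.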
dates = pd.date_range(mindate, maxdate, freq='D')
skeleton = pd.DataFrame({})
skeleton['visit_date'] = np.ravel([pd.to_datetime(x) for x in dates] * len(ids))
skeleton['air_store_id'] = np.ravel([[x] * len(dates) for x in ids])
groupcols = ['visit_date', 'air_store_id']
subcols = ['visit_date', 'air_store_id', 'visitors']
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date'])
series = pd.merge(skeleton, airvisit[subcols], on=groupcols, how='left')
from random import sample
samp = sample(list(ids), 10)
plt.figure(figsize=(18, 3.5 * len(samp)))
for idx, i in enumerate(samp):
plt.subplot(len(samp), 1, idx + 1)
sub = series[series.air_store_id == i].reset_index()
plt.plot(sub.visitors)
plt.title(i) | code |
2024516/cell_1 | [
"text_plain_output_1.png"
] | from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt | code |
2024516/cell_8 | [
"image_output_1.png"
] | from random import sample
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
airvisit = pd.read_csv('../input/air_visit_data.csv')
ids = airvisit.air_store_id.unique()
mindate = airvisit.visit_date.min()
maxdate = airvisit.visit_date.max()
dates = pd.date_range(mindate, maxdate, freq='D')
skeleton = pd.DataFrame({})
skeleton['visit_date'] = np.ravel([pd.to_datetime(x) for x in dates] * len(ids))
skeleton['air_store_id'] = np.ravel([[x] * len(dates) for x in ids])
groupcols = ['visit_date', 'air_store_id']
subcols = ['visit_date', 'air_store_id', 'visitors']
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date'])
series = pd.merge(skeleton, airvisit[subcols], on=groupcols, how='left')
from random import sample
samp = sample(list(ids), 10)
for idx, i in enumerate(samp):
sub = series[series.air_store_id == i].reset_index()
plt.figure(figsize=(16, 4))
exs = ['air_ca1315af9e073bd1', 'air_7d65049f9d275c0d', 'air_9c6787aa03a45586']
for ex in exs:
sub = series[series.air_store_id == ex].reset_index()
plt.plot(sub.visitors, label=ex)
plt.legend() | code |
32062338/cell_4 | [
"text_plain_output_1.png"
] | from copy import deepcopy
from tqdm import tqdm
import json
import os
import pandas as pd
import os
import json
from copy import deepcopy
from tqdm import tqdm
import pandas as pd
def format_name(author):
middle_name = ' '.join(author['middle'])
if author['middle']:
return ' '.join([author['first'], middle_name, author['last']])
else:
return ' '.join([author['first'], author['last']])
def format_affiliation(affiliation):
text = []
location = affiliation.get('location')
if location:
text.extend(list(affiliation['location'].values()))
institution = affiliation.get('institution')
if institution:
text = [institution] + text
return ', '.join(text)
def format_authors(authors, with_affiliation=False):
name_ls = []
for author in authors:
name = format_name(author)
if with_affiliation:
affiliation = format_affiliation(author['affiliation'])
if affiliation:
name_ls.append(f'{name} ({affiliation})')
else:
name_ls.append(name)
else:
name_ls.append(name)
return ', '.join(name_ls)
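# Concatenate the body paragraphs of a paper, grouped under their
# section headings.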
def format_body(body_text):
texts = [(di['section'], di['text']) for di in body_text]
texts_di = {di['section']: '' for di in body_text}
for section, text in texts:
texts_di[section] += text
body = ''
for section, text in texts_di.items():
body += section
body += '\n\n'
body += text
body += '\n\n'
return body
def format_bib(bibs):
if type(bibs) == dict:
bibs = list(bibs.values())
bibs = deepcopy(bibs)
formatted = []
for bib in bibs:
bib['authors'] = format_authors(bib['authors'], with_affiliation=False)
formatted_ls = [str(bib[k]) for k in ['title', 'authors', 'venue', 'year']]
formatted.append(', '.join(formatted_ls))
return '; '.join(formatted)
def load_files(dirname):
filenames = os.listdir(dirname)
raw_files = []
for filename in tqdm(filenames):
filename = dirname + filename
file = json.load(open(filename, 'rb'))
raw_files.append(file)
return raw_files
def generate_clean_df(all_files):
cleaned_files = []
for file in tqdm(all_files):
features = [file['paper_id'], file['metadata']['title'], format_authors(file['metadata']['authors']), format_authors(file['metadata']['authors'], with_affiliation=True), format_body(file['abstract']) if 'abstract' in file else '', format_body(file['body_text']), format_bib(file['bib_entries'])]
cleaned_files.append(features)
col_names = ['paper_id', 'title', 'authors', 'affiliations', 'abstract', 'text', 'bibliography']
clean_df = pd.DataFrame(cleaned_files, columns=col_names)
return clean_df
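# Parse each CORD-19 JSON subset into a tidy DataFrame and tag every row
# with its source category.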
json_dirs = {'biorxiv_pdf': '/kaggle/input/CORD-19-research-challenge/biorxiv_medrxiv/biorxiv_medrxiv/pdf_json/', 'comm_pdf': '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pdf_json/', 'comm_pmc': '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pmc_json/', 'noncomm_pdf': '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pdf_json/', 'noncomm_pmc': '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pmc_json/', 'custom_pdf': '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pdf_json/', 'custom_pmc': '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pmc_json/'}
json_dfs = []
for category, json_dir in json_dirs.items():
json_files = load_files(json_dir)
json_df = generate_clean_df(json_files)
json_df['category'] = category
json_dfs.append(json_df)
df_all = pd.concat(json_dfs)
print(df_all.shape) | code |
32062338/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from copy import deepcopy
from tqdm import tqdm
import json
import os
import pandas as pd
import pandas as pd
import os
import json
from copy import deepcopy
from tqdm import tqdm
import pandas as pd
def format_name(author):
middle_name = ' '.join(author['middle'])
if author['middle']:
return ' '.join([author['first'], middle_name, author['last']])
else:
return ' '.join([author['first'], author['last']])
def format_affiliation(affiliation):
text = []
location = affiliation.get('location')
if location:
text.extend(list(affiliation['location'].values()))
institution = affiliation.get('institution')
if institution:
text = [institution] + text
return ', '.join(text)
def format_authors(authors, with_affiliation=False):
name_ls = []
for author in authors:
name = format_name(author)
if with_affiliation:
affiliation = format_affiliation(author['affiliation'])
if affiliation:
name_ls.append(f'{name} ({affiliation})')
else:
name_ls.append(name)
else:
name_ls.append(name)
return ', '.join(name_ls)
def format_body(body_text):
texts = [(di['section'], di['text']) for di in body_text]
texts_di = {di['section']: '' for di in body_text}
for section, text in texts:
texts_di[section] += text
body = ''
for section, text in texts_di.items():
body += section
body += '\n\n'
body += text
body += '\n\n'
return body
def format_bib(bibs):
if type(bibs) == dict:
bibs = list(bibs.values())
bibs = deepcopy(bibs)
formatted = []
for bib in bibs:
bib['authors'] = format_authors(bib['authors'], with_affiliation=False)
formatted_ls = [str(bib[k]) for k in ['title', 'authors', 'venue', 'year']]
formatted.append(', '.join(formatted_ls))
return '; '.join(formatted)
def load_files(dirname):
filenames = os.listdir(dirname)
raw_files = []
for filename in tqdm(filenames):
filename = dirname + filename
file = json.load(open(filename, 'rb'))
raw_files.append(file)
return raw_files
def generate_clean_df(all_files):
cleaned_files = []
for file in tqdm(all_files):
features = [file['paper_id'], file['metadata']['title'], format_authors(file['metadata']['authors']), format_authors(file['metadata']['authors'], with_affiliation=True), format_body(file['abstract']) if 'abstract' in file else '', format_body(file['body_text']), format_bib(file['bib_entries'])]
cleaned_files.append(features)
col_names = ['paper_id', 'title', 'authors', 'affiliations', 'abstract', 'text', 'bibliography']
clean_df = pd.DataFrame(cleaned_files, columns=col_names)
return clean_df
json_dirs = {'biorxiv_pdf': '/kaggle/input/CORD-19-research-challenge/biorxiv_medrxiv/biorxiv_medrxiv/pdf_json/', 'comm_pdf': '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pdf_json/', 'comm_pmc': '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pmc_json/', 'noncomm_pdf': '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pdf_json/', 'noncomm_pmc': '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pmc_json/', 'custom_pdf': '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pdf_json/', 'custom_pmc': '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pmc_json/'}
json_dfs = []
for category, json_dir in json_dirs.items():
json_files = load_files(json_dir)
json_df = generate_clean_df(json_files)
json_df['category'] = category
json_dfs.append(json_df)
df_all = pd.concat(json_dfs)
import pandas as pd
df_metadata = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
df_metadata.head() | code |
32062338/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |  | code |
32062338/cell_3 | [
"text_html_output_1.png"
] | from copy import deepcopy
from tqdm import tqdm
import json
import os
import pandas as pd
import os
import json
from copy import deepcopy
from tqdm import tqdm
import pandas as pd
def format_name(author):
middle_name = ' '.join(author['middle'])
if author['middle']:
return ' '.join([author['first'], middle_name, author['last']])
else:
return ' '.join([author['first'], author['last']])
def format_affiliation(affiliation):
text = []
location = affiliation.get('location')
if location:
text.extend(list(affiliation['location'].values()))
institution = affiliation.get('institution')
if institution:
text = [institution] + text
return ', '.join(text)
def format_authors(authors, with_affiliation=False):
name_ls = []
for author in authors:
name = format_name(author)
if with_affiliation:
affiliation = format_affiliation(author['affiliation'])
if affiliation:
name_ls.append(f'{name} ({affiliation})')
else:
name_ls.append(name)
else:
name_ls.append(name)
return ', '.join(name_ls)
def format_body(body_text):
texts = [(di['section'], di['text']) for di in body_text]
texts_di = {di['section']: '' for di in body_text}
for section, text in texts:
texts_di[section] += text
body = ''
for section, text in texts_di.items():
body += section
body += '\n\n'
body += text
body += '\n\n'
return body
def format_bib(bibs):
if type(bibs) == dict:
bibs = list(bibs.values())
bibs = deepcopy(bibs)
formatted = []
for bib in bibs:
bib['authors'] = format_authors(bib['authors'], with_affiliation=False)
formatted_ls = [str(bib[k]) for k in ['title', 'authors', 'venue', 'year']]
formatted.append(', '.join(formatted_ls))
return '; '.join(formatted)
def load_files(dirname):
filenames = os.listdir(dirname)
raw_files = []
for filename in tqdm(filenames):
filename = dirname + filename
file = json.load(open(filename, 'rb'))
raw_files.append(file)
return raw_files
def generate_clean_df(all_files):
cleaned_files = []
for file in tqdm(all_files):
features = [file['paper_id'], file['metadata']['title'], format_authors(file['metadata']['authors']), format_authors(file['metadata']['authors'], with_affiliation=True), format_body(file['abstract']) if 'abstract' in file else '', format_body(file['body_text']), format_bib(file['bib_entries'])]
cleaned_files.append(features)
col_names = ['paper_id', 'title', 'authors', 'affiliations', 'abstract', 'text', 'bibliography']
clean_df = pd.DataFrame(cleaned_files, columns=col_names)
return clean_df
json_dirs = {'biorxiv_pdf': '/kaggle/input/CORD-19-research-challenge/biorxiv_medrxiv/biorxiv_medrxiv/pdf_json/', 'comm_pdf': '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pdf_json/', 'comm_pmc': '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pmc_json/', 'noncomm_pdf': '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pdf_json/', 'noncomm_pmc': '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pmc_json/', 'custom_pdf': '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pdf_json/', 'custom_pmc': '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pmc_json/'}
json_dfs = []
for category, json_dir in json_dirs.items():
json_files = load_files(json_dir)
json_df = generate_clean_df(json_files)
json_df['category'] = category
json_dfs.append(json_df) | code |
32062338/cell_14 | [
"text_plain_output_1.png"
] | from copy import deepcopy
from datetime import datetime
from tqdm import tqdm
import json
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pandas as pd
import os
import json
from copy import deepcopy
from tqdm import tqdm
import pandas as pd
def format_name(author):
middle_name = ' '.join(author['middle'])
if author['middle']:
return ' '.join([author['first'], middle_name, author['last']])
else:
return ' '.join([author['first'], author['last']])
def format_affiliation(affiliation):
text = []
location = affiliation.get('location')
if location:
text.extend(list(affiliation['location'].values()))
institution = affiliation.get('institution')
if institution:
text = [institution] + text
return ', '.join(text)
def format_authors(authors, with_affiliation=False):
name_ls = []
for author in authors:
name = format_name(author)
if with_affiliation:
affiliation = format_affiliation(author['affiliation'])
if affiliation:
name_ls.append(f'{name} ({affiliation})')
else:
name_ls.append(name)
else:
name_ls.append(name)
return ', '.join(name_ls)
def format_body(body_text):
texts = [(di['section'], di['text']) for di in body_text]
texts_di = {di['section']: '' for di in body_text}
for section, text in texts:
texts_di[section] += text
body = ''
for section, text in texts_di.items():
body += section
body += '\n\n'
body += text
body += '\n\n'
return body
def format_bib(bibs):
if type(bibs) == dict:
bibs = list(bibs.values())
bibs = deepcopy(bibs)
formatted = []
for bib in bibs:
bib['authors'] = format_authors(bib['authors'], with_affiliation=False)
formatted_ls = [str(bib[k]) for k in ['title', 'authors', 'venue', 'year']]
formatted.append(', '.join(formatted_ls))
return '; '.join(formatted)
def load_files(dirname):
filenames = os.listdir(dirname)
raw_files = []
for filename in tqdm(filenames):
filename = dirname + filename
file = json.load(open(filename, 'rb'))
raw_files.append(file)
return raw_files
def generate_clean_df(all_files):
cleaned_files = []
for file in tqdm(all_files):
features = [file['paper_id'], file['metadata']['title'], format_authors(file['metadata']['authors']), format_authors(file['metadata']['authors'], with_affiliation=True), format_body(file['abstract']) if 'abstract' in file else '', format_body(file['body_text']), format_bib(file['bib_entries'])]
cleaned_files.append(features)
col_names = ['paper_id', 'title', 'authors', 'affiliations', 'abstract', 'text', 'bibliography']
clean_df = pd.DataFrame(cleaned_files, columns=col_names)
return clean_df
json_dirs = {'biorxiv_pdf': '/kaggle/input/CORD-19-research-challenge/biorxiv_medrxiv/biorxiv_medrxiv/pdf_json/', 'comm_pdf': '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pdf_json/', 'comm_pmc': '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pmc_json/', 'noncomm_pdf': '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pdf_json/', 'noncomm_pmc': '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pmc_json/', 'custom_pdf': '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pdf_json/', 'custom_pmc': '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pmc_json/'}
json_dfs = []
for category, json_dir in json_dirs.items():
json_files = load_files(json_dir)
json_df = generate_clean_df(json_files)
json_df['category'] = category
json_dfs.append(json_df)
df_all = pd.concat(json_dfs)
import pandas as pd
df_metadata = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
df_example = df_metadata.iloc[:20]
dates = [datetime.strptime(d, '%Y-%m-%d') for d in list(df_example['publish_time'])]
names = df_example['cord_uid']
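# Alternate stem heights above and below the axis so the labels do not overlap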
levels = np.tile([-5, 5, -3, 3, -1, 1], int(np.ceil(len(dates) / 6)))[:len(dates)]
fig, ax = plt.subplots(figsize=(8.8, 4), constrained_layout=True)
ax.set(title='CORD-19 publication dates')
markerline, stemline, baseline = ax.stem(dates, levels, linefmt='C3-', basefmt='k-', use_line_collection=True)
plt.setp(markerline, mec='k', mfc='w', zorder=3)
markerline.set_ydata(np.zeros(len(dates)))
vert = np.array(['top', 'bottom'])[(levels > 0).astype(int)]
for d, l, r, va in zip(dates, levels, names, vert):
ax.annotate(r, xy=(d, l), xytext=(-3, np.sign(l) * 3), textcoords='offset points', va=va, ha='right')
ax.get_xaxis().set_major_locator(mdates.MonthLocator(interval=3))
ax.get_xaxis().set_major_formatter(mdates.DateFormatter('%b %Y'))
plt.setp(ax.get_xticklabels(), rotation=30, ha='right')
ax.get_yaxis().set_visible(False)
for spine in ['left', 'top', 'right']:
ax.spines[spine].set_visible(False)
ax.margins(y=0.1)
plt.show() | code |
32062338/cell_5 | [
"text_html_output_1.png"
] | from copy import deepcopy
from tqdm import tqdm
import json
import os
import pandas as pd
import os
import json
from copy import deepcopy
from tqdm import tqdm
import pandas as pd
def format_name(author):
middle_name = ' '.join(author['middle'])
if author['middle']:
return ' '.join([author['first'], middle_name, author['last']])
else:
return ' '.join([author['first'], author['last']])
def format_affiliation(affiliation):
text = []
location = affiliation.get('location')
if location:
text.extend(list(affiliation['location'].values()))
institution = affiliation.get('institution')
if institution:
text = [institution] + text
return ', '.join(text)
def format_authors(authors, with_affiliation=False):
name_ls = []
for author in authors:
name = format_name(author)
if with_affiliation:
affiliation = format_affiliation(author['affiliation'])
if affiliation:
name_ls.append(f'{name} ({affiliation})')
else:
name_ls.append(name)
else:
name_ls.append(name)
return ', '.join(name_ls)
def format_body(body_text):
texts = [(di['section'], di['text']) for di in body_text]
texts_di = {di['section']: '' for di in body_text}
for section, text in texts:
texts_di[section] += text
body = ''
for section, text in texts_di.items():
body += section
body += '\n\n'
body += text
body += '\n\n'
return body
def format_bib(bibs):
if type(bibs) == dict:
bibs = list(bibs.values())
bibs = deepcopy(bibs)
formatted = []
for bib in bibs:
bib['authors'] = format_authors(bib['authors'], with_affiliation=False)
formatted_ls = [str(bib[k]) for k in ['title', 'authors', 'venue', 'year']]
formatted.append(', '.join(formatted_ls))
return '; '.join(formatted)
def load_files(dirname):
filenames = os.listdir(dirname)
raw_files = []
for filename in tqdm(filenames):
filename = dirname + filename
file = json.load(open(filename, 'rb'))
raw_files.append(file)
return raw_files
def generate_clean_df(all_files):
cleaned_files = []
for file in tqdm(all_files):
features = [file['paper_id'], file['metadata']['title'], format_authors(file['metadata']['authors']), format_authors(file['metadata']['authors'], with_affiliation=True), format_body(file['abstract']) if 'abstract' in file else '', format_body(file['body_text']), format_bib(file['bib_entries'])]
cleaned_files.append(features)
col_names = ['paper_id', 'title', 'authors', 'affiliations', 'abstract', 'text', 'bibliography']
clean_df = pd.DataFrame(cleaned_files, columns=col_names)
return clean_df
json_dirs = {'biorxiv_pdf': '/kaggle/input/CORD-19-research-challenge/biorxiv_medrxiv/biorxiv_medrxiv/pdf_json/', 'comm_pdf': '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pdf_json/', 'comm_pmc': '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pmc_json/', 'noncomm_pdf': '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pdf_json/', 'noncomm_pmc': '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pmc_json/', 'custom_pdf': '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pdf_json/', 'custom_pmc': '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pmc_json/'}
json_dfs = []
for category, json_dir in json_dirs.items():
json_files = load_files(json_dir)
json_df = generate_clean_df(json_files)
json_df['category'] = category
json_dfs.append(json_df)
df_all = pd.concat(json_dfs)
df_all.head() | code |
2009978/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/spam.csv', encoding='latin-1')
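# The latin-1 CSV carries three mostly empty spill-over columns; drop them and rename the rest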
df.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
df = df.rename(columns={'v1': 'class', 'v2': 'text'})
df.head() | code |
2009978/cell_6 | [
"text_plain_output_1.png"
] | from nltk.tokenize import WhitespaceTokenizer
from subprocess import check_output
import numpy as np # linear algebra
import operator
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from subprocess import check_output
import operator
import nltk
df = pd.read_csv('../input/spam.csv', encoding='latin-1')
df.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
df = df.rename(columns={'v1': 'class', 'v2': 'text'})
from nltk.tokenize import WhitespaceTokenizer
tokeniser = WhitespaceTokenizer()
def tokenize(sentence):
return tokeniser.tokenize(sentence)
num_top_words = 1000
all_words = {}
def build_words(string_in):
for w in tokenize(string_in):
all_words[w] = all_words.get(w, 0) + 1
for x in df['text']:
build_words(x)
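# Rank tokens by frequency and keep only the top num_top_words as the vocabulary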
sorted_words = sorted(all_words.items(), key=operator.itemgetter(1), reverse=True)
sorted_words = list(map(lambda x: x[0], sorted_words))
sorted_words = sorted_words[:num_top_words]
words_by_emails = []
def count_words_per_email(text):
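    # Build a dense count vector over the fixed vocabulary; out-of-vocabulary tokens are skipped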
row = np.zeros(len(sorted_words))
for word in tokenize(text):
try:
            row[sorted_words.index(word)] += 1
except ValueError:
pass
return row
X_rows = []
for _row in df['text']:
X_rows.append(count_words_per_email(_row))
X_rows = np.array(X_rows)
print(X_rows.shape) | code |
2009978/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/spam.csv', encoding='latin-1')
df.head() | code |
2009978/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from subprocess import check_output
import numpy as np # linear algebra
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
np.set_printoptions(threshold=np.inf)
import operator
import nltk | code |
104116934/cell_21 | [
"text_html_output_1.png"
] | from keras.layers import Dense, LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ']
df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a = df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a.columns = ['trafo_id', 'KESİNTİ_SÜRESİ_YENİ']
a
df = pd.merge(df, a, on='trafo_id')
dfa = df[['Tarih', 'KESİNTİ_SÜRESİ_YENİ']].sort_values(by='Tarih')
dfa
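# Aggregate the outage durations per day to obtain a single daily time series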
df_new = dfa.groupby('Tarih').sum()
import math
dataset = df_new.values
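# Chronological split: the first 80% of days are used for training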
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(0, 1))
scaled_data = sc.fit_transform(dataset)
train_data = scaled_data[0:training_data_len, :]
x_train = []
y_train = []
num = 80
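# Sliding window: each sample holds the previous 80 daily values, the label is the next day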
for i in range(num, len(train_data)):
x_train.append(train_data[i - num:i, 0])
y_train.append(train_data[i, 0])
x_train, y_train = (np.array(x_train), np.array(y_train))
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train.shape
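# Two stacked LSTM layers feeding dense layers for one-step-ahead regression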
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, batch_size=1, epochs=10) | code |
104116934/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ']
df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index() | code |
104116934/cell_25 | [
"text_html_output_1.png"
] | from keras.layers import Dense, LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ']
df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a = df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a.columns = ['trafo_id', 'KESİNTİ_SÜRESİ_YENİ']
a
df = pd.merge(df, a, on='trafo_id')
dfa = df[['Tarih', 'KESİNTİ_SÜRESİ_YENİ']].sort_values(by='Tarih')
dfa
df_new = dfa.groupby('Tarih').sum()
import math
dataset = df_new.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(0, 1))
scaled_data = sc.fit_transform(dataset)
train_data = scaled_data[0:training_data_len, :]
x_train = []
y_train = []
num = 80
for i in range(num, len(train_data)):
x_train.append(train_data[i - num:i, 0])
y_train.append(train_data[i, 0])
x_train, y_train = (np.array(x_train), np.array(y_train))
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train.shape
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, batch_size=1, epochs=10)
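# Start num rows before the split so the first test sample has a full lookback window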
test_data = scaled_data[training_data_len - num:, :]
x_test = []
y_test = dataset[training_data_len:, :]
for i in range(num, len(test_data)):
x_test.append(test_data[i - num:i, 0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
predictions = model.predict(x_test)
predictions = sc.inverse_transform(predictions)
rmse = np.sqrt(np.mean((predictions - y_test) ** 2))  # square the errors before averaging, then take the root
rmse | code |
104116934/cell_4 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df | code |
104116934/cell_20 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import math
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ']
df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a = df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a.columns = ['trafo_id', 'KESİNTİ_SÜRESİ_YENİ']
a
df = pd.merge(df, a, on='trafo_id')
dfa = df[['Tarih', 'KESİNTİ_SÜRESİ_YENİ']].sort_values(by='Tarih')
dfa
df_new = dfa.groupby('Tarih').sum()
import math
dataset = df_new.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(0, 1))
scaled_data = sc.fit_transform(dataset)
train_data = scaled_data[0:training_data_len, :]
x_train = []
y_train = []
num = 80
for i in range(num, len(train_data)):
x_train.append(train_data[i - num:i, 0])
y_train.append(train_data[i, 0])
x_train, y_train = (np.array(x_train), np.array(y_train))
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train.shape | code |
104116934/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns | code |
104116934/cell_26 | [
"image_output_1.png"
] | from keras.layers import Dense, LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import warnings
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ']
df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a = df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a.columns = ['trafo_id', 'KESİNTİ_SÜRESİ_YENİ']
a
df = pd.merge(df, a, on='trafo_id')
dfa = df[['Tarih', 'KESİNTİ_SÜRESİ_YENİ']].sort_values(by='Tarih')
dfa
df_new = dfa.groupby('Tarih').sum()
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import os
from decimal import ROUND_HALF_UP, Decimal
import numpy as np
import pandas as pd
from lightgbm import LGBMRegressor
from tqdm import tqdm
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from keras.models import Sequential
from keras.layers import Dense, LSTM
import math
dataset = df_new.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(0, 1))
scaled_data = sc.fit_transform(dataset)
train_data = scaled_data[0:training_data_len, :]
x_train = []
y_train = []
num = 80
for i in range(num, len(train_data)):
x_train.append(train_data[i - num:i, 0])
y_train.append(train_data[i, 0])
x_train, y_train = (np.array(x_train), np.array(y_train))
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train.shape
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, batch_size=1, epochs=10)
test_data = scaled_data[training_data_len - num:, :]
x_test = []
y_test = dataset[training_data_len:, :]
for i in range(num, len(test_data)):
x_test.append(test_data[i - num:i, 0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
predictions = model.predict(x_test)
predictions = sc.inverse_transform(predictions)
col = 'KESİNTİ_SÜRESİ_YENİ'
train = df_new[:training_data_len]
valid = df_new[training_data_len:].copy()  # copy so adding the Predictions column does not raise SettingWithCopyWarning
valid['Predictions'] = predictions
plt.figure(figsize=(20, 8))
plt.title('Kesinti süresi')
plt.xlabel('Date', fontsize=28)
plt.ylabel(col, fontsize=28)
plt.plot(train[col])
plt.plot(valid[[col, 'Predictions']])
plt.legend(['Train', 'Val', 'Predictions']) | code |
104116934/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104116934/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ'] | code |
104116934/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import warnings
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ']
df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a = df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a.columns = ['trafo_id', 'KESİNTİ_SÜRESİ_YENİ']
a
df = pd.merge(df, a, on='trafo_id')
dfa = df[['Tarih', 'KESİNTİ_SÜRESİ_YENİ']].sort_values(by='Tarih')
dfa
df_new = dfa.groupby('Tarih').sum()
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import os
from decimal import ROUND_HALF_UP, Decimal
import numpy as np
import pandas as pd
from lightgbm import LGBMRegressor
from tqdm import tqdm
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from keras.models import Sequential
from keras.layers import Dense, LSTM
plt.figure(figsize=(16, 8))
plt.title('KESİNTİ_SÜRESİ tarihe göre', fontsize=18)
plt.plot(df_new['KESİNTİ_SÜRESİ_YENİ'])
plt.xlabel('TARİH', fontsize=18)
plt.ylabel('KESİNTİ_SÜRESİ')
plt.show() | code |
104116934/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
print(df['BAŞLAMA_TARİHİ_VE_ZAMANI'].min())
print(df['BAŞLAMA_TARİHİ_VE_ZAMANI'].max()) | code |
104116934/cell_17 | [
"text_html_output_1.png"
] | import math
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ']
df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a = df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a.columns = ['trafo_id', 'KESİNTİ_SÜRESİ_YENİ']
a
df = pd.merge(df, a, on='trafo_id')
dfa = df[['Tarih', 'KESİNTİ_SÜRESİ_YENİ']].sort_values(by='Tarih')
dfa
df_new = dfa.groupby('Tarih').sum()
import math
dataset = df_new.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len | code |
104116934/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ']
df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a = df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a.columns = ['trafo_id', 'KESİNTİ_SÜRESİ_YENİ']
a
df = pd.merge(df, a, on='trafo_id')
dfa = df[['Tarih', 'KESİNTİ_SÜRESİ_YENİ']].sort_values(by='Tarih')
dfa
df_new = dfa.groupby('Tarih').sum()
df_new | code |
104116934/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ']
df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a = df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a.columns = ['trafo_id', 'KESİNTİ_SÜRESİ_YENİ']
a | code |
104116934/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission = pd.read_csv('/kaggle/input/gdz22-datathon/submission.csv')
df = pd.read_csv('/kaggle/input/gdz22-datathon/train.csv')
trafo = pd.read_csv('/kaggle/input/gdz22-datathon/trafo.csv')
bulutluluk_orani = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bulutluluk Oranı.csv')
bagil_nem = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Bağıl Nem.csv')
radyasyon = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Radyasyon.csv')
sicaklik = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Sıcaklık.csv')
ruzgar_yonu = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Yönü.csv')
yagis = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Yağış.csv')
ruzgar_hizi = pd.read_csv('/kaggle/input/gdz22-datathon/Hava Durumu/Rüzgar Hızı.csv')
df['BAŞLAMA_TARİHİ_VE_ZAMANI'] = pd.to_datetime(df['BAŞLAMA_TARİHİ_VE_ZAMANI'])
df = df.sort_values(by='BAŞLAMA_TARİHİ_VE_ZAMANI')
df
df = pd.merge(df, trafo, on='ŞEBEKE_UNSURU_KODU', how='left')
df.columns
df.groupby(['trafo_id']).sum()['KESİNTİ_SÜRESİ']
df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a = df.groupby('trafo_id').sum()['KESİNTİ_SÜRESİ'].reset_index()
a.columns = ['trafo_id', 'KESİNTİ_SÜRESİ_YENİ']
a
df = pd.merge(df, a, on='trafo_id')
dfa = df[['Tarih', 'KESİNTİ_SÜRESİ_YENİ']].sort_values(by='Tarih')
dfa | code |
122263700/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
sns.set_style('whitegrid')
plt.axis('equal')
# Categorical features
categorical_feats=['HomePlanet', 'CryoSleep', 'Destination', 'VIP']
# Plot categorical features
fig=plt.figure(figsize=(10,16))
for i, var_name in enumerate(categorical_feats):
ax=fig.add_subplot(4,1,i+1)
    sns.countplot(data=traindf, x=var_name, ax=ax, hue='Transported')
ax.set_title(var_name)
fig.tight_layout() # Improves appearance a bit
plt.show()
traindf.shape
traindf.columns
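# Bucket total spending into coarse bands; the huge upper bound effectively means "no upper cap"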
traindf['Expenses_group'] = np.nan
traindf.loc[traindf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
traindf.loc[(traindf['totalExpenses'] > 0) & (traindf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
traindf.loc[(traindf['totalExpenses'] > 500) & (traindf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
traindf.loc[(traindf['totalExpenses'] > 1000) & (traindf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
traindf.loc[(traindf['totalExpenses'] > 5000) & (traindf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
testdf['Expenses_group'] = np.nan
testdf.loc[testdf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
testdf.loc[(testdf['totalExpenses'] > 0) & (testdf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
testdf.loc[(testdf['totalExpenses'] > 500) & (testdf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
testdf.loc[(testdf['totalExpenses'] > 1000) & (testdf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
testdf.loc[(testdf['totalExpenses'] > 5000) & (testdf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
plt.figure(figsize=(10, 4))
g = sns.countplot(data=traindf, x='Expenses_group', hue='Transported', order=['Expenses_0', 'Expenses_0-500', 'Expenses_500-1000', 'Expenses_1000-5000', 'Expenses_5000+'])
plt.title('Distribuição por grupo de gastos')
plt.show() | code |
122263700/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
sns.set_style('whitegrid')
plt.axis('equal')
plt.figure(figsize=(10, 4))
sns.histplot(data=traindf, x='Age', hue='Transported', binwidth=1, kde=True)
plt.title('Distribuição idade')
plt.xlabel('Idade (anos)')
plt.show() | code |
122263700/cell_25 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
traindf.shape
traindf.columns
traindf['Expenses_group'] = np.nan
traindf.loc[traindf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
traindf.loc[(traindf['totalExpenses'] > 0) & (traindf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
traindf.loc[(traindf['totalExpenses'] > 500) & (traindf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
traindf.loc[(traindf['totalExpenses'] > 1000) & (traindf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
traindf.loc[(traindf['totalExpenses'] > 5000) & (traindf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
testdf['Expenses_group'] = np.nan
testdf.loc[testdf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
testdf.loc[(testdf['totalExpenses'] > 0) & (testdf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
testdf.loc[(testdf['totalExpenses'] > 500) & (testdf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
testdf.loc[(testdf['totalExpenses'] > 1000) & (testdf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
testdf.loc[(testdf['totalExpenses'] > 5000) & (testdf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
traindf['Age_group'] = np.nan
traindf.loc[traindf['Age'] <= 12, 'Age_group'] = 'Age_0-12'
traindf.loc[(traindf['Age'] > 12) & (traindf['Age'] < 18), 'Age_group'] = 'Age_13-17'
traindf.loc[(traindf['Age'] >= 18) & (traindf['Age'] <= 25), 'Age_group'] = 'Age_18-25'
traindf.loc[(traindf['Age'] > 25) & (traindf['Age'] <= 30), 'Age_group'] = 'Age_26-30'
traindf.loc[(traindf['Age'] > 30) & (traindf['Age'] <= 50), 'Age_group'] = 'Age_31-50'
traindf.loc[traindf['Age'] > 50, 'Age_group'] = 'Age_51+'
testdf['Age_group'] = np.nan
testdf.loc[testdf['Age'] <= 12, 'Age_group'] = 'Age_0-12'
testdf.loc[(testdf['Age'] > 12) & (testdf['Age'] < 18), 'Age_group'] = 'Age_13-17'
testdf.loc[(testdf['Age'] >= 18) & (testdf['Age'] <= 25), 'Age_group'] = 'Age_18-25'
testdf.loc[(testdf['Age'] > 25) & (testdf['Age'] <= 30), 'Age_group'] = 'Age_26-30'
testdf.loc[(testdf['Age'] > 30) & (testdf['Age'] <= 50), 'Age_group'] = 'Age_31-50'
testdf.loc[testdf['Age'] > 50, 'Age_group'] = 'Age_51+'
traindf['Age_group'].value_counts() | code |
122263700/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
traindf.shape
traindf.columns
traindf['Expenses_group'] = np.nan
traindf.loc[traindf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
traindf.loc[(traindf['totalExpenses'] > 0) & (traindf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
traindf.loc[(traindf['totalExpenses'] > 500) & (traindf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
traindf.loc[(traindf['totalExpenses'] > 1000) & (traindf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
traindf.loc[(traindf['totalExpenses'] > 5000) & (traindf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
testdf['Expenses_group'] = np.nan
testdf.loc[testdf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
testdf.loc[(testdf['totalExpenses'] > 0) & (testdf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
testdf.loc[(testdf['totalExpenses'] > 500) & (testdf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
testdf.loc[(testdf['totalExpenses'] > 1000) & (testdf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
testdf.loc[(testdf['totalExpenses'] > 5000) & (testdf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
traindf['Expenses_group'].value_counts() | code |
122263700/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
testdf.head() | code |
122263700/cell_26 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
traindf.shape
traindf.columns
traindf['Expenses_group'] = np.nan
traindf.loc[traindf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
traindf.loc[(traindf['totalExpenses'] > 0) & (traindf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
traindf.loc[(traindf['totalExpenses'] > 500) & (traindf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
traindf.loc[(traindf['totalExpenses'] > 1000) & (traindf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
traindf.loc[(traindf['totalExpenses'] > 5000) & (traindf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
testdf['Expenses_group'] = np.nan
testdf.loc[testdf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
testdf.loc[(testdf['totalExpenses'] > 0) & (testdf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
testdf.loc[(testdf['totalExpenses'] > 500) & (testdf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
testdf.loc[(testdf['totalExpenses'] > 1000) & (testdf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
testdf.loc[(testdf['totalExpenses'] > 5000) & (testdf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
traindf['Age_group'] = np.nan
traindf.loc[traindf['Age'] <= 12, 'Age_group'] = 'Age_0-12'
traindf.loc[(traindf['Age'] > 12) & (traindf['Age'] < 18), 'Age_group'] = 'Age_13-17'
traindf.loc[(traindf['Age'] >= 18) & (traindf['Age'] <= 25), 'Age_group'] = 'Age_18-25'
traindf.loc[(traindf['Age'] > 25) & (traindf['Age'] <= 30), 'Age_group'] = 'Age_26-30'
traindf.loc[(traindf['Age'] > 30) & (traindf['Age'] <= 50), 'Age_group'] = 'Age_31-50'
traindf.loc[traindf['Age'] > 50, 'Age_group'] = 'Age_51+'
testdf['Age_group'] = np.nan
testdf.loc[testdf['Age'] <= 12, 'Age_group'] = 'Age_0-12'
testdf.loc[(testdf['Age'] > 12) & (testdf['Age'] < 18), 'Age_group'] = 'Age_13-17'
testdf.loc[(testdf['Age'] >= 18) & (testdf['Age'] <= 25), 'Age_group'] = 'Age_18-25'
testdf.loc[(testdf['Age'] > 25) & (testdf['Age'] <= 30), 'Age_group'] = 'Age_26-30'
testdf.loc[(testdf['Age'] > 30) & (testdf['Age'] <= 50), 'Age_group'] = 'Age_31-50'
testdf.loc[testdf['Age'] > 50, 'Age_group'] = 'Age_51+'
traindf[['Age_group', 'Transported']].groupby(['Age_group'], as_index=False).mean().sort_values(by='Age_group', ascending=True) | code |
122263700/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
sns.set_style('whitegrid')
plt.axis('equal')
categorical_feats = ['HomePlanet', 'CryoSleep', 'Destination', 'VIP']
fig = plt.figure(figsize=(10, 16))
for i, var_name in enumerate(categorical_feats):
ax = fig.add_subplot(4, 1, i + 1)
    sns.countplot(data=traindf, x=var_name, ax=ax, hue='Transported')
ax.set_title(var_name)
fig.tight_layout()
plt.show() | code |
122263700/cell_19 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
traindf.shape
traindf.columns
traindf['Expenses_group'] = np.nan
traindf.loc[traindf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
traindf.loc[(traindf['totalExpenses'] > 0) & (traindf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
traindf.loc[(traindf['totalExpenses'] > 500) & (traindf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
traindf.loc[(traindf['totalExpenses'] > 1000) & (traindf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
traindf.loc[(traindf['totalExpenses'] > 5000) & (traindf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
testdf['Expenses_group'] = np.nan
testdf.loc[testdf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
testdf.loc[(testdf['totalExpenses'] > 0) & (testdf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
testdf.loc[(testdf['totalExpenses'] > 500) & (testdf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
testdf.loc[(testdf['totalExpenses'] > 1000) & (testdf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
testdf.loc[(testdf['totalExpenses'] > 5000) & (testdf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
traindf['totalExpenses'].describe() | code |
122263700/cell_18 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
traindf.shape
traindf.columns
traindf['Expenses_group'] = np.nan
traindf.loc[traindf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
traindf.loc[(traindf['totalExpenses'] > 0) & (traindf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
traindf.loc[(traindf['totalExpenses'] > 500) & (traindf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
traindf.loc[(traindf['totalExpenses'] > 1000) & (traindf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
traindf.loc[(traindf['totalExpenses'] > 5000) & (traindf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
testdf['Expenses_group'] = np.nan
testdf.loc[testdf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
testdf.loc[(testdf['totalExpenses'] > 0) & (testdf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
testdf.loc[(testdf['totalExpenses'] > 500) & (testdf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
testdf.loc[(testdf['totalExpenses'] > 1000) & (testdf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
testdf.loc[(testdf['totalExpenses'] > 5000) & (testdf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
print(traindf.loc[traindf.totalExpenses > 0, 'totalExpenses'].count(), traindf.loc[traindf.totalExpenses == 0, 'totalExpenses'].count()) | code |
122263700/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
plt.figure(figsize=(6, 6))
sns.set_style('whitegrid')
plt.pie(traindf['Transported'].value_counts(), autopct='%1.1f%%', startangle=90)
plt.axis('equal')
plt.title('Transportado x Não transportado')
plt.show() | code |
122263700/cell_15 | [
"image_output_1.png"
] | import pandas as pd
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
traindf.shape
traindf.columns | code |
122263700/cell_31 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
sns.set_style('whitegrid')
plt.axis('equal')
# Categorical features
categorical_feats=['HomePlanet', 'CryoSleep', 'Destination', 'VIP']
# Plot categorical features
fig=plt.figure(figsize=(10,16))
for i, var_name in enumerate(categorical_feats):
ax=fig.add_subplot(4,1,i+1)
    sns.countplot(data=traindf, x=var_name, ax=ax, hue='Transported')
ax.set_title(var_name)
fig.tight_layout() # Improves appearance a bit
plt.show()
traindf.shape
traindf.columns
traindf['Expenses_group'] = np.nan
traindf.loc[traindf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
traindf.loc[(traindf['totalExpenses'] > 0) & (traindf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
traindf.loc[(traindf['totalExpenses'] > 500) & (traindf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
traindf.loc[(traindf['totalExpenses'] > 1000) & (traindf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
traindf.loc[(traindf['totalExpenses'] > 5000) & (traindf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
testdf['Expenses_group'] = np.nan
testdf.loc[testdf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
testdf.loc[(testdf['totalExpenses'] > 0) & (testdf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
testdf.loc[(testdf['totalExpenses'] > 500) & (testdf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
testdf.loc[(testdf['totalExpenses'] > 1000) & (testdf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
testdf.loc[(testdf['totalExpenses'] > 5000) & (testdf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
plt.figure(figsize=(10,4))
g = sns.countplot(data=traindf, x='Expenses_group', hue='Transported', order=['Expenses_0','Expenses_0-500','Expenses_500-1000','Expenses_1000-5000','Expenses_5000+'])
plt.title('Distribuição por grupo de gastos')
plt.show()
traindf['Age_group'] = np.nan
traindf.loc[traindf['Age'] <= 12, 'Age_group'] = 'Age_0-12'
traindf.loc[(traindf['Age'] > 12) & (traindf['Age'] < 18), 'Age_group'] = 'Age_13-17'
traindf.loc[(traindf['Age'] >= 18) & (traindf['Age'] <= 25), 'Age_group'] = 'Age_18-25'
traindf.loc[(traindf['Age'] > 25) & (traindf['Age'] <= 30), 'Age_group'] = 'Age_26-30'
traindf.loc[(traindf['Age'] > 30) & (traindf['Age'] <= 50), 'Age_group'] = 'Age_31-50'
traindf.loc[traindf['Age'] > 50, 'Age_group'] = 'Age_51+'
testdf['Age_group'] = np.nan
testdf.loc[testdf['Age'] <= 12, 'Age_group'] = 'Age_0-12'
testdf.loc[(testdf['Age'] > 12) & (testdf['Age'] < 18), 'Age_group'] = 'Age_13-17'
testdf.loc[(testdf['Age'] >= 18) & (testdf['Age'] <= 25), 'Age_group'] = 'Age_18-25'
testdf.loc[(testdf['Age'] > 25) & (testdf['Age'] <= 30), 'Age_group'] = 'Age_26-30'
testdf.loc[(testdf['Age'] > 30) & (testdf['Age'] <= 50), 'Age_group'] = 'Age_31-50'
testdf.loc[testdf['Age'] > 50, 'Age_group'] = 'Age_51+'
# Plot distribution of new features
plt.figure(figsize=(10,4))
g = sns.countplot(data=traindf, x='Age_group', hue='Transported', order=['Age_0-12','Age_13-17','Age_18-25','Age_26-30','Age_31-50','Age_51+'])
plt.title('Age group distribution')
plt.show()
plt.figure(figsize=(10, 4))
g = sns.countplot(data=traindf, x='GroupsSize', hue='Transported')
plt.title('Distribuição por grupo')
plt.show() | code |
122263700/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
sns.set_style('whitegrid')
plt.axis('equal')
# Categorical features
categorical_feats=['HomePlanet', 'CryoSleep', 'Destination', 'VIP']
# Plot categorical features
fig=plt.figure(figsize=(10,16))
for i, var_name in enumerate(categorical_feats):
ax=fig.add_subplot(4,1,i+1)
    sns.countplot(data=traindf, x=var_name, ax=ax, hue='Transported')
ax.set_title(var_name)
fig.tight_layout() # Improves appearance a bit
plt.show()
traindf.shape
traindf.columns
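# Bin total spending into ordinal groups; `totalExpenses` is assumed to have been
# engineered in an earlier cell (e.g. the sum of the five onboard-spending columns).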
traindf['Expenses_group'] = np.nan
traindf.loc[traindf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
traindf.loc[(traindf['totalExpenses'] > 0) & (traindf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
traindf.loc[(traindf['totalExpenses'] > 500) & (traindf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
traindf.loc[(traindf['totalExpenses'] > 1000) & (traindf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
traindf.loc[(traindf['totalExpenses'] > 5000) & (traindf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
testdf['Expenses_group'] = np.nan
testdf.loc[testdf['totalExpenses'] == 0, 'Expenses_group'] = 'Expenses_0'
testdf.loc[(testdf['totalExpenses'] > 0) & (testdf['totalExpenses'] <= 500), 'Expenses_group'] = 'Expenses_0-500'
testdf.loc[(testdf['totalExpenses'] > 500) & (testdf['totalExpenses'] <= 1000), 'Expenses_group'] = 'Expenses_500-1000'
testdf.loc[(testdf['totalExpenses'] > 1000) & (testdf['totalExpenses'] <= 5000), 'Expenses_group'] = 'Expenses_1000-5000'
testdf.loc[(testdf['totalExpenses'] > 5000) & (testdf['totalExpenses'] <= 500000000000), 'Expenses_group'] = 'Expenses_5000+'
plt.figure(figsize=(10,4))
g = sns.countplot(data=traindf, x='Expenses_group', hue='Transported', order=['Expenses_0','Expenses_0-500','Expenses_500-1000','Expenses_1000-5000','Expenses_5000+'])
plt.title('Distribution by expense group')
plt.show()
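# Bin Age into life-stage groups, mirroring the expense binning above.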
traindf['Age_group'] = np.nan
traindf.loc[traindf['Age'] <= 12, 'Age_group'] = 'Age_0-12'
traindf.loc[(traindf['Age'] > 12) & (traindf['Age'] < 18), 'Age_group'] = 'Age_13-17'
traindf.loc[(traindf['Age'] >= 18) & (traindf['Age'] <= 25), 'Age_group'] = 'Age_18-25'
traindf.loc[(traindf['Age'] > 25) & (traindf['Age'] <= 30), 'Age_group'] = 'Age_26-30'
traindf.loc[(traindf['Age'] > 30) & (traindf['Age'] <= 50), 'Age_group'] = 'Age_31-50'
traindf.loc[traindf['Age'] > 50, 'Age_group'] = 'Age_51+'
testdf['Age_group'] = np.nan
testdf.loc[testdf['Age'] <= 12, 'Age_group'] = 'Age_0-12'
testdf.loc[(testdf['Age'] > 12) & (testdf['Age'] < 18), 'Age_group'] = 'Age_13-17'
testdf.loc[(testdf['Age'] >= 18) & (testdf['Age'] <= 25), 'Age_group'] = 'Age_18-25'
testdf.loc[(testdf['Age'] > 25) & (testdf['Age'] <= 30), 'Age_group'] = 'Age_26-30'
testdf.loc[(testdf['Age'] > 30) & (testdf['Age'] <= 50), 'Age_group'] = 'Age_31-50'
testdf.loc[testdf['Age'] > 50, 'Age_group'] = 'Age_51+'
plt.figure(figsize=(10, 4))
g = sns.countplot(data=traindf, x='Age_group', hue='Transported', order=['Age_0-12', 'Age_13-17', 'Age_18-25', 'Age_26-30', 'Age_31-50', 'Age_51+'])
plt.title('Age group distribution')
plt.show() | code |
122263700/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
traindf.shape | code |
122263700/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
traindf = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
testdf = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
data_cleaner = [traindf, testdf]
traindf.head() | code |
122262215/cell_42 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
import pandas as pd
import pandas as pd
import re
import re
email = 'We are going to the USA to meet on saturday or sunday at 09:30 PM or 10:00 am ok on january good ? in Cairo or Giza ?'
email = email.lower()
re.findall('saturday|sunday|monday|wednesday', email)
re.findall('january|february', email)
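# \d{1,2}:\d{1,2} a?p?m loosely matches times like '09:30 pm' (the email was lowercased above);
# note it would also accept malformed suffixes such as 'apm' or a bare 'm'.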
re.findall('\\d{1,2}:\\d{1,2} a?p?m', email)
df = pd.DataFrame(columns=['text', 'label'])
old_dataset = pd.read_csv('./events.csv')
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
wnl = WordNetLemmatizer()
engstopwords = stopwords.words('english')
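# Helper: lemmatize a word as adjective, then verb, then noun, so whichever
# part of speech applies gets reduced to its base form.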
def lemmatize_all_types(word):
word = wnl.lemmatize(word, 'a')
word = wnl.lemmatize(word, 'v')
word = wnl.lemmatize(word, 'n')
return word
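# clean(): strip URLs, keep only letters and spaces, lemmatize each word,
# then drop English stopwords before rejoining the text.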
def clean(text):
text = re.sub('https?://\\w+\\.\\w+\\.\\w+', '', text).lower()
text = re.sub('[^a-zA-Z ]', '', text)
text = list(map(lemmatize_all_types, text.split()))
text = [word for word in text if word not in engstopwords]
text = ' '.join(text)
return text
df = pd.read_csv('../input/emails-events/emails_events.csv')
# Assumed: X and y were built in an earlier cell; reconstructed here from df
# (assumed to carry 'text' and 'label' columns) so this cell runs
X = df['text'].apply(clean)
y = df['label']
tfidf = TfidfVectorizer(max_features=10000)
dtm = tfidf.fit_transform(X).toarray()
words = tfidf.get_feature_names_out()  # get_feature_names() was removed in scikit-learn >= 1.2
X_dtm = pd.DataFrame(columns=words, data=dtm)
# Assumed: the train/test split happened in an earlier cell; a standard 80/20 split is used here
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_dtm, y, test_size=0.2, random_state=42)
model = MultinomialNB()
model.fit(X_train, y_train)
model.score(X_test, y_test)
text = 'can we have a meeting next week please, in the morning'
text = clean(text)
enc = tfidf.transform([text])
model.predict(enc) | code |
122262215/cell_21 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests
# Assumed: `soup` was built in an earlier cell; a page is fetched here so this lookup runs
page = requests.get('https://sentence.yourdictionary.com/saturday')
soup = BeautifulSoup(page.content, 'html.parser')
ps = soup.find_all('p', {'class': 'sentence-item__text'})
df = pd.DataFrame(columns=['text', 'label'])
days = 'Monday Tuesday Wednesday Thursday Friday Saturday Sunday'.lower().split()
days
days = 'Monday Tuesday Wednesday Thursday Friday Saturday Sunday'.lower().split()
for day in days:
page = requests.get('https://sentence.yourdictionary.com/saturday')
soup = BeautifulSoup(page.content, 'html.parser')
ps = soup.find_all('p', {'class': 'sentence-item__text'})
for p in ps:
df = df.append({'text': p.text, 'label': 1}, ignore_index=True)
days = 'Monday Tuesday Wednesday Thursday Friday Saturday Sunday'.lower().split()
for day in days:
page = requests.get('https://sentence.yourdictionary.com/' + day)
soup = BeautifulSoup(page.content, 'html.parser')
ps = soup.find_all('p', {'class': 'sentence-item__text'})
for p in ps:
df = df.append({'text': p.text, 'label': 1}, ignore_index=True)
# Assumed: old_dataset was loaded in an earlier cell; reloaded here so the rename runs
old_dataset = pd.read_csv('./events.csv')
old_dataset.columns = ['text', 'label']
old_dataset.to_csv('good_dataset.csv', index=False)
months = 'January February March April May June July August September October November December'.lower().split()
for month in months:
page = requests.get('https://sentence.yourdictionary.com/' + month)
soup = BeautifulSoup(page.content, 'html.parser')
ps = soup.find_all('p', {'class': 'sentence-item__text'})
for p in ps:
df = df.append({'text': p.text, 'label': 1}, ignore_index=True)
for item in ['again']:
page = requests.get('https://sentence.yourdictionary.com/' + item)
soup = BeautifulSoup(page.content, 'html.parser')
ps = soup.find_all('p', {'class': 'sentence-item__text'})
for p in ps:
old_dataset = old_dataset.append({'text': p.text, 'label': 0}, ignore_index=True)
old_dataset.shape | code |
122262215/cell_13 | [
"text_plain_output_1.png"
] | days = 'Monday Tuesday Wednesday Thursday Friday Saturday Sunday'.lower().split()
days | code |
122262215/cell_25 | [
"text_plain_output_1.png"
] | import nltk
import nltk
nltk.download('omw-1.4') | code |
122262215/cell_6 | [
"text_plain_output_1.png"
] | import re
email = 'We are going to the USA to meet on saturday or sunday at 09:30 PM or 10:00 am ok on january good ? in Cairo or Giza ?'
email = email.lower()
re.findall('saturday|sunday|monday|wednesday', email) | code |
122262215/cell_40 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.naive_bayes import MultinomialNB
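# Assumed: X_train and y_train were defined in earlier cells of this notebook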
model = MultinomialNB()
model.fit(X_train, y_train) | code |