path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class: "code") |
---|---|---|---|
33106981/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt  # needed for plt.show() below
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel_num = hotel.dtypes[hotel.dtypes != 'object']
hotel_num = hotel_num.index.to_list()
Date_Drop = {'is_canceled', 'company'}
hotel_num = [ele for ele in hotel_num if ele not in Date_Drop]
hotel_num
hot_num = hotel[hotel_num].copy()
for i in hot_num.columns:
    hot_num.boxplot(column=i)
    plt.show() | code |
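The dtype-based column selection in the cell above has a more direct pandas idiom. A sketch under the same assumptions (same CSV, same two excluded columns); note that `Index.difference` returns the columns sorted, so the order can differ from the loop-built list:

```python
import pandas as pd

hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
# numeric columns minus the two excluded above; difference() sorts the result
numeric_cols = hotel.select_dtypes(exclude='object').columns.difference(['is_canceled', 'company'])
hot_num = hotel[numeric_cols].copy()
```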
33106981/cell_15 | [
"text_html_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel_num = hotel.dtypes[hotel.dtypes != 'object']
hotel_num = hotel_num.index.to_list()
Date_Drop = {'is_canceled', 'company'}
hotel_num = [ele for ele in hotel_num if ele not in Date_Drop]
hotel_num
hot_num = hotel[hotel_num].copy()
def detect_outliers(df, features):
    """
    Tukey IQR method: returns a Counter mapping each row index to the
    number of features for which that observation falls outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
    """
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    return Counter(outlier_indices)
Outliers_to_drop = detect_outliers(hotel, hot_num)
len(Outliers_to_drop)
hotel = hotel.drop(Outliers_to_drop, axis=0).reset_index(drop=True)
hotel.isna().sum()
hotel[hotel['children'].isna()].T | code |
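A minimal, self-contained illustration of the Tukey fence that `detect_outliers` applies per column (toy data, not from the original notebook):

```python
import numpy as np

x = np.array([1, 2, 3, 4, 5, 100])          # 100 is the obvious outlier
q1, q3 = np.percentile(x, [25, 75])
step = 1.5 * (q3 - q1)                       # the 1.5 * IQR fence
mask = (x < q1 - step) | (x > q3 + step)
print(x[mask])                               # -> [100]
```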
33106981/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape | code |
33106981/cell_22 | [
"image_output_11.png",
"image_output_17.png",
"image_output_14.png",
"image_output_13.png",
"image_output_5.png",
"image_output_18.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_16.png",
"image_output_6.png",
"image_output_12.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_15.png",
"image_output_9.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel_num = hotel.dtypes[hotel.dtypes != 'object']
hotel_num = hotel_num.index.to_list()
Date_Drop = {'is_canceled', 'company'}
hotel_num = [ele for ele in hotel_num if ele not in Date_Drop]
hotel_num
hot_num = hotel[hotel_num].copy()
def detect_outliers(df, features):
    """
    Tukey IQR method: returns a Counter mapping each row index to the
    number of features for which that observation falls outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
    """
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    return Counter(outlier_indices)
Outliers_to_drop = detect_outliers(hotel, hot_num)
len(Outliers_to_drop)
hotel = hotel.drop(Outliers_to_drop, axis=0).reset_index(drop=True)
hotel.isna().sum()
hotel.company = hotel.company.fillna(0)
hotel.agent = hotel.agent.fillna(0)
hotel.children = hotel.children.fillna(0)
hotel.country = hotel.country.fillna('unknown')
hotel.drop(hotel[(hotel['children'] == 0) & (hotel['babies'] == 0) & (hotel['adults'] == 0)].index, inplace=True)
Cat_Var = hotel.dtypes[hotel.dtypes == 'object']
Cat_Var = Cat_Var.index.to_list()
Date_Drop = {'arrival_date_month', 'reservation_status_date'}
Cat_Var = [ele for ele in Cat_Var if ele not in Date_Drop]
Cat_Var | code |
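The four imputation statements above can be collapsed into a single `fillna` call with a dict; an equivalent sketch using the same columns and fill values:

```python
fill_values = {'company': 0, 'agent': 0, 'children': 0, 'country': 'unknown'}
hotel = hotel.fillna(value=fill_values)
```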
33106981/cell_27 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel_num = hotel.dtypes[hotel.dtypes != 'object']
hotel_num = hotel_num.index.to_list()
Date_Drop = {'is_canceled', 'company'}
hotel_num = [ele for ele in hotel_num if ele not in Date_Drop]
hotel_num
hot_num = hotel[hotel_num].copy()
def detect_outliers(df, features):
    """
    Tukey IQR method: returns a Counter mapping each row index to the
    number of features for which that observation falls outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
    """
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    return Counter(outlier_indices)
Outliers_to_drop = detect_outliers(hotel, hot_num)
len(Outliers_to_drop)
hotel = hotel.drop(Outliers_to_drop, axis=0).reset_index(drop=True)
hotel.isna().sum()
hotel.company = hotel.company.fillna(0)
hotel.agent = hotel.agent.fillna(0)
hotel.children = hotel.children.fillna(0)
hotel.country = hotel.country.fillna('unknown')
hotel.drop(hotel[(hotel['children'] == 0) & (hotel['babies'] == 0) & (hotel['adults'] == 0)].index, inplace=True)
def cnt_plot(a):
    # completed body (the original defined only `col` and `title`); a count plot matches the name
    col = hotel[a]
    title = 'Category wise count of' + ' ' + a
    sns.countplot(x=col)
    plt.title(title)
Cat_Var = hotel.dtypes[hotel.dtypes == 'object']
Cat_Var = Cat_Var.index.to_list()
Date_Drop = {'arrival_date_month', 'reservation_status_date'}
Cat_Var = [ele for ele in Cat_Var if ele not in Date_Drop]
Cat_Var
corrmap = hotel.corr()
hotel.columns | code |
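`corrmap` is computed above but never visualized in this cell; a typical seaborn sketch (figure size and colormap are arbitrary choices, not from the original):

```python
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(14, 10))
sns.heatmap(corrmap, cmap='coolwarm')   # corrmap = hotel.corr() from the cell above
plt.show()
```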
33106981/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel.info() | code |
34141447/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
X_test.isnull().any().describe() | code |
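A terser completeness check than describing the per-column null flags, as a generic pandas idiom (not from the original):

```python
# fails loudly if any value is missing anywhere
assert not X_train.isnull().values.any()
assert not X_test.isnull().values.any()
```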
34141447/cell_25 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from torch import nn, optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import torch
import torch.nn.functional as F
use_gpu = torch.cuda.is_available()
use_gpu
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
# X_val/y_val: the cell that created the validation split is absent from this
# dump, so an assumed 90/10 split is reconstructed here to keep the code runnable.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
X_train = X_train.reshape(-1, 1, 28, 28)
X_val = X_val.reshape(-1, 1, 28, 28)
X_train = torch.from_numpy(X_train)
X_val = torch.from_numpy(X_val)
y_train = torch.from_numpy(y_train)
y_val = torch.from_numpy(y_val)
X_train.shape
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
net = LeNet()
if use_gpu:
net = net.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001)
n_samples = X_train.shape[0]
batch_size = 4
n_epochs = 2
for epoch in range(0, n_epochs):
for i in range(0, n_samples, batch_size):
if i + batch_size >= n_samples:
mini_batch_data = Variable(X_train[i:n_samples, :, :, :].clone(), requires_grad=True)
mini_batch_label = Variable(y_train[i:n_samples].clone(), requires_grad=False)
else:
mini_batch_data = Variable(X_train[i:i + batch_size, :, :, :].clone(), requires_grad=True)
mini_batch_label = Variable(y_train[i:i + batch_size].clone(), requires_grad=False)
mini_batch_data = mini_batch_data.type(torch.FloatTensor)
mini_batch_label = mini_batch_label.type(torch.LongTensor)
        mini_data, mini_label = mini_batch_data, mini_batch_label  # CPU fallback; the original set these only when use_gpu was True
        if use_gpu:
            mini_data = mini_batch_data.cuda()
            mini_label = mini_batch_label.cuda()
optimizer.zero_grad()
batch_output = net(mini_data)
batch_loss = criterion(batch_output, mini_label)
batch_loss.backward()
optimizer.step()
n_val_samples = X_val.shape[0]
true_counter = 0
for val_idx in range(n_val_samples):
val_sample = X_val[val_idx].clone().unsqueeze(dim=0)
val_sample = val_sample.type(torch.FloatTensor)
if use_gpu:
val_sample = val_sample.cuda()
pred = net(val_sample)
_, pred = torch.max(pred, 1)
    if pred.item() == y_val[val_idx].item():  # .item() keeps the scalar comparison device-agnostic (pred may live on the GPU)
        true_counter += 1
true_counter /= 1.0
print(f'Accuracy: {true_counter / n_val_samples}') | code |
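The hand-rolled slicing loop above is what `torch.utils.data.DataLoader` automates; a sketch assuming the `X_train`/`y_train` tensors, `net`, `criterion`, `optimizer`, and `use_gpu` built in the cell:

```python
from torch.utils.data import TensorDataset, DataLoader

train_loader = DataLoader(TensorDataset(X_train.float(), y_train.long()),
                          batch_size=4, shuffle=True)
for epoch in range(2):
    for mini_data, mini_label in train_loader:   # batching and the ragged last batch are handled for free
        if use_gpu:
            mini_data, mini_label = mini_data.cuda(), mini_label.cuda()
        optimizer.zero_grad()
        loss = criterion(net(mini_data), mini_label)
        loss.backward()
        optimizer.step()
```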
34141447/cell_4 | [
"text_plain_output_1.png"
] | import torch
use_gpu = torch.cuda.is_available()
use_gpu | code |
34141447/cell_23 | [
"text_plain_output_1.png"
] | from torch import nn, optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import torch
import torch.nn.functional as F
use_gpu = torch.cuda.is_available()
use_gpu
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
# X_val/y_val: the cell that created the validation split is absent from this
# dump, so an assumed 90/10 split is reconstructed here to keep the code runnable.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
X_train = X_train.reshape(-1, 1, 28, 28)
X_val = X_val.reshape(-1, 1, 28, 28)
X_train = torch.from_numpy(X_train)
X_val = torch.from_numpy(X_val)
y_train = torch.from_numpy(y_train)
y_val = torch.from_numpy(y_val)
X_train.shape
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
net = LeNet()
if use_gpu:
net = net.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001)
n_samples = X_train.shape[0]
batch_size = 4
n_epochs = 2
for epoch in range(0, n_epochs):
for i in range(0, n_samples, batch_size):
if i + batch_size >= n_samples:
print('reaching end of data')
mini_batch_data = Variable(X_train[i:n_samples, :, :, :].clone(), requires_grad=True)
mini_batch_label = Variable(y_train[i:n_samples].clone(), requires_grad=False)
else:
mini_batch_data = Variable(X_train[i:i + batch_size, :, :, :].clone(), requires_grad=True)
mini_batch_label = Variable(y_train[i:i + batch_size].clone(), requires_grad=False)
mini_batch_data = mini_batch_data.type(torch.FloatTensor)
mini_batch_label = mini_batch_label.type(torch.LongTensor)
        mini_data, mini_label = mini_batch_data, mini_batch_label  # CPU fallback; the original set these only when use_gpu was True
        if use_gpu:
            mini_data = mini_batch_data.cuda()
            mini_label = mini_batch_label.cuda()
optimizer.zero_grad()
batch_output = net(mini_data)
batch_loss = criterion(batch_output, mini_label)
batch_loss.backward()
optimizer.step()
if i % 10000 == 0:
print(f'epoch # {epoch}, iter # {i}:, loss {batch_loss}') | code |
34141447/cell_29 | [
"text_plain_output_1.png"
] | from torch import nn, optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import torch
import torch.nn.functional as F
use_gpu = torch.cuda.is_available()
use_gpu
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
# X_val/y_val: the cell that created the validation split is absent from this
# dump, so an assumed 90/10 split is reconstructed here to keep the code runnable.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
X_train = X_train.reshape(-1, 1, 28, 28)
X_val = X_val.reshape(-1, 1, 28, 28)
X_train = torch.from_numpy(X_train)
X_val = torch.from_numpy(X_val)
y_train = torch.from_numpy(y_train)
y_val = torch.from_numpy(y_val)
X_train.shape
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
net = LeNet()
if use_gpu:
net = net.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001)
n_samples = X_train.shape[0]
batch_size = 4
n_epochs = 2
for epoch in range(0, n_epochs):
for i in range(0, n_samples, batch_size):
if i + batch_size >= n_samples:
mini_batch_data = Variable(X_train[i:n_samples, :, :, :].clone(), requires_grad=True)
mini_batch_label = Variable(y_train[i:n_samples].clone(), requires_grad=False)
else:
mini_batch_data = Variable(X_train[i:i + batch_size, :, :, :].clone(), requires_grad=True)
mini_batch_label = Variable(y_train[i:i + batch_size].clone(), requires_grad=False)
mini_batch_data = mini_batch_data.type(torch.FloatTensor)
mini_batch_label = mini_batch_label.type(torch.LongTensor)
        mini_data, mini_label = mini_batch_data, mini_batch_label  # CPU fallback; the original set these only when use_gpu was True
        if use_gpu:
            mini_data = mini_batch_data.cuda()
            mini_label = mini_batch_label.cuda()
optimizer.zero_grad()
batch_output = net(mini_data)
batch_loss = criterion(batch_output, mini_label)
batch_loss.backward()
optimizer.step()
n_val_samples = X_val.shape[0]
true_counter = 0
for val_idx in range(n_val_samples):
val_sample = X_val[val_idx].clone().unsqueeze(dim=0)
val_sample = val_sample.type(torch.FloatTensor)
if use_gpu:
val_sample = val_sample.cuda()
pred = net(val_sample)
_, pred = torch.max(pred, 1)
    if pred.item() == y_val[val_idx].item():  # .item() keeps the scalar comparison device-agnostic (pred may live on the GPU)
        true_counter += 1
true_counter /= 1.0
X_test = X_test.reshape(-1, 1, 28, 28)
X_test = torch.from_numpy(X_test)
n_test_samples = X_test.shape[0]
net.eval()
output_file = np.ndarray(shape=(n_test_samples, 2), dtype=int)
for test_idx in range(n_test_samples):
test_sample = X_test[test_idx].clone().unsqueeze(dim=1)
test_sample = test_sample.type(torch.FloatTensor)
if use_gpu:
test_sample = test_sample.cuda()
pred = net(test_sample)
_, pred = torch.max(pred, 1)
output_file[test_idx][0] = test_idx + 1
output_file[test_idx][1] = pred
submission = pd.DataFrame(output_file, dtype=int, columns=['ImageId', 'Label'])
sample = 150
plt.imshow(X_test[sample][0].numpy())
print(output_file[sample][1]) | code |
34141447/cell_18 | [
"text_plain_output_1.png"
] | from torch import nn, optim
import torch
import torch.nn.functional as F
use_gpu = torch.cuda.is_available()
use_gpu
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
net = LeNet()
print(net)
if use_gpu:
net = net.cuda() | code |
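A quick sanity check to pair with `print(net)`: counting trainable parameters, a generic PyTorch idiom (the 61,706 figure follows from the layer sizes defined above):

```python
n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f'trainable parameters: {n_params}')   # LeNet-5 as defined above: 61,706
```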
34141447/cell_28 | [
"text_plain_output_1.png"
] | from torch import nn, optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import torch
import torch.nn.functional as F
use_gpu = torch.cuda.is_available()
use_gpu
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
# X_val/y_val: the cell that created the validation split is absent from this
# dump, so an assumed 90/10 split is reconstructed here to keep the code runnable.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
X_train = X_train.reshape(-1, 1, 28, 28)
X_val = X_val.reshape(-1, 1, 28, 28)
X_train = torch.from_numpy(X_train)
X_val = torch.from_numpy(X_val)
y_train = torch.from_numpy(y_train)
y_val = torch.from_numpy(y_val)
X_train.shape
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
net = LeNet()
if use_gpu:
net = net.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001)
n_samples = X_train.shape[0]
batch_size = 4
n_epochs = 2
for epoch in range(0, n_epochs):
for i in range(0, n_samples, batch_size):
if i + batch_size >= n_samples:
mini_batch_data = Variable(X_train[i:n_samples, :, :, :].clone(), requires_grad=True)
mini_batch_label = Variable(y_train[i:n_samples].clone(), requires_grad=False)
else:
mini_batch_data = Variable(X_train[i:i + batch_size, :, :, :].clone(), requires_grad=True)
mini_batch_label = Variable(y_train[i:i + batch_size].clone(), requires_grad=False)
mini_batch_data = mini_batch_data.type(torch.FloatTensor)
mini_batch_label = mini_batch_label.type(torch.LongTensor)
        mini_data, mini_label = mini_batch_data, mini_batch_label  # CPU fallback; the original set these only when use_gpu was True
        if use_gpu:
            mini_data = mini_batch_data.cuda()
            mini_label = mini_batch_label.cuda()
optimizer.zero_grad()
batch_output = net(mini_data)
batch_loss = criterion(batch_output, mini_label)
batch_loss.backward()
optimizer.step()
n_val_samples = X_val.shape[0]
true_counter = 0
for val_idx in range(n_val_samples):
val_sample = X_val[val_idx].clone().unsqueeze(dim=0)
val_sample = val_sample.type(torch.FloatTensor)
if use_gpu:
val_sample = val_sample.cuda()
pred = net(val_sample)
_, pred = torch.max(pred, 1)
    if pred.item() == y_val[val_idx].item():  # .item() keeps the scalar comparison device-agnostic (pred may live on the GPU)
        true_counter += 1
true_counter /= 1.0
X_test = X_test.reshape(-1, 1, 28, 28)
X_test = torch.from_numpy(X_test)
n_test_samples = X_test.shape[0]
net.eval()
output_file = np.ndarray(shape=(n_test_samples, 2), dtype=int)
for test_idx in range(n_test_samples):
test_sample = X_test[test_idx].clone().unsqueeze(dim=1)
test_sample = test_sample.type(torch.FloatTensor)
if use_gpu:
test_sample = test_sample.cuda()
pred = net(test_sample)
_, pred = torch.max(pred, 1)
output_file[test_idx][0] = test_idx + 1
output_file[test_idx][1] = pred
if test_idx % 1000 == 0:
print(f'testing sample #{test_idx}')
submission = pd.DataFrame(output_file, dtype=int, columns=['ImageId', 'Label']) | code |
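The `submission` DataFrame is built above but never written in this dump; the conventional finishing step would be:

```python
submission.to_csv('submission.csv', index=False)
```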
34141447/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
plt.figure(figsize=(12, 8))
sns.countplot(x=y_train)  # keyword form; seaborn >= 0.12 no longer accepts positional data
y_train.unique() | code |
34141447/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import torch
use_gpu = torch.cuda.is_available()
use_gpu
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
# X_val/y_val: the cell that created the validation split is absent from this
# dump, so an assumed 90/10 split is reconstructed here to keep the code runnable.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
X_train = X_train.reshape(-1, 1, 28, 28)
X_val = X_val.reshape(-1, 1, 28, 28)
X_train = torch.from_numpy(X_train)
X_val = torch.from_numpy(X_val)
y_train = torch.from_numpy(y_train)
y_val = torch.from_numpy(y_val)
X_train.shape | code |
34141447/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd import Variable
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
34141447/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
# X_val/y_val: the cell that created the validation split is absent from this
# dump, so an assumed 90/10 split is reconstructed here to keep the code runnable.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
X_train = X_train.reshape(-1, 1, 28, 28)
X_val = X_val.reshape(-1, 1, 28, 28)
print(X_train[34][0].shape)
plt.imshow(X_train[0][0]) | code |
34141447/cell_27 | [
"text_plain_output_1.png"
] | from torch import nn, optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import torch
import torch.nn.functional as F
use_gpu = torch.cuda.is_available()
use_gpu
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
# X_val/y_val: the cell that created the validation split is absent from this
# dump, so an assumed 90/10 split is reconstructed here to keep the code runnable.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
X_train = X_train.reshape(-1, 1, 28, 28)
X_val = X_val.reshape(-1, 1, 28, 28)
X_train = torch.from_numpy(X_train)
X_val = torch.from_numpy(X_val)
y_train = torch.from_numpy(y_train)
y_val = torch.from_numpy(y_val)
X_train.shape
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
net = LeNet()
if use_gpu:
net = net.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001)
n_samples = X_train.shape[0]
batch_size = 4
n_epochs = 2
for epoch in range(0, n_epochs):
for i in range(0, n_samples, batch_size):
if i + batch_size >= n_samples:
mini_batch_data = Variable(X_train[i:n_samples, :, :, :].clone(), requires_grad=True)
mini_batch_label = Variable(y_train[i:n_samples].clone(), requires_grad=False)
else:
mini_batch_data = Variable(X_train[i:i + batch_size, :, :, :].clone(), requires_grad=True)
mini_batch_label = Variable(y_train[i:i + batch_size].clone(), requires_grad=False)
mini_batch_data = mini_batch_data.type(torch.FloatTensor)
mini_batch_label = mini_batch_label.type(torch.LongTensor)
        mini_data, mini_label = mini_batch_data, mini_batch_label  # CPU fallback; the original set these only when use_gpu was True
        if use_gpu:
            mini_data = mini_batch_data.cuda()
            mini_label = mini_batch_label.cuda()
optimizer.zero_grad()
batch_output = net(mini_data)
batch_loss = criterion(batch_output, mini_label)
batch_loss.backward()
optimizer.step()
n_val_samples = X_val.shape[0]
true_counter = 0
for val_idx in range(n_val_samples):
val_sample = X_val[val_idx].clone().unsqueeze(dim=0)
val_sample = val_sample.type(torch.FloatTensor)
if use_gpu:
val_sample = val_sample.cuda()
pred = net(val_sample)
_, pred = torch.max(pred, 1)
    if pred.item() == y_val[val_idx].item():  # .item() keeps the scalar comparison device-agnostic (pred may live on the GPU)
        true_counter += 1
true_counter /= 1.0
X_test = X_test.reshape(-1, 1, 28, 28)
X_test = torch.from_numpy(X_test)
n_test_samples = X_test.shape[0]
net.eval() | code |
34141447/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
# X_val/y_val: the split cell is absent from this dump; an assumed 90/10 split is reconstructed.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
print(X_train.shape)
print(y_train.shape)
print(X_val.shape)
print(y_val.shape) | code |
122255805/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibutton2016/Temp2016.csv', skiprows=[i for i in range(1, 1096)], skipfooter=1112, engine='python', parse_dates=['Var1'], index_col=['Var1'])
df.index.name = 'Date'
df.index = pd.to_datetime(df.index)
df = df.replace(0, np.nan)
spat_mean = df.mean(axis=1).copy()
print(spat_mean)
spat_mean.groupby(spat_mean.index.hour).mean().plot()
plt.title('Spatial mean: Diurnal cycle of summer 2016 absolute')
plt.ylabel('Temperature ℃')
plt.xlabel('Hour of the day')
plt.show() | code |
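A closely related aggregation, not in the original cell: since the index is a DatetimeIndex, `resample` gives the daily spatial mean directly (sketch):

```python
daily_mean = spat_mean.resample('D').mean()
daily_mean.plot(title='Spatial mean: daily temperature')
plt.ylabel('Temperature ℃')
plt.show()
```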
122255805/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
122255805/cell_3 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibutton2016/Temp2016.csv', skiprows=[i for i in range(1, 1096)], skipfooter=1112, engine='python', parse_dates=['Var1'], index_col=['Var1'])
df.index.name = 'Date'
df.index = pd.to_datetime(df.index)
df = df.replace(0, np.nan)
df.head() | code |
2016103/cell_4 | [
"text_plain_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
from scipy.stats import linregress
from subprocess import check_output
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
from subprocess import check_output
SW = pd.read_csv('../input/seattleWeather_1948-2017.csv')
SW.columns = SW.columns.str.lower()
separate = SW.date.str.split('-')
a, b, c = zip(*separate)
SW['year'] = a
SW['month'] = b
SW_year = SW['year'].unique().astype(int)
SW['avegareinday'] = (SW.tmax + SW.tmin) / 2
SW_avegareinmonth = SW.groupby([SW.year, SW.month])['avegareinday'].sum() / SW.groupby([SW.year, SW.month])['avegareinday'].count()
SW_avegareinannualy = SW_avegareinmonth.groupby('year').sum() / 12
SW_5yearmovingaverage = np.convolve(SW_avegareinannualy, np.ones((5,)) / 5, mode='valid')
from scipy.stats import linregress
linregress(SW_year, SW_avegareinannualy.values) | code |
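The slope and intercept returned here are what the plotting cell below hardcodes as `0.04642 * SW_year - 40.055`; a sketch that uses the result object instead of magic numbers:

```python
fit = linregress(SW_year, SW_avegareinannualy.values)
trend = fit.slope * SW_year + fit.intercept   # the same trend line, without hardcoding
print(f'R^2 = {fit.rvalue ** 2:.6f}')         # matches the 0.641239 in the later annotation
```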
2016103/cell_2 | [
"text_plain_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
from subprocess import check_output
import pandas as pd
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
SW = pd.read_csv('../input/seattleWeather_1948-2017.csv')
SW.columns = SW.columns.str.lower()
separate = SW.date.str.split('-')
a, b, c = zip(*separate)
SW['year'] = a
SW['month'] = b
SW_year = SW['year'].unique().astype(int) | code |
2016103/cell_7 | [
"text_html_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
from scipy.stats import linregress
from subprocess import check_output
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
from subprocess import check_output
SW = pd.read_csv('../input/seattleWeather_1948-2017.csv')
SW.columns = SW.columns.str.lower()
separate = SW.date.str.split('-')
a, b, c = zip(*separate)
SW['year'] = a
SW['month'] = b
SW_year = SW['year'].unique().astype(int)
SW['avegareinday'] = (SW.tmax + SW.tmin) / 2
SW_avegareinmonth = SW.groupby([SW.year, SW.month])['avegareinday'].sum() / SW.groupby([SW.year, SW.month])['avegareinday'].count()
SW_avegareinannualy = SW_avegareinmonth.groupby('year').sum() / 12
SW_5yearmovingaverage = np.convolve(SW_avegareinannualy, np.ones((5,)) / 5, mode='valid')
from scipy.stats import linregress
linregress(SW_year, SW_avegareinannualy.values)
trace1 = dict(x=SW_year, y=SW_avegareinannualy, line=dict(color='rgb(255, 127, 14)', width=1), mode='lines+markers', name='annual average temp', type='scatter', uid='f5d9be')
trace2 = dict(x=SW_year, y=SW_5yearmovingaverage, line=dict(color='rgb(51, 51, 255)', width=2), mode='lines', name='5-year moving average temp', type='scatter', uid='f5d9be')
trace3 = dict(x=SW_year, y=0.04642 * SW_year - 40.055, line=dict(color='rgb(0, 153, 0)', width=2), mode='lines', name='long-term linear trend', type='scatter', uid='f5d9be')
layout = go.Layout(title='Annual average temperature in Seattle (1948-2017)', xaxis={'title': 'Years'}, yaxis={'title': '°F'}, annotations=[dict(x=2006, y=49, showarrow=False, text='y = 0.04642x-40.05572<br>R<sup>2</sup> =0.641239', font={'size': 20})])
data = [trace1, trace2, trace3]
fig = dict(data=data, layout=layout)
SW_rain = np.asarray(SW.groupby('year')['rain'].sum())
SW_dry = np.asarray(SW.groupby('year')['rain'].count()) - np.asarray(SW.groupby('year')['rain'].sum())
labels = ['Rain', 'Dryness']  # order matched to y_data = [SW_rain, SW_dry]; the original labels were swapped
colors = ['rgb(255, 51, 0)', 'rgb(0, 51, 204)']
x_data = SW_year
y_data = [SW_rain, SW_dry]
traces = []
for i in range(0, 2):
    # 'splines' is not a valid Scatter mode; spline smoothing belongs in line.shape
    traces.append(go.Scatter(x=x_data, y=y_data[i], mode='lines', name=labels[i], line=dict(color=colors[i], width=3, shape='spline')))
layout = {'title': 'Rainy and dry days in Seattle (1948-2017)', 'xaxis': {'title': 'Years'}, 'yaxis': {'title': 'Days'}}
figure = dict(data=traces, layout=layout)
iplot(figure) | code |
2016103/cell_5 | [
"text_html_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
from scipy.stats import linregress
from subprocess import check_output
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
from subprocess import check_output
SW = pd.read_csv('../input/seattleWeather_1948-2017.csv')
SW.columns = SW.columns.str.lower()
separate = SW.date.str.split('-')
a, b, c = zip(*separate)
SW['year'] = a
SW['month'] = b
SW_year = SW['year'].unique().astype(int)
SW['avegareinday'] = (SW.tmax + SW.tmin) / 2
SW_avegareinmonth = SW.groupby([SW.year, SW.month])['avegareinday'].sum() / SW.groupby([SW.year, SW.month])['avegareinday'].count()
SW_avegareinannualy = SW_avegareinmonth.groupby('year').sum() / 12
SW_5yearmovingaverage = np.convolve(SW_avegareinannualy, np.ones((5,)) / 5, mode='valid')
from scipy.stats import linregress
linregress(SW_year, SW_avegareinannualy.values)
trace1 = dict(x=SW_year, y=SW_avegareinannualy, line=dict(color='rgb(255, 127, 14)', width=1), mode='lines+markers', name='annual average temp', type='scatter', uid='f5d9be')
trace2 = dict(x=SW_year, y=SW_5yearmovingaverage, line=dict(color='rgb(51, 51, 255)', width=2), mode='lines', name='5-year moving average temp', type='scatter', uid='f5d9be')
trace3 = dict(x=SW_year, y=0.04642 * SW_year - 40.055, line=dict(color='rgb(0, 153, 0)', width=2), mode='lines', name='long-term linear trend', type='scatter', uid='f5d9be')
layout = go.Layout(title='Annual average temperature in Seattle (1948-2017)', xaxis={'title': 'Years'}, yaxis={'title': '°F'}, annotations=[dict(x=2006, y=49, showarrow=False, text='y = 0.04642x-40.05572<br>R<sup>2</sup> =0.641239', font={'size': 20})])
data = [trace1, trace2, trace3]
fig = dict(data=data, layout=layout)
iplot(fig) | code |
90102830/cell_5 | [
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_6.png",
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import gc
import glob
import multiprocessing
import numpy as np
import os
import random
import torch
from tqdm.auto import tqdm
import os
import sys
import random
import numpy as np
import pandas as pd
import glob
import gc
gc.enable()
from joblib import Parallel, delayed
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import LightningModule, LightningDataModule
from pytorch_lightning import Trainer
import multiprocessing
from transformers import AutoTokenizer, AutoModel, AutoConfig, AutoModelForTokenClassification
from transformers.models.deberta_v2.tokenization_deberta_v2_fast import DebertaV2TokenizerFast
def seed_everything(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    os.environ['PYTHONHASHSEED'] = str(seed)
target_id_map = {'O': 0, 'B-Lead': 1, 'I-Lead': 2, 'B-Position': 3, 'I-Position': 4, 'B-Claim': 5, 'I-Claim': 6, 'B-Counterclaim': 7, 'I-Counterclaim': 8, 'B-Rebuttal': 9, 'I-Rebuttal': 10, 'B-Evidence': 11, 'I-Evidence': 12, 'B-Concluding Statement': 13, 'I-Concluding Statement': 14, 'PAD': -100}
'\ntarget_id_map2 = {\n "B-Lead": 0,\n "I-Lead": 1,\n "B-Position": 2,\n "I-Position": 3,\n "B-Evidence": 4,\n "I-Evidence": 5,\n "B-Claim": 6,\n "I-Claim": 7,\n "B-Concluding Statement": 8,\n "I-Concluding Statement": 9,\n "B-Counterclaim": 10,\n "I-Counterclaim": 11,\n "B-Rebuttal": 12,\n "I-Rebuttal": 13,\n "O": 14,\n "PAD": -100,\n}\n'
length_threshold = {'Lead': 9, 'Position': 5, 'Claim': 3, 'Counterclaim': 6, 'Rebuttal': 4, 'Evidence': 14, 'Concluding Statement': 11}
probability_threshold = {'Lead': 0.6, 'Position': 0.6, 'Claim': 0.6, 'Counterclaim': 0.6, 'Rebuttal': 0.6, 'Evidence': 0.6, 'Concluding Statement': 0.6}
id_target_map = {v: k for k, v in target_id_map.items()}
seed_everything(2022)
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
def process(func):
    # run `func` in a child process and hand the result back through a queue,
    # so resources (e.g. GPU memory) are released when the child exits
    def worker(func, q):
        q.put(func())
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(func, q))
    p.start()
    out = q.get()  # read before join() to avoid deadlocking on a full pipe
    p.join()
    return out
DEBUG = False
if DEBUG:
    text_dir = '../input/feedback-prize-2021/train'
    valid_id = [f.split('/')[-1][:-4] for f in glob.glob(text_dir + '/*.txt')]
    valid_id = sorted(valid_id)[0:10000]
    num_valid = len(valid_id)
    print('len(valid_id)', len(valid_id))
else:
    text_dir = '../input/feedback-prize-2021/test'
    valid_id = [f.split('/')[-1][:-4] for f in glob.glob(text_dir + '/*.txt')]
    valid_id = sorted(valid_id)
    num_valid = len(valid_id)
    print('len(valid_id)', len(valid_id))
size = [os.path.getsize(text_dir + '/%s.txt' % id) for id in valid_id]
valid_id = [id for id, s in sorted(zip(valid_id, size), key=lambda pair: -pair[1])]
del size
gc.collect()
print('len(valid_id)', len(valid_id)) | code |
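A minimal usage sketch for the `process` helper defined above; the point of the wrapper is that whatever the callable allocates (e.g. CUDA memory for one model) is released when the child process exits:

```python
from functools import partial

def square(x):
    return x * x

print(process(partial(square, 7)))   # runs in a fresh child process -> 49
```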
33102990/cell_13 | [
"text_plain_output_1.png"
] | from glob import glob
from keras import layers
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import matplotlib.pyplot as plt
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
img = load_img(train_path + 'forged/f138.png')
x = img_to_array(img)
img = load_img(train_path + '/forged/f98.png')
x = img_to_array(img)
numberOfClass = len(glob(train_path + '/*'))
train_data = ImageDataGenerator().flow_from_directory(train_path, target_size=(224, 224), class_mode='binary')
test_data = ImageDataGenerator().flow_from_directory(test_path, target_size=(224, 224), class_mode='binary')
vgg = VGG16()
vgg_layer_list = vgg.layers
model = Sequential()
for i in range(len(vgg_layer_list) - 1):  # copy all VGG16 layers except the final 1000-way classifier
    model.add(vgg_layer_list[i])
for layer in model.layers:  # `layer`, not `layers`, avoids shadowing keras.layers
    layer.trainable = False
model.add(Dense(numberOfClass, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_data, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_data, validation_steps=800 / batch_size) | code |
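`fit_generator` is deprecated in modern Keras (TF 2.x), where `model.fit` accepts generators directly; an equivalent sketch with the same generators, using integer division for the step counts:

```python
history = model.fit(train_data,
                    steps_per_epoch=1600 // batch_size,
                    epochs=5,
                    validation_data=test_data,
                    validation_steps=800 // batch_size)
```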
33102990/cell_4 | [
"image_output_1.png"
] | from glob import glob
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import matplotlib.pyplot as plt
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
img = load_img(train_path + 'forged/f138.png')
plt.figure()
plt.imshow(img)
plt.show()
x = img_to_array(img)
img = load_img(train_path + '/forged/f98.png')
plt.figure()
plt.imshow(img)
plt.show()
x = img_to_array(img)
print(x.shape)
numberOfClass = len(glob(train_path + '/*')) | code |
33102990/cell_20 | [
"text_plain_output_1.png"
] | from glob import glob
from keras import layers
from keras.applications.resnet50 import preprocess_input
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import matplotlib.pyplot as plt
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
img = load_img(train_path + 'forged/f138.png')
x = img_to_array(img)
img = load_img(train_path + '/forged/f98.png')
x = img_to_array(img)
numberOfClass = len(glob(train_path + '/*'))
train_data = ImageDataGenerator().flow_from_directory(train_path, target_size=(224, 224), class_mode='binary')
test_data = ImageDataGenerator().flow_from_directory(test_path, target_size=(224, 224), class_mode='binary')
train_datagen = ImageDataGenerator(shear_range=10, zoom_range=0.2, horizontal_flip=True, preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(train_path, batch_size=32, class_mode='binary', target_size=(224, 224))
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_generator = test_datagen.flow_from_directory(test_path, shuffle=False, class_mode='binary', target_size=(224, 224))
vgg = VGG16()
vgg_layer_list = vgg.layers
model = Sequential()
for i in range(len(vgg_layer_list) - 1):
    model.add(vgg_layer_list[i])
for layer in model.layers:
    layer.trainable = False
model.add(Dense(numberOfClass, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_data, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_data, validation_steps=800 / batch_size)
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_generator, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_generator, validation_steps=800 / batch_size)
model.compile(loss='sparse_categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_data, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_data, validation_steps=800 / batch_size)
plt.figure(figsize=(30, 5))
plt.subplot(121)
plt.suptitle('Model: VGG16, Epochs: 5, Optimizer: Adam, Data augmentation: No')
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.subplot(122)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Adam_5_0_vgg')
plt.show() | code |
33102990/cell_6 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
train_data = ImageDataGenerator().flow_from_directory(train_path, target_size=(224, 224), class_mode='binary')
test_data = ImageDataGenerator().flow_from_directory(test_path, target_size=(224, 224), class_mode='binary') | code |
33102990/cell_11 | [
"text_plain_output_1.png"
] | from glob import glob
from keras import layers
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.utils import plot_model
import matplotlib.pyplot as plt
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
img = load_img(train_path + 'forged/f138.png')
x = img_to_array(img)
img = load_img(train_path + '/forged/f98.png')
x = img_to_array(img)
numberOfClass = len(glob(train_path + '/*'))
vgg = VGG16()
vgg_layer_list = vgg.layers
model = Sequential()
for i in range(len(vgg_layer_list) - 1):
    model.add(vgg_layer_list[i])
for layer in model.layers:
    layer.trainable = False
model.add(Dense(numberOfClass, activation='softmax'))
from keras.utils import plot_model
plot_model(model) | code |
33102990/cell_19 | [
"image_output_1.png"
] | from glob import glob
from keras import layers
from keras.applications.resnet50 import preprocess_input
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import matplotlib.pyplot as plt
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
img = load_img(train_path + 'forged/f138.png')
x = img_to_array(img)
img = load_img(train_path + '/forged/f98.png')
x = img_to_array(img)
numberOfClass = len(glob(train_path + '/*'))
train_data = ImageDataGenerator().flow_from_directory(train_path, target_size=(224, 224), class_mode='binary')
test_data = ImageDataGenerator().flow_from_directory(test_path, target_size=(224, 224), class_mode='binary')
train_datagen = ImageDataGenerator(shear_range=10, zoom_range=0.2, horizontal_flip=True, preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(train_path, batch_size=32, class_mode='binary', target_size=(224, 224))
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_generator = test_datagen.flow_from_directory(test_path, shuffle=False, class_mode='binary', target_size=(224, 224))
vgg = VGG16()
vgg_layer_list = vgg.layers
model = Sequential()
for i in range(len(vgg_layer_list) - 1):
    model.add(vgg_layer_list[i])
for layer in model.layers:
    layer.trainable = False
model.add(Dense(numberOfClass, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_data, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_data, validation_steps=800 / batch_size)
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_generator, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_generator, validation_steps=800 / batch_size)
model.compile(loss='sparse_categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_data, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_data, validation_steps=800 / batch_size) | code |
33102990/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import sys
import csv
import math
import json, codecs
import numpy as np
import pandas as pd
import cv2 as cv
import matplotlib.pyplot as plt
from zipfile import ZipFile
import shutil
from glob import glob
from PIL import Image
from PIL import ImageFilter
from sklearn.model_selection import train_test_split
import keras
from keras import layers
from keras.models import Model
from keras.layers import Input, Dense
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.models import Sequential
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications import ResNet50
from keras.applications.resnet50 import preprocess_input
import torch
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
33102990/cell_8 | [
"image_output_1.png"
] | from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
train_datagen = ImageDataGenerator(shear_range=10, zoom_range=0.2, horizontal_flip=True, preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(train_path, batch_size=32, class_mode='binary', target_size=(224, 224))
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_generator = test_datagen.flow_from_directory(test_path, shuffle=False, class_mode='binary', target_size=(224, 224)) | code |
33102990/cell_16 | [
"image_output_1.png"
] | from glob import glob
from keras import layers
from keras.applications.resnet50 import preprocess_input
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import matplotlib.pyplot as plt
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
img = load_img(train_path + 'forged/f138.png')
x = img_to_array(img)
img = load_img(train_path + '/forged/f98.png')
x = img_to_array(img)
numberOfClass = len(glob(train_path + '/*'))
train_data = ImageDataGenerator().flow_from_directory(train_path, target_size=(224, 224), class_mode='binary')
test_data = ImageDataGenerator().flow_from_directory(test_path, target_size=(224, 224), class_mode='binary')
train_datagen = ImageDataGenerator(shear_range=10, zoom_range=0.2, horizontal_flip=True, preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(train_path, batch_size=32, class_mode='binary', target_size=(224, 224))
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_generator = test_datagen.flow_from_directory(test_path, shuffle=False, class_mode='binary', target_size=(224, 224))
vgg = VGG16()
vgg_layer_list = vgg.layers
model = Sequential()
for i in range(len(vgg_layer_list) - 1):
    model.add(vgg_layer_list[i])
for layer in model.layers:
    layer.trainable = False
model.add(Dense(numberOfClass, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_data, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_data, validation_steps=800 / batch_size)
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_generator, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_generator, validation_steps=800 / batch_size) | code |
33102990/cell_17 | [
"text_plain_output_1.png"
] | from glob import glob
from keras import layers
from keras.applications.resnet50 import preprocess_input
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import matplotlib.pyplot as plt
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
img = load_img(train_path + 'forged/f138.png')
x = img_to_array(img)
img = load_img(train_path + '/forged/f98.png')
x = img_to_array(img)
numberOfClass = len(glob(train_path + '/*'))
train_data = ImageDataGenerator().flow_from_directory(train_path, target_size=(224, 224), class_mode='binary')
test_data = ImageDataGenerator().flow_from_directory(test_path, target_size=(224, 224), class_mode='binary')
train_datagen = ImageDataGenerator(shear_range=10, zoom_range=0.2, horizontal_flip=True, preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(train_path, batch_size=32, class_mode='binary', target_size=(224, 224))
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_generator = test_datagen.flow_from_directory(test_path, shuffle=False, class_mode='binary', target_size=(224, 224))
vgg = VGG16()
vgg_layer_list = vgg.layers
model = Sequential()
for i in range(len(vgg_layer_list) - 1):
    model.add(vgg_layer_list[i])
for layer in model.layers:
    layer.trainable = False
model.add(Dense(numberOfClass, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_data, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_data, validation_steps=800 / batch_size)
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_generator, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_generator, validation_steps=800 / batch_size)
plt.figure(figsize=(30, 5))
plt.subplot(121)
plt.suptitle('Model: VGG16, Epochs: 5, Optimizer: SGD, Data augmentation: Yes')
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.subplot(122)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('SGD_5_1_vgg')
plt.show() | code |
33102990/cell_14 | [
"text_plain_output_1.png"
] | from glob import glob
from keras import layers
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import matplotlib.pyplot as plt
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
img = load_img(train_path + 'forged/f138.png')
x = img_to_array(img)
img = load_img(train_path + '/forged/f98.png')
x = img_to_array(img)
numberOfClass = len(glob(train_path + '/*'))
train_data = ImageDataGenerator().flow_from_directory(train_path, target_size=(224, 224), class_mode='binary')
test_data = ImageDataGenerator().flow_from_directory(test_path, target_size=(224, 224), class_mode='binary')
vgg = VGG16()
vgg_layer_list = vgg.layers
model = Sequential()
for i in range(len(vgg_layer_list) - 1):
    model.add(vgg_layer_list[i])
for layer in model.layers:
    layer.trainable = False
model.add(Dense(numberOfClass, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_data, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_data, validation_steps=800 / batch_size)
plt.figure(figsize=(30, 5))
plt.subplot(121)
plt.suptitle('Model: VGG16, Epochs: 5, Optimizer: SGD, Data augmentation: No')
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.subplot(122)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('SGD_5_0_vgg')
plt.show() | code |
33102990/cell_22 | [
"image_output_1.png"
] | from glob import glob
from keras import layers
from keras.applications.resnet50 import preprocess_input
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import matplotlib.pyplot as plt
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
img = load_img(train_path + 'forged/f138.png')
x = img_to_array(img)
img = load_img(train_path + '/forged/f98.png')
x = img_to_array(img)
numberOfClass = len(glob(train_path + '/*'))
train_data = ImageDataGenerator().flow_from_directory(train_path, target_size=(224, 224), class_mode='binary')
test_data = ImageDataGenerator().flow_from_directory(test_path, target_size=(224, 224), class_mode='binary')
train_datagen = ImageDataGenerator(shear_range=10, zoom_range=0.2, horizontal_flip=True, preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(train_path, batch_size=32, class_mode='binary', target_size=(224, 224))
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_generator = test_datagen.flow_from_directory(test_path, shuffle=False, class_mode='binary', target_size=(224, 224))
vgg = VGG16()
vgg_layer_list = vgg.layers
model = Sequential()
for i in range(len(vgg_layer_list) - 1):
    model.add(vgg_layer_list[i])
for layer in model.layers:
    layer.trainable = False
model.add(Dense(numberOfClass, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_data, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_data, validation_steps=800 / batch_size)
model.compile(loss='sparse_categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_generator, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_generator, validation_steps=800 / batch_size)
model.compile(loss='sparse_categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
batch_size = 32
history = model.fit_generator(train_data, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_data, validation_steps=800 / batch_size)
model.compile(loss='sparse_categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
history = model.fit_generator(train_generator, steps_per_epoch=1600 / batch_size, epochs=5, validation_data=test_generator, validation_steps=800 / batch_size) | code |
33102990/cell_10 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | from glob import glob
from keras import layers
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import matplotlib.pyplot as plt
train_path = '/kaggle/input/signature/signature/Train/'
test_path = '/kaggle/input/signature/signature/Test/'
img = load_img(train_path + 'forged/f138.png')
x = img_to_array(img)
img = load_img(train_path + '/forged/f98.png')
x = img_to_array(img)
numberOfClass = len(glob(train_path + '/*'))
vgg = VGG16()
vgg_layer_list = vgg.layers
model = Sequential()
for i in range(len(vgg_layer_list) - 1):
model.add(vgg_layer_list[i])
for layers in model.layers:
layers.trainable = False
model.add(Dense(numberOfClass, activation='softmax'))
print(model.summary()) | code |
32071593/cell_13 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_file = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
(train_data.shape, test_data.shape, submission_file.shape)
train_data_covid = train_data.copy()
test_data_covid = test_data.copy()
test_data_covid = test_data_covid.fillna('NA')
train_data_covid = train_data_covid.fillna('NA')
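# per (country, province): take the latest cumulative count (max over dates), then sum provinces within each country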
train_series_cc = train_data_covid.fillna('NA').groupby(['Country_Region', 'Province_State', 'Date'])['ConfirmedCases'].sum().groupby(['Country_Region', 'Province_State']).max().sort_values().groupby('Country_Region').sum().sort_values(ascending=False)
train_series_fatal = train_data_covid.fillna('NA').groupby(['Country_Region', 'Province_State', 'Date'])['Fatalities'].sum().groupby(['Country_Region', 'Province_State']).max().sort_values().groupby('Country_Region').sum().sort_values(ascending=False)
train_series_date = train_data_covid.groupby(['Date'])[['ConfirmedCases']].sum().sort_values('ConfirmedCases')
display(train_series_date.head())
train_series_date_fata = train_data_covid.groupby(['Date'])[['Fatalities']].sum().sort_values('Fatalities')
display(train_series_date_fata.head()) | code |
32071593/cell_6 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_file = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
(train_data.shape, test_data.shape, submission_file.shape) | code |
32071593/cell_11 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_file = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
(train_data.shape, test_data.shape, submission_file.shape)
train_data_covid = train_data.copy()
test_data_covid = test_data.copy()
test_data_covid = test_data_covid.fillna('NA')
train_data_covid = train_data_covid.fillna('NA')
train_series_cc = train_data_covid.fillna('NA').groupby(['Country_Region', 'Province_State', 'Date'])['ConfirmedCases'].sum().groupby(['Country_Region', 'Province_State']).max().sort_values().groupby('Country_Region').sum().sort_values(ascending=False)
train_series_fatal = train_data_covid.fillna('NA').groupby(['Country_Region', 'Province_State', 'Date'])['Fatalities'].sum().groupby(['Country_Region', 'Province_State']).max().sort_values().groupby('Country_Region').sum().sort_values(ascending=False)
train_large10_cc = pd.DataFrame(train_series_cc).head(10)
display(train_large10_cc.head())
train_large10_fatal = pd.DataFrame(train_series_fatal).head(10)
display(train_large10_fatal.head())
print('Total number of people infected by coronavirus in the world from', min(train_data['Date']), 'to', max(train_data['Date']), 'is:', int(sum(train_series_cc)))
print('Total number of people deceased by coronavirus in the world from', min(train_data['Date']), 'to', max(train_data['Date']), 'is:', int(sum(train_series_fatal)))
32071593/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32071593/cell_7 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_file = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
(train_data.shape, test_data.shape, submission_file.shape)
display(test_data.head())
print('Test data are from', test_data['Date'].min(), 'to', test_data['Date'].max())
print('Number of days', pd.date_range(test_data['Date'].min(), test_data['Date'].max()).shape[0]) | code |
32071593/cell_8 | [
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_file = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
(train_data.shape, test_data.shape, submission_file.shape)
print(train_data.isna().any().any(), test_data.isna().any().any())
display(train_data.isna().any())
display(test_data.isna().any()) | code |
32071593/cell_14 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_file = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
(train_data.shape, test_data.shape, submission_file.shape)
train_data_covid = train_data.copy()
test_data_covid = test_data.copy()
test_data_covid = test_data_covid.fillna('NA')
train_data_covid = train_data_covid.fillna('NA')
train_series_cc = train_data_covid.fillna('NA').groupby(['Country_Region', 'Province_State', 'Date'])['ConfirmedCases'].sum().groupby(['Country_Region', 'Province_State']).max().sort_values().groupby('Country_Region').sum().sort_values(ascending=False)
train_series_fatal = train_data_covid.fillna('NA').groupby(['Country_Region', 'Province_State', 'Date'])['Fatalities'].sum().groupby(['Country_Region', 'Province_State']).max().sort_values().groupby('Country_Region').sum().sort_values(ascending=False)
train_large10_cc = pd.DataFrame(train_series_cc).head(10)
display(train_large10_cc.head())
train_large10_fatal = pd.DataFrame(train_series_fatal).head(10)
display(train_large10_fatal.head())
print('Total number of people infected by coronavirus in the world from', min(train_data['Date']), 'to', max(train_data['Date']), 'is:', int(sum(train_series_cc)))
print('Total number of people deceased by coronavirus in the world from', min(train_data['Date']), 'to', max(train_data['Date']), 'is:', int(sum(train_series_fatal)))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 8))
fig.suptitle('Number of Confirmed Cases and Fatalities in the World', fontsize=30)
# left plot: top-10 countries by confirmed cases
ax1.bar(train_large10_cc.index, train_large10_cc['ConfirmedCases'], color='purple')
ax1.set(xlabel='Countries', ylabel='Number of ConfirmedCases')
ax1.legend(['ConfirmedCases'])
ax1.grid()
# right plot: top-10 countries by fatalities
ax2.bar(train_large10_fatal.index, train_large10_fatal['Fatalities'], color='orange')
ax2.set(xlabel='Countries', ylabel='Number of Fatalities')
ax2.legend(['Fatalities'])
ax2.grid()
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = '20'
plt.show()
train_series_date = train_data_covid.groupby(['Date'])[['ConfirmedCases']].sum().sort_values('ConfirmedCases')
train_series_date_fata = train_data_covid.groupby(['Date'])[['Fatalities']].sum().sort_values('Fatalities')
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 8))
fig.suptitle('Trends of Confirmed Cases and Fatalities in the World', fontsize=30)
ax1.plot(train_series_date.index, train_series_date['ConfirmedCases'], color='purple', marker='o', linewidth=2)
ax1.set(xlabel='Date', ylabel='ConfirmedCases')
ax1.set_xticks(np.arange(0, 80, step=12))
ax1.legend(['ConfirmedCases'])
ax1.grid()
ax2.plot(train_series_date_fata.index, train_series_date_fata['Fatalities'], color='orange', marker='o', linewidth=2)
ax2.set(xlabel='Date', ylabel='Fatalities')
ax2.set_xticks(np.arange(0, 80, step=12))
ax2.legend(['Fatalities'])
ax2.grid()
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = '16'
plt.show() | code |
32071593/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_file = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
(train_data.shape, test_data.shape, submission_file.shape)
train_data_covid = train_data.copy()
test_data_covid = test_data.copy()
test_data_covid = test_data_covid.fillna('NA')
train_data_covid = train_data_covid.fillna('NA')
train_series_cc = train_data_covid.fillna('NA').groupby(['Country_Region', 'Province_State', 'Date'])['ConfirmedCases'].sum().groupby(['Country_Region', 'Province_State']).max().sort_values().groupby('Country_Region').sum().sort_values(ascending=False)
train_series_fatal = train_data_covid.fillna('NA').groupby(['Country_Region', 'Province_State', 'Date'])['Fatalities'].sum().groupby(['Country_Region', 'Province_State']).max().sort_values().groupby('Country_Region').sum().sort_values(ascending=False)
train_large10_cc = pd.DataFrame(train_series_cc).head(10)
display(train_large10_cc.head())
train_large10_fatal = pd.DataFrame(train_series_fatal).head(10)
display(train_large10_fatal.head())
print('Total number of people infected by coronavirus in the world from', min(train_data['Date']), 'to', max(train_data['Date']), 'is:', int(sum(train_series_cc)))
print('Total number of people deceased by coronavirus in the world from', min(train_data['Date']), 'to', max(train_data['Date']), 'is:', int(sum(train_series_fatal)))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 8))
fig.suptitle('Number of Confirmed Cases and Fatalities in the World', fontsize=30)
ax1.bar(train_large10_cc.index, train_large10_cc['ConfirmedCases'], color='purple')
ax1.set(xlabel='Countries', ylabel='Number of ConfirmedCases')
ax1.legend(['ConfirmedCases'])
ax1.grid()
ax2.bar(train_large10_fatal.index, train_large10_fatal['Fatalities'], color='orange')
ax2.set(xlabel='Countries', ylabel='Number of Fatalities')
ax2.legend(['Fatalities'])
ax2.grid()
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = '20'
plt.show() | code |
32071593/cell_5 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_file = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
display(train_data.head())
display(train_data.describe())
print('Number of countries:', train_data['Country_Region'].nunique())
print('Training data are from', min(train_data['Date']), 'to', max(train_data['Date']))
print('Total number of days: ', train_data['Date'].nunique()) | code |
2036553/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import keras
from keras import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
df_train = pd.read_csv('../input/fashion-mnist_train.csv')
df_test = pd.read_csv('../input/fashion-mnist_test.csv')
train_data = np.array(df_train, dtype='float32')
test_data = np.array(df_test, dtype='float32')
X_train = train_data[:, 1:] / 255
y_train = train_data[:, 0]
X_test = test_data[:, 1:] / 255
y_test = test_data[:, 0]
im_rows = 28
im_cols = 28
batch_size = 512
im_shape = (im_rows, im_cols, 1)
# X_validate/y_validate were never created in this record; reproduce the split from the sibling cell_3 record
X_train, X_validate, y_train, y_validate = train_test_split(X_train, y_train, test_size=0.2, random_state=42)
X_train = X_train.reshape(X_train.shape[0], *im_shape)
X_test = X_test.reshape(X_test.shape[0], *im_shape)
X_validate = X_validate.reshape(X_validate.shape[0], *im_shape)
print(X_train.shape)
print(X_test.shape) | code |
2036553/cell_6 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from keras import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.optimizers import Adam
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import keras
from keras import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
df_train = pd.read_csv('../input/fashion-mnist_train.csv')
df_test = pd.read_csv('../input/fashion-mnist_test.csv')
train_data = np.array(df_train, dtype='float32')
test_data = np.array(df_test, dtype='float32')
X_train = train_data[:, 1:] / 255
y_train = train_data[:, 0]
X_test = test_data[:, 1:] / 255
y_test = test_data[:, 0]
im_rows = 28
im_cols = 28
batch_size = 512
im_shape = (im_rows, im_cols, 1)
# X_validate/y_validate were never created in this record; reproduce the split from the sibling cell_3 record
X_train, X_validate, y_train, y_validate = train_test_split(X_train, y_train, test_size=0.2, random_state=42)
X_train = X_train.reshape(X_train.shape[0], *im_shape)
X_test = X_test.reshape(X_test.shape[0], *im_shape)
X_validate = X_validate.reshape(X_validate.shape[0], *im_shape)
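# compact CNN: one conv/pool block with dropout, then a dense head ending in a 10-way softmax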
cnn = Sequential([Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=im_shape), MaxPooling2D(pool_size=2), Dropout(0.2), Flatten(), Dense(32, activation='relu'), Dense(10, activation='softmax')])
cnn.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])
cnn.fit(X_train, y_train, batch_size=batch_size, epochs=10, verbose=1, validation_data=(X_validate, y_validate)) | code |
2036553/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import keras
from keras import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
df_train = pd.read_csv('../input/fashion-mnist_train.csv')
df_test = pd.read_csv('../input/fashion-mnist_test.csv')
print(df_train.head()) | code |
2036553/cell_3 | [
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import keras
from keras import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
df_train = pd.read_csv('../input/fashion-mnist_train.csv')
df_test = pd.read_csv('../input/fashion-mnist_test.csv')
train_data = np.array(df_train, dtype='float32')
test_data = np.array(df_test, dtype='float32')
X_train = train_data[:, 1:] / 255
y_train = train_data[:, 0]
X_test = test_data[:, 1:] / 255
y_test = test_data[:, 0]
X_train, X_validate, y_train, y_validate = train_test_split(X_train, y_train, test_size=0.2, random_state=42)
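# reshape one flat 784-pixel row back to 28x28 for display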
image = X_train[100, :].reshape((28, 28))
plt.imshow(image)
plt.show() | code |
128002832/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
df.isna().sum().sum()
def plot_features(df):
fig, axs = plt.subplots(figsize=(16, 10), ncols=3, nrows=6)
for count, col in enumerate(df.columns):
sns.boxplot(data=df, x=col, ax=axs[count // 3][count % 3])
plt.tight_layout()
plt.show()
plot_features(df)
def plot_corr(df):
plt.figure(figsize=(16, 10))
corr = df.drop(columns=['id', 'yield']).corr()
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(corr,cmap=cmap, center=0, annot=True,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.title('Correlation matrix of features')
plt.show()
return corr
corr = plot_corr(df)
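# keep only the upper triangle so each feature pair is examined once when hunting for |r| > 0.9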
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))  # builtin bool: np.bool was removed from modern NumPy
to_drop = [column for column in upper.columns if any(upper[column] > 0.9)]
df.drop(to_drop, axis=1, inplace=True)
corr = plot_corr(df) | code |
128002832/cell_9 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
df.isna().sum().sum()
def plot_features(df):
fig, axs = plt.subplots(figsize=(16, 10), ncols=3, nrows=6)
for count, col in enumerate(df.columns):
sns.boxplot(data=df, x=col, ax=axs[count // 3][count % 3])
plt.tight_layout()
plt.show()
plot_features(df)
def plot_corr(df):
plt.figure(figsize=(16, 10))
corr = df.drop(columns=['id', 'yield']).corr()
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(corr, cmap=cmap, center=0, annot=True, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5})
plt.title('Correlation matrix of features')
plt.show()
return corr
corr = plot_corr(df) | code |
128002832/cell_4 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
df.head(2) | code |
128002832/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
df.isna().sum().sum()
def plot_features(df):
fig, axs = plt.subplots(figsize=(16, 10), ncols=3, nrows=6)
for count, col in enumerate(df.columns):
sns.boxplot(data=df, x=col, ax=axs[count // 3][count % 3])
plt.tight_layout()
plt.show()
plot_features(df) | code |
128002832/cell_19 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
df.isna().sum().sum()
def plot_features(df):
fig, axs = plt.subplots(figsize=(16, 10), ncols=3, nrows=6)
for count, col in enumerate(df.columns):
sns.boxplot(data=df, x=col, ax=axs[count // 3][count % 3])
plt.tight_layout()
plt.show()
plot_features(df)
def plot_corr(df):
plt.figure(figsize=(16, 10))
corr = df.drop(columns=['id', 'yield']).corr()
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(corr,cmap=cmap, center=0, annot=True,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.title('Correlation matrix of features')
plt.show()
return corr
corr = plot_corr(df)
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
to_drop = [column for column in upper.columns if any(upper[column] > 0.9)]
df.drop(to_drop, axis=1, inplace=True)
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# X_train/X_test/y_train/y_test come from an earlier cell this record does not capture;
# reconstructed from the sibling cell_15 record (split parameters assumed)
X = df.drop(columns=['id', 'yield'])
y = df['yield']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
model = LinearRegression()
X_train = np.concatenate((X_train, X_test))
y_train = np.concatenate((y_train, y_test))
model.fit(X_train, y_train) | code |
128002832/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns | code |
128002832/cell_18 | [
"image_output_1.png"
] | import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# X_train/X_test/y_train/y_test come from an earlier cell this record does not capture;
# reconstructed from the sibling cell_15 record (split parameters assumed)
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
X = df.drop(columns=['id', 'yield'])
y = df['yield']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print('mean_absolute_error : ', mean_absolute_error(y_test, predictions)) | code |
128002832/cell_8 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
df.isna().sum().sum()
def plot_features(df):
fig, axs = plt.subplots(figsize=(16, 10), ncols=3, nrows=6)
for count, col in enumerate(df.columns):
sns.boxplot(data=df, x=col, ax=axs[count // 3][count % 3])
plt.tight_layout()
plt.show()
plot_features(df)
sns.displot(df, x='yield')
plt.title('Distribution of target variable')
plt.show() | code |
128002832/cell_15 | [
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
df.isna().sum().sum()
def plot_features(df):
fig, axs = plt.subplots(figsize=(16, 10), ncols=3, nrows=6)
for count, col in enumerate(df.columns):
sns.boxplot(data=df, x=col, ax=axs[count // 3][count % 3])
plt.tight_layout()
plt.show()
plot_features(df)
def plot_corr(df):
plt.figure(figsize=(16, 10))
corr = df.drop(columns=['id', 'yield']).corr()
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(corr,cmap=cmap, center=0, annot=True,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.title('Correlation matrix of features')
plt.show()
return corr
corr = plot_corr(df)
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
to_drop = [column for column in upper.columns if any(upper[column] > 0.9)]
df.drop(to_drop, axis=1, inplace=True)
X = df.drop(columns=['id', 'yield'])
y = df['yield']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
(len(X_train), len(X_test)) | code |
128002832/cell_24 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
df.isna().sum().sum()
def plot_features(df):
fig, axs = plt.subplots(figsize=(16, 10), ncols=3, nrows=6)
for count, col in enumerate(df.columns):
sns.boxplot(data=df, x=col, ax=axs[count // 3][count % 3])
plt.tight_layout()
plt.show()
plot_features(df)
def plot_corr(df):
plt.figure(figsize=(16, 10))
corr = df.drop(columns=['id', 'yield']).corr()
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(corr,cmap=cmap, center=0, annot=True,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.title('Correlation matrix of features')
plt.show()
return corr
corr = plot_corr(df)
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
to_drop = [column for column in upper.columns if any(upper[column] > 0.9)]
df.drop(to_drop, axis=1, inplace=True)
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# X_train/X_test/y_train/y_test come from an earlier cell this record does not capture;
# reconstructed from the sibling cell_15 record (split parameters assumed)
X = df.drop(columns=['id', 'yield'])
y = df['yield']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
model = LinearRegression()
X_train = np.concatenate((X_train, X_test))
y_train = np.concatenate((y_train, y_test))
model.fit(X_train, y_train)
df_test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
to_drop.append('id')
df_test.drop(to_drop, axis=1, inplace=True)
df_test = scaler.transform(df_test)
final_predictions = model.predict(df_test)
df_submission = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
df_submission['yield'] = final_predictions
df_submission | code |
128002832/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
df.isna().sum().sum()
def plot_features(df):
fig, axs = plt.subplots(figsize=(16, 10), ncols=3, nrows=6)
for count, col in enumerate(df.columns):
sns.boxplot(data=df, x=col, ax=axs[count // 3][count % 3])
plt.tight_layout()
plt.show()
plot_features(df)
def plot_corr(df):
plt.figure(figsize=(16, 10))
corr = df.drop(columns=['id', 'yield']).corr()
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(corr,cmap=cmap, center=0, annot=True,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.title('Correlation matrix of features')
plt.show()
return corr
corr = plot_corr(df)
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
to_drop = [column for column in upper.columns if any(upper[column] > 0.9)]
df.drop(to_drop, axis=1, inplace=True) | code |
128002832/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
df.isna().sum().sum() | code |
90134529/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from bq_helper import BigQueryHelper
from datetime import datetime
from google.cloud import bigquery
import numpy as np
import pandas as pd
from google.cloud import bigquery
from bq_helper import BigQueryHelper
client = bigquery.Client()
query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n '
bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain')
df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000)
original = df.copy()
from datetime import datetime
df = original.copy()
df = df.sort_values(by=['timestamp'], ascending=True)
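# raw block timestamps are stored in milliseconds; convert to seconds before datetime.fromtimestamp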
ts_col = df['timestamp'].div(1000.0)
df['timestamp'] = ts_col.apply(datetime.fromtimestamp)
print(df.describe())
summary = df.diff().describe()
print(summary)
df.diff().plot(kind='line')
maxidx = df.idxmax()
print(df['timestamp'][maxidx['timestamp']]) | code |
90134529/cell_6 | [
"text_plain_output_1.png"
] | from bq_helper import BigQueryHelper
from datetime import datetime
from datetime import datetime, timedelta
from google.cloud import bigquery
from scipy.stats import norm
import numpy as np
import pandas as pd
from google.cloud import bigquery
from bq_helper import BigQueryHelper
client = bigquery.Client()
query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n '
bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain')
df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000)
original = df.copy()
from datetime import datetime
df = original.copy()
df = df.sort_values(by=['timestamp'], ascending=True)
ts_col = df['timestamp'].div(1000.0)
df['timestamp'] = ts_col.apply(datetime.fromtimestamp)
print(df.describe())
summary = df.diff().describe()
print(summary)
maxidx = df.idxmax()
from scipy.stats import norm
from datetime import datetime, timedelta
import numpy as np
df = df.diff()
df = df.dropna()
print(df.head())
print(df.describe())
print(df.dtypes)
# convert timedelta type to a float (seconds)
print(df['timestamp'][2].total_seconds())
df['timestamp'] = df['timestamp'].apply(lambda x: x.total_seconds())
float_summary = df.describe()
print(float_summary)
time_threshold = timedelta(hours=2).total_seconds()
print(float_summary["timestamp"][1]) # mean
print(float_summary["timestamp"][2]) # std
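# approximate the gap distribution as normal and estimate the tail probability beyond the 2-hour threshold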
df_cdf = norm.cdf(time_threshold, float_summary['timestamp'][1], float_summary['timestamp'][2])
print(1 - df_cdf)
print((1 - df_cdf) * df.shape[0])
print(df.shape[0])
print(len(df[df['timestamp'] > time_threshold]))
print(df.timestamp.quantile(0.99))
print(df.timestamp.quantile(0.1)) | code |
90134529/cell_1 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_html_output_1.png",
"text_plain_output_1.png"
] | from bq_helper import BigQueryHelper
from google.cloud import bigquery
import numpy as np
import pandas as pd
from google.cloud import bigquery
from bq_helper import BigQueryHelper
client = bigquery.Client()
query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n '
bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain')
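# query_to_pandas_safe dry-runs the query first and only executes it if the scan stays under max_gb_scanned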
df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000)
print('Size of dataframe: {} Bytes'.format(int(df.memory_usage(index=True, deep=True).sum())))
df.head(10) | code |
90134529/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from bq_helper import BigQueryHelper
from datetime import datetime
from google.cloud import bigquery
import numpy as np
import pandas as pd
from google.cloud import bigquery
from bq_helper import BigQueryHelper
client = bigquery.Client()
query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n '
bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain')
df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000)
original = df.copy()
from datetime import datetime
df = original.copy()
df = df.sort_values(by=['timestamp'], ascending=True)
ts_col = df['timestamp'].div(1000.0)
df['timestamp'] = ts_col.apply(datetime.fromtimestamp)
print(df.describe())
summary = df.diff().describe()
print(summary) | code |
90134529/cell_5 | [
"text_plain_output_1.png"
] | from bq_helper import BigQueryHelper
from datetime import datetime
from datetime import datetime, timedelta
from google.cloud import bigquery
import numpy as np
import pandas as pd
from google.cloud import bigquery
from bq_helper import BigQueryHelper
client = bigquery.Client()
query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n '
bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain')
df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000)
original = df.copy()
from datetime import datetime
df = original.copy()
df = df.sort_values(by=['timestamp'], ascending=True)
ts_col = df['timestamp'].div(1000.0)
df['timestamp'] = ts_col.apply(datetime.fromtimestamp)
print(df.describe())
summary = df.diff().describe()
print(summary)
maxidx = df.idxmax()
from scipy.stats import norm
from datetime import datetime, timedelta
import numpy as np
df = df.diff()
df = df.dropna()
print(df.head())
print(df.describe())
print(df.dtypes)
print(df['timestamp'][2].total_seconds())
df['timestamp'] = df['timestamp'].apply(lambda x: x.total_seconds())
float_summary = df.describe()
print(float_summary)
time_threshold = timedelta(hours=2).total_seconds()
print(float_summary['timestamp'][1])
print(float_summary['timestamp'][2]) | code |
2002221/cell_4 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
medals = pd.read_csv('../input/.csv')
print(medals.info())
medals.head() | code |
2002221/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
medals = pd.read_csv('../input/.csv')
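# one row per medal awarded, so counting NOC occurrences gives per-country medal totals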
medal_counts = medals['NOC'].value_counts()
print('The total medals: %d' % medal_counts.sum())
print('\nTop 15 countries:\n', medal_counts.head(15)) | code |
2019264/cell_21 | [
"image_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split,cross_val_score, cross_val_predict
import missingno as msno # plotting missing data
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X = X.fillna(X.median())
# the original notebook created the train/test split in a cell this record does not
# capture; the split below is reconstructed and its parameters are assumed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
y_pred_lr = lin_reg.predict(X_test)
accuracy_lf = metrics.r2_score(y_test, y_pred_lr)
print('Mutiple Linear Regression Accuracy: ', accuracy_lf)
y_pred_kf_lr = cross_val_predict(lin_reg, X, y, cv=10)
accuracy_lf = metrics.r2_score(y, y_pred_kf_lr)
print('Cross-Predicted(KFold) Mutiple Linear Regression Accuracy: ', accuracy_lf) | code |
2019264/cell_13 | [
"text_plain_output_1.png"
] | import missingno as msno # plotting missing data
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
y[0:5] | code |
2019264/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns) | code |
2019264/cell_4 | [
"text_plain_output_1.png"
] | dataset.hist(bins=50, figsize=(20, 20))
plt.show() | code |
2019264/cell_30 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split,cross_val_score, cross_val_predict
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
import missingno as msno # plotting missing data
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X = X.fillna(X.median())
# the original notebook created the train/test split in a cell this record does not
# capture; the split below is reconstructed and its parameters are assumed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
y_pred_lr = lin_reg.predict(X_test)
accuracy_lf = metrics.r2_score(y_test, y_pred_lr)
y_pred_kf_lr = cross_val_predict(lin_reg, X, y, cv=10)
accuracy_lf = metrics.r2_score(y, y_pred_kf_lr)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X)
lin_reg_pl = LinearRegression()
y_pred_pl = cross_val_predict(lin_reg_pl, X_poly, y, cv=10)
accuracy_pl = metrics.r2_score(y, y_pred_pl)
dt_regressor = DecisionTreeRegressor(random_state=0)
dt_regressor.fit(X_train, y_train)
y_pred_dt = dt_regressor.predict(X_test)
y_pred_dt = cross_val_predict(dt_regressor, X, y, cv=10)
accuracy_dt = metrics.r2_score(y, y_pred_dt)
rf_regressor = RandomForestRegressor(n_estimators=300, random_state=0)
rf_regressor.fit(X_train, y_train)
y_pred_rf = rf_regressor.predict(X_test)
print('Random Forest Regression Accuracy: ', rf_regressor.score(X_test, y_test))
y_pred_rf = cross_val_predict(rf_regressor, X, y, cv=10)
accuracy_rf = metrics.r2_score(y, y_pred_rf)
print('Cross-Predicted(KFold) Random Forest Regression Accuracy: ', accuracy_rf) | code |
2019264/cell_33 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split,cross_val_score, cross_val_predict
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt # plotting library
import missingno as msno # plotting missing data
import numpy as np # linear algebra
import pandas as pd # data processing
import seaborn as sns # plotting library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X = X.fillna(X.median())
# the original notebook created the train/test split in a cell this record does not
# capture; the split below is reconstructed and its parameters are assumed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
y_pred_lr = lin_reg.predict(X_test)
accuracy_lf = metrics.r2_score(y_test, y_pred_lr)
y_pred_kf_lr = cross_val_predict(lin_reg, X, y, cv=10)
accuracy_lf = metrics.r2_score(y, y_pred_kf_lr)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X)
lin_reg_pl = LinearRegression()
y_pred_pl = cross_val_predict(lin_reg_pl, X_poly, y, cv=10)
accuracy_pl = metrics.r2_score(y, y_pred_pl)
dt_regressor = DecisionTreeRegressor(random_state=0)
dt_regressor.fit(X_train, y_train)
y_pred_dt = dt_regressor.predict(X_test)
y_pred_dt = cross_val_predict(dt_regressor, X, y, cv=10)
accuracy_dt = metrics.r2_score(y, y_pred_dt)
rf_regressor = RandomForestRegressor(n_estimators=300, random_state=0)
rf_regressor.fit(X_train, y_train)
y_pred_rf = rf_regressor.predict(X_test)
y_pred_rf = cross_val_predict(rf_regressor, X, y, cv=10)
accuracy_rf = metrics.r2_score(y, y_pred_rf)
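# negate the importances so argsort returns feature indices in descending-importance order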
ranking = np.argsort(-rf_regressor.feature_importances_)
f, ax = plt.subplots(figsize=(15, 100))
sns.barplot(x=rf_regressor.feature_importances_[ranking], y=X_train.columns.values[ranking], orient='h')
ax.set_xlabel('feature importance')
plt.tight_layout()
plt.show() | code |
2019264/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum() | code |
2019264/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.head() | code |
2019264/cell_11 | [
"text_html_output_1.png"
] | import missingno as msno # plotting missing data
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0)) | code |
2019264/cell_7 | [
"image_output_1.png"
] | import missingno as msno # plotting missing data
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0)) | code |
2019264/cell_24 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split,cross_val_score, cross_val_predict
from sklearn.preprocessing import PolynomialFeatures
import missingno as msno # plotting missing data
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X = X.fillna(X.median())
# the original notebook created the train/test split in a cell this record does not
# capture; the split below is reconstructed and its parameters are assumed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
y_pred_lr = lin_reg.predict(X_test)
accuracy_lf = metrics.r2_score(y_test, y_pred_lr)
y_pred_kf_lr = cross_val_predict(lin_reg, X, y, cv=10)
accuracy_lf = metrics.r2_score(y, y_pred_kf_lr)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X)
lin_reg_pl = LinearRegression()
y_pred_pl = cross_val_predict(lin_reg_pl, X_poly, y, cv=10)
accuracy_pl = metrics.r2_score(y, y_pred_pl)
print('Cross-Predicted(KFold) Polynominal Regression Accuracy: ', accuracy_pl) | code |
2019264/cell_14 | [
"image_output_1.png"
] | import missingno as msno # plotting missing data
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X.head() | code |
2019264/cell_27 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split,cross_val_score, cross_val_predict
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
import missingno as msno # plotting missing data
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X = X.fillna(X.median())
# the original notebook created the train/test split in a cell this record does not
# capture; the split below is reconstructed and its parameters are assumed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
y_pred_lr = lin_reg.predict(X_test)
accuracy_lf = metrics.r2_score(y_test, y_pred_lr)
y_pred_kf_lr = cross_val_predict(lin_reg, X, y, cv=10)
accuracy_lf = metrics.r2_score(y, y_pred_kf_lr)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X)
lin_reg_pl = LinearRegression()
y_pred_pl = cross_val_predict(lin_reg_pl, X_poly, y, cv=10)
accuracy_pl = metrics.r2_score(y, y_pred_pl)
dt_regressor = DecisionTreeRegressor(random_state=0)
dt_regressor.fit(X_train, y_train)
y_pred_dt = dt_regressor.predict(X_test)
print('Decision Tree Regression Accuracy: ', dt_regressor.score(X_test, y_test))
y_pred_dt = cross_val_predict(dt_regressor, X, y, cv=10)
accuracy_dt = metrics.r2_score(y, y_pred_dt)
print('Cross-Predicted(KFold) Decision Tree Regression Accuracy: ', accuracy_dt) | code |
2019264/cell_37 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split,cross_val_score, cross_val_predict
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt # plotting library
import missingno as msno # plotting missing data
import numpy as np # linear algebra
import pandas as pd # data processing
import seaborn as sns # plotting library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as msno
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X = X.fillna(X.median())
# the original notebook created the train/test split in a cell this record does not
# capture; the split below is reconstructed and its parameters are assumed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
y_pred_lr = lin_reg.predict(X_test)
accuracy_lf = metrics.r2_score(y_test, y_pred_lr)
y_pred_kf_lr = cross_val_predict(lin_reg, X, y, cv=10)
accuracy_lf = metrics.r2_score(y, y_pred_kf_lr)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X)
lin_reg_pl = LinearRegression()
y_pred_pl = cross_val_predict(lin_reg_pl, X_poly, y, cv=10)
accuracy_pl = metrics.r2_score(y, y_pred_pl)
dt_regressor = DecisionTreeRegressor(random_state=0)
dt_regressor.fit(X_train, y_train)
y_pred_dt = dt_regressor.predict(X_test)
y_pred_dt = cross_val_predict(dt_regressor, X, y, cv=10)
accuracy_dt = metrics.r2_score(y, y_pred_dt)
rf_regressor = RandomForestRegressor(n_estimators=300, random_state=0)
rf_regressor.fit(X_train, y_train)
y_pred_rf = rf_regressor.predict(X_test)
y_pred_rf = cross_val_predict(rf_regressor, X, y, cv=10)
accuracy_rf = metrics.r2_score(y, y_pred_rf)
ranking = np.argsort(-rf_regressor.feature_importances_)
f, ax = plt.subplots(figsize=(15, 100))
sns.barplot(x=rf_regressor.feature_importances_[ranking], y=X_train.columns.values[ranking], orient='h')
ax.set_xlabel("feature importance")
plt.tight_layout()
plt.show()
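# keep only the 30 most important features and retrain the linear model on them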
X_train = X_train.iloc[:, ranking[:30]]
X_test = X_test.iloc[:, ranking[:30]]
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
y_pred_lr = lin_reg.predict(X_test)
accuracy_lf = metrics.r2_score(y_test, y_pred_lr)
print('Multiple Linear Regression Accuracy: ', accuracy_lf)
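# Note: cross_val_predict refits on the full X here, not on the 30 selected columns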
y_pred_kf_lr = cross_val_predict(lin_reg, X, y, cv=10)
accuracy_lf = metrics.r2_score(y, y_pred_kf_lr)
print('Cross-Predicted(KFold) Multiple Linear Regression Accuracy: ', accuracy_lf) | code
2019264/cell_12 | [
"image_output_1.png"
] | import missingno as msno  # plotting missing data
import pandas as pd  # data processing
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.impute import SimpleImputer  # modern replacement for the removed sklearn.preprocessing.Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
msno.matrix(df=dataset, figsize=(20, 14), color=(0.5, 0, 0))
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X.head() | code |
106208751/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_sales_sort = data_clear.sort_values(by=['Global_Sales'], ascending=False)
#Ten best games
data_sales_sort_hd = data_sales_sort.head(10).sort_values(by=['Global_Sales'], ascending = True)
top10_games_gs = data_sales_sort_hd['Name'].head(10)
top10_sales_gs = data_sales_sort_hd['Global_Sales'].head(10)
fig, ax = plt.subplots()
p1 = ax.barh(top10_games_gs, top10_sales_gs)
ax.set_title('Top 10 global selling video games')
ax.set_xlabel('Sales [million units]')
ax.set_ylabel('Video game names')
ax.bar_label(p1, label_type='center')
plt.show()
sales_regions = ['NA_Sales','EU_Sales','JP_Sales','Other_Sales']
fig,axes = plt.subplots(int(len(sales_regions)/2),int(len(sales_regions)/2), figsize=(16,10))
axes = axes.ravel()  # flatten the 2x2 array of Axes so it can be indexed with a single integer
for index, region in enumerate(sales_regions):
data_sales_sort = data_clear.sort_values(by=region, ascending=True)
sns.barplot(x=data_sales_sort[region].tail(10),y=data_sales_sort['Name'].tail(10), ax=axes[index])
axes[index].set_title(f'Top 10 {region} Video games', fontsize = 14)
axes[index].set_xlabel(f'{region} [million units]', fontsize = 14)
axes[index].set_ylabel('Video game Name', fontsize = 14)
axes[index].bar_label(axes[index].containers[0], label_type='center')
plt.suptitle('10 top sellers per region', fontsize = 18)
plt.tight_layout()
plt.show()
corr_m = data_clear.corr()
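# Drop Year_of_Release (first its column, then its row) so only sales-to-sales correlations remain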
corr_m = corr_m.drop(['Year_of_Release'], axis=1)
corr_m.head(6)
corr_m = corr_m.drop(corr_m.index[0], axis=0)
corr_m.head(6) | code |
106208751/cell_9 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_clear.info() | code |
106208751/cell_25 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_sales_sort = data_clear.sort_values(by=['Global_Sales'], ascending=False)
#Ten best games
data_sales_sort_hd = data_sales_sort.head(10).sort_values(by=['Global_Sales'], ascending = True)
top10_games_gs = data_sales_sort_hd['Name'].head(10)
top10_sales_gs = data_sales_sort_hd['Global_Sales'].head(10)
fig, ax = plt.subplots()
p1 = ax.barh(top10_games_gs, top10_sales_gs)
ax.set_title('Top 10 global selling video games')
ax.set_xlabel('Sales [million units]')
ax.set_ylabel('Video game names')
ax.bar_label(p1, label_type='center')
plt.show()
sales_regions = ['NA_Sales','EU_Sales','JP_Sales','Other_Sales']
fig,axes = plt.subplots(int(len(sales_regions)/2),int(len(sales_regions)/2), figsize=(16,10))
axes = axes.ravel()  # flatten the 2x2 array of Axes so it can be indexed with a single integer
for index, region in enumerate(sales_regions):
data_sales_sort = data_clear.sort_values(by=region, ascending=True)
sns.barplot(x=data_sales_sort[region].tail(10),y=data_sales_sort['Name'].tail(10), ax=axes[index])
axes[index].set_title(f'Top 10 {region} Video games', fontsize = 14)
axes[index].set_xlabel(f'{region} [million units]', fontsize = 14)
axes[index].set_ylabel('Video game Name', fontsize = 14)
axes[index].bar_label(axes[index].containers[0], label_type='center')
plt.suptitle('10 top sellers per region', fontsize = 18)
plt.tight_layout()
plt.show()
corr_m = data_clear.corr()
data_clear.info() | code |
106208751/cell_30 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_sales_sort = data_clear.sort_values(by=['Global_Sales'], ascending=False)
#Ten best games
data_sales_sort_hd = data_sales_sort.head(10).sort_values(by=['Global_Sales'], ascending = True)
top10_games_gs = data_sales_sort_hd['Name'].head(10)
top10_sales_gs = data_sales_sort_hd['Global_Sales'].head(10)
fig, ax = plt.subplots()
p1 = ax.barh(top10_games_gs, top10_sales_gs)
ax.set_title('Top 10 global selling video games')
ax.set_xlabel('Sales [million units]')
ax.set_ylabel('Video game names')
ax.bar_label(p1, label_type='center')
plt.show()
sales_regions = ['NA_Sales','EU_Sales','JP_Sales','Other_Sales']
fig,axes = plt.subplots(int(len(sales_regions)/2),int(len(sales_regions)/2), figsize=(16,10))
axes = axes.ravel()  # flatten the 2x2 array of Axes so it can be indexed with a single integer
for index, region in enumerate(sales_regions):
data_sales_sort = data_clear.sort_values(by=region, ascending=True)
sns.barplot(x=data_sales_sort[region].tail(10),y=data_sales_sort['Name'].tail(10), ax=axes[index])
axes[index].set_title(f'Top 10 {region} Video games', fontsize = 14)
axes[index].set_xlabel(f'{region} [million units]', fontsize = 14)
axes[index].set_ylabel('Video game Name', fontsize = 14)
axes[index].bar_label(axes[index].containers[0], label_type='center')
plt.suptitle('10 top sellers per region', fontsize = 18)
plt.tight_layout()
plt.show()
corr_m = data_clear.corr()
corr_m = corr_m.drop(['Year_of_Release'], axis=1)
corr_m = corr_m.drop(corr_m.index[0], axis=0)
fig,ax = plt.subplots()
sns.heatmap(corr_m, annot=True, ax = ax)
ax.set_title('World sales correlation Matrix', fontsize=15)
plt.show()
publishers_df = pd.DataFrame()
sales_regions = ['NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
for region in sales_regions:
sales_pub = data_clear.groupby('Publisher')[region].sum()
publishers_df[region] = sales_pub
publishers = data_clear['Publisher'].unique()
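# groupby sorts publisher names alphabetically, so np.sort(publishers) lines up row-for-row with the aggregated index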
publishers_df['Publisher'] = np.sort(publishers)
fig = plt.figure(figsize=(16,10))
axes=[None]*5
axes[0] = plt.subplot2grid(shape=(3,2), loc=(0,0), colspan=1)
axes[1] = plt.subplot2grid(shape=(3,2), loc=(0,1), colspan=1)
axes[2] = plt.subplot2grid(shape=(3,2), loc=(1,0), colspan=1)
axes[3] = plt.subplot2grid(shape=(3,2), loc=(1,1), colspan=1)
axes[4] = plt.subplot2grid(shape=(3,2), loc=(2,0), colspan=2)
# axes = axes.ravel()  # not needed here: axes is already a flat Python list
for index, region in enumerate(sales_regions):
data_sales_sort = publishers_df.sort_values(by=region, ascending=True)
sns.barplot(x=data_sales_sort[region].tail(10),y=data_sales_sort['Publisher'].tail(10), ax=axes[index])
    axes[index].set_title(f'Top 10 {region} video game publishers', fontsize = 14)
    axes[index].set_xlabel(f'{region} [million units]', fontsize = 14)
    axes[index].set_ylabel('Publisher Name', fontsize = 14)
    axes[index].bar_label(axes[index].containers[0], label_type='center', fontsize = 10)
plt.suptitle('Top 10 publishers per region', fontsize = 18)
plt.tight_layout()
plt.show()
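# Convert the absolute totals into each publisher's percentage share of region-wide sales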
for region in sales_regions:
publishers_df[region] = (100 * publishers_df[region] / data_clear[region].sum()).round(1)
publishers_df.head(10) | code |
106208751/cell_20 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_sales_sort = data_clear.sort_values(by=['Global_Sales'], ascending=False)
#Ten best games
data_sales_sort_hd = data_sales_sort.head(10).sort_values(by=['Global_Sales'], ascending = True)
top10_games_gs = data_sales_sort_hd['Name'].head(10)
top10_sales_gs = data_sales_sort_hd['Global_Sales'].head(10)
fig, ax = plt.subplots()
p1 = ax.barh(top10_games_gs, top10_sales_gs)
ax.set_title('Top 10 global selling video games')
ax.set_xlabel('Sales [million units]')
ax.set_ylabel('Video game names')
ax.bar_label(p1, label_type='center')
plt.show()
sales_regions = ['NA_Sales','EU_Sales','JP_Sales','Other_Sales']
fig,axes = plt.subplots(int(len(sales_regions)/2),int(len(sales_regions)/2), figsize=(16,10))
axes = axes.ravel()  # flatten the 2x2 array of Axes so it can be indexed with a single integer
for index, region in enumerate(sales_regions):
data_sales_sort = data_clear.sort_values(by=region, ascending=True)
sns.barplot(x=data_sales_sort[region].tail(10),y=data_sales_sort['Name'].tail(10), ax=axes[index])
axes[index].set_title(f'Top 10 {region} Video games', fontsize = 14)
axes[index].set_xlabel(f'{region} [million units]', fontsize = 14)
axes[index].set_ylabel('Video game Name', fontsize = 14)
axes[index].bar_label(axes[index].containers[0], label_type='center')
plt.suptitle('10 top sellers per region', fontsize = 18)
plt.tight_layout()
plt.show()
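# Pairwise (Pearson) correlation across the numeric columns: release year plus the regional and global sales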
corr_m = data_clear.corr()
corr_m.head(6) | code |
106208751/cell_26 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_sales_sort = data_clear.sort_values(by=['Global_Sales'], ascending=False)
#Ten best games
data_sales_sort_hd = data_sales_sort.head(10).sort_values(by=['Global_Sales'], ascending = True)
top10_games_gs = data_sales_sort_hd['Name'].head(10)
top10_sales_gs = data_sales_sort_hd['Global_Sales'].head(10)
fig, ax = plt.subplots()
p1 = ax.barh(top10_games_gs, top10_sales_gs)
ax.set_title('Top 10 global selling video games')
ax.set_xlabel('Sales [million units]')
ax.set_ylabel('Video game names')
ax.bar_label(p1, label_type='center')
plt.show()
sales_regions = ['NA_Sales','EU_Sales','JP_Sales','Other_Sales']
fig,axes = plt.subplots(int(len(sales_regions)/2),int(len(sales_regions)/2), figsize=(16,10))
axes = axes.ravel()  # flatten the 2x2 array of Axes so it can be indexed with a single integer
for index, region in enumerate(sales_regions):
data_sales_sort = data_clear.sort_values(by=region, ascending=True)
sns.barplot(x=data_sales_sort[region].tail(10),y=data_sales_sort['Name'].tail(10), ax=axes[index])
axes[index].set_title(f'Top 10 {region} Video games', fontsize = 14)
axes[index].set_xlabel(f'{region} [million units]', fontsize = 14)
axes[index].set_ylabel('Video game Name', fontsize = 14)
axes[index].bar_label(axes[index].containers[0], label_type='center')
plt.suptitle('10 top sellers per region', fontsize = 18)
plt.tight_layout()
plt.show()
corr_m = data_clear.corr()
publishers_df = pd.DataFrame()
sales_regions = ['NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
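# Sum each region's sales per publisher; every pass of the loop adds one aggregated column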
for region in sales_regions:
sales_pub = data_clear.groupby('Publisher')[region].sum()
publishers_df[region] = sales_pub
publishers = data_clear['Publisher'].unique()
publishers_df['Publisher'] = np.sort(publishers)
publishers_df.head() | code |
106208751/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.info()
data_clear.isnull().sum() | code |
106208751/cell_28 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_sales_sort = data_clear.sort_values(by=['Global_Sales'], ascending=False)
#Ten best games
data_sales_sort_hd = data_sales_sort.head(10).sort_values(by=['Global_Sales'], ascending = True)
top10_games_gs = data_sales_sort_hd['Name'].head(10)
top10_sales_gs = data_sales_sort_hd['Global_Sales'].head(10)
fig, ax = plt.subplots()
p1 = ax.barh(top10_games_gs, top10_sales_gs)
ax.set_title('Top 10 global selling video games')
ax.set_xlabel('Sales [million units]')
ax.set_ylabel('Video game names')
ax.bar_label(p1, label_type='center')
plt.show()
sales_regions = ['NA_Sales','EU_Sales','JP_Sales','Other_Sales']
fig,axes = plt.subplots(int(len(sales_regions)/2),int(len(sales_regions)/2), figsize=(16,10))
axes = axes.ravel()  # flatten the 2x2 array of Axes so it can be indexed with a single integer
for index, region in enumerate(sales_regions):
data_sales_sort = data_clear.sort_values(by=region, ascending=True)
sns.barplot(x=data_sales_sort[region].tail(10),y=data_sales_sort['Name'].tail(10), ax=axes[index])
axes[index].set_title(f'Top 10 {region} Video games', fontsize = 14)
axes[index].set_xlabel(f'{region} [million units]', fontsize = 14)
axes[index].set_ylabel('Video game Name', fontsize = 14)
axes[index].bar_label(axes[index].containers[0], label_type='center')
plt.suptitle('10 top sellers per region', fontsize = 18)
plt.tight_layout()
plt.show()
corr_m = data_clear.corr()
corr_m = corr_m.drop(['Year_of_Release'], axis=1)
corr_m = corr_m.drop(corr_m.index[0], axis=0)
fig,ax = plt.subplots()
sns.heatmap(corr_m, annot=True, ax = ax)
ax.set_title('World sales correlation Matrix', fontsize=15)
plt.show()
publishers_df = pd.DataFrame()
sales_regions = ['NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
for region in sales_regions:
sales_pub = data_clear.groupby('Publisher')[region].sum()
publishers_df[region] = sales_pub
publishers = data_clear['Publisher'].unique()
publishers_df['Publisher'] = np.sort(publishers)
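# subplot2grid builds a 3x2 layout: four half-width regional panels plus one full-width Global_Sales panel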
fig = plt.figure(figsize=(16, 10))
axes = [None] * 5
axes[0] = plt.subplot2grid(shape=(3, 2), loc=(0, 0), colspan=1)
axes[1] = plt.subplot2grid(shape=(3, 2), loc=(0, 1), colspan=1)
axes[2] = plt.subplot2grid(shape=(3, 2), loc=(1, 0), colspan=1)
axes[3] = plt.subplot2grid(shape=(3, 2), loc=(1, 1), colspan=1)
axes[4] = plt.subplot2grid(shape=(3, 2), loc=(2, 0), colspan=2)
for index, region in enumerate(sales_regions):
data_sales_sort = publishers_df.sort_values(by=region, ascending=True)
sns.barplot(x=data_sales_sort[region].tail(10), y=data_sales_sort['Publisher'].tail(10), ax=axes[index])
    axes[index].set_title(f'Top 10 {region} video game publishers', fontsize=14)
    axes[index].set_xlabel(f'{region} [million units]', fontsize=14)
    axes[index].set_ylabel('Publisher Name', fontsize=14)
    axes[index].bar_label(axes[index].containers[0], label_type='center', fontsize=10)
plt.suptitle('Top 10 publishers per region', fontsize=18)
plt.tight_layout()
plt.show() | code |
106208751/cell_15 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_sales_sort = data_clear.sort_values(by=['Global_Sales'], ascending=False)
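# Keep the ten best global sellers, re-sorted ascending so barh draws the biggest seller at the top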
data_sales_sort_hd = data_sales_sort.head(10).sort_values(by=['Global_Sales'], ascending=True)
top10_games_gs = data_sales_sort_hd['Name'].head(10)
top10_sales_gs = data_sales_sort_hd['Global_Sales'].head(10)
fig, ax = plt.subplots()
p1 = ax.barh(top10_games_gs, top10_sales_gs)
ax.set_title('Top 10 global selling video games')
ax.set_xlabel('Sales [million units]')
ax.set_ylabel('Video game names')
ax.bar_label(p1, label_type='center')
plt.show() | code |
106208751/cell_3 | [
"image_output_1.png"
] | # Installing the libraries
!pip install pandas
!pip install seaborn
!pip install numpy
!pip install matplotlib | code |
106208751/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_sales_sort = data_clear.sort_values(by=['Global_Sales'], ascending=False)
#Ten best games
data_sales_sort_hd = data_sales_sort.head(10).sort_values(by=['Global_Sales'], ascending = True)
top10_games_gs = data_sales_sort_hd['Name'].head(10)
top10_sales_gs = data_sales_sort_hd['Global_Sales'].head(10)
fig, ax = plt.subplots()
p1 = ax.barh(top10_games_gs, top10_sales_gs)
ax.set_title('Top 10 global selling video games')
ax.set_xlabel('Sales [million units]')
ax.set_ylabel('Video game names')
ax.bar_label(p1, label_type='center')
plt.show()
sales_regions = ['NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales']
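# A 2x2 grid gives one panel per regional sales column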
fig, axes = plt.subplots(int(len(sales_regions) / 2), int(len(sales_regions) / 2), figsize=(16, 10))
axes = axes.ravel()
for index, region in enumerate(sales_regions):
data_sales_sort = data_clear.sort_values(by=region, ascending=True)
sns.barplot(x=data_sales_sort[region].tail(10), y=data_sales_sort['Name'].tail(10), ax=axes[index])
axes[index].set_title(f'Top 10 {region} Video games', fontsize=14)
axes[index].set_xlabel(f'{region} [million units]', fontsize=14)
axes[index].set_ylabel('Video game Name', fontsize=14)
axes[index].bar_label(axes[index].containers[0], label_type='center')
plt.suptitle('10 top sellers per region', fontsize=18)
plt.tight_layout()
plt.show() | code |
106208751/cell_14 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_sales_sort = data_clear.sort_values(by=['Global_Sales'], ascending=False)
data_sales_sort.head(10) | code |
106208751/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_sales_sort = data_clear.sort_values(by=['Global_Sales'], ascending=False)
#Ten best games
data_sales_sort_hd = data_sales_sort.head(10).sort_values(by=['Global_Sales'], ascending = True)
top10_games_gs = data_sales_sort_hd['Name'].head(10)
top10_sales_gs = data_sales_sort_hd['Global_Sales'].head(10)
fig, ax = plt.subplots()
p1 = ax.barh(top10_games_gs, top10_sales_gs)
ax.set_title('Top 10 global selling video games')
ax.set_xlabel('Sales [million units]')
ax.set_ylabel('Video game names')
ax.bar_label(p1, label_type='center')
plt.show()
sales_regions = ['NA_Sales','EU_Sales','JP_Sales','Other_Sales']
fig,axes = plt.subplots(int(len(sales_regions)/2),int(len(sales_regions)/2), figsize=(16,10))
axes = axes.ravel()  # flatten the 2x2 array of Axes so it can be indexed with a single integer
for index, region in enumerate(sales_regions):
data_sales_sort = data_clear.sort_values(by=region, ascending=True)
sns.barplot(x=data_sales_sort[region].tail(10),y=data_sales_sort['Name'].tail(10), ax=axes[index])
axes[index].set_title(f'Top 10 {region} Video games', fontsize = 14)
axes[index].set_xlabel(f'{region} [million units]', fontsize = 14)
axes[index].set_ylabel('Video game Name', fontsize = 14)
axes[index].bar_label(axes[index].containers[0], label_type='center')
plt.suptitle('10 top sellers per region', fontsize = 18)
plt.tight_layout()
plt.show()
corr_m = data_clear.corr()
corr_m = corr_m.drop(['Year_of_Release'], axis=1)
corr_m = corr_m.drop(corr_m.index[0], axis=0)
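# annot=True prints each correlation coefficient inside its heatmap cell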
fig, ax = plt.subplots()
sns.heatmap(corr_m, annot=True, ax=ax)
ax.set_title('World sales correlation Matrix', fontsize=15)
plt.show() | code |