path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class: code)
---|---|---|---|
49129249/cell_39 | [
"text_plain_output_1.png"
] | from PIL import Image
from imutils import paths
from torch.utils.data import Dataset, random_split, DataLoader
from torchvision.utils import make_grid
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import torch
data_dir = '../input/super-hero/Q4-superheroes_image_data/'
train_dir = data_dir + 'CAX_Superhero_Train'
test_dir = data_dir + 'CAX_Superhero_Test'
label_enc = {'Ant-Man': 0, 'Aquaman': 1, 'Avengers': 2, 'Batman': 3, 'Black Panther': 4, 'Captain America': 5, 'Catwoman': 6, 'Ghost Rider': 7, 'Hulk': 8, 'Iron Man': 9, 'Spiderman': 10, 'Superman': 11}
label_deco = {0: 'Ant-Man', 1: 'Aquaman', 2: 'Avengers', 3: 'Batman', 4: 'Black Panther', 5: 'Captain America', 6: 'Catwoman', 7: 'Ghost Rider', 8: 'Hulk', 9: 'Iron Man', 10: 'Spiderman', 11: 'Superman'}
def create_img_df(dir):
img_list = list(paths.list_images(dir))
data = pd.DataFrame(columns=['File_name', 'Target'])
for i, ipaths in tqdm(enumerate(img_list), total=len(img_list)):
data.loc[i, 'image_path'] = ipaths
data.loc[i, 'File_name'] = os.path.basename(ipaths)
data.loc[i, 'Target'] = os.path.split(os.path.dirname(ipaths))[-1]
return data
train_csv = create_img_df(train_dir)
#counting number of images under each category
plt.figure(figsize=(10,6))
g=sns.countplot(train_csv['Target'])
g.set_xticklabels(g.get_xticklabels(),rotation=40);
def encode_label(label):
    target = torch.zeros(12, dtype=torch.float)
    if isinstance(label, str):
        label = label_enc[label]  # map a class name like 'Batman' to its index
    target[int(label)] = 1.0
    return target
def decode_target(target, text_labels=False, threshold=0.5):
label = None
for i, x in enumerate(target):
if x >= threshold:
label = i
break
if text_labels:
return f'{label_deco[label]}({label})'
return label
transformer = transforms.Compose([transforms.Resize(130), transforms.CenterCrop(129), transforms.ToTensor()])
class heroDataset(Dataset):
def __init__(self, csv_file, root_dir, transform=None):
self.df = csv_file
self.transform = transform
self.root_dir = root_dir
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
row = self.df.loc[idx]
img_id, img_label = (row['File_name'], row['Target'])
img = Image.open(row['image_path'])
if self.transform:
img = self.transform(img)
return (img, encode_label(img_label))
train_dataset = heroDataset(train_csv, train_dir, transform=transformer)
def show_sample(img, target):
pass
torch.manual_seed(10)
val_pct = 0.11
val_size = int(val_pct * len(train_dataset))
train_size = len(train_dataset) - val_size
batch_size = 50
input_size = 129 * 129
output_size = 12
train_ds, val_ds = random_split(train_dataset, [train_size, val_size])
train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=2, pin_memory=True)
val_dl = DataLoader(val_ds, batch_size * 2, num_workers=2, pin_memory=True)
for a, b in val_dl:
break
def show_batch(dl):
for images, labels in dl:
fig, ax = plt.subplots(figsize=(16, 8))
ax.set_xticks([]); ax.set_yticks([])
data = images
ax.imshow(make_grid(data, nrow=15).permute(1, 2, 0))
break
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
def F_score(output, label, threshold=0.5, beta=1):
prob = output > threshold
label = label > threshold
TP = (prob & label).sum(1).float()
TN = (~prob & ~label).sum(1).float()
FP = (prob & ~label).sum(1).float()
FN = (~prob & label).sum(1).float()
precision = torch.mean(TP / (TP + FP + 1e-12))
recall = torch.mean(TP / (TP + FN + 1e-12))
F2 = (1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall + 1e-12)
return F2.mean(0)
class ImageClassificationBase(nn.Module):
def training_step(self, batch):
images, labels = batch
out = self(images)
loss = F.binary_cross_entropy(out, labels)
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images)
loss = F.binary_cross_entropy(out, labels)
acc = F_score(out, labels)
return {'val_loss': loss.detach(), 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean()
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean()
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
pass
@torch.no_grad()
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
history = []
optimizer = opt_func(model.parameters(), lr)
for epoch in range(epochs):
model.train()
train_losses = []
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
result = evaluate(model, val_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
model.epoch_end(epoch, result)
history.append(result)
return history
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader:
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
device = get_default_device()
device | code |
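A note on the loss used in `ImageClassificationBase` above: `F.binary_cross_entropy` expects probabilities in [0, 1], so whatever model is plugged in must end in a sigmoid, or the raw outputs should go through `F.binary_cross_entropy_with_logits` instead. A minimal sketch of the difference:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(4, 12)  # raw model outputs for a batch of 4
targets = F.one_hot(torch.tensor([3, 9, 0, 11]), num_classes=12).float()

loss_a = F.binary_cross_entropy(torch.sigmoid(logits), targets)  # needs values in [0, 1]
loss_b = F.binary_cross_entropy_with_logits(logits, targets)     # numerically safer on raw logits
print(loss_a.item(), loss_b.item())  # the two losses agree
```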
49129249/cell_2 | [
"image_output_1.png"
] | !pip install imutils
from imutils import paths | code |
49129249/cell_45 | [
"image_output_1.png"
] | from PIL import Image
from imutils import paths
from torch.utils.data import Dataset, random_split, DataLoader
from torchvision.utils import make_grid
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import torch
data_dir = '../input/super-hero/Q4-superheroes_image_data/'
train_dir = data_dir + 'CAX_Superhero_Train'
test_dir = data_dir + 'CAX_Superhero_Test'
label_enc = {'Ant-Man': 0, 'Aquaman': 1, 'Avengers': 2, 'Batman': 3, 'Black Panther': 4, 'Captain America': 5, 'Catwoman': 6, 'Ghost Rider': 7, 'Hulk': 8, 'Iron Man': 9, 'Spiderman': 10, 'Superman': 11}
label_deco = {0: 'Ant-Man', 1: 'Aquaman', 2: 'Avengers', 3: 'Batman', 4: 'Black Panther', 5: 'Captain America', 6: 'Catwoman', 7: 'Ghost Rider', 8: 'Hulk', 9: 'Iron Man', 10: 'Spiderman', 11: 'Superman'}
def create_img_df(dir):
img_list = list(paths.list_images(dir))
data = pd.DataFrame(columns=['File_name', 'Target'])
for i, ipaths in tqdm(enumerate(img_list), total=len(img_list)):
data.loc[i, 'image_path'] = ipaths
data.loc[i, 'File_name'] = os.path.basename(ipaths)
data.loc[i, 'Target'] = os.path.split(os.path.dirname(ipaths))[-1]
return data
train_csv = create_img_df(train_dir)
#counting number of images under each category
plt.figure(figsize=(10,6))
g=sns.countplot(train_csv['Target'])
g.set_xticklabels(g.get_xticklabels(),rotation=40);
def encode_label(label):
    target = torch.zeros(12, dtype=torch.float)
    if isinstance(label, str):
        label = label_enc[label]  # map a class name like 'Batman' to its index
    target[int(label)] = 1.0
    return target
def decode_target(target, text_labels=False, threshold=0.5):
label = None
for i, x in enumerate(target):
if x >= threshold:
label = i
break
if text_labels:
return f'{label_deco[label]}({label})'
return label
transformer = transforms.Compose([transforms.Resize(130), transforms.CenterCrop(129), transforms.ToTensor()])
class heroDataset(Dataset):
def __init__(self, csv_file, root_dir, transform=None):
self.df = csv_file
self.transform = transform
self.root_dir = root_dir
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
row = self.df.loc[idx]
img_id, img_label = (row['File_name'], row['Target'])
img = Image.open(row['image_path'])
if self.transform:
img = self.transform(img)
return (img, encode_label(img_label))
train_dataset = heroDataset(train_csv, train_dir, transform=transformer)
def show_sample(img, target):
pass
torch.manual_seed(10)
val_pct = 0.11
val_size = int(val_pct * len(train_dataset))
train_size = len(train_dataset) - val_size
batch_size = 50
input_size = 129 * 129
output_size = 12
train_ds, val_ds = random_split(train_dataset, [train_size, val_size])
train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=2, pin_memory=True)
val_dl = DataLoader(val_ds, batch_size * 2, num_workers=2, pin_memory=True)
for a, b in val_dl:
break
def show_batch(dl):
for images, labels in dl:
fig, ax = plt.subplots(figsize=(16, 8))
ax.set_xticks([]); ax.set_yticks([])
data = images
ax.imshow(make_grid(data, nrow=15).permute(1, 2, 0))
break
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
def F_score(output, label, threshold=0.5, beta=1):
prob = output > threshold
label = label > threshold
TP = (prob & label).sum(1).float()
TN = (~prob & ~label).sum(1).float()
FP = (prob & ~label).sum(1).float()
FN = (~prob & label).sum(1).float()
precision = torch.mean(TP / (TP + FP + 1e-12))
recall = torch.mean(TP / (TP + FN + 1e-12))
F2 = (1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall + 1e-12)
return F2.mean(0)
class ImageClassificationBase(nn.Module):
def training_step(self, batch):
images, labels = batch
out = self(images)
loss = F.binary_cross_entropy(out, labels)
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images)
loss = F.binary_cross_entropy(out, labels)
acc = F_score(out, labels)
return {'val_loss': loss.detach(), 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean()
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean()
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
pass
@torch.no_grad()
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
history = []
optimizer = opt_func(model.parameters(), lr)
for epoch in range(epochs):
model.train()
train_losses = []
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
result = evaluate(model, val_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
model.epoch_end(epoch, result)
history.append(result)
return history
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader:
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
device = get_default_device()
device
train_dl = DeviceDataLoader(train_dl, device)
val_dl = DeviceDataLoader(val_dl, device)
model2 = to_device(ConvNet(), device)
for images, labels in train_dl:
print('images.shape:', images.shape)
out = model2(images)
print('out.shape:', out.shape)
print('out[0]:', out[0])
break | code |
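cell_45 instantiates `ConvNet()`, but the class definition is not captured in these extracted cells. A minimal sketch of what it might look like, consistent with the 3x129x129 inputs, the 12-way one-hot targets, and the sigmoid outputs that `F.binary_cross_entropy` requires; the notebook's real architecture may differ:

```python
import torch.nn as nn

class ConvNet(ImageClassificationBase):
    """Hypothetical reconstruction; the notebook's actual ConvNet is not shown."""
    def __init__(self):
        super().__init__()
        self.network = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),   # 129 -> 64
            nn.Conv2d(16, 32, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),  # 64 -> 32
            nn.Conv2d(32, 64, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),  # 32 -> 16
            nn.Flatten(),
            nn.Linear(64 * 16 * 16, 12),
            nn.Sigmoid(),  # probabilities in [0, 1] to match binary_cross_entropy
        )

    def forward(self, xb):
        return self.network(xb)
```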
49129249/cell_32 | [
"text_plain_output_1.png"
] | from PIL import Image
from imutils import paths
from torch.utils.data import Dataset, random_split, DataLoader
from torchvision.utils import make_grid
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import torch
import torchvision.transforms as transforms
data_dir = '../input/super-hero/Q4-superheroes_image_data/'
train_dir = data_dir + 'CAX_Superhero_Train'
test_dir = data_dir + 'CAX_Superhero_Test'
label_enc = {'Ant-Man': 0, 'Aquaman': 1, 'Avengers': 2, 'Batman': 3, 'Black Panther': 4, 'Captain America': 5, 'Catwoman': 6, 'Ghost Rider': 7, 'Hulk': 8, 'Iron Man': 9, 'Spiderman': 10, 'Superman': 11}
label_deco = {0: 'Ant-Man', 1: 'Aquaman', 2: 'Avengers', 3: 'Batman', 4: 'Black Panther', 5: 'Captain America', 6: 'Catwoman', 7: 'Ghost Rider', 8: 'Hulk', 9: 'Iron Man', 10: 'Spiderman', 11: 'Superman'}
def create_img_df(dir):
img_list = list(paths.list_images(dir))
data = pd.DataFrame(columns=['File_name', 'Target'])
for i, ipaths in tqdm(enumerate(img_list), total=len(img_list)):
data.loc[i, 'image_path'] = ipaths
data.loc[i, 'File_name'] = os.path.basename(ipaths)
data.loc[i, 'Target'] = os.path.split(os.path.dirname(ipaths))[-1]
return data
train_csv = create_img_df(train_dir)
#counting number of images under each category
plt.figure(figsize=(10,6))
g=sns.countplot(train_csv['Target'])
g.set_xticklabels(g.get_xticklabels(),rotation=40);
def encode_label(label):
    target = torch.zeros(12, dtype=torch.float)
    if isinstance(label, str):
        label = label_enc[label]  # map a class name like 'Batman' to its index
    target[int(label)] = 1.0
    return target
def decode_target(target, text_labels=False, threshold=0.5):
label = None
for i, x in enumerate(target):
if x >= threshold:
label = i
break
if text_labels:
return f'{label_deco[label]}({label})'
return label
class heroDataset(Dataset):
def __init__(self, csv_file, root_dir, transform=None):
self.df = csv_file
self.transform = transform
self.root_dir = root_dir
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
row = self.df.loc[idx]
img_id, img_label = (row['File_name'], row['Target'])
img = Image.open(row['image_path'])
if self.transform:
img = self.transform(img)
return (img, encode_label(img_label))
def show_sample(img, target):
pass
batch_size = 50
input_size = 129 * 129
output_size = 12
transformer = transforms.Compose([transforms.Resize(130), transforms.CenterCrop(129), transforms.ToTensor()])
train_dataset = heroDataset(train_csv, train_dir, transform=transformer)
torch.manual_seed(10)
val_pct = 0.11
val_size = int(val_pct * len(train_dataset))
train_size = len(train_dataset) - val_size
train_ds, val_ds = random_split(train_dataset, [train_size, val_size])
train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=2, pin_memory=True)
val_dl = DataLoader(val_ds, batch_size * 2, num_workers=2, pin_memory=True)
def show_batch(dl):
for images, labels in dl:
fig, ax = plt.subplots(figsize=(16, 8))
ax.set_xticks([]); ax.set_yticks([])
data = images
ax.imshow(make_grid(data, nrow=15).permute(1, 2, 0))
break
show_batch(train_dl) | code |
49129249/cell_8 | [
"text_plain_output_1.png"
] | from imutils import paths
from tqdm import tqdm
import os
import pandas as pd
data_dir = '../input/super-hero/Q4-superheroes_image_data/'
train_dir = data_dir + 'CAX_Superhero_Train'
test_dir = data_dir + 'CAX_Superhero_Test'
def create_img_df(dir):
img_list = list(paths.list_images(dir))
data = pd.DataFrame(columns=['File_name', 'Target'])
for i, ipaths in tqdm(enumerate(img_list), total=len(img_list)):
data.loc[i, 'image_path'] = ipaths
data.loc[i, 'File_name'] = os.path.basename(ipaths)
data.loc[i, 'Target'] = os.path.split(os.path.dirname(ipaths))[-1]
return data
train_csv = create_img_df(train_dir) | code |
49129249/cell_15 | [
"text_html_output_1.png"
] | import torch
label_enc = {'Ant-Man': 0, 'Aquaman': 1, 'Avengers': 2, 'Batman': 3, 'Black Panther': 4, 'Captain America': 5, 'Catwoman': 6, 'Ghost Rider': 7, 'Hulk': 8, 'Iron Man': 9, 'Spiderman': 10, 'Superman': 11}
label_deco = {0: 'Ant-Man', 1: 'Aquaman', 2: 'Avengers', 3: 'Batman', 4: 'Black Panther', 5: 'Captain America', 6: 'Catwoman', 7: 'Ghost Rider', 8: 'Hulk', 9: 'Iron Man', 10: 'Spiderman', 11: 'Superman'}
def encode_label(label):
    target = torch.zeros(12, dtype=torch.float)
    if isinstance(label, str):
        label = label_enc[label]  # map a class name like 'Batman' to its index
    target[int(label)] = 1.0
    return target
def decode_target(target, text_labels=False, threshold=0.5):
label = None
for i, x in enumerate(target):
if x >= threshold:
label = i
break
if text_labels:
return f'{label_deco[label]}({label})'
return label
encoded_lab = encode_label(4)
decoded_lab = decode_target(encoded_lab)
text = decode_target(encoded_lab, True)
print(encoded_lab, decoded_lab, text, sep='\n')
del (encoded_lab, decoded_lab, text) | code |
49129249/cell_24 | [
"text_plain_output_1.png"
] | from PIL import Image
from imutils import paths
from torch.utils.data import Dataset, random_split, DataLoader
from tqdm import tqdm
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import torch
data_dir = '../input/super-hero/Q4-superheroes_image_data/'
train_dir = data_dir + 'CAX_Superhero_Train'
test_dir = data_dir + 'CAX_Superhero_Test'
label_enc = {'Ant-Man': 0, 'Aquaman': 1, 'Avengers': 2, 'Batman': 3, 'Black Panther': 4, 'Captain America': 5, 'Catwoman': 6, 'Ghost Rider': 7, 'Hulk': 8, 'Iron Man': 9, 'Spiderman': 10, 'Superman': 11}
label_deco = {0: 'Ant-Man', 1: 'Aquaman', 2: 'Avengers', 3: 'Batman', 4: 'Black Panther', 5: 'Captain America', 6: 'Catwoman', 7: 'Ghost Rider', 8: 'Hulk', 9: 'Iron Man', 10: 'Spiderman', 11: 'Superman'}
def create_img_df(dir):
img_list = list(paths.list_images(dir))
data = pd.DataFrame(columns=['File_name', 'Target'])
for i, ipaths in tqdm(enumerate(img_list), total=len(img_list)):
data.loc[i, 'image_path'] = ipaths
data.loc[i, 'File_name'] = os.path.basename(ipaths)
data.loc[i, 'Target'] = os.path.split(os.path.dirname(ipaths))[-1]
return data
train_csv = create_img_df(train_dir)
#counting number of images under each category
plt.figure(figsize=(10,6))
g=sns.countplot(train_csv['Target'])
g.set_xticklabels(g.get_xticklabels(),rotation=40);
def encode_label(label):
    target = torch.zeros(12, dtype=torch.float)
    if isinstance(label, str):
        label = label_enc[label]  # map a class name like 'Batman' to its index
    target[int(label)] = 1.0
    return target
def decode_target(target, text_labels=False, threshold=0.5):
label = None
for i, x in enumerate(target):
if x >= threshold:
label = i
break
if text_labels:
return f'{label_deco[label]}({label})'
return label
transformer = transforms.Compose([transforms.Resize(130), transforms.CenterCrop(129), transforms.ToTensor()])
class heroDataset(Dataset):
def __init__(self, csv_file, root_dir, transform=None):
self.df = csv_file
self.transform = transform
self.root_dir = root_dir
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
row = self.df.loc[idx]
img_id, img_label = (row['File_name'], row['Target'])
img = Image.open(row['image_path'])
if self.transform:
img = self.transform(img)
return (img, encode_label(img_label))
train_dataset = heroDataset(train_csv, train_dir, transform=transformer)
def show_sample(img, target):
pass
show_sample(*train_dataset[2000]) | code |
49129249/cell_10 | [
"text_plain_output_1.png"
] | from imutils import paths
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
data_dir = '../input/super-hero/Q4-superheroes_image_data/'
train_dir = data_dir + 'CAX_Superhero_Train'
test_dir = data_dir + 'CAX_Superhero_Test'
def create_img_df(dir):
img_list = list(paths.list_images(dir))
data = pd.DataFrame(columns=['File_name', 'Target'])
for i, ipaths in tqdm(enumerate(img_list), total=len(img_list)):
data.loc[i, 'image_path'] = ipaths
data.loc[i, 'File_name'] = os.path.basename(ipaths)
data.loc[i, 'Target'] = os.path.split(os.path.dirname(ipaths))[-1]
return data
train_csv = create_img_df(train_dir)
plt.figure(figsize=(10, 6))
g = sns.countplot(train_csv['Target'])
g.set_xticklabels(g.get_xticklabels(), rotation=40) | code |
49129249/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from imutils import paths
from torch.utils.data import Dataset, random_split, DataLoader
from tqdm import tqdm
import torchvision.transforms as transforms
import os
import pandas as pd
import torch
data_dir = '../input/super-hero/Q4-superheroes_image_data/'
train_dir = data_dir + 'CAX_Superhero_Train'
test_dir = data_dir + 'CAX_Superhero_Test'
label_enc = {'Ant-Man': 0, 'Aquaman': 1, 'Avengers': 2, 'Batman': 3, 'Black Panther': 4, 'Captain America': 5, 'Catwoman': 6, 'Ghost Rider': 7, 'Hulk': 8, 'Iron Man': 9, 'Spiderman': 10, 'Superman': 11}
label_deco = {0: 'Ant-Man', 1: 'Aquaman', 2: 'Avengers', 3: 'Batman', 4: 'Black Panther', 5: 'Captain America', 6: 'Catwoman', 7: 'Ghost Rider', 8: 'Hulk', 9: 'Iron Man', 10: 'Spiderman', 11: 'Superman'}
def create_img_df(dir):
img_list = list(paths.list_images(dir))
data = pd.DataFrame(columns=['File_name', 'Target'])
for i, ipaths in tqdm(enumerate(img_list), total=len(img_list)):
data.loc[i, 'image_path'] = ipaths
data.loc[i, 'File_name'] = os.path.basename(ipaths)
data.loc[i, 'Target'] = os.path.split(os.path.dirname(ipaths))[-1]
return data
train_csv = create_img_df(train_dir)
def encode_label(label):
    target = torch.zeros(12, dtype=torch.float)
    if isinstance(label, str):
        label = label_enc[label]  # map a class name like 'Batman' to its index
    target[int(label)] = 1.0
    return target
def decode_target(target, text_labels=False, threshold=0.5):
label = None
for i, x in enumerate(target):
if x >= threshold:
label = i
break
if text_labels:
return f'{label_deco[label]}({label})'
return label
transformer = transforms.Compose([transforms.Resize(130), transforms.CenterCrop(129), transforms.ToTensor()])
train_dataset = heroDataset(train_csv, train_dir, transform=transformer)
torch.manual_seed(10)
val_pct = 0.11
val_size = int(val_pct * len(train_dataset))
train_size = len(train_dataset) - val_size
train_ds, val_ds = random_split(train_dataset, [train_size, val_size])
(len(train_ds), len(val_ds)) | code |
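Here `torch.manual_seed(10)` makes the `random_split` reproducible by seeding the global RNG. On recent PyTorch versions an equivalent, more explicit form seeds the split itself; a minimal sketch, assuming `train_dataset`, `train_size`, and `val_size` from the cell above:

```python
import torch
from torch.utils.data import random_split

# Seed the split directly instead of the global RNG
g = torch.Generator().manual_seed(10)
train_ds, val_ds = random_split(train_dataset, [train_size, val_size], generator=g)
```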
49129249/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from imutils import paths
from tqdm import tqdm
import os
import pandas as pd
data_dir = '../input/super-hero/Q4-superheroes_image_data/'
train_dir = data_dir + 'CAX_Superhero_Train'
test_dir = data_dir + 'CAX_Superhero_Test'
def create_img_df(dir):
img_list = list(paths.list_images(dir))
data = pd.DataFrame(columns=['File_name', 'Target'])
for i, ipaths in tqdm(enumerate(img_list), total=len(img_list)):
data.loc[i, 'image_path'] = ipaths
data.loc[i, 'File_name'] = os.path.basename(ipaths)
data.loc[i, 'Target'] = os.path.split(os.path.dirname(ipaths))[-1]
return data
train_csv = create_img_df(train_dir)
train_csv.head(5) | code |
73082451/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
corruption = pd.read_csv('../input/crime-in-india-2019/Corruption 2019.csv')
corruption.columns
corruption.drop(['Category', 'Unnamed: 11', 'Unnamed: 12', 'Unnamed: 13', 'Unnamed: 14'], axis=1, inplace=True)
totalcase = corruption.sort_values('Total cases', ascending=False)
plt.tick_params(axis='x', which='major', labelsize=15, rotation=90)
plt.tick_params(axis='y', which='major', labelsize=15)
chargesheet = corruption.sort_values('Cases Charge-sheeted', ascending=False)
plt.xticks(rotation=90)
pending = corruption.sort_values('Cases Pending Investigation at End of the Year', ascending=False)
plt.xticks(rotation=90)
previous = corruption.sort_values('Cases Pending Investigation from Previous Year', ascending=False)
plt.figure(figsize=(20, 12))
plt.xticks(rotation=90)
sns.pointplot(x='State/UT ', y='Cases Pending Investigation from Previous Year', data=previous, color='red') | code |
73082451/cell_25 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
corruption = pd.read_csv('../input/crime-in-india-2019/Corruption 2019.csv')
corruption.columns
corruption.drop(['Category', 'Unnamed: 11', 'Unnamed: 12', 'Unnamed: 13', 'Unnamed: 14'], axis=1, inplace=True)
totalcase = corruption.sort_values('Total cases', ascending=False)
plt.tick_params(axis='x', which='major', labelsize=15, rotation=90)
plt.tick_params(axis='y', which='major', labelsize=15)
chargesheet = corruption.sort_values('Cases Charge-sheeted', ascending=False)
plt.xticks(rotation=90)
pending = corruption.sort_values('Cases Pending Investigation at End of the Year', ascending=False)
plt.xticks(rotation=90)
previous = corruption.sort_values('Cases Pending Investigation from Previous Year', ascending=False)
plt.xticks(rotation=90)
reported = corruption.sort_values('Cases Reported during the year', ascending=False)
plt.figure(figsize=(20, 12))
plt.xticks(rotation=90)
sns.pointplot(x='State/UT ', y='Cases Reported during the year', data=reported, color='red') | code |
73082451/cell_4 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
corruption = pd.read_csv('../input/crime-in-india-2019/Corruption 2019.csv')
corruption.columns | code |
73082451/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
corruption = pd.read_csv('../input/crime-in-india-2019/Corruption 2019.csv')
corruption.columns
corruption.drop(['Category', 'Unnamed: 11', 'Unnamed: 12', 'Unnamed: 13', 'Unnamed: 14'], axis=1, inplace=True)
totalcase = corruption.sort_values('Total cases', ascending=False)
plt.figure(figsize=(20, 14))
sns.pointplot(data=totalcase, x='State/UT ', y='Total cases', color='red')
plt.title('Total Case of Corruption by state')
plt.tick_params(axis='x', which='major', labelsize=15, rotation=90)
plt.tick_params(axis='y', which='major', labelsize=15) | code |
73082451/cell_7 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
corruption = pd.read_csv('../input/crime-in-india-2019/Corruption 2019.csv')
corruption.columns
corruption.drop(['Category', 'Unnamed: 11', 'Unnamed: 12', 'Unnamed: 13', 'Unnamed: 14'], axis=1, inplace=True)
corruption.head() | code |
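The `drop` call above lists the junk columns by name. A pattern-based alternative (a sketch, not the notebook's own code) is more robust if the CSV's trailing empty columns ever change:

```python
# Drop every auto-generated 'Unnamed: N' column instead of naming each one
corruption = corruption.loc[:, ~corruption.columns.str.startswith('Unnamed')]
```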
73082451/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
corruption = pd.read_csv('../input/crime-in-india-2019/Corruption 2019.csv')
corruption.columns
corruption.drop(['Category', 'Unnamed: 11', 'Unnamed: 12', 'Unnamed: 13', 'Unnamed: 14'], axis=1, inplace=True)
totalcase = corruption.sort_values('Total cases', ascending=False)
plt.tick_params(axis='x', which='major', labelsize=15, rotation=90)
plt.tick_params(axis='y', which='major', labelsize=15)
chargesheet = corruption.sort_values('Cases Charge-sheeted', ascending=False)
plt.xticks(rotation=90)
pending = corruption.sort_values('Cases Pending Investigation at End of the Year', ascending=False)
plt.figure(figsize=(20, 12))
plt.xticks(rotation=90)
sns.pointplot(x='State/UT ', y='Cases Pending Investigation at End of the Year', data=pending, color='red') | code |
73082451/cell_8 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
corruption = pd.read_csv('../input/crime-in-india-2019/Corruption 2019.csv')
corruption.columns
corruption.drop(['Category', 'Unnamed: 11', 'Unnamed: 12', 'Unnamed: 13', 'Unnamed: 14'], axis=1, inplace=True)
sns.pairplot(corruption) | code |
73082451/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
corruption = pd.read_csv('../input/crime-in-india-2019/Corruption 2019.csv')
corruption.columns
corruption.drop(['Category', 'Unnamed: 11', 'Unnamed: 12', 'Unnamed: 13', 'Unnamed: 14'], axis=1, inplace=True)
totalcase = corruption.sort_values('Total cases', ascending=False)
plt.tick_params(axis='x', which='major', labelsize=15, rotation=90)
plt.tick_params(axis='y', which='major', labelsize=15)
chargesheet = corruption.sort_values('Cases Charge-sheeted', ascending=False)
plt.figure(figsize=(20, 12))
plt.xticks(rotation=90)
sns.pointplot(x='State/UT ', y='Cases Charge-sheeted', data=chargesheet, color='red') | code |
73082451/cell_5 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
corruption = pd.read_csv('../input/crime-in-india-2019/Corruption 2019.csv')
corruption.columns
corruption.info() | code |
16160251/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
points[['rallyid', 'winner']].groupby('winner').count() | code |
16160251/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
rallies.head() | code |
16160251/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df4 = serves.groupby(['server']).count().iloc[:, :1]
df4.columns = ['Serves']
df4 | code |
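The `groupby(['server']).count()` above counts rows per server using the first column. As a sketch, `value_counts` produces the same table in one step:

```python
# Equivalent per-server serve counts via value_counts
df4 = serves['server'].value_counts().rename('Serves').to_frame()
```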
16160251/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from statistics import mean
import matplotlib.pyplot as plt # visualizations
import pandas as pd # data frames
import seaborn as sns # visualizations
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2
df3 = points.groupby(['winner', 'reason']).count().iloc[:, :1]
df3.columns = ['Points Won']
df3
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="reason", data=points, ax=axes[0], palette="Set1")
sns.countplot(x="reason", hue='winner',data=points, ax=axes[1] ,palette="Set1")
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="strokes", hue='serve',data=points, palette="Set1", ax=axes[0])
sns.countplot(x="strokes", hue='winner',data=points ,palette="Set1", ax=axes[1])
sns.distplot(points['totaltime'], color='red') | code |
16160251/cell_20 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt # visualizations
import pandas as pd # data frames
import seaborn as sns # visualizations
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2
df3 = points.groupby(['winner', 'reason']).count().iloc[:, :1]
df3.columns = ['Points Won']
df3
f, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.countplot(x='reason', data=points, ax=axes[0], palette='Set1')
sns.countplot(x='reason', hue='winner', data=points, ax=axes[1], palette='Set1') | code |
16160251/cell_40 | [
"text_html_output_1.png"
] | from statistics import mean
import matplotlib.pyplot as plt # visualizations
import pandas as pd # data frames
import seaborn as sns # visualizations
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2
df3 = points.groupby(['winner', 'reason']).count().iloc[:, :1]
df3.columns = ['Points Won']
df3
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="reason", data=points, ax=axes[0], palette="Set1")
sns.countplot(x="reason", hue='winner',data=points, ax=axes[1] ,palette="Set1")
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="strokes", hue='serve',data=points, palette="Set1", ax=axes[0])
sns.countplot(x="strokes", hue='winner',data=points ,palette="Set1", ax=axes[1])
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.scatterplot(x="totaltime", y="strokes", data=points, ax=axes[0])
sns.scatterplot(x="totaltime", y="strokes", hue="winner" ,data=points, ax=axes[1])
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="type",data=events, palette="Set1", ax=axes[0])
sns.countplot(x="type", hue="hitter", data=events, palette="Set1", ax=axes[1])
f, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.countplot(y='stroke', data=events, palette='Set1', ax=axes[0])
events1 = events.replace({'__undefined__': 'forehand'})
sns.countplot(y='stroke', data=events1, palette='Set1', ax=axes[1]) | code |
16160251/cell_26 | [
"text_html_output_1.png"
] | from statistics import mean
import matplotlib.pyplot as plt # visualizations
import pandas as pd # data frames
import seaborn as sns # visualizations
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2
df3 = points.groupby(['winner', 'reason']).count().iloc[:, :1]
df3.columns = ['Points Won']
df3
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="reason", data=points, ax=axes[0], palette="Set1")
sns.countplot(x="reason", hue='winner',data=points, ax=axes[1] ,palette="Set1")
f, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.countplot(x='strokes', hue='serve', data=points, palette='Set1', ax=axes[0])
sns.countplot(x='strokes', hue='winner', data=points, palette='Set1', ax=axes[1]) | code |
16160251/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2
df3 = points.groupby(['winner', 'reason']).count().iloc[:, :1]
df3.columns = ['Points Won']
df3 | code |
16160251/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
points.head() | code |
16160251/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from statistics import mean
import matplotlib.pyplot as plt # visualizations
import pandas as pd # data frames
import seaborn as sns # visualizations
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2
df3 = points.groupby(['winner', 'reason']).count().iloc[:, :1]
df3.columns = ['Points Won']
df3
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="reason", data=points, ax=axes[0], palette="Set1")
sns.countplot(x="reason", hue='winner',data=points, ax=axes[1] ,palette="Set1")
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="strokes", hue='serve',data=points, palette="Set1", ax=axes[0])
sns.countplot(x="strokes", hue='winner',data=points ,palette="Set1", ax=axes[1])
f, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.scatterplot(x='totaltime', y='strokes', data=points, ax=axes[0])
sns.scatterplot(x='totaltime', y='strokes', hue='winner', data=points, ax=axes[1]) | code |
16160251/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2
df3 = points.groupby(['winner', 'reason']).count().iloc[:, :1]
df3.columns = ['Points Won']
df3
print('Seconds of play: ' + str(points.totaltime.sum()))
print('Minutes of play: ' + str(points.totaltime.sum() / 60))
print('Percentage of the match spent in play (2h 4m): ' + str(points.totaltime.sum() / 60 / 124 * 100) + ' %') | code |
16160251/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
serves.head() | code |
16160251/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df | code |
16160251/cell_38 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from statistics import mean
import matplotlib.pyplot as plt # visualizations
import pandas as pd # data frames
import seaborn as sns # visualizations
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2
df3 = points.groupby(['winner', 'reason']).count().iloc[:, :1]
df3.columns = ['Points Won']
df3
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="reason", data=points, ax=axes[0], palette="Set1")
sns.countplot(x="reason", hue='winner',data=points, ax=axes[1] ,palette="Set1")
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="strokes", hue='serve',data=points, palette="Set1", ax=axes[0])
sns.countplot(x="strokes", hue='winner',data=points ,palette="Set1", ax=axes[1])
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.scatterplot(x="totaltime", y="strokes", data=points, ax=axes[0])
sns.scatterplot(x="totaltime", y="strokes", hue="winner" ,data=points, ax=axes[1])
f, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.countplot(x='type', data=events, palette='Set1', ax=axes[0])
sns.countplot(x='type', hue='hitter', data=events, palette='Set1', ax=axes[1]) | code |
16160251/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats
from sklearn import preprocessing
from statistics import mean
import os
print(os.listdir('../input')) | code |
16160251/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2 | code |
16160251/cell_24 | [
"text_html_output_1.png"
] | from statistics import mean
import matplotlib.pyplot as plt # visualizations
import pandas as pd # data frames
import seaborn as sns # visualizations
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2
df3 = points.groupby(['winner', 'reason']).count().iloc[:, :1]
df3.columns = ['Points Won']
df3
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="reason", data=points, ax=axes[0], palette="Set1")
sns.countplot(x="reason", hue='winner',data=points, ax=axes[1] ,palette="Set1")
sns.distplot(points['strokes'], color='red')
print('The mean of strokes was: ' + str(mean(points['strokes']))) | code |
16160251/cell_22 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt # visualizations
import pandas as pd # data frames
import seaborn as sns # visualizations
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
df = points.groupby(['winner', 'serve']).count().iloc[:, :1]
df.columns = ['Points Won']
df
df2 = points.groupby(['reason']).count().iloc[:, :1]
df2.columns = ['Points Won']
df2
df3 = points.groupby(['winner', 'reason']).count().iloc[:, :1]
df3.columns = ['Points Won']
df3
f, axes = plt.subplots(1,2, figsize=(15, 5))
sns.countplot(x="reason", data=points, ax=axes[0], palette="Set1")
sns.countplot(x="reason", hue='winner',data=points, ax=axes[1] ,palette="Set1")
sns.catplot(x='reason', hue='winner', col='serve', data=points, kind='count', palette='Set1') | code |
16160251/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
events.head() | code |
16160251/cell_36 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data frames
points = pd.read_csv('../input/points.csv')
serves = pd.read_csv('../input/serves.csv')
rallies = pd.read_csv('../input/rallies.csv')
events = pd.read_csv('../input/events.csv')
rallies1 = rallies.replace({'__undefined__': 'Out/Net (Not Point)'})
df5 = rallies1.groupby(['server', 'winner']).count().iloc[:, :1]
df5.columns = ['Points Won']
df5 | code |
299160/cell_3 | [
"text_plain_output_1.png"
] | from time import sleep
for i in range(3):
print(i)
sleep(0.1) | code |
122265078/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
import re
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_data_list = [train_pd, validation_pd, test_pd]
emotions_categories = {'joy': ['joy', 'happy', 'laugh', 'excited', 'surprise'], 'sadness': ['sad', 'disappointed', 'regret', 'depressed', 'lonely'], 'anger': ['angry', 'frustrated', 'annoyed', 'irritated', 'mad'], 'fear': ['afraid', 'scared', 'fear', 'terrified', 'nervous']}
def DELETE_EMOJIS(datasetinit):
lendata = len(datasetinit)
e_pattern = re.compile('[😀-🙏🌀-🗿🚀-\U0001f6ff\U0001f1e0-🇿✂-➰🤀-🧿🤐-🤿🥀-🥿🦀-🧠]+', flags=re.UNICODE)
for co_ in range(lendata):
tt = datasetinit['text'][co_]
tt = e_pattern.sub('', tt)
        datasetinit['text'][co_] = tt  # write the emoji-stripped text back
def MULTI_EMOTION_MAPPING(datasetinit):
lendata = len(datasetinit)
for co_ in range(lendata):
val = datasetinit['label'][co_]
if val == 'joy':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['joy']))
elif val == 'sadness':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['sadness']))
elif val == 'anger':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['anger']))
elif val == 'fear':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['fear']))
else:
pass
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
print(f'PROCESS OF {x_name}\n')
MULTI_EMOTION_MAPPING(x_data)
x_data.reset_index(drop=True, inplace=True)
print(f'[+++] DONE FOR {x_name}\n\n') | code |
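`MULTI_EMOTION_MAPPING` rewrites the label column row by row. A vectorized pandas version (a sketch of an equivalent, not the notebook's own code) performs the same mapping in one call per frame:

```python
# Vectorized equivalent of MULTI_EMOTION_MAPPING
mapping = {name: ','.join(words) for name, words in emotions_categories.items()}
for frame in all_data_list:
    # unmapped labels become NaN, so restore the original value for those rows
    frame['label'] = frame['label'].map(mapping).fillna(frame['label'])
```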
122265078/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_data_list = [train_pd, validation_pd, test_pd]
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
print(f'DATA NAME: {x_name}\n\nDATA:\n{x_data.head()}\n\n\n') | code |
122265078/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd
import re
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_data_list = [train_pd, validation_pd, test_pd]
emotions_categories = {'joy': ['joy', 'happy', 'laugh', 'excited', 'surprise'], 'sadness': ['sad', 'disappointed', 'regret', 'depressed', 'lonely'], 'anger': ['angry', 'frustrated', 'annoyed', 'irritated', 'mad'], 'fear': ['afraid', 'scared', 'fear', 'terrified', 'nervous']}
def DELETE_EMOJIS(datasetinit):
lendata = len(datasetinit)
e_pattern = re.compile('[😀-🙏🌀-🗿🚀-\U0001f6ff\U0001f1e0-🇿✂-➰🤀-🧿🤐-🤿🥀-🥿🦀-🧠]+', flags=re.UNICODE)
for co_ in range(lendata):
tt = datasetinit['text'][co_]
tt = e_pattern.sub('', tt)
        datasetinit['text'][co_] = tt  # write the emoji-stripped text back
def MULTI_EMOTION_MAPPING(datasetinit):
lendata = len(datasetinit)
for co_ in range(lendata):
val = datasetinit['label'][co_]
if val == 'joy':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['joy']))
elif val == 'sadness':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['sadness']))
elif val == 'anger':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['anger']))
elif val == 'fear':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['fear']))
else:
pass
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
MULTI_EMOTION_MAPPING(x_data)
x_data.reset_index(drop=True, inplace=True)
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
print(f'PROCESS OF {x_name}\n')
DELETE_EMOJIS(x_data)
x_data.reset_index(drop=True, inplace=True)
print(f'[+++] DONE FOR {x_name}\n\n') | code |
122265078/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
import re
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_data_list = [train_pd, validation_pd, test_pd]
emotions_categories = {'joy': ['joy', 'happy', 'laugh', 'excited', 'surprise'], 'sadness': ['sad', 'disappointed', 'regret', 'depressed', 'lonely'], 'anger': ['angry', 'frustrated', 'annoyed', 'irritated', 'mad'], 'fear': ['afraid', 'scared', 'fear', 'terrified', 'nervous']}
def DELETE_EMOJIS(datasetinit):
lendata = len(datasetinit)
e_pattern = re.compile('[😀-🙏🌀-🗿🚀-\U0001f6ff\U0001f1e0-🇿✂-➰🤀-🧿🤐-🤿🥀-🥿🦀-🧠]+', flags=re.UNICODE)
for co_ in range(lendata):
tt = datasetinit['text'][co_]
tt = e_pattern.sub('', tt)
        datasetinit['text'][co_] = tt  # write the emoji-stripped text back
def MULTI_EMOTION_MAPPING(datasetinit):
lendata = len(datasetinit)
for co_ in range(lendata):
val = datasetinit['label'][co_]
if val == 'joy':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['joy']))
elif val == 'sadness':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['sadness']))
elif val == 'anger':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['anger']))
elif val == 'fear':
datasetinit['label'][co_] = ','.join((str(xc) for xc in emotions_categories['fear']))
else:
pass
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
MULTI_EMOTION_MAPPING(x_data)
x_data.reset_index(drop=True, inplace=True)
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
print(f"CONTROL FOR MAPPING - {x_name}\n\n{x_data['label'].value_counts()}\n\n\n") | code |
122265078/cell_6 | [
"text_plain_output_1.png"
] | import nltk
nltk.download('stopwords')
stop_words_english = nltk.corpus.stopwords.words('english')
print(f'TOTAL LENGHT OF STOP WORDS IN ENGLISH: {len(stop_words_english)}') | code |
122265078/cell_40 | [
"text_plain_output_1.png"
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
clf.fit(xtrain, ytrain)
ypred = clf.predict(xtest)
print(f'ACCURACY SCORE: {accuracy_score(ytest, ypred)}') | code |
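Beyond overall accuracy, per-class metrics reveal which emotions the model confuses. A short sketch, assuming `ytest` and `ypred` from the cell above:

```python
from sklearn.metrics import classification_report

# Per-class precision, recall, and F1 alongside overall accuracy
print(classification_report(ytest, ypred))
```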
122265078/cell_29 | [
"text_html_output_1.png"
] | import pandas as pd
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_train_data = pd.concat([train_pd, validation_pd, test_pd], ignore_index=True)
all_train_data | code |
122265078/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
train_pd.head() | code |
122265078/cell_41 | [
"text_plain_output_1.png"
] | from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
clf.fit(xtrain, ytrain)
ypred = clf.predict(xtest)
print(f'MODEL CLASSES:\n\n{clf.classes_}') | code |
122265078/cell_52 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
import nltk
import pandas as pd
nltk.download('stopwords')
nltk.download('punkt')  # required by nltk.word_tokenize below
stop_words_english = nltk.corpus.stopwords.words('english')
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_train_data = pd.concat([train_pd, validation_pd, test_pd], ignore_index=True)
vectorizer_train = CountVectorizer(stop_words=stop_words_english)
xall = vectorizer_train.fit_transform(all_train_data['text'])
yall = all_train_data['label']
voc = vectorizer_train.get_feature_names_out()
clf = MultinomialNB()
clf.fit(xtrain, ytrain)
ypred = clf.predict(xtest)
test_sentence = "I don't know what's going on in this world, but all you know is that connection will bring freedom. Although we live in an unknown world, the existence of a woman who burns heaven is the cause of all beauty."
words_of_sentence = nltk.word_tokenize(test_sentence)
words_of_sentence = [w_ for w_ in words_of_sentence if w_ not in stop_words_english]
transform_test = vectorizer_train.transform(words_of_sentence)
pred_test = clf.predict(transform_test)
print(f'PREDICTION:\n\n{pred_test[0]}') | code |
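Note that the cell above vectorizes each token as its own document and reports the prediction for the first one. If the goal is a single label for the whole sentence, transforming the sentence as one document is the more conventional use of `CountVectorizer`; a sketch, reusing `vectorizer_train` and `clf` from the earlier cells:

```python
# Classify the sentence as one document instead of word by word
sentence_vec = vectorizer_train.transform([test_sentence])
print(f'PREDICTION:\n\n{clf.predict(sentence_vec)[0]}')
```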
122265078/cell_49 | [
"image_output_1.png"
] | import nltk
nltk.download('stopwords')
nltk.download('punkt')  # required by nltk.word_tokenize below
stop_words_english = nltk.corpus.stopwords.words('english')
test_sentence = "I don't know what's going on in this world, but all you know is that connection will bring freedom. Although we live in an unknown world, the existence of a woman who burns heaven is the cause of all beauty."
words_of_sentence = nltk.word_tokenize(test_sentence)
words_of_sentence = [w_ for w_ in words_of_sentence if w_ not in stop_words_english]
print(f'WORDS WITHOUT STOP WORDS:\n\n{words_of_sentence}') | code |
122265078/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_data_list = [train_pd, validation_pd, test_pd]
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
print(f'COLUMNS FOR {x_name}:\n{x_data.columns}\n\n') | code |
122265078/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_data_list = [train_pd, validation_pd, test_pd]
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
print(f"VALUE COUNTS FOR {x_name}:\n{x_data['label'].value_counts()}\n\n") | code |
122265078/cell_38 | [
"text_html_output_1.png"
] | from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
clf.fit(xtrain, ytrain) | code |
122265078/cell_47 | [
"text_plain_output_1.png"
] | import nltk
nltk.download('stopwords')
nltk.download('punkt')  # required by nltk.word_tokenize below
stop_words_english = nltk.corpus.stopwords.words('english')
test_sentence = "I don't know what's going on in this world, but all you know is that connection will bring freedom. Although we live in an unknown world, the existence of a woman who burns heaven is the cause of all beauty."
words_of_sentence = nltk.word_tokenize(test_sentence)
print(f'WORDS:\n\n{words_of_sentence}') | code |
122265078/cell_35 | [
"text_html_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import nltk
import pandas as pd
nltk.download('stopwords')
stop_words_english = nltk.corpus.stopwords.words('english')
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_train_data = pd.concat([train_pd, validation_pd, test_pd], ignore_index=True)
vectorizer_train = CountVectorizer(stop_words=stop_words_english)
xall = vectorizer_train.fit_transform(all_train_data['text'])
yall = all_train_data['label']
voc = vectorizer_train.get_feature_names_out()
print(f'TARGET VOCABULARY:\n\n{voc}') | code |
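The later cells fit and score on `xtrain`, `ytrain`, `xtest`, and `ytest`, but the cell that derives them from `xall` and `yall` is not included in this extract. A plausible sketch; the `test_size` and `random_state` values here are assumptions, not the notebook's actual settings:

```python
from sklearn.model_selection import train_test_split

# Hypothetical split; the notebook's real parameters are not shown
xtrain, xtest, ytrain, ytest = train_test_split(xall, yall, test_size=0.2, random_state=42)
```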
122265078/cell_43 | [
"text_plain_output_1.png"
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import seaborn as sns
emotions_categories = {'joy': ['joy', 'happy', 'laugh', 'excited', 'surprise'], 'sadness': ['sad', 'disappointed', 'regret', 'depressed', 'lonely'], 'anger': ['angry', 'frustrated', 'annoyed', 'irritated', 'mad'], 'fear': ['afraid', 'scared', 'fear', 'terrified', 'nervous']}
clf = MultinomialNB()
clf.fit(xtrain, ytrain)
ypred = clf.predict(xtest)
conf_matrix = confusion_matrix(ytest, ypred)
plt.style.use('dark_background')
plt.figure(figsize=(15, 8))
sns.heatmap(conf_matrix, annot=True, cmap='hot', xticklabels=emotions_categories.values(), yticklabels=emotions_categories.values())
plt.xlabel('PREDICTION')
plt.ylabel('ACTUAL')
plt.show() | code |
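The heatmap above labels its axes with `emotions_categories.values()`, i.e. the synonym lists in dict order, which need not match the order `confusion_matrix` actually uses (sorted label strings, the same order as `clf.classes_`). A sketch that keeps ticks aligned with the matrix rows and columns:

```python
# Use the classifier's own class order so tick labels match the matrix
labels = clf.classes_
sns.heatmap(conf_matrix, annot=True, cmap='hot', xticklabels=labels, yticklabels=labels)
```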
122265078/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd
import re
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_data_list = [train_pd, validation_pd, test_pd]
emotions_categories = {'joy': ['joy', 'happy', 'laugh', 'excited', 'surprise'], 'sadness': ['sad', 'disappointed', 'regret', 'depressed', 'lonely'], 'anger': ['angry', 'frustrated', 'annoyed', 'irritated', 'mad'], 'fear': ['afraid', 'scared', 'fear', 'terrified', 'nervous']}
def DELETE_EMOJIS(datasetinit):
lendata = len(datasetinit)
e_pattern = re.compile('[😀-🙏🌀-🗿🚀-\U0001f6ff\U0001f1e0-🇿✂-➰🤀-🧿🤐-🤿🥀-🥿🦀-🧠]+', flags=re.UNICODE)
for co_ in range(lendata):
tt = datasetinit['text'][co_]
tt = e_pattern.sub('', tt)
        datasetinit['text'][co_] = tt  # write the emoji-stripped text back
def MULTI_EMOTION_MAPPING(datasetinit):
    lendata = len(datasetinit)
    for co_ in range(lendata):
        val = datasetinit['label'][co_]
        if val in emotions_categories:
            # .loc avoids pandas chained-assignment pitfalls when writing back
            datasetinit.loc[co_, 'label'] = ','.join(str(xc) for xc in emotions_categories[val])
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
MULTI_EMOTION_MAPPING(x_data)
x_data.reset_index(drop=True, inplace=True)
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
print(f'NULL CONTROL FOR {x_name}:\n{x_data.isnull().sum()}\n\n') | code |
122265078/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_data_list = [train_pd, validation_pd, test_pd]
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
print(f'NULL CONTROL FOR {x_name}:\n{x_data.isnull().sum()}\n\n') | code |
122265078/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
import re
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
all_data_list = [train_pd, validation_pd, test_pd]
emotions_categories = {'joy': ['joy', 'happy', 'laugh', 'excited', 'surprise'], 'sadness': ['sad', 'disappointed', 'regret', 'depressed', 'lonely'], 'anger': ['angry', 'frustrated', 'annoyed', 'irritated', 'mad'], 'fear': ['afraid', 'scared', 'fear', 'terrified', 'nervous']}
def DELETE_EMOJIS(datasetinit):
lendata = len(datasetinit)
e_pattern = re.compile('[😀-🙏🌀-🗿🚀-\U0001f6ff\U0001f1e0-🇿✂-➰🤀-🧿🤐-🤿🥀-🥿🦀-🧠]+', flags=re.UNICODE)
for co_ in range(lendata):
tt = datasetinit['text'][co_]
tt = e_pattern.sub('', tt)
        datasetinit.loc[co_, 'text'] = tt  # was '==', a no-op comparison; assign the cleaned text back
def MULTI_EMOTION_MAPPING(datasetinit):
    lendata = len(datasetinit)
    for co_ in range(lendata):
        val = datasetinit['label'][co_]
        if val in emotions_categories:
            # .loc avoids pandas chained-assignment pitfalls when writing back
            datasetinit.loc[co_, 'label'] = ','.join(str(xc) for xc in emotions_categories[val])
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
MULTI_EMOTION_MAPPING(x_data)
x_data.reset_index(drop=True, inplace=True)
for x_data, x_name in zip(all_data_list, ['TRAIN', 'VALIDATION', 'TEST']):
print(f'CONTROL FOR - {x_name}\n\n{x_data.head()}\n\n\n') | code |
122265078/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd
train_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-train.csv'
validation_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-val.csv'
test_path = '/kaggle/input/emotion-classification-nlp/emotion-labels-test.csv'
train_pd = pd.read_csv(train_path)
validation_pd = pd.read_csv(validation_path)
test_pd = pd.read_csv(test_path)
train_pd.tail() | code |
122265078/cell_5 | [
"text_plain_output_1.png"
] | import nltk
nltk.download('stopwords')
stop_words_english = nltk.corpus.stopwords.words('english') | code |
16148222/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1)
data = pd.read_csv('../input/AAPL.csv')
data['Date'] = pd.to_datetime(data['Date'])
X = np.array(data['Open'])
X = X.reshape(X.shape[0], 1)
X.shape
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
tp = 20
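# tp: look-back window — each sample is built from the previous 20 scaled opening prices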
train = X[:1150]
test = X[1150 - tp:]
X_train = []
y_train = []
for i in range(tp, train.shape[0]):
X_train.append(train[i - tp:i, 0])
y_train.append(train[i, 0])
X_train, y_train = (np.array(X_train), np.array(y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
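# model: two stacked 12-unit LSTM layers feeding a single-unit dense head for next-price regression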
lstm_model = Sequential()
lstm_model.add(LSTM(12, input_shape=(X_train.shape[1], 1), activation='relu', kernel_initializer='lecun_uniform', return_sequences=True))
lstm_model.add(LSTM(12, activation='relu', kernel_initializer='lecun_uniform'))
lstm_model.add(Dense(1))
lstm_model.compile(optimizer='adam', loss='mean_squared_error')
lstm_model.fit(X_train, y_train, epochs=50, batch_size=4)
X_test = []
y_test = []
for i in range(tp, test.shape[0]):
X_test.append(test[i - tp:i, 0])
y_test.append(test[i, 0])
X_test, y_test = (np.array(X_test), np.array(y_test))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
plt.figure(figsize=(15, 10))
predicted = lstm_model.predict(X_train)
predicted = scaler.inverse_transform(predicted)
plt.plot(scaler.inverse_transform(train[-X_train.shape[0]:]), color='red', label='Open Price')  # slice length now matches the predictions (the original '- 1' made it one point longer)
plt.plot(predicted, color='green', label='Predicted Open Price')
plt.title('Apple Stock Market Open Price vs Time')
plt.xlabel('Time')
plt.ylabel('Open Price')
plt.legend()
plt.show() | code |
16148222/cell_13 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1)
data = pd.read_csv('../input/AAPL.csv')
data['Date'] = pd.to_datetime(data['Date'])
X = np.array(data['Open'])
X = X.reshape(X.shape[0], 1)
X.shape
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
tp = 20
train = X[:1150]
test = X[1150 - tp:]
X_train = []
y_train = []
for i in range(tp, train.shape[0]):
X_train.append(train[i - tp:i, 0])
y_train.append(train[i, 0])
X_train, y_train = (np.array(X_train), np.array(y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape | code |
16148222/cell_4 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/AAPL.csv')
data.head() | code |
16148222/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1)
data = pd.read_csv('../input/AAPL.csv')
data['Date'] = pd.to_datetime(data['Date'])
X = np.array(data['Open'])
X = X.reshape(X.shape[0], 1)
X.shape
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
tp = 20
train = X[:1150]
test = X[1150 - tp:]
X_train = []
y_train = []
for i in range(tp, train.shape[0]):
X_train.append(train[i - tp:i, 0])
y_train.append(train[i, 0])
X_train, y_train = (np.array(X_train), np.array(y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
lstm_model = Sequential()
lstm_model.add(LSTM(12, input_shape=(X_train.shape[1], 1), activation='relu', kernel_initializer='lecun_uniform', return_sequences=True))
lstm_model.add(LSTM(12, activation='relu', kernel_initializer='lecun_uniform'))
lstm_model.add(Dense(1))
X_test = []
y_test = []
for i in range(tp, test.shape[0]):
X_test.append(test[i - tp:i, 0])
y_test.append(test[i, 0])
X_test, y_test = (np.array(X_test), np.array(y_test))
print(X_test.shape)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
print(X_train.shape) | code |
16148222/cell_6 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/AAPL.csv')
data['Date'] = pd.to_datetime(data['Date'])
data.head() | code |
16148222/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1)
data = pd.read_csv('../input/AAPL.csv')
data['Date'] = pd.to_datetime(data['Date'])
X = np.array(data['Open'])
X = X.reshape(X.shape[0], 1)
X.shape
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
tp = 20
train = X[:1150]
test = X[1150 - tp:]
X_train = []
y_train = []
for i in range(tp, train.shape[0]):
X_train.append(train[i - tp:i, 0])
y_train.append(train[i, 0])
X_train, y_train = (np.array(X_train), np.array(y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
lstm_model = Sequential()
lstm_model.add(LSTM(12, input_shape=(X_train.shape[1], 1), activation='relu', kernel_initializer='lecun_uniform', return_sequences=True))
lstm_model.add(LSTM(12, activation='relu', kernel_initializer='lecun_uniform'))
lstm_model.add(Dense(1))
lstm_model.compile(optimizer='adam', loss='mean_squared_error')
lstm_model.fit(X_train, y_train, epochs=50, batch_size=4) | code |
16148222/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1)
data = pd.read_csv('../input/AAPL.csv')
data['Date'] = pd.to_datetime(data['Date'])
X = np.array(data['Open'])
X = X.reshape(X.shape[0], 1)
X.shape | code |
16148222/cell_10 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1)
data = pd.read_csv('../input/AAPL.csv')
data['Date'] = pd.to_datetime(data['Date'])
X = np.array(data['Open'])
X = X.reshape(X.shape[0], 1)
X.shape
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
tp = 20
train = X[:1150]
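# start the test slice tp points early so the first test window has a full look-back history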
test = X[1150 - tp:]
print(train.shape, '\n', test.shape) | code |
16148222/cell_12 | [
"image_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1)
data = pd.read_csv('../input/AAPL.csv')
data['Date'] = pd.to_datetime(data['Date'])
X = np.array(data['Open'])
X = X.reshape(X.shape[0], 1)
X.shape
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
tp = 20
train = X[:1150]
test = X[1150 - tp:]
X_train = []
y_train = []
for i in range(tp, train.shape[0]):
X_train.append(train[i - tp:i, 0])
y_train.append(train[i, 0])
X_train, y_train = (np.array(X_train), np.array(y_train))
print(X_train.shape, '\n', y_train.shape) | code |
16148222/cell_5 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/AAPL.csv')
plt.figure(figsize=(15, 10))
plt.plot(data['Open'], color='blue', label='Apple Open Stock Price')
plt.title('Apple Stock Market Open Price vs Time')
plt.xlabel('Date')
plt.ylabel('Apple Stock Price')
plt.legend()
plt.show() | code |
122263833/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df)
df.nunique()
new = df[['Product line', 'Total']].groupby(['Product line'], as_index=False).sum().sort_values(by='Total', ascending=False)
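# aggregate gross income per product line for the bar chart below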
new_grossIncome = df[['Product line', 'gross income']].groupby(['Product line'], as_index=False).sum().sort_values(by='gross income', ascending=False)
plt.figure(figsize=(15, 5))
sns.barplot(data=new_grossIncome, x='Product line', y='gross income')
plt.show() | code |
122263833/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df)
df.nunique()
# iterate (branch, count) pairs directly; positional integer indexing on a Series is deprecated
for branch, count in df.Branch.value_counts().items():
    print(branch, ':', count / len(df) * 100, '%')
122263833/cell_9 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df)
df.nunique() | code |
122263833/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df)
df.nunique()
new = df[['Product line', 'Total']].groupby(['Product line'], as_index=False).sum().sort_values(by='Total', ascending=False)
new_grossIncome = df[['Product line', 'gross income']].groupby(['Product line'], as_index=False).sum().sort_values(by='gross income', ascending=False)
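# smoothed (kernel-density) view of the customer rating distribution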
sns.kdeplot(df['Rating'], fill=True)
plt.show() | code |
122263833/cell_4 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.head() | code |
122263833/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df)
df.nunique()
new = df[['Product line', 'Total']].groupby(['Product line'], as_index=False).sum().sort_values(by='Total', ascending=False)
plt.figure(figsize=(15, 5))
sns.barplot(data=new, x='Product line', y='Total')
plt.show() | code |
122263833/cell_6 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues') | code |
122263833/cell_26 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df)
df.nunique()
new = df[['Product line', 'Total']].groupby(['Product line'], as_index=False).sum().sort_values(by='Total', ascending=False)
new_grossIncome = df[['Product line', 'gross income']].groupby(['Product line'], as_index=False).sum().sort_values(by='gross income', ascending=False)
sns.histplot(df['Rating'], kde=True)  # distplot is deprecated (removed in recent seaborn); histplot with kde is the replacement
plt.show() | code |
122263833/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df)
df.nunique()
sns.boxplot(x='Branch', y='Total', data=df)
plt.show() | code |
122263833/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
122263833/cell_7 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df) | code |
122263833/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df)
df.nunique()
plt.figure(figsize=(15, 5))
sns.countplot(data=df, x='Product line', order=df['Product line'].value_counts().index)
plt.show() | code |
122263833/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df)
df.nunique()
sns.boxplot(x='Quantity', y='Product line', data=df)
plt.show() | code |
122263833/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.describe().style.background_gradient(cmap='Blues')
df.isna().mean() / len(df)
df.nunique()
plt.figure(figsize=(10, 5))
sns.countplot(x='Quantity', hue='Branch', data=df)
plt.show() | code |
122263833/cell_5 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.info() | code |
1007503/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style='white', color_codes=True)
iris = pd.read_csv('../input/Iris.csv')
sns.jointplot(x='SepalLengthCm', y='SepalWidthCm', data=iris, height=5)  # 'size' was renamed to 'height' in seaborn 0.9
1007503/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style='white', color_codes=True)
iris = pd.read_csv('../input/Iris.csv')
iris['Species'].value_counts() | code |
1007503/cell_1 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style='white', color_codes=True)
iris = pd.read_csv('../input/Iris.csv')
iris.head() | code |
1007503/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style='white', color_codes=True)
iris = pd.read_csv('../input/Iris.csv')
iris.plot(kind='scatter', x='PetalLengthCm', y='PetalWidthCm') | code |
1007503/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | s | code |
130003964/cell_13 | [
"text_html_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt  # missing import: plt is used for the scatter plot below
df = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
df.columns
years_exp = df.YearsExperience.values
years_exp
salary = df.Salary.values
salary
x = years_exp
y = salary
x = x.reshape(-1, 1)
x
plt.scatter(x, y, color='blue')
plt.xlabel('YearsExperience')
plt.ylabel('Salary')
plt.show()
130003964/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
df.columns
years_exp = df.YearsExperience.values
years_exp
salary = df.Salary.values
salary
x = years_exp
y = salary
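# sklearn expects a 2-D feature matrix, so reshape the 1-D array into a single-column matrix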
x = x.reshape(-1, 1)
x | code |
130003964/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
df.columns
df.describe() | code |
130003964/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
df.columns
years_exp = df.YearsExperience.values
years_exp
salary = df.Salary.values
salary | code |
130003964/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
df.head(5) | code |
130003964/cell_11 | [
"text_html_output_1.png"
] | (x_train, len(x_train)) | code |
130003964/cell_19 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
import numpy as np # linear algebra
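# NOTE: x_train, y_train, x_test, y_test are assumed to come from an earlier train_test_split cell (not shown)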
lr = LinearRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict([[1.2], [3.3]])
y_predict
lr.score(x_test, y_test) * 100
y_predict = lr.predict(x_test)
y_predict
print(metrics.mean_absolute_error(y_test, y_predict))
print(metrics.mean_squared_error(y_test, y_predict))
print(np.sqrt(metrics.mean_squared_error(y_test, y_predict))) | code |
130003964/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |