Dataset schema (one row per notebook cell):
  path              stringlengths    13 to 17
  screenshot_names  sequencelengths  1 to 873
  code              stringlengths    0 to 40.4k
  cell_type         stringclasses    1 value
73099078/cell_10
[ "text_html_output_1.png" ]
y_train[:5]
code
73099078/cell_5
[ "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
messages = pd.read_csv('../input/spam-or-ham/spam.csv', usecols=['v1', 'v2'], encoding='ISO-8859-1')
messages = messages.rename(columns={'v1': 'label', 'v2': 'message'})
y = list(messages['label'])
y[:5]
# encode the 'spam'/'ham' labels as 1/0
y = list(pd.get_dummies(y, drop_first=True)['spam'])
y[:5]
code
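Note on the cell above: pd.get_dummies orders categories alphabetically, so with drop_first=True the 'ham' column is dropped and the kept 'spam' column carries the 0/1 encoding. A minimal sketch with toy labels (not from the dataset):

import pandas as pd
labels = ['ham', 'spam', 'ham']  # hypothetical toy labels
encoded = pd.get_dummies(labels, drop_first=True)['spam']
print(list(encoded))  # spam rows encode to 1/True, ham rows to 0/False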
128039963/cell_42
[ "application_vnd.jupyter.stderr_output_9.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_11.png", "text_plain_output_20.png", "text_plain_output_4.png", "text_plain_output_14.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_18.png", "application_vnd.jupyter.stderr_output_19.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_16.png", "application_vnd.jupyter.stderr_output_15.png", "text_plain_output_8.png", "application_vnd.jupyter.stderr_output_17.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_12.png", "application_vnd.jupyter.stderr_output_21.png" ]
from albumentations.pytorch import ToTensorV2
from pycocotools.coco import COCO
from torch.utils.data import DataLoader, sampler, random_split, Dataset
from torchvision import datasets, models
from torchvision.utils import draw_bounding_boxes
from tqdm import tqdm  # used by the training loop below; missing from the original cell
import albumentations as A  # our data augmentation library
import copy
import cv2
import math
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
import torch
import torchvision

def get_transforms(train=False):
    if train:
        transform = A.Compose([A.Resize(600, 600), A.HorizontalFlip(p=0.3), A.VerticalFlip(p=0.3), A.RandomBrightnessContrast(p=0.1), A.ColorJitter(p=0.1), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    else:
        transform = A.Compose([A.Resize(600, 600), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    return transform

class AquariumDetection(datasets.VisionDataset):

    def __init__(self, root, split='train', transform=None, target_transform=None, transforms=None):
        super().__init__(root, transforms, transform, target_transform)
        self.split = split
        self.coco = COCO(os.path.join(root, split, '_annotations.coco.json'))
        self.ids = list(sorted(self.coco.imgs.keys()))
        self.ids = [id for id in self.ids if len(self._load_target(id)) > 0]

    def _load_image(self, id: int):
        path = self.coco.loadImgs(id)[0]['file_name']
        image = cv2.imread(os.path.join(self.root, self.split, path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    def _load_target(self, id):
        return self.coco.loadAnns(self.coco.getAnnIds(id))

    def __getitem__(self, index):
        id = self.ids[index]
        image = self._load_image(id)
        target = copy.deepcopy(self._load_target(id))  # deep copy so transforms cannot mutate COCO's cache
        boxes = [t['bbox'] + [t['category_id']] for t in target]
        if self.transforms is not None:
            transformed = self.transforms(image=image, bboxes=boxes)
            image = transformed['image']
            boxes = transformed['bboxes']
        # convert [x, y, w, h] (COCO) to [xmin, ymin, xmax, ymax] (torchvision)
        new_boxes = []
        for box in boxes:
            xmin = box[0]
            xmax = xmin + box[2]
            ymin = box[1]
            ymax = ymin + box[3]
            new_boxes.append([xmin, ymin, xmax, ymax])
        boxes = torch.tensor(new_boxes, dtype=torch.float32)
        targ = {}
        targ['boxes'] = boxes
        targ['labels'] = torch.tensor([t['category_id'] for t in target], dtype=torch.int64)
        targ['image_id'] = torch.tensor([t['image_id'] for t in target])
        targ['area'] = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        targ['iscrowd'] = torch.tensor([t['iscrowd'] for t in target], dtype=torch.int64)
        return (image.div(255), targ)

    def __len__(self):
        return len(self.ids)

dataset_path = '/kaggle/input/aquarium-dataset/Aquarium Combined/'
coco = COCO(os.path.join(dataset_path, 'train', '_annotations.coco.json'))
categories = coco.cats
n_classes = len(categories.keys())
categories
classes = [i[1]['name'] for i in categories.items()]
classes
train_dataset = AquariumDetection(root=dataset_path, transforms=get_transforms(True))
sample = train_dataset[2]
img_int = torch.tensor(sample[0] * 255, dtype=torch.uint8)
model = models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = models.detection.faster_rcnn.FastRCNNPredictor(in_features, n_classes)

def collate_fn(batch):
    return tuple(zip(*batch))

train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=4, collate_fn=collate_fn)
images, targets = next(iter(train_loader))
images = list(images)
targets = [{k: v for k, v in t.items()} for t in targets]
output = model(images, targets)
device = torch.device('cuda')
model = model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.01, momentum=0.9, nesterov=True, weight_decay=0.0001)

def train_one_epoch(model, optimizer, loader, device, epoch):
    model.to(device)
    model.train()
    all_losses = []
    all_losses_dict = []
    for images, targets in tqdm(loader):
        images = list(image.to(device) for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]  # values are already tensors; re-wrapping them in torch.tensor() only raises warnings
        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        loss_dict_append = {k: v.item() for k, v in loss_dict.items()}
        loss_value = losses.item()
        all_losses.append(loss_value)
        all_losses_dict.append(loss_dict_append)
        if not math.isfinite(loss_value):
            print(f'Loss is {loss_value}, stopping training')
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
    all_losses_dict = pd.DataFrame(all_losses_dict)

num_epochs = 10
for epoch in range(num_epochs):
    train_one_epoch(model, optimizer, train_loader, device, epoch)
code
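For context, a minimal inference sketch for the fine-tuned detector above; this is not part of the original cell, and the 0.5 score cutoff is an arbitrary placeholder:

model.eval()  # eval mode returns predictions instead of losses
with torch.no_grad():
    preds = model([sample[0].to(device)])  # list of dicts with 'boxes', 'labels', 'scores'
keep = preds[0]['scores'] > 0.5  # hypothetical confidence threshold
print(preds[0]['boxes'][keep], preds[0]['labels'][keep])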
128039963/cell_13
[ "text_plain_output_1.png" ]
# our dataset is in COCO format, so we will need pycocotools
!pip install pycocotools
from pycocotools.coco import COCO
code
128039963/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
from albumentations.pytorch import ToTensorV2
from pycocotools.coco import COCO
from torchvision import datasets, models
from torchvision.utils import draw_bounding_boxes
import albumentations as A  # our data augmentation library
import copy
import cv2
import matplotlib.pyplot as plt
import os
import torch
import torchvision

def get_transforms(train=False):
    if train:
        transform = A.Compose([A.Resize(600, 600), A.HorizontalFlip(p=0.3), A.VerticalFlip(p=0.3), A.RandomBrightnessContrast(p=0.1), A.ColorJitter(p=0.1), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    else:
        transform = A.Compose([A.Resize(600, 600), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    return transform

class AquariumDetection(datasets.VisionDataset):

    def __init__(self, root, split='train', transform=None, target_transform=None, transforms=None):
        super().__init__(root, transforms, transform, target_transform)
        self.split = split
        self.coco = COCO(os.path.join(root, split, '_annotations.coco.json'))
        self.ids = list(sorted(self.coco.imgs.keys()))
        self.ids = [id for id in self.ids if len(self._load_target(id)) > 0]

    def _load_image(self, id: int):
        path = self.coco.loadImgs(id)[0]['file_name']
        image = cv2.imread(os.path.join(self.root, self.split, path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    def _load_target(self, id):
        return self.coco.loadAnns(self.coco.getAnnIds(id))

    def __getitem__(self, index):
        id = self.ids[index]
        image = self._load_image(id)
        target = copy.deepcopy(self._load_target(id))
        boxes = [t['bbox'] + [t['category_id']] for t in target]
        if self.transforms is not None:
            transformed = self.transforms(image=image, bboxes=boxes)
            image = transformed['image']
            boxes = transformed['bboxes']
        new_boxes = []
        for box in boxes:
            xmin = box[0]
            xmax = xmin + box[2]
            ymin = box[1]
            ymax = ymin + box[3]
            new_boxes.append([xmin, ymin, xmax, ymax])
        boxes = torch.tensor(new_boxes, dtype=torch.float32)
        targ = {}
        targ['boxes'] = boxes
        targ['labels'] = torch.tensor([t['category_id'] for t in target], dtype=torch.int64)
        targ['image_id'] = torch.tensor([t['image_id'] for t in target])
        targ['area'] = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        targ['iscrowd'] = torch.tensor([t['iscrowd'] for t in target], dtype=torch.int64)
        return (image.div(255), targ)

    def __len__(self):
        return len(self.ids)

dataset_path = '/kaggle/input/aquarium-dataset/Aquarium Combined/'
coco = COCO(os.path.join(dataset_path, 'train', '_annotations.coco.json'))
categories = coco.cats
n_classes = len(categories.keys())
categories
classes = [i[1]['name'] for i in categories.items()]
classes
train_dataset = AquariumDetection(root=dataset_path, transforms=get_transforms(True))
sample = train_dataset[2]
img_int = torch.tensor(sample[0] * 255, dtype=torch.uint8)
plt.imshow(draw_bounding_boxes(img_int, sample[1]['boxes'], [classes[i] for i in sample[1]['labels']], width=4).permute(1, 2, 0))
code
128039963/cell_23
[ "text_plain_output_1.png" ]
from albumentations.pytorch import ToTensorV2
import albumentations as A  # our data augmentation library

def get_transforms(train=False):
    if train:
        transform = A.Compose([A.Resize(600, 600), A.HorizontalFlip(p=0.3), A.VerticalFlip(p=0.3), A.RandomBrightnessContrast(p=0.1), A.ColorJitter(p=0.1), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    else:
        transform = A.Compose([A.Resize(600, 600), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    return transform

dataset_path = '/kaggle/input/aquarium-dataset/Aquarium Combined/'
# AquariumDetection is defined in an earlier cell of this notebook
train_dataset = AquariumDetection(root=dataset_path, transforms=get_transforms(True))
code
128039963/cell_20
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from albumentations.pytorch import ToTensorV2
from pycocotools.coco import COCO
from torchvision import datasets, models
import albumentations as A  # our data augmentation library
import copy
import cv2
import os
import torch
import torchvision

def get_transforms(train=False):
    if train:
        transform = A.Compose([A.Resize(600, 600), A.HorizontalFlip(p=0.3), A.VerticalFlip(p=0.3), A.RandomBrightnessContrast(p=0.1), A.ColorJitter(p=0.1), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    else:
        transform = A.Compose([A.Resize(600, 600), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    return transform

class AquariumDetection(datasets.VisionDataset):

    def __init__(self, root, split='train', transform=None, target_transform=None, transforms=None):
        super().__init__(root, transforms, transform, target_transform)
        self.split = split
        self.coco = COCO(os.path.join(root, split, '_annotations.coco.json'))
        self.ids = list(sorted(self.coco.imgs.keys()))
        self.ids = [id for id in self.ids if len(self._load_target(id)) > 0]

    def _load_image(self, id: int):
        path = self.coco.loadImgs(id)[0]['file_name']
        image = cv2.imread(os.path.join(self.root, self.split, path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    def _load_target(self, id):
        return self.coco.loadAnns(self.coco.getAnnIds(id))

    def __getitem__(self, index):
        id = self.ids[index]
        image = self._load_image(id)
        target = copy.deepcopy(self._load_target(id))
        boxes = [t['bbox'] + [t['category_id']] for t in target]
        if self.transforms is not None:
            transformed = self.transforms(image=image, bboxes=boxes)
            image = transformed['image']
            boxes = transformed['bboxes']
        new_boxes = []
        for box in boxes:
            xmin = box[0]
            xmax = xmin + box[2]
            ymin = box[1]
            ymax = ymin + box[3]
            new_boxes.append([xmin, ymin, xmax, ymax])
        boxes = torch.tensor(new_boxes, dtype=torch.float32)
        targ = {}
        targ['boxes'] = boxes
        targ['labels'] = torch.tensor([t['category_id'] for t in target], dtype=torch.int64)
        targ['image_id'] = torch.tensor([t['image_id'] for t in target])
        targ['area'] = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        targ['iscrowd'] = torch.tensor([t['iscrowd'] for t in target], dtype=torch.int64)
        return (image.div(255), targ)

    def __len__(self):
        return len(self.ids)

dataset_path = '/kaggle/input/aquarium-dataset/Aquarium Combined/'
coco = COCO(os.path.join(dataset_path, 'train', '_annotations.coco.json'))
categories = coco.cats
n_classes = len(categories.keys())
categories
code
128039963/cell_26
[ "text_plain_output_1.png" ]
from albumentations.pytorch import ToTensorV2
import albumentations as A  # our data augmentation library

def get_transforms(train=False):
    if train:
        transform = A.Compose([A.Resize(600, 600), A.HorizontalFlip(p=0.3), A.VerticalFlip(p=0.3), A.RandomBrightnessContrast(p=0.1), A.ColorJitter(p=0.1), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    else:
        transform = A.Compose([A.Resize(600, 600), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    return transform

dataset_path = '/kaggle/input/aquarium-dataset/Aquarium Combined/'
# AquariumDetection is defined in an earlier cell of this notebook
train_dataset = AquariumDetection(root=dataset_path, transforms=get_transforms(True))
len(train_dataset)
code
128039963/cell_11
[ "text_plain_output_1.png" ]
import torch
import torchvision
print(torch.__version__)
print(torchvision.__version__)
code
128039963/cell_28
[ "application_vnd.jupyter.stderr_output_1.png" ]
from albumentations.pytorch import ToTensorV2
from pycocotools.coco import COCO
from torchvision import datasets, models
import albumentations as A  # our data augmentation library
import copy
import cv2
import os
import torch
import torchvision

def get_transforms(train=False):
    if train:
        transform = A.Compose([A.Resize(600, 600), A.HorizontalFlip(p=0.3), A.VerticalFlip(p=0.3), A.RandomBrightnessContrast(p=0.1), A.ColorJitter(p=0.1), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    else:
        transform = A.Compose([A.Resize(600, 600), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    return transform

class AquariumDetection(datasets.VisionDataset):

    def __init__(self, root, split='train', transform=None, target_transform=None, transforms=None):
        super().__init__(root, transforms, transform, target_transform)
        self.split = split
        self.coco = COCO(os.path.join(root, split, '_annotations.coco.json'))
        self.ids = list(sorted(self.coco.imgs.keys()))
        self.ids = [id for id in self.ids if len(self._load_target(id)) > 0]

    def _load_image(self, id: int):
        path = self.coco.loadImgs(id)[0]['file_name']
        image = cv2.imread(os.path.join(self.root, self.split, path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    def _load_target(self, id):
        return self.coco.loadAnns(self.coco.getAnnIds(id))

    def __getitem__(self, index):
        id = self.ids[index]
        image = self._load_image(id)
        target = copy.deepcopy(self._load_target(id))
        boxes = [t['bbox'] + [t['category_id']] for t in target]
        if self.transforms is not None:
            transformed = self.transforms(image=image, bboxes=boxes)
            image = transformed['image']
            boxes = transformed['bboxes']
        new_boxes = []
        for box in boxes:
            xmin = box[0]
            xmax = xmin + box[2]
            ymin = box[1]
            ymax = ymin + box[3]
            new_boxes.append([xmin, ymin, xmax, ymax])
        boxes = torch.tensor(new_boxes, dtype=torch.float32)
        targ = {}
        targ['boxes'] = boxes
        targ['labels'] = torch.tensor([t['category_id'] for t in target], dtype=torch.int64)
        targ['image_id'] = torch.tensor([t['image_id'] for t in target])
        targ['area'] = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        targ['iscrowd'] = torch.tensor([t['iscrowd'] for t in target], dtype=torch.int64)
        return (image.div(255), targ)

    def __len__(self):
        return len(self.ids)

dataset_path = '/kaggle/input/aquarium-dataset/Aquarium Combined/'
coco = COCO(os.path.join(dataset_path, 'train', '_annotations.coco.json'))
categories = coco.cats
n_classes = len(categories.keys())
categories
# replace the pretrained head with one sized for this dataset's classes
model = models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = models.detection.faster_rcnn.FastRCNNPredictor(in_features, n_classes)
code
128039963/cell_46
[ "text_plain_output_1.png" ]
from albumentations.pytorch import ToTensorV2
import albumentations as A  # our data augmentation library

def get_transforms(train=False):
    if train:
        transform = A.Compose([A.Resize(600, 600), A.HorizontalFlip(p=0.3), A.VerticalFlip(p=0.3), A.RandomBrightnessContrast(p=0.1), A.ColorJitter(p=0.1), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    else:
        transform = A.Compose([A.Resize(600, 600), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    return transform

dataset_path = '/kaggle/input/aquarium-dataset/Aquarium Combined/'
# AquariumDetection is defined in an earlier cell of this notebook
test_dataset = AquariumDetection(root=dataset_path, split='test', transforms=get_transforms(False))
code
128039963/cell_22
[ "text_plain_output_1.png" ]
from albumentations.pytorch import ToTensorV2
from pycocotools.coco import COCO
from torchvision import datasets, models
import albumentations as A  # our data augmentation library
import copy
import cv2
import os
import torch
import torchvision

def get_transforms(train=False):
    if train:
        transform = A.Compose([A.Resize(600, 600), A.HorizontalFlip(p=0.3), A.VerticalFlip(p=0.3), A.RandomBrightnessContrast(p=0.1), A.ColorJitter(p=0.1), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    else:
        transform = A.Compose([A.Resize(600, 600), ToTensorV2()], bbox_params=A.BboxParams(format='coco'))
    return transform

class AquariumDetection(datasets.VisionDataset):

    def __init__(self, root, split='train', transform=None, target_transform=None, transforms=None):
        super().__init__(root, transforms, transform, target_transform)
        self.split = split
        self.coco = COCO(os.path.join(root, split, '_annotations.coco.json'))
        self.ids = list(sorted(self.coco.imgs.keys()))
        self.ids = [id for id in self.ids if len(self._load_target(id)) > 0]

    def _load_image(self, id: int):
        path = self.coco.loadImgs(id)[0]['file_name']
        image = cv2.imread(os.path.join(self.root, self.split, path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    def _load_target(self, id):
        return self.coco.loadAnns(self.coco.getAnnIds(id))

    def __getitem__(self, index):
        id = self.ids[index]
        image = self._load_image(id)
        target = copy.deepcopy(self._load_target(id))
        boxes = [t['bbox'] + [t['category_id']] for t in target]
        if self.transforms is not None:
            transformed = self.transforms(image=image, bboxes=boxes)
            image = transformed['image']
            boxes = transformed['bboxes']
        new_boxes = []
        for box in boxes:
            xmin = box[0]
            xmax = xmin + box[2]
            ymin = box[1]
            ymax = ymin + box[3]
            new_boxes.append([xmin, ymin, xmax, ymax])
        boxes = torch.tensor(new_boxes, dtype=torch.float32)
        targ = {}
        targ['boxes'] = boxes
        targ['labels'] = torch.tensor([t['category_id'] for t in target], dtype=torch.int64)
        targ['image_id'] = torch.tensor([t['image_id'] for t in target])
        targ['area'] = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        targ['iscrowd'] = torch.tensor([t['iscrowd'] for t in target], dtype=torch.int64)
        return (image.div(255), targ)

    def __len__(self):
        return len(self.ids)

dataset_path = '/kaggle/input/aquarium-dataset/Aquarium Combined/'
coco = COCO(os.path.join(dataset_path, 'train', '_annotations.coco.json'))
categories = coco.cats
n_classes = len(categories.keys())
categories
classes = [i[1]['name'] for i in categories.items()]
classes
code
88093804/cell_13
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import scanpy as sc
import scipy
import seaborn as sns
import time

adata = sc.datasets.krumsiek11()
adata.var.index
adata = sc.datasets.pbmc3k_processed()
adata.var.index

for dataset in ['moignard15', 'pbmc3k', 'pbmc3k_processed', 'pbmc68k_reduced', 'paul15', 'krumsiek11']:
    t0 = time.time()
    adata = getattr(sc.datasets, dataset)()

dict_datasets_info = {'krumsiek11': 'Simulated myeloid progenitors [Krumsiek11].', 'moignard15': 'Hematopoiesis in early mouse embryos [Moignard15].', 'pbmc3k': '3k PBMCs from 10x Genomics', 'pbmc3k_processed': 'Processed 3k PBMCs from 10x Genomics.', 'pbmc68k_reduced': 'Subsampled and processed 68k PBMCs.', 'paul15': 'Development of Myeloid Progenitors [Paul15].'}

for dataset in ['moignard15', 'pbmc3k', 'pbmc3k_processed', 'pbmc68k_reduced', 'paul15', 'krumsiek11']:
    print(dataset, dict_datasets_info[dataset])
    t0 = time.time()
    adata = getattr(sc.datasets, dataset)()
    print(np.round(time.time() - t0, 0), 'Seconds passed for loading')
    print(adata)
    print()
    reducer = PCA(n_components=2)
    if not scipy.sparse.issparse(adata.X):
        r = reducer.fit_transform(adata.X)
    else:
        r = reducer.fit_transform(adata.X.toarray())
    plt.figure(figsize=(20, 8))
    if 'n_counts' not in adata.obs.columns:
        sns.scatterplot(x=r[:, 0], y=r[:, 1])
    else:
        sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=adata.obs['n_counts'])
    plt.title(dataset + ' ' + str(len(adata)) + ' cells' + '\n' + dict_datasets_info[dataset], fontsize=20)
    plt.xlabel('PCA1', fontsize=20)
    plt.ylabel('PCA2', fontsize=20)
    plt.show()
code
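An aside on the cell above: scanpy ships its own PCA wrapper, so the sklearn/scipy sparse handling can be avoided; a minimal sketch under that assumption:

import scanpy as sc
adata = sc.datasets.pbmc3k_processed()
sc.pp.pca(adata, n_comps=2)  # handles sparse adata.X internally
r = adata.obsm['X_pca']  # comparable to the sklearn PCA output above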
88093804/cell_6
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import scanpy as sc
adata = sc.datasets.krumsiek11()
print(adata)
adata.var.index
code
88093804/cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
!pip install scanpy
code
88093804/cell_7
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_9.png", "text_plain_output_4.png", "image_output_5.png", "application_vnd.jupyter.stderr_output_8.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_10.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_6.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "text_plain_output_11.png" ]
!pip install openpyxl  # Requires: !pip install openpyxl
adata = getattr(sc.datasets, "moignard15")()
print(adata)
adata.var.index
code
88093804/cell_8
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import scanpy as sc
adata = sc.datasets.krumsiek11()
adata.var.index
adata = sc.datasets.pbmc3k_processed()
print(adata)
adata.var.index
code
88093804/cell_15
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import scanpy as sc
import scipy
import seaborn as sns
import time

adata = sc.datasets.krumsiek11()
adata.var.index
adata = sc.datasets.pbmc3k_processed()
adata.var.index

for dataset in ['moignard15', 'pbmc3k', 'pbmc3k_processed', 'pbmc68k_reduced', 'paul15', 'krumsiek11']:
    t0 = time.time()
    adata = getattr(sc.datasets, dataset)()

dict_datasets_info = {'krumsiek11': 'Simulated myeloid progenitors [Krumsiek11].', 'moignard15': 'Hematopoiesis in early mouse embryos [Moignard15].', 'pbmc3k': '3k PBMCs from 10x Genomics', 'pbmc3k_processed': 'Processed 3k PBMCs from 10x Genomics.', 'pbmc68k_reduced': 'Subsampled and processed 68k PBMCs.', 'paul15': 'Development of Myeloid Progenitors [Paul15].'}

for dataset in ['moignard15', 'pbmc3k', 'pbmc3k_processed', 'pbmc68k_reduced', 'paul15', 'krumsiek11']:
    t0 = time.time()
    adata = getattr(sc.datasets, dataset)()
    reducer = PCA(n_components=2)
    if not scipy.sparse.issparse(adata.X):
        r = reducer.fit_transform(adata.X)
    else:
        r = reducer.fit_transform(adata.X.toarray())

for accession in ['E-GEOD-98816', 'E-MTAB-9154']:
    t0 = time.time()
    adata = sc.datasets.ebi_expression_atlas(accession)
    print(np.round(time.time() - t0, 0), 'Seconds passed for loading')
    print(adata)
    print()
    reducer = PCA(n_components=2)
    if not scipy.sparse.issparse(adata.X):
        r = reducer.fit_transform(adata.X)
    else:
        r = reducer.fit_transform(adata.X.toarray())
    plt.figure(figsize=(20, 8))
    if 'n_counts' not in adata.obs.columns:
        sns.scatterplot(x=r[:, 0], y=r[:, 1])
    else:
        sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=adata.obs['n_counts'])
    plt.title(accession + ' ' + str(len(adata)) + ' cells', fontsize=20)
    plt.xlabel('PCA1', fontsize=20)
    plt.ylabel('PCA2', fontsize=20)
    plt.show()
code
88093804/cell_3
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
!pip install openpyxl
code
88093804/cell_10
[ "text_plain_output_1.png" ]
import numpy as np
import scanpy as sc
import time

adata = sc.datasets.krumsiek11()
adata.var.index
adata = sc.datasets.pbmc3k_processed()
adata.var.index

for dataset in ['moignard15', 'pbmc3k', 'pbmc3k_processed', 'pbmc68k_reduced', 'paul15', 'krumsiek11']:
    print(dataset)
    t0 = time.time()
    adata = getattr(sc.datasets, dataset)()
    print(np.round(time.time() - t0, 0), 'Seconds passed for loading')
    print(adata)
    print()
code
74044395/cell_21
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.formula.api as smf

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_1', 'origin_2', 'origin_3'}, axis=1)
y = mpg_df[['mpg']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
data_train = pd.concat([X_train, y_train], axis=1)
data_train.columns
data_train.rename(columns={'model year': 'model_year'}, inplace=True)
lm1 = smf.ols(formula='mpg ~ cylinders+displacement+horsepower+weight+acceleration+model_year', data=data_train).fit()
lm1.params
code
74044395/cell_13
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_1', 'origin_2', 'origin_3'}, axis=1)
y = mpg_df[['mpg']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
code
74044395/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
code
74044395/cell_25
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_1', 'origin_2', 'origin_3'}, axis=1)
y = mpg_df[['mpg']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
intercept = regression_model.intercept_[0]
regression_model.score(X_train, y_train)
regression_model.score(X_test, y_test)
mse = np.mean((regression_model.predict(X_test) - y_test) ** 2)
regression_model.score(X_test, y_test)
code
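As a cross-check on the cell above (not in the original notebook), sklearn's metric should reproduce the manually computed MSE, since y_test has a single column:

from sklearn.metrics import mean_squared_error
mse_check = mean_squared_error(y_test, regression_model.predict(X_test))
print(mse_check)  # should agree with the mse computed manually above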
74044395/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
code
74044395/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
mpg_df.describe().transpose()
code
74044395/cell_11
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
code
74044395/cell_19
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_1', 'origin_2', 'origin_3'}, axis=1)
y = mpg_df[['mpg']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
data_train = pd.concat([X_train, y_train], axis=1)
data_train.head()
code
74044395/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74044395/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
code
74044395/cell_8
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
code
74044395/cell_15
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_1', 'origin_2', 'origin_3'}, axis=1)
y = mpg_df[['mpg']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
intercept = regression_model.intercept_[0]
print('The intercept for our model is {}'.format(intercept))
code
74044395/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_1', 'origin_2', 'origin_3'}, axis=1)
y = mpg_df[['mpg']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
intercept = regression_model.intercept_[0]
regression_model.score(X_train, y_train)
code
74044395/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_1', 'origin_2', 'origin_3'}, axis=1)
y = mpg_df[['mpg']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
intercept = regression_model.intercept_[0]
regression_model.score(X_train, y_train)
regression_model.score(X_test, y_test)
code
74044395/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import math
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_1', 'origin_2', 'origin_3'}, axis=1)
y = mpg_df[['mpg']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
intercept = regression_model.intercept_[0]
regression_model.score(X_train, y_train)
regression_model.score(X_test, y_test)
mse = np.mean((regression_model.predict(X_test) - y_test) ** 2)
math.sqrt(mse)
code
74044395/cell_14
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_1', 'origin_2', 'origin_3'}, axis=1)
y = mpg_df[['mpg']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
for idx, col_name in enumerate(X_train.columns):
    print('The coefficient for {} is {}'.format(col_name, regression_model.coef_[0][idx]))
code
74044395/cell_22
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.formula.api as smf

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
mpg_df.columns
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_1', 'origin_2', 'origin_3'}, axis=1)
y = mpg_df[['mpg']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
data_train = pd.concat([X_train, y_train], axis=1)
data_train.columns
data_train.rename(columns={'model year': 'model_year'}, inplace=True)
lm1 = smf.ols(formula='mpg ~ cylinders+displacement+horsepower+weight+acceleration+model_year', data=data_train).fit()
lm1.params
print(lm1.summary())
code
74044395/cell_10
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

mpg_df = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv')
mpg_df.columns
mpg_df = mpg_df.drop('car name', axis=1)
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
temp = pd.DataFrame(mpg_df.horsepower.str.isdigit())
temp[temp['horsepower'] == False]
mpg_df = mpg_df.replace('?', np.nan)
mpg_df[mpg_df.isnull().any(axis=1)]
mpg_df.median()
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()), axis=0)
mpg_df['hp'] = mpg_df['horsepower'].astype('float64')
mpg_df.dtypes
mpg_df_attr = mpg_df.iloc[:, 0:10]
sns.pairplot(mpg_df_attr, diag_kind='kde')
code
129009262/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.head()
code
129009262/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from IPython.display import clear_output
!pip3 install -U lazypredict
!pip3 install -U pandas  # Upgrading pandas
clear_output()
code
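The cells shown from this notebook only install lazypredict and never call it; for reference, a minimal sketch of its usual API, assuming an already numeric feature matrix X and label vector y (both hypothetical here):

from lazypredict.Supervised import LazyClassifier
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=RANDOM_STATE)  # X, y are hypothetical
clf = LazyClassifier(verbose=0, ignore_warnings=True)
models, predictions = clf.fit(X_train, X_test, y_train, y_test)  # leaderboard of baseline classifiers
models.head()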
129009262/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
code
128048432/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.25)

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)
df.columns = df.columns.str.lower()
df.shape
df.isna().sum()
df.dropna(inplace=True)
filt_loc = df.location.str.contains(',')
F_index = filt_loc.loc[lambda x: x == False].index
df.drop(index=F_index, inplace=True)
df.reset_index(drop=True, inplace=True)
df['city'] = df.location.apply(lambda x: x.rsplit(',', 1)[1])
myplot_city = sns.barplot(x=df.city.value_counts().index[:4], y=df.city.value_counts()[:4])
myplot_city.set(xlabel='City', ylabel='No. of Hotels')
plt.show()
myplot_price = sns.histplot(df.price)
plt.show()
df[(df.price > 800) & (df.price < 1400)].shape[0] / df.shape[0]
df.price.mean()
code
128048432/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)
df.columns = df.columns.str.lower()
df.shape
df.info()
code
128048432/cell_25
[ "image_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.25)

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)
df.columns = df.columns.str.lower()
df.shape
df.isna().sum()
df.dropna(inplace=True)
filt_loc = df.location.str.contains(',')
F_index = filt_loc.loc[lambda x: x == False].index
df.drop(index=F_index, inplace=True)
df.reset_index(drop=True, inplace=True)
df['city'] = df.location.apply(lambda x: x.rsplit(',', 1)[1])
myplot_city = sns.barplot(x=df.city.value_counts().index[:4], y=df.city.value_counts()[:4])
myplot_city.set(xlabel='City', ylabel='No. of Hotels')
plt.show()
myplot_price = sns.histplot(df.price)
plt.show()
df[(df.price > 800) & (df.price < 1400)].shape[0] / df.shape[0]
df.price.mean()
df.price.median()
grp_Bangalore = df[df.city.str.contains('Bangalore')]
sns.histplot(grp_Bangalore.price, kde=True)
plt.show()
code
128048432/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)
df.columns = df.columns.str.lower()
df.shape
df.isna().sum()
df.describe()
code
128048432/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.25)

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)
df.columns = df.columns.str.lower()
df.shape
df.isna().sum()
df.dropna(inplace=True)
filt_loc = df.location.str.contains(',')
F_index = filt_loc.loc[lambda x: x == False].index
df.drop(index=F_index, inplace=True)
df.reset_index(drop=True, inplace=True)
df['city'] = df.location.apply(lambda x: x.rsplit(',', 1)[1])
myplot_city = sns.barplot(x=df.city.value_counts().index[:4], y=df.city.value_counts()[:4])
myplot_city.set(xlabel='City', ylabel='No. of Hotels')
plt.show()
myplot_price = sns.histplot(df.price)
plt.show()
df[(df.price > 800) & (df.price < 1400)].shape[0] / df.shape[0]
code
128048432/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.25)

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)
df.columns = df.columns.str.lower()
df.shape
df.isna().sum()
df.dropna(inplace=True)
filt_loc = df.location.str.contains(',')
F_index = filt_loc.loc[lambda x: x == False].index
df.drop(index=F_index, inplace=True)
df.reset_index(drop=True, inplace=True)
df['city'] = df.location.apply(lambda x: x.rsplit(',', 1)[1])
myplot_city = sns.barplot(x=df.city.value_counts().index[:4], y=df.city.value_counts()[:4])
myplot_city.set(xlabel='City', ylabel='No. of Hotels')
plt.show()
myplot_price = sns.histplot(df.price)
plt.show()
code
128048432/cell_8
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)
df.columns = df.columns.str.lower()
df.shape
code
128048432/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.25)

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)
df.columns = df.columns.str.lower()
df.shape
df.isna().sum()
df.dropna(inplace=True)
filt_loc = df.location.str.contains(',')
F_index = filt_loc.loc[lambda x: x == False].index
df.drop(index=F_index, inplace=True)
df.reset_index(drop=True, inplace=True)
df['city'] = df.location.apply(lambda x: x.rsplit(',', 1)[1])
myplot_city = sns.barplot(x=df.city.value_counts().index[:4], y=df.city.value_counts()[:4])
myplot_city.set(xlabel='City', ylabel='No. of Hotels')
plt.show()
code
128048432/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.25)
code
128048432/cell_22
[ "image_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.25)

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)
df.columns = df.columns.str.lower()
df.shape
df.isna().sum()
df.dropna(inplace=True)
filt_loc = df.location.str.contains(',')
F_index = filt_loc.loc[lambda x: x == False].index
df.drop(index=F_index, inplace=True)
df.reset_index(drop=True, inplace=True)
df['city'] = df.location.apply(lambda x: x.rsplit(',', 1)[1])
myplot_city = sns.barplot(x=df.city.value_counts().index[:4], y=df.city.value_counts()[:4])
myplot_city.set(xlabel='City', ylabel='No. of Hotels')
plt.show()
myplot_price = sns.histplot(df.price)
plt.show()
df[(df.price > 800) & (df.price < 1400)].shape[0] / df.shape[0]
df.price.mean()
df.price.median()
code
128048432/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)
df.columns = df.columns.str.lower()
df.shape
df.isna().sum()
code
128048432/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv')
df
code
105196046/cell_4
[ "text_plain_output_1.png" ]
a = 4
b = 89
c = 47
a = 4
b = -89
c = 47
if a > 0 and b > 0 and (c > 0):
    print('positive')
else:
    print('negative')
code
105196046/cell_6
[ "text_plain_output_1.png" ]
a = 4
b = 89
c = 47
a = 4
b = -89
c = 47
a = 4
b = 89
c = 47
if a > 0 or b < 0 or c == 0:
    print('positive')
else:
    print('negative')
code
105196046/cell_8
[ "text_plain_output_1.png" ]
t = 8
v = 876
not t < v
code
105196046/cell_3
[ "text_plain_output_1.png" ]
a = 4
b = 89
c = 47
if a > 0 and b > 0 and (c > 0):
    print('positive')
else:
    print('negative')
code
2004795/cell_6
[ "text_plain_output_1.png" ]
from subprocess import check_output
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.filterwarnings('ignore')

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

x_columns = [i for i in train.columns if i not in list(['id', 'formation_energy_ev_natom', 'bandgap_energy_ev'])]
label1 = 'formation_energy_ev_natom'
label2 = 'bandgap_energy_ev'
X = train[x_columns]
y = train[[label1, label2]]
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=2017)
X_train = X_train.to_numpy()  # .as_matrix() in the original has been removed from pandas
X_valid = X_valid.to_numpy()
y_train_values1 = np.log1p(y_train['formation_energy_ev_natom'].values)
y_train_values2 = np.log1p(y_train['bandgap_energy_ev'].values)
y_valid_values1 = np.log1p(y_valid['formation_energy_ev_natom'].values)
y_valid_values2 = np.log1p(y_valid['bandgap_energy_ev'].values)

clf1 = KernelRidge(kernel='linear', alpha=1.0)
clf2 = KernelRidge(kernel='linear', alpha=1.0)
clf1.fit(X_train, y_train_values1)
clf2.fit(X_train, y_train_values2)
preds1 = clf1.predict(X_valid)
preds2 = clf2.predict(X_valid)
y_pred1 = np.exp(preds1) - 1
y_pred2 = np.exp(preds2) - 1
rmse_valid1 = np.sqrt(mean_squared_error(y_valid_values1, preds1))
rmse_valid2 = np.sqrt(mean_squared_error(y_valid_values2, preds2))
rmse_total = np.sqrt(rmse_valid1 * rmse_valid1 + rmse_valid2 * rmse_valid2)
print('RMSE for formation energy:')
print(rmse_valid1)
print('RMSE for band gap:')
print(rmse_valid2)
print('Total RMSE:')
print(rmse_total)
code
2004795/cell_8
[ "text_plain_output_1.png" ]
from subprocess import check_output
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.filterwarnings('ignore')

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

x_columns = [i for i in train.columns if i not in list(['id', 'formation_energy_ev_natom', 'bandgap_energy_ev'])]
label1 = 'formation_energy_ev_natom'
label2 = 'bandgap_energy_ev'
X = train[x_columns]
y = train[[label1, label2]]
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=2017)
X_train = X_train.to_numpy()  # .as_matrix() in the original has been removed from pandas
X_valid = X_valid.to_numpy()
y_train_values1 = np.log1p(y_train['formation_energy_ev_natom'].values)
y_train_values2 = np.log1p(y_train['bandgap_energy_ev'].values)
y_valid_values1 = np.log1p(y_valid['formation_energy_ev_natom'].values)
y_valid_values2 = np.log1p(y_valid['bandgap_energy_ev'].values)

clf1 = KernelRidge(kernel='linear', alpha=1.0)
clf2 = KernelRidge(kernel='linear', alpha=1.0)
clf1.fit(X_train, y_train_values1)
clf2.fit(X_train, y_train_values2)
preds1 = clf1.predict(X_valid)
preds2 = clf2.predict(X_valid)
y_pred1 = np.exp(preds1) - 1
y_pred2 = np.exp(preds2) - 1
rmse_valid1 = np.sqrt(mean_squared_error(y_valid_values1, preds1))
rmse_valid2 = np.sqrt(mean_squared_error(y_valid_values2, preds2))
rmse_total = np.sqrt(rmse_valid1 * rmse_valid1 + rmse_valid2 * rmse_valid2)

clf3 = KernelRidge(kernel='polynomial', alpha=1.0)
clf4 = KernelRidge(kernel='polynomial', alpha=1.0)
clf3.fit(X_train, y_train_values1)
clf4.fit(X_train, y_train_values2)
preds1 = clf3.predict(X_valid)
preds2 = clf4.predict(X_valid)
y_pred1 = np.exp(preds1) - 1
y_pred2 = np.exp(preds2) - 1
rmse_valid1 = np.sqrt(mean_squared_error(y_valid_values1, preds1))
rmse_valid2 = np.sqrt(mean_squared_error(y_valid_values2, preds2))
rmse_total = np.sqrt(rmse_valid1 * rmse_valid1 + rmse_valid2 * rmse_valid2)
print('RMSE for formation energy:')
print(rmse_valid1)
print('RMSE for band gap:')
print(rmse_valid2)
print('Total RMSE:')
print(rmse_total)
code
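The kernel cells in this notebook repeat the same fit/score block with alpha fixed at 1.0; the comparison could equally be run as one grid search (a sketch; the alpha grid is illustrative, not from the original):

from sklearn.model_selection import GridSearchCV
param_grid = {'kernel': ['linear', 'polynomial', 'rbf', 'laplacian'], 'alpha': [0.1, 1.0, 10.0]}
search = GridSearchCV(KernelRidge(), param_grid, scoring='neg_mean_squared_error', cv=5)
search.fit(X_train, y_train_values1)
print(search.best_params_, np.sqrt(-search.best_score_))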
2004795/cell_3
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.filterwarnings('ignore')

print(check_output(['ls', '../input']).decode('utf8'))
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
code
2004795/cell_10
[ "text_plain_output_1.png" ]
from subprocess import check_output
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.filterwarnings('ignore')

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

x_columns = [i for i in train.columns if i not in list(['id', 'formation_energy_ev_natom', 'bandgap_energy_ev'])]
label1 = 'formation_energy_ev_natom'
label2 = 'bandgap_energy_ev'
X = train[x_columns]
y = train[[label1, label2]]
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=2017)
X_train = X_train.to_numpy()  # .as_matrix() in the original has been removed from pandas
X_valid = X_valid.to_numpy()
y_train_values1 = np.log1p(y_train['formation_energy_ev_natom'].values)
y_train_values2 = np.log1p(y_train['bandgap_energy_ev'].values)
y_valid_values1 = np.log1p(y_valid['formation_energy_ev_natom'].values)
y_valid_values2 = np.log1p(y_valid['bandgap_energy_ev'].values)

clf1 = KernelRidge(kernel='linear', alpha=1.0)
clf2 = KernelRidge(kernel='linear', alpha=1.0)
clf1.fit(X_train, y_train_values1)
clf2.fit(X_train, y_train_values2)
preds1 = clf1.predict(X_valid)
preds2 = clf2.predict(X_valid)
y_pred1 = np.exp(preds1) - 1
y_pred2 = np.exp(preds2) - 1
rmse_valid1 = np.sqrt(mean_squared_error(y_valid_values1, preds1))
rmse_valid2 = np.sqrt(mean_squared_error(y_valid_values2, preds2))
rmse_total = np.sqrt(rmse_valid1 * rmse_valid1 + rmse_valid2 * rmse_valid2)

clf3 = KernelRidge(kernel='polynomial', alpha=1.0)
clf4 = KernelRidge(kernel='polynomial', alpha=1.0)
clf3.fit(X_train, y_train_values1)
clf4.fit(X_train, y_train_values2)
preds1 = clf3.predict(X_valid)
preds2 = clf4.predict(X_valid)
y_pred1 = np.exp(preds1) - 1
y_pred2 = np.exp(preds2) - 1
rmse_valid1 = np.sqrt(mean_squared_error(y_valid_values1, preds1))
rmse_valid2 = np.sqrt(mean_squared_error(y_valid_values2, preds2))
rmse_total = np.sqrt(rmse_valid1 * rmse_valid1 + rmse_valid2 * rmse_valid2)

clf5 = KernelRidge(kernel='rbf', alpha=1.0)
clf6 = KernelRidge(kernel='rbf', alpha=1.0)
clf5.fit(X_train, y_train_values1)
clf6.fit(X_train, y_train_values2)
preds1 = clf5.predict(X_valid)
preds2 = clf6.predict(X_valid)
y_pred1 = np.exp(preds1) - 1
y_pred2 = np.exp(preds2) - 1
rmse_valid1 = np.sqrt(mean_squared_error(y_valid_values1, preds1))
rmse_valid2 = np.sqrt(mean_squared_error(y_valid_values2, preds2))
rmse_total = np.sqrt(rmse_valid1 * rmse_valid1 + rmse_valid2 * rmse_valid2)
print('RMSE for formation energy:')
print(rmse_valid1)
print('RMSE for band gap:')
print(rmse_valid2)
print('Total RMSE:')
print(rmse_total)
code
2004795/cell_12
[ "text_plain_output_1.png" ]
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from subprocess import check_output
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
x_columns = [i for i in train.columns if i not in list(['id', 'formation_energy_ev_natom', 'bandgap_energy_ev'])]
label1 = 'formation_energy_ev_natom'
label2 = 'bandgap_energy_ev'
X = train[x_columns]
y = train[[label1, label2]]
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=2017)
X_train = X_train.as_matrix()  # old pandas API; .to_numpy() is the current equivalent
X_valid = X_valid.as_matrix()
y_train_values1 = np.log1p(y_train['formation_energy_ev_natom'].values)
y_train_values2 = np.log1p(y_train['bandgap_energy_ev'].values)
y_valid_values1 = np.log1p(y_valid['formation_energy_ev_natom'].values)
y_valid_values2 = np.log1p(y_valid['bandgap_energy_ev'].values)
clf1 = KernelRidge(kernel='linear', alpha=1.0)
clf2 = KernelRidge(kernel='linear', alpha=1.0)
clf1.fit(X_train, y_train_values1)
clf2.fit(X_train, y_train_values2)
preds1 = clf1.predict(X_valid)
preds2 = clf2.predict(X_valid)
y_pred1 = np.exp(preds1) - 1
y_pred2 = np.exp(preds2) - 1
rsme_valid1 = np.sqrt(mean_squared_error(y_valid_values1, preds1))
rsme_valid2 = np.sqrt(mean_squared_error(y_valid_values2, preds2))
rsme_total = np.sqrt(rsme_valid1 * rsme_valid1 + rsme_valid2 * rsme_valid2)
clf3 = KernelRidge(kernel='polynomial', alpha=1.0)
clf4 = KernelRidge(kernel='polynomial', alpha=1.0)
clf3.fit(X_train, y_train_values1)
clf4.fit(X_train, y_train_values2)
preds1 = clf3.predict(X_valid)
preds2 = clf4.predict(X_valid)
y_pred1 = np.exp(preds1) - 1
y_pred2 = np.exp(preds2) - 1
rsme_valid1 = np.sqrt(mean_squared_error(y_valid_values1, preds1))
rsme_valid2 = np.sqrt(mean_squared_error(y_valid_values2, preds2))
rsme_total = np.sqrt(rsme_valid1 * rsme_valid1 + rsme_valid2 * rsme_valid2)
clf5 = KernelRidge(kernel='rbf', alpha=1.0)
clf6 = KernelRidge(kernel='rbf', alpha=1.0)
clf5.fit(X_train, y_train_values1)
clf6.fit(X_train, y_train_values2)
preds1 = clf5.predict(X_valid)
preds2 = clf6.predict(X_valid)
y_pred1 = np.exp(preds1) - 1
y_pred2 = np.exp(preds2) - 1
rsme_valid1 = np.sqrt(mean_squared_error(y_valid_values1, preds1))
rsme_valid2 = np.sqrt(mean_squared_error(y_valid_values2, preds2))
rsme_total = np.sqrt(rsme_valid1 * rsme_valid1 + rsme_valid2 * rsme_valid2)
clf7 = KernelRidge(kernel='laplacian', alpha=1.0)
clf8 = KernelRidge(kernel='laplacian', alpha=1.0)
clf7.fit(X_train, y_train_values1)
clf8.fit(X_train, y_train_values2)
preds1 = clf7.predict(X_valid)
preds2 = clf8.predict(X_valid)
y_pred1 = np.exp(preds1) - 1
y_pred2 = np.exp(preds2) - 1
rsme_valid1 = np.sqrt(mean_squared_error(y_valid_values1, preds1))
rsme_valid2 = np.sqrt(mean_squared_error(y_valid_values2, preds2))
rsme_total = np.sqrt(rsme_valid1 * rsme_valid1 + rsme_valid2 * rsme_valid2)
print('RMSE for formation energy:')
print(rsme_valid1)
print('RMSE for band gap:')
print(rsme_valid2)
print('RMSE for total:')
print(rsme_total)
code
74063412/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import re
import pandas as pd
import re
import numpy as np
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
suv = pd.read_csv('/kaggle/input/titanicscraper/src/kaggle/titanic/surv.csv')
vic = pd.read_csv('/kaggle/input/titanicscraper/src/kaggle/titanic/vict.csv')
suv['survived'] = 1
vic['survived'] = 0
ground_truth = pd.concat([suv, vic])
ground_truth['fsname'] = [re.search('^(.*?)( |$)', item).group(1) for item in ground_truth['given name']]
tmp_f = [item.encode('ascii', 'ignore').decode('ascii') for item in ground_truth['family name']]
non_ascii = [True if x != y else False for x, y in zip(tmp_f, ground_truth['family name'])]
ground_truth['uni_f'] = non_ascii
print('Non-ascii family names')
pd.value_counts(non_ascii)
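# Hedged aside: encode('ascii', 'ignore') deletes accented characters outright, so
# 'Pålsson' becomes 'Plsson'. A minimal alternative keeps the base letters by
# stripping combining marks; the sample name here is illustrative only.
import unicodedata
def fold_ascii(s):
    # NFKD decomposes 'å' into 'a' plus a combining ring; dropping the marks keeps the 'a'.
    return ''.join(c for c in unicodedata.normalize('NFKD', s) if not unicodedata.combining(c))
print(fold_ascii('Pålsson'))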
code
74063412/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import re
import numpy as np
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
suv = pd.read_csv('/kaggle/input/titanicscraper/src/kaggle/titanic/surv.csv')
vic = pd.read_csv('/kaggle/input/titanicscraper/src/kaggle/titanic/vict.csv')
print(f'{suv.shape}_surv, {vic.shape}_vic')
code
74063412/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import re
import pandas as pd
import re
import numpy as np
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
suv = pd.read_csv('/kaggle/input/titanicscraper/src/kaggle/titanic/surv.csv')
vic = pd.read_csv('/kaggle/input/titanicscraper/src/kaggle/titanic/vict.csv')
suv['survived'] = 1
vic['survived'] = 0
ground_truth = pd.concat([suv, vic])
ground_truth['fsname'] = [re.search('^(.*?)( |$)', item).group(1) for item in ground_truth['given name']]
ground_truth.head()
code
74063412/cell_18
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import re
import pandas as pd
import re
import numpy as np
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
suv = pd.read_csv('/kaggle/input/titanicscraper/src/kaggle/titanic/surv.csv')
vic = pd.read_csv('/kaggle/input/titanicscraper/src/kaggle/titanic/vict.csv')
suv['survived'] = 1
vic['survived'] = 0
ground_truth = pd.concat([suv, vic])
ground_truth['fsname'] = [re.search('^(.*?)( |$)', item).group(1) for item in ground_truth['given name']]
tmp_f = [item.encode('ascii', 'ignore').decode('ascii') for item in ground_truth['family name']]
non_ascii = [True if x != y else False for x, y in zip(tmp_f, ground_truth['family name'])]
ground_truth['uni_f'] = non_ascii
pd.value_counts(non_ascii)
tmp_fs = [item.encode('ascii', 'ignore').decode('ascii') for item in ground_truth['fsname']]
non_ascii_ = [True if x != y else False for x, y in zip(tmp_fs, ground_truth['fsname'])]
ground_truth['uni_g'] = non_ascii_
pd.value_counts(non_ascii_)
ground_truth.set_index(np.arange(0, ground_truth.shape[0]), inplace=True)
for i, item in ground_truth.iterrows():
    dash = re.search('-', item['alt name'])
    if item.uni_f | item.uni_g | bool(dash):
        ground_truth.at[i, 'family name'] = item['alt name'].split('-')[-1].upper()
        ground_truth.at[i, 'fsname'] = item['alt name'].split('-')[0].capitalize()
train['fname'] = [re.search('^(.*?), ', item).group(1) for item in train.Name]
train['prefix'] = [re.search('^.*?, (.*?)\\. ', item).group(1) for item in train.Name]
train['gname'] = [re.search('^.*?, .*?\\. (.*)', item).group(1) for item in train.Name]
tmp = [re.search('^.*?, .*?\\. ([^ ]*?)( |$)', item).group(1) for item in train.Name]
tmp2 = [re.search('\\((.*?)( |\\)|$)', item).group(1) if item.startswith('(') else item for item in tmp]
tmp3 = [z.group(1) if y == 'Mrs' and (z := re.search('^.*?\\((.*?)( |\\))', x)) is not None else w for x, y, w in zip(train.gname, train.prefix, tmp2)]
train['fsname'] = tmp3
train['fname'] = [item.split('-')[-1] if bool(re.search('-', item)) else item for item in train['fname']]
train['fname'] = [item.split(' ')[-1] if bool(re.search(' ', item)) else item for item in train['fname']]
train['fname'] = [item.replace("'", '') if bool(re.search("'", item)) else item for item in train['fname']]
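# Hedged worked example of the three Name regexes above on one typical Titanic
# record (a real row from train.csv, used purely for illustration):
sample = 'Cumings, Mrs. John Bradley (Florence Briggs Thayer)'
print(re.search('^(.*?), ', sample).group(1))          # family name -> 'Cumings'
print(re.search('^.*?, (.*?)\\. ', sample).group(1))   # prefix      -> 'Mrs'
print(re.search('^.*?, .*?\\. (.*)', sample).group(1)) # given part  -> 'John Bradley (Florence Briggs Thayer)'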
code
74063412/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd
import re
import numpy as np
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
print(f'{train.shape}_train, {test.shape}_test')
code
74063412/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import re
import pandas as pd
import re
import numpy as np
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
suv = pd.read_csv('/kaggle/input/titanicscraper/src/kaggle/titanic/surv.csv')
vic = pd.read_csv('/kaggle/input/titanicscraper/src/kaggle/titanic/vict.csv')
suv['survived'] = 1
vic['survived'] = 0
ground_truth = pd.concat([suv, vic])
ground_truth['fsname'] = [re.search('^(.*?)( |$)', item).group(1) for item in ground_truth['given name']]
tmp_f = [item.encode('ascii', 'ignore').decode('ascii') for item in ground_truth['family name']]
non_ascii = [True if x != y else False for x, y in zip(tmp_f, ground_truth['family name'])]
ground_truth['uni_f'] = non_ascii
pd.value_counts(non_ascii)
tmp_fs = [item.encode('ascii', 'ignore').decode('ascii') for item in ground_truth['fsname']]
non_ascii_ = [True if x != y else False for x, y in zip(tmp_fs, ground_truth['fsname'])]
ground_truth['uni_g'] = non_ascii_
print('Non-ascii first names')
pd.value_counts(non_ascii_)
code
1008146/cell_2
[ "text_plain_output_1.png" ]
from collections import defaultdict
import csv
import re
import re
import csv
import operator
from collections import defaultdict
stop_words = set(['a', "a's", 'able', 'about', 'above', 'according', 'accordingly', 'across', 'actually', 'after', 'actual',
    'afterwards', 'again', 'against', "ain't", 'all', 'allow', 'allows', 'almost', 'alone', 'along', 'already', 'also',
    'although', 'always', 'am', 'among', 'amongst', 'an', 'and', 'another', 'any', 'anybody', 'anyhow', 'anyone',
    'anything', 'anyway', 'anyways', 'anywhere', 'apart', 'appear', 'appreciate', 'appropriate', 'are', "aren't",
    'around', 'as', 'aside', 'ask', 'asking', 'associated', 'at', 'available', 'away', 'awfully', 'b', 'be', 'became',
    'because', 'become', 'becomes', 'becoming', 'been', 'before', 'beforehand', 'behind', 'being', 'believe', 'below',
    'beside', 'besides', 'best', 'better', 'between', 'beyond', 'both', 'brief', 'but', 'by', 'c', "c'mon", "c's",
    'came', 'can', "can't", 'cannot', 'cant', 'cause', 'causes', 'certain', 'certainly', 'changes', 'clearly', 'co',
    'com', 'come', 'comes', 'concerning', 'consequently', 'consider', 'considering', 'contain', 'containing',
    'contains', 'corresponding', 'could', "couldn't", 'course', 'currently', 'd', 'definitely', 'described', 'despite',
    'did', "didn't", 'different', 'do', 'does', "doesn't", 'doing', "don't", 'done', 'down', 'downwards', 'during',
    'e', 'each', 'edu', 'eg', 'eight', 'either', 'else', 'elsewhere', 'enough', 'entirely', 'especially', 'et', 'etc',
    'even', 'ever', 'every', 'everybody', 'everyone', 'everything', 'everywhere', 'ex', 'exactly', 'example', 'except',
    'f', 'far', 'few', 'fifth', 'first', 'five', 'followed', 'following', 'follows', 'for', 'former', 'formerly',
    'forth', 'four', 'from', 'further', 'furthermore', 'g', 'get', 'gets', 'getting', 'given', 'gives', 'go', 'goes',
    'going', 'gone', 'got', 'gotten', 'greetings', 'h', 'had', "hadn't", 'happens', 'hardly', 'has', "hasn't", 'have',
    "haven't", 'having', 'he', "he's", 'hello', 'help', 'hence', 'her', 'here', "here's", 'hereafter', 'hereby',
    'herein', 'hereupon', 'hers', 'herself', 'hi', 'him', 'himself', 'his', 'hither', 'hopefully', 'how', 'howbeit',
    'however', 'i', "i'd", "i'll", "i'm", "i've", 'ie', 'if', 'ignored', 'immediate', 'in', 'inasmuch', 'inc',
    'indeed', 'indicate', 'indicated', 'indicates', 'inner', 'insofar', 'instead', 'into', 'inward', 'is', "isn't",
    'it', "it'd", "it'll", "it's", 'its', 'itself', 'j', 'just', 'k', 'keep', 'keeps', 'kept', 'know', 'knows',
    'known', 'l', 'last', 'lately', 'later', 'latter', 'latterly', 'least', 'less', 'lest', 'let', "let's", 'like',
    'liked', 'likely', 'little', 'look', 'looking', 'looks', 'ltd', 'm', 'mainly', 'many', 'may', 'maybe', 'me',
    'mean', 'meanwhile', 'merely', 'might', 'more', 'moreover', 'most', 'mostly', 'much', 'must', 'my', 'myself',
    'n', 'name', 'namely', 'nd', 'near', 'nearly', 'necessary', 'need', 'needs', 'neither', 'never', 'nevertheless',
    'new', 'next', 'nine', 'no', 'nobody', 'non', 'none', 'noone', 'nor', 'normally', 'not', 'nothing', 'novel',
    'now', 'nowhere', 'o', 'obviously', 'of', 'off', 'often', 'oh', 'ok', 'okay', 'old', 'on', 'once', 'one', 'ones',
    'only', 'onto', 'or', 'other', 'others', 'otherwise', 'ought', 'our', 'ours', 'ourselves', 'out', 'outside',
    'over', 'overall', 'own', 'p', 'particular', 'particularly', 'per', 'perhaps', 'placed', 'please', 'plus',
    'possible', 'presumably', 'probably', 'provides', 'q', 'que', 'quite', 'qv', 'r', 'rather', 'rd', 're', 'really',
    'reasonably', 'regarding', 'regardless', 'regards', 'relatively', 'respectively', 'right', 's', 'said', 'same',
    'saw', 'say', 'saying', 'says', 'second', 'secondly', 'see', 'seeing', 'seem', 'seemed', 'seeming', 'seems',
    'seen', 'self', 'selves', 'sensible', 'sent', 'serious', 'seriously', 'seven', 'several', 'shall', 'she',
    'should', "shouldn't", 'since', 'six', 'so', 'some', 'somebody', 'somehow', 'someone', 'something', 'sometime',
    'sometimes', 'somewhat', 'somewhere', 'soon', 'sorry', 'specified', 'specify', 'specifying', 'still', 'sub',
    'such', 'sup', 'sure', 't', "t's", 'take', 'taken', 'tell', 'tends', 'th', 'than', 'thank', 'thanks', 'thanx',
    'that', "that's", 'thats', 'the', 'their', 'theirs', 'them', 'themselves', 'then', 'thence', 'there', "there's",
    'thereafter', 'thereby', 'therefore', 'therein', 'theres', 'thereupon', 'these', 'they', "they'd", "they'll",
    "they're", "they've", 'think', 'third', 'this', 'thorough', 'thoroughly', 'those', 'though', 'three', 'through',
    'throughout', 'thru', 'thus', 'to', 'together', 'too', 'took', 'toward', 'towards', 'tried', 'tries', 'truly',
    'try', 'trying', 'twice', 'two', 'u', 'un', 'under', 'unfortunately', 'unless', 'unlikely', 'until', 'unto',
    'up', 'upon', 'us', 'use', 'used', 'useful', 'uses', 'using', 'usually', 'uucp', 'v', 'value', 'various', 'very',
    'via', 'viz', 'vs', 'w', 'want', 'wants', 'was', "wasn't", 'way', 'we', "we'd", "we'll", "we're", "we've",
    'welcome', 'well', 'went', 'were', "weren't", 'what', "what's", 'whatever', 'when', 'whence', 'whenever',
    'where', "where's", 'whereafter', 'whereas', 'whereby', 'wherein', 'whereupon', 'wherever', 'whether', 'which',
    'while', 'whither', 'who', "who's", 'whoever', 'whole', 'whom', 'whose', 'why', 'will', 'willing', 'wish',
    'with', 'within', 'without', "won't", 'wonder', 'would', 'would', "wouldn't", 'x', 'y', 'yes', 'yet', 'you',
    "you'd", "you'll", "you're", "you've", 'your', 'yours', 'yourself', 'yourselves', 'z', 'zero', ''])
def f1score(tp, fp, fn):
    p = tp * 1.0 / (tp + fp)
    r = tp * 1.0 / (tp + fn)
    f1 = 2 * p * r / (p + r)
    return f1
def cleantext(raw_html):
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, '', raw_html)
    return cleantext
def get_words(text):
    word_split = re.compile('[^a-zA-Z0-9_\\+\\-/]')
    return [word.strip().lower() for word in word_split.split(text)]
datapath = '../input'
in_file = open(datapath + '/test.csv')
out_file = open('sub_freq.csv', 'w')
reader = csv.DictReader(in_file)
writer = csv.writer(out_file)
writer.writerow(['id', 'tags'])
for ind, row in enumerate(reader):
    text = cleantext(row['title'])
    tfrequency_dict = defaultdict(int)
    word_count = 0.0
    for word in get_words(text):
        if word not in stop_words and word.isalpha():
            tfrequency_dict[word] += 1
            word_count += 1.0
    for word in tfrequency_dict:
        tf = tfrequency_dict[word] / word_count
        tfrequency_dict[word] = tf
    pred_title_tags = sorted(tfrequency_dict, key=tfrequency_dict.get, reverse=True)[:10]
    text = cleantext(row['content'])
    dfrequency_dict = defaultdict(int)
    word_count = 0.0
    for word in get_words(text):
        if word not in stop_words and word.isalpha():
            dfrequency_dict[word] += 1
            word_count += 1.0
    for word in dfrequency_dict:
        tf = dfrequency_dict[word] / word_count
        dfrequency_dict[word] = tf
    pred_content_tags = sorted(dfrequency_dict, key=dfrequency_dict.get, reverse=True)[:10]
    pred_tags_dict = {}
    for word in set(pred_title_tags + pred_content_tags):
        pred_tags_dict[word] = tfrequency_dict.get(word, 0) + dfrequency_dict.get(word, 0)
    pred_tags = set(sorted(pred_tags_dict, key=pred_tags_dict.get, reverse=True)[:3])
    writer.writerow([row['id'], ' '.join(pred_tags)])
    if ind % 50000 == 0:
        print('processed', ind)
in_file.close()
out_file.close()
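# Hedged toy run of the term-frequency scoring above on a hard-coded string;
# assumes get_words and stop_words as defined in this cell.
toy = 'python regex python csv'
toy_freq = defaultdict(int)
for w in get_words(toy):
    if w not in stop_words and w.isalpha():
        toy_freq[w] += 1
toy_total = float(sum(toy_freq.values()))
toy_tf = {w: c / toy_total for w, c in toy_freq.items()}
print(sorted(toy_tf, key=toy_tf.get, reverse=True)[:3])  # 'python' ranks first with tf 0.5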
code
1008146/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
90130653/cell_23
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
allteams_2010 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2009-2010.csv')
allteams_2011 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2010-2011.csv')
allteams_2012 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2011-2012.csv')
allteams_2013 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2012-2013.csv')
allteams_2014 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2013-2014.csv')
allteams_2015 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2014-2015.csv')
allteams_2016 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2015-2016.csv')
allteams_2017 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2016-2017.csv')
allteams_2018 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2017-2018.csv')
allteams_2019 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2018-2019.csv')
allteams_2021 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2020-2021.csv')
allteams_list = [allteams_2010, allteams_2011, allteams_2012, allteams_2013, allteams_2014, allteams_2015, allteams_2016, allteams_2017, allteams_2018, allteams_2019, allteams_2021]
teamstats_2010 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2010 Team Stats.csv', skiprows=[0])
teamstats_2011 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2011 Team Stats.csv', skiprows=[0])
teamstats_2012 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2012 Team Stats.csv', skiprows=[0])
teamstats_2013 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2013 Team Stats.csv', skiprows=[0])
teamstats_2014 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2014 Team Stats.csv', skiprows=[0])
teamstats_2015 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2015 Team Stats.csv', skiprows=[0])
teamstats_2016 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2016 Team Stats.csv', skiprows=[0])
teamstats_2017 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2017 Team Stats.csv', skiprows=[0])
teamstats_2018 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2018 Team Stats.csv', skiprows=[0])
teamstats_2019 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2019 Team Stats.csv', skiprows=[0])
teamstats_2021 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2021 Team Stats.csv', skiprows=[0])
teamstats_list = [teamstats_2010, teamstats_2011, teamstats_2012, teamstats_2013, teamstats_2014, teamstats_2015, teamstats_2016, teamstats_2017, teamstats_2018, teamstats_2019, teamstats_2021]
teamstats_2010_2021 = pd.concat(teamstats_list)
allteams_2010_2021 = pd.concat(allteams_list)
allteams_2010_2021.tail()
code
90130653/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
allteams_2010 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2009-2010.csv')
allteams_2011 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2010-2011.csv')
allteams_2012 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2011-2012.csv')
allteams_2013 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2012-2013.csv')
allteams_2014 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2013-2014.csv')
allteams_2015 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2014-2015.csv')
allteams_2016 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2015-2016.csv')
allteams_2017 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2016-2017.csv')
allteams_2018 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2017-2018.csv')
allteams_2019 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2018-2019.csv')
allteams_2021 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2020-2021.csv')
allteams_list = [allteams_2010, allteams_2011, allteams_2012, allteams_2013, allteams_2014, allteams_2015, allteams_2016, allteams_2017, allteams_2018, allteams_2019, allteams_2021]
teamstats_2010 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2010 Team Stats.csv', skiprows=[0])
teamstats_2011 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2011 Team Stats.csv', skiprows=[0])
teamstats_2012 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2012 Team Stats.csv', skiprows=[0])
teamstats_2013 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2013 Team Stats.csv', skiprows=[0])
teamstats_2014 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2014 Team Stats.csv', skiprows=[0])
teamstats_2015 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2015 Team Stats.csv', skiprows=[0])
teamstats_2016 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2016 Team Stats.csv', skiprows=[0])
teamstats_2017 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2017 Team Stats.csv', skiprows=[0])
teamstats_2018 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2018 Team Stats.csv', skiprows=[0])
teamstats_2019 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2019 Team Stats.csv', skiprows=[0])
teamstats_2021 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2021 Team Stats.csv', skiprows=[0])
teamstats_list = [teamstats_2010, teamstats_2011, teamstats_2012, teamstats_2013, teamstats_2014, teamstats_2015, teamstats_2016, teamstats_2017, teamstats_2018, teamstats_2019, teamstats_2021]
i = 0
for df in teamstats_list:
    df.School = df.School.str.rstrip()
    year_val = 2010 + i
    years = [year_val] * df.shape[0]
    df['YEAR'] = years
    i += 1
year_val = 2021
years = [year_val] * df.shape[0]
teamstats_2021['YEAR'] = [year_val] * teamstats_2021.shape[0]
i = 0
for df in allteams_list:
    print(df.shape)
    df[['W', 'L']] = df['W-L'].str.split('-', expand=True)
    df.drop(['W-L'], inplace=True, axis=1)
    year_val = 2010 + i
    years = [year_val] * df.shape[0]
    df['YEAR'] = years
    i += 1
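# Hedged aside: the manual counter assigns YEAR 2010 + i, but the file list skips
# the 2020 season, so the 2020-2021 frame gets 2020 and needs the later fix-up.
# A minimal sketch of an explicit pairing instead (assumes allteams_list as above):
season_years = list(range(2010, 2020)) + [2021]
for year, frame in zip(season_years, allteams_list):
    frame['YEAR'] = year  # pandas broadcasts the scalar to every row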
code
90130653/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90130653/cell_18
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
allteams_2010 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2009-2010.csv')
allteams_2011 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2010-2011.csv')
allteams_2012 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2011-2012.csv')
allteams_2013 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2012-2013.csv')
allteams_2014 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2013-2014.csv')
allteams_2015 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2014-2015.csv')
allteams_2016 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2015-2016.csv')
allteams_2017 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2016-2017.csv')
allteams_2018 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2017-2018.csv')
allteams_2019 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2018-2019.csv')
allteams_2021 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2020-2021.csv')
teamstats_2010 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2010 Team Stats.csv', skiprows=[0])
teamstats_2011 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2011 Team Stats.csv', skiprows=[0])
teamstats_2012 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2012 Team Stats.csv', skiprows=[0])
teamstats_2013 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2013 Team Stats.csv', skiprows=[0])
teamstats_2014 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2014 Team Stats.csv', skiprows=[0])
teamstats_2015 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2015 Team Stats.csv', skiprows=[0])
teamstats_2016 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2016 Team Stats.csv', skiprows=[0])
teamstats_2017 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2017 Team Stats.csv', skiprows=[0])
teamstats_2018 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2018 Team Stats.csv', skiprows=[0])
teamstats_2019 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2019 Team Stats.csv', skiprows=[0])
teamstats_2021 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2021 Team Stats.csv', skiprows=[0])
teamstats_list = [teamstats_2010, teamstats_2011, teamstats_2012, teamstats_2013, teamstats_2014, teamstats_2015, teamstats_2016, teamstats_2017, teamstats_2018, teamstats_2019, teamstats_2021]
teamstats_2010_2021 = pd.concat(teamstats_list)
teamstats_2010_2021.tail()
code
90130653/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
allteams_2010 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2009-2010.csv')
allteams_2011 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2010-2011.csv')
allteams_2012 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2011-2012.csv')
allteams_2013 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2012-2013.csv')
allteams_2014 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2013-2014.csv')
allteams_2015 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2014-2015.csv')
allteams_2016 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2015-2016.csv')
allteams_2017 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2016-2017.csv')
allteams_2018 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2017-2018.csv')
allteams_2019 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2018-2019.csv')
allteams_2021 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2020-2021.csv')
allteams_list = [allteams_2010, allteams_2011, allteams_2012, allteams_2013, allteams_2014, allteams_2015, allteams_2016, allteams_2017, allteams_2018, allteams_2019, allteams_2021]
teamstats_2010 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2010 Team Stats.csv', skiprows=[0])
teamstats_2011 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2011 Team Stats.csv', skiprows=[0])
teamstats_2012 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2012 Team Stats.csv', skiprows=[0])
teamstats_2013 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2013 Team Stats.csv', skiprows=[0])
teamstats_2014 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2014 Team Stats.csv', skiprows=[0])
teamstats_2015 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2015 Team Stats.csv', skiprows=[0])
teamstats_2016 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2016 Team Stats.csv', skiprows=[0])
teamstats_2017 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2017 Team Stats.csv', skiprows=[0])
teamstats_2018 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2018 Team Stats.csv', skiprows=[0])
teamstats_2019 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2019 Team Stats.csv', skiprows=[0])
teamstats_2021 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2021 Team Stats.csv', skiprows=[0])
teamstats_list = [teamstats_2010, teamstats_2011, teamstats_2012, teamstats_2013, teamstats_2014, teamstats_2015, teamstats_2016, teamstats_2017, teamstats_2018, teamstats_2019, teamstats_2021]
i = 0
for df in teamstats_list:
    df.School = df.School.str.rstrip()
    year_val = 2010 + i
    years = [year_val] * df.shape[0]
    df['YEAR'] = years
    i += 1
year_val = 2021
years = [year_val] * df.shape[0]
teamstats_2021['YEAR'] = [year_val] * teamstats_2021.shape[0]
teamstats_2010_2021 = pd.concat(teamstats_list)
i = 0
for df in allteams_list:
    df[['W', 'L']] = df['W-L'].str.split('-', expand=True)
    df.drop(['W-L'], inplace=True, axis=1)
    year_val = 2010 + i
    years = [year_val] * df.shape[0]
    df['YEAR'] = years
    i += 1
allteams_2010_2021 = pd.concat(allteams_list)
schoolname_list1 = teamstats_2010_2021.School.unique()
schoolname_list1.sort()
schoolname_list2 = allteams_2010_2021.TEAM.unique()
schoolname_list2.sort()
different_names_list = []
for i in range(len(schoolname_list1)):
    if schoolname_list1[i] not in schoolname_list2:
        different_names_list.append(schoolname_list1[i])
print('List of unique differences between school names of dataframes: ', different_names_list)
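# Hedged aside: the membership loop above amounts to a set difference; assumes
# schoolname_list1 and schoolname_list2 as computed in this cell.
only_in_teamstats = sorted(set(schoolname_list1) - set(schoolname_list2))
print(len(only_in_teamstats))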
code
90130653/cell_17
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
allteams_2010 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2009-2010.csv')
allteams_2011 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2010-2011.csv')
allteams_2012 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2011-2012.csv')
allteams_2013 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2012-2013.csv')
allteams_2014 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2013-2014.csv')
allteams_2015 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2014-2015.csv')
allteams_2016 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2015-2016.csv')
allteams_2017 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2016-2017.csv')
allteams_2018 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2017-2018.csv')
allteams_2019 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2018-2019.csv')
allteams_2021 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2020-2021.csv')
teamstats_2010 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2010 Team Stats.csv', skiprows=[0])
teamstats_2011 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2011 Team Stats.csv', skiprows=[0])
teamstats_2012 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2012 Team Stats.csv', skiprows=[0])
teamstats_2013 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2013 Team Stats.csv', skiprows=[0])
teamstats_2014 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2014 Team Stats.csv', skiprows=[0])
teamstats_2015 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2015 Team Stats.csv', skiprows=[0])
teamstats_2016 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2016 Team Stats.csv', skiprows=[0])
teamstats_2017 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2017 Team Stats.csv', skiprows=[0])
teamstats_2018 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2018 Team Stats.csv', skiprows=[0])
teamstats_2019 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2019 Team Stats.csv', skiprows=[0])
teamstats_2021 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2021 Team Stats.csv', skiprows=[0])
teamstats_list = [teamstats_2010, teamstats_2011, teamstats_2012, teamstats_2013, teamstats_2014, teamstats_2015, teamstats_2016, teamstats_2017, teamstats_2018, teamstats_2019, teamstats_2021]
teamstats_2010_2021 = pd.concat(teamstats_list)
teamstats_2010_2021.head()
code
90130653/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
allteams_2010 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2009-2010.csv')
allteams_2011 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2010-2011.csv')
allteams_2012 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2011-2012.csv')
allteams_2013 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2012-2013.csv')
allteams_2014 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2013-2014.csv')
allteams_2015 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2014-2015.csv')
allteams_2016 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2015-2016.csv')
allteams_2017 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2016-2017.csv')
allteams_2018 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2017-2018.csv')
allteams_2019 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2018-2019.csv')
allteams_2021 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2020-2021.csv')
allteams_list = [allteams_2010, allteams_2011, allteams_2012, allteams_2013, allteams_2014, allteams_2015, allteams_2016, allteams_2017, allteams_2018, allteams_2019, allteams_2021]
teamstats_2010 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2010 Team Stats.csv', skiprows=[0])
teamstats_2011 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2011 Team Stats.csv', skiprows=[0])
teamstats_2012 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2012 Team Stats.csv', skiprows=[0])
teamstats_2013 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2013 Team Stats.csv', skiprows=[0])
teamstats_2014 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2014 Team Stats.csv', skiprows=[0])
teamstats_2015 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2015 Team Stats.csv', skiprows=[0])
teamstats_2016 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2016 Team Stats.csv', skiprows=[0])
teamstats_2017 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2017 Team Stats.csv', skiprows=[0])
teamstats_2018 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2018 Team Stats.csv', skiprows=[0])
teamstats_2019 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2019 Team Stats.csv', skiprows=[0])
teamstats_2021 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2021 Team Stats.csv', skiprows=[0])
teamstats_list = [teamstats_2010, teamstats_2011, teamstats_2012, teamstats_2013, teamstats_2014, teamstats_2015, teamstats_2016, teamstats_2017, teamstats_2018, teamstats_2019, teamstats_2021]
i = 0
for df in teamstats_list:
    df.School = df.School.str.rstrip()
    year_val = 2010 + i
    years = [year_val] * df.shape[0]
    df['YEAR'] = years
    i += 1
year_val = 2021
years = [year_val] * df.shape[0]
teamstats_2021['YEAR'] = [year_val] * teamstats_2021.shape[0]
teamstats_2010_2021 = pd.concat(teamstats_list)
i = 0
for df in allteams_list:
    df[['W', 'L']] = df['W-L'].str.split('-', expand=True)
    df.drop(['W-L'], inplace=True, axis=1)
    year_val = 2010 + i
    years = [year_val] * df.shape[0]
    df['YEAR'] = years
    i += 1
allteams_2010_2021 = pd.concat(allteams_list)
schoolname_list1 = teamstats_2010_2021.School.unique()
schoolname_list1.sort()
schoolname_list2 = allteams_2010_2021.TEAM.unique()
schoolname_list2.sort()
different_names_list = []
for i in range(len(schoolname_list1)):
    if schoolname_list1[i] not in schoolname_list2:
        different_names_list.append(schoolname_list1[i])
corrected_name_list = ['UAB', 'Albany', 'Bowling Green', 'BYU', 'CSU Bakersfield', 'CSU Fullerton', 'Long Beach State',
    'CSU Northridge', 'Centenary', 'Central Connecticut', 'UCF', 'The Citadel', 'Charleston', 'UConn', "Hawai'i",
    'UIC', 'LSU', 'UL Monroe', 'Loyola Chicago', 'UMBC', 'UMass', 'UMass Lowell', 'McNeese', 'Miami', 'Ole Miss',
    'UM Kansas City', 'Morgan St', 'UNLV', 'Nicholls', 'Norfolk St', 'NC State', 'UNC Asheville', 'UNC Greensboro',
    'UNC Wilmington', 'Prairie View A&M', 'Purdue Fort Wayne', 'St. Francis (PA)', "Saint Mary's", 'San José St',
    'Seattle U', 'SE Louisiana', 'USC', 'SMU', 'Southern Miss', 'St. Francis (BKN)', "St. John's", 'Tarleton',
    'UT Martin', 'Texas A&M-CC', 'TCU', 'UT Arlington', 'UTEP', 'UT Rio Grande Valley', 'UTSA', 'UC Davis',
    'UC Irvine', 'UC Riverside', 'UC San Diego', 'UC Santa Barbara', 'California', 'VCU', 'Winston Salem']
for i in range(len(corrected_name_list)):
    print(different_names_list[i], 'vs. ', corrected_name_list[i])
code
90130653/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
allteams_2010 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2009-2010.csv')
allteams_2011 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2010-2011.csv')
allteams_2012 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2011-2012.csv')
allteams_2013 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2012-2013.csv')
allteams_2014 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2013-2014.csv')
allteams_2015 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2014-2015.csv')
allteams_2016 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2015-2016.csv')
allteams_2017 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2016-2017.csv')
allteams_2018 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2017-2018.csv')
allteams_2019 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2018-2019.csv')
allteams_2021 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2020-2021.csv')
teamstats_2010 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2010 Team Stats.csv', skiprows=[0])
teamstats_2011 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2011 Team Stats.csv', skiprows=[0])
teamstats_2012 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2012 Team Stats.csv', skiprows=[0])
teamstats_2013 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2013 Team Stats.csv', skiprows=[0])
teamstats_2014 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2014 Team Stats.csv', skiprows=[0])
teamstats_2015 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2015 Team Stats.csv', skiprows=[0])
teamstats_2016 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2016 Team Stats.csv', skiprows=[0])
teamstats_2017 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2017 Team Stats.csv', skiprows=[0])
teamstats_2018 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2018 Team Stats.csv', skiprows=[0])
teamstats_2019 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2019 Team Stats.csv', skiprows=[0])
teamstats_2021 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2021 Team Stats.csv', skiprows=[0])
teamstats_list = [teamstats_2010, teamstats_2011, teamstats_2012, teamstats_2013, teamstats_2014, teamstats_2015, teamstats_2016, teamstats_2017, teamstats_2018, teamstats_2019, teamstats_2021]
i = 0
for df in teamstats_list:
    print(df.shape)
    df.School = df.School.str.rstrip()
    year_val = 2010 + i
    years = [year_val] * df.shape[0]
    df['YEAR'] = years
    i += 1
code
90130653/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
allteams_2010 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2009-2010.csv')
allteams_2011 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2010-2011.csv')
allteams_2012 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2011-2012.csv')
allteams_2013 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2012-2013.csv')
allteams_2014 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2013-2014.csv')
allteams_2015 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2014-2015.csv')
allteams_2016 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2015-2016.csv')
allteams_2017 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2016-2017.csv')
allteams_2018 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2017-2018.csv')
allteams_2019 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2018-2019.csv')
allteams_2021 = pd.read_csv('../input/ncaa-bpi-ranks-by-year/Ranking csv/ESPN Rank 2020-2021.csv')
allteams_list = [allteams_2010, allteams_2011, allteams_2012, allteams_2013, allteams_2014, allteams_2015, allteams_2016, allteams_2017, allteams_2018, allteams_2019, allteams_2021]
teamstats_2010 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2010 Team Stats.csv', skiprows=[0])
teamstats_2011 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2011 Team Stats.csv', skiprows=[0])
teamstats_2012 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2012 Team Stats.csv', skiprows=[0])
teamstats_2013 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2013 Team Stats.csv', skiprows=[0])
teamstats_2014 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2014 Team Stats.csv', skiprows=[0])
teamstats_2015 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2015 Team Stats.csv', skiprows=[0])
teamstats_2016 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2016 Team Stats.csv', skiprows=[0])
teamstats_2017 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2017 Team Stats.csv', skiprows=[0])
teamstats_2018 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2018 Team Stats.csv', skiprows=[0])
teamstats_2019 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2019 Team Stats.csv', skiprows=[0])
teamstats_2021 = pd.read_csv('../input/team-stats-csv/Team Stats CSV/2021 Team Stats.csv', skiprows=[0])
teamstats_list = [teamstats_2010, teamstats_2011, teamstats_2012, teamstats_2013, teamstats_2014, teamstats_2015, teamstats_2016, teamstats_2017, teamstats_2018, teamstats_2019, teamstats_2021]
i = 0
for df in teamstats_list:
    df.School = df.School.str.rstrip()
    year_val = 2010 + i
    years = [year_val] * df.shape[0]
    df['YEAR'] = years
    i += 1
year_val = 2021
years = [year_val] * df.shape[0]
teamstats_2021['YEAR'] = [year_val] * teamstats_2021.shape[0]
teamstats_2010_2021 = pd.concat(teamstats_list)
i = 0
for df in allteams_list:
    df[['W', 'L']] = df['W-L'].str.split('-', expand=True)
    df.drop(['W-L'], inplace=True, axis=1)
    year_val = 2010 + i
    years = [year_val] * df.shape[0]
    df['YEAR'] = years
    i += 1
allteams_2010_2021 = pd.concat(allteams_list)
schoolname_list1 = teamstats_2010_2021.School.unique()
schoolname_list1.sort()
schoolname_list2 = allteams_2010_2021.TEAM.unique()
schoolname_list2.sort()
print(len(schoolname_list1))
print(len(schoolname_list2))
for i in range(len(schoolname_list1)):
    print(schoolname_list1[i], '-', schoolname_list2[i])
code
89135256/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
transaction = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customer = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
articles = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
transaction.head(5)
code
89135256/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
transaction = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customer = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
articles = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
transaction.loc[transaction['customer_id'] == '00000dbacae5abe5e23885899a1fa44253a17956c6d1c3d25f88aa139fdfc657'].shape
code
89135256/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
transaction = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customer = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
articles = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
transaction.loc[transaction['customer_id'] == '00000dbacae5abe5e23885899a1fa44253a17956c6d1c3d25f88aa139fdfc657'].shape
customer_detail_by_trans = pd.merge(transaction, customer, on='customer_id')
customer_detail_by_trans.loc[customer_detail_by_trans['customer_id'] == '00000dbacae5abe5e23885899a1fa44253a17956c6d1c3d25f88aa139fdfc657'].shape
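# Hedged toy illustration of the join above: pd.merge on customer_id is an inner
# join by default, so customer attributes repeat once per matching transaction.
# The `left`/`right` frames here are made-up examples, not competition data.
left = pd.DataFrame({'customer_id': ['a', 'a', 'b'], 'price': [1.0, 2.0, 3.0]})
right = pd.DataFrame({'customer_id': ['a', 'b'], 'age': [30, 40]})
print(pd.merge(left, right, on='customer_id'))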
code
89135256/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89135256/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
transaction = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customer = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
articles = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
articles.head(5)
code
89135256/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
transaction = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customer = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
articles = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
transaction.loc[transaction['customer_id'] == '00000dbacae5abe5e23885899a1fa44253a17956c6d1c3d25f88aa139fdfc657'].shape
customer_detail_by_trans = pd.merge(transaction, customer, on='customer_id')
customer_detail_by_trans.head(10)
code
89135256/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
transaction = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customer = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
articles = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
customer.head(10)
code
1004737/cell_6
[ "text_plain_output_1.png" ]
from csv import DictReader
from keras import layers
from keras import models
import numpy as np # linear algebra
import random
people = list(DictReader(open('../input/train.csv')))
people
def letter_for_cabin(cabin):
    return cabin[0] if len(cabin) else ''
cabin_letters = list(set([letter_for_cabin(p['Cabin']) for p in people]))
def onehot(idx, maximum):
    v = [0] * maximum
    v[idx] = 1
    return v
ages = [float(p['Age']) for p in people if p['Age']]
avg_age = sum(ages) / len(ages)
def vectorize(person):
    pclass = int(person['Pclass'])
    salutations = [1 if sal in person['Name'] else 0 for sal in ['Mr.', 'Miss.', 'Mrs.', 'Master.']]
    embarked = 'CQS'.index(person['Embarked'])
    age = float(person['Age']) if person['Age'] else avg_age
    sibs = 0
    parents = 0
    spouses = 0
    children = 0
    if age > 18:
        children = int(person['Parch'])
        spouses = int(person['SibSp'])
    else:
        parents = int(person['Parch'])
        sibs = int(person['SibSp'])
    sex = -1 if person['Sex'] == 'male' else 1
    fare = float(person['Fare'])
    cabin = cabin_letters.index(letter_for_cabin(person['Cabin']))
    return np.array([pclass, sex, fare, age] + salutations + onehot(embarked, 3) + [sibs, parents, spouses, children] + onehot(cabin, len(cabin_letters)))
def make_vecs_and_labels(people):
    xs = np.stack([vectorize(p) for p in people])
    ys = np.stack([int(p['Survived']) for p in people])
    return (xs, ys)
test_split = 0.8
random.shuffle(people)
input_dim = len(vectorize(people[0]))
x, y = make_vecs_and_labels(people)
def create_model(n_layers=3, layer_size=256, activation='relu', dropout=0):
    model = models.Sequential()
    prev_dim = input_dim
    for i in range(n_layers):
        model.add(layers.Dense(layer_size, input_dim=prev_dim))
        prev_dim = layer_size
        model.add(layers.Activation(activation))
        if dropout > 0:
            model.add(layers.Dropout(dropout))
    model.add(layers.Dense(2))
    model.add(layers.Activation('softmax'))
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
options = {'n_layers': [1, 2, 3, 4], 'layer_size': [128, 256, 512, 1024], 'activation': ['relu', 'softmax'], 'dropout': [0, 0.1]}
def all_configs(options):
    if len(options) == 0:
        return [[]]
    else:
        child_configs = all_configs(options[1:])
        configs = []
        key = options[0][0]
        for value in options[0][1]:
            for child in child_configs:
                configs.append([(key, value)] + child)
        return configs
configs = all_configs(list(options.items()))
random.shuffle(configs)
results = []
for config in configs[:20]:
    model = create_model(**dict(config))
    hist = model.fit(x, np.expand_dims(y, -1), validation_split=0.8, verbose=0, nb_epoch=20)
    print(dict(config))
    print('Validation set accuracy:', max(hist.history['val_acc']))
    print()
    results.append((dict(config), max(hist.history['val_acc'])))
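# Hedged aside: the recursive all_configs helper enumerates the full grid; the
# standard-library itertools.product builds the same 4 * 4 * 2 * 2 = 64
# configurations. Assumes the `options` dict defined in this cell.
import itertools
keys = list(options)
configs_alt = [dict(zip(keys, values)) for values in itertools.product(*(options[k] for k in keys))]
print(len(configs_alt))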
code
1004737/cell_2
[ "text_plain_output_1.png" ]
from csv import DictReader
people = list(DictReader(open('../input/train.csv')))
people
code
1004737/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import numpy as np
import keras
from keras import models
from keras import layers
from csv import DictReader
import random
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1004737/cell_7
[ "text_plain_output_1.png" ]
from csv import DictReader
from keras import layers
from keras import models
import numpy as np # linear algebra
import random
people = list(DictReader(open('../input/train.csv')))
people
def letter_for_cabin(cabin):
    return cabin[0] if len(cabin) else ''
cabin_letters = list(set([letter_for_cabin(p['Cabin']) for p in people]))
def onehot(idx, maximum):
    v = [0] * maximum
    v[idx] = 1
    return v
ages = [float(p['Age']) for p in people if p['Age']]
avg_age = sum(ages) / len(ages)
def vectorize(person):
    pclass = int(person['Pclass'])
    salutations = [1 if sal in person['Name'] else 0 for sal in ['Mr.', 'Miss.', 'Mrs.', 'Master.']]
    embarked = 'CQS'.index(person['Embarked'])
    age = float(person['Age']) if person['Age'] else avg_age
    sibs = 0
    parents = 0
    spouses = 0
    children = 0
    if age > 18:
        children = int(person['Parch'])
        spouses = int(person['SibSp'])
    else:
        parents = int(person['Parch'])
        sibs = int(person['SibSp'])
    sex = -1 if person['Sex'] == 'male' else 1
    fare = float(person['Fare'])
    cabin = cabin_letters.index(letter_for_cabin(person['Cabin']))
    return np.array([pclass, sex, fare, age] + salutations + onehot(embarked, 3) + [sibs, parents, spouses, children] + onehot(cabin, len(cabin_letters)))
def make_vecs_and_labels(people):
    xs = np.stack([vectorize(p) for p in people])
    ys = np.stack([int(p['Survived']) for p in people])
    return (xs, ys)
test_split = 0.8
random.shuffle(people)
input_dim = len(vectorize(people[0]))
x, y = make_vecs_and_labels(people)
def create_model(n_layers=3, layer_size=256, activation='relu', dropout=0):
    model = models.Sequential()
    prev_dim = input_dim
    for i in range(n_layers):
        model.add(layers.Dense(layer_size, input_dim=prev_dim))
        prev_dim = layer_size
        model.add(layers.Activation(activation))
        if dropout > 0:
            model.add(layers.Dropout(dropout))
    model.add(layers.Dense(2))
    model.add(layers.Activation('softmax'))
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
options = {'n_layers': [1, 2, 3, 4], 'layer_size': [128, 256, 512, 1024], 'activation': ['relu', 'softmax'], 'dropout': [0, 0.1]}
def all_configs(options):
    if len(options) == 0:
        return [[]]
    else:
        child_configs = all_configs(options[1:])
        configs = []
        key = options[0][0]
        for value in options[0][1]:
            for child in child_configs:
                configs.append([(key, value)] + child)
        return configs
configs = all_configs(list(options.items()))
random.shuffle(configs)
results = []
for config in configs[:20]:
    model = create_model(**dict(config))
    hist = model.fit(x, np.expand_dims(y, -1), validation_split=0.8, verbose=0, nb_epoch=20)
    results.append((dict(config), max(hist.history['val_acc'])))
best = max(results, key=lambda x: x[1])
best
code
1004737/cell_3
[ "text_plain_output_1.png" ]
from csv import DictReader
people = list(DictReader(open('../input/train.csv')))
people
def letter_for_cabin(cabin):
    return cabin[0] if len(cabin) else ''
cabin_letters = list(set([letter_for_cabin(p['Cabin']) for p in people]))
print(cabin_letters)
code
130017710/cell_4
[ "text_plain_output_1.png" ]
import os
data_dir = '/kaggle/input/audio-mnist/data'
paths = []
labels = []
t = 0
for dirname, _, filenames in os.walk(data_dir):
    if t < 20:  # only the first 20 speaker folders are collected
        t += 1
        for filename in filenames:
            if filename[-4:] == '.wav':
                paths += [os.path.join(dirname, filename)]
                labels += [dirname.split('/')[-1]]
print(len(paths))
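# Hedged one-line alternative to the walk above: glob with ** recurses the whole
# tree. Note it collects every .wav, without the 20-folder cap applied above.
import glob
wav_paths = glob.glob(os.path.join(data_dir, '**', '*.wav'), recursive=True)
print(len(wav_paths))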
code
130017710/cell_6
[ "text_plain_output_5.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_14.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_16.png", "text_plain_output_8.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_11.png", "text_plain_output_12.png" ]
import cv2
import librosa
import matplotlib.pyplot as plt
import numpy as np
import os
data_dir = '/kaggle/input/audio-mnist/data'
paths = []
labels = []
t = 0
for dirname, _, filenames in os.walk(data_dir):
    if t < 20:  # only the first 20 speaker folders are collected
        t += 1
        for filename in filenames:
            if filename[-4:] == '.wav':
                paths += [os.path.join(dirname, filename)]
                labels += [dirname.split('/')[-1]]
for i in range(len(paths)):
    if i % 100 == 0:
        print('i=', i)
    path = paths[i]
    label = path.split('/')[-2]
    file = path.split('/')[-1][0:-4]
    try:
        y, sr = librosa.load(path)
        mel_spectrogram = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128)
        log_mel_spectrogram = librosa.power_to_db(mel_spectrogram, ref=np.max)
        img = log_mel_spectrogram
        img = cv2.resize(np.array(img), dsize=(128, 128))
        X = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        X = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        plt.imshow(X)
        plt.axis('off')
        plt.savefig('./mel/' + label + '_' + file + '.png', bbox_inches='tight', pad_inches=0)
        plt.close()
    except:
        print('except', label, file)
        continue
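# Hedged synthetic check of the core transform above: with n_mels=128 the
# mel spectrogram is (128, n_frames), and power_to_db with ref=np.max scales
# the values so the loudest bin sits at 0 dB. The pure tone is a stand-in signal.
demo_sr = 22050
tone = np.sin(2 * np.pi * 440 * np.arange(demo_sr) / demo_sr)  # one second of 440 Hz
m = librosa.feature.melspectrogram(y=tone, sr=demo_sr, n_mels=128)
print(m.shape)
print(librosa.power_to_db(m, ref=np.max).max())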
code
17136778/cell_21
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
class CustomImageList(ImageList):
    def open(self, fn):
        img = fn.reshape(28, 28)
        img = np.stack((img,) * 3, axis=-1)
        return Image(pil2tensor(img, dtype=np.float32))
    @classmethod
    def from_csv_custom(cls, path: PathOrStr, csv_name: str, imgIdx: int=1, header: str='infer', **kwargs) -> 'ItemList':
        df = pd.read_csv(Path(path) / csv_name, header=header)
        res = super().from_df(df, path=path, cols=0, **kwargs)
        res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
        return res
    @classmethod
    def from_df_custom(cls, path: PathOrStr, df: DataFrame, imgIdx: int=1, header: str='infer', **kwargs) -> 'ItemList':
        res = super().from_df(df, path=path, cols=0, **kwargs)
        res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
        return res
test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1).split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test, label=0).transform(get_transforms(do_flip=False)).databunch(bs=128, num_workers=0).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001))
predictions, *_ = learn.get_preds(DatasetType.Test)
labels = np.argmax(predictions, 1)
submission_df = pd.DataFrame({'ImageId': list(range(1, len(labels) + 1)), 'Label': labels})
submission_df.to_csv(f'submission_orig.csv', index=False)
train_df = pd.read_csv(path + '/train.csv')
from sklearn.model_selection import train_test_split
train_df, val_df = train_test_split(train_df, test_size=0.2)
train_df['label'].hist(figsize=(10, 5))
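# Hedged aside showing what open() above does to a single row: a flat 784-pixel
# vector becomes a 28x28 image replicated to 3 channels so the pretrained RGB
# backbone accepts it. The zero vector here is a stand-in, not real data.
flat = np.zeros(784)
img3 = np.stack((flat.reshape(28, 28),) * 3, axis=-1)
print(img3.shape)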
code
17136778/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1).split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test, label=0).transform(get_transforms(do_flip=False)).databunch(bs=128, num_workers=0).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.lr_find()
learn.recorder.plot(suggestion=True)
code
17136778/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

class CustomImageList(ImageList):

    def open(self, fn):
        img = fn.reshape(28, 28)
        img = np.stack((img,) * 3, axis=-1)  # replicate the grey channel to 3 channels
        return Image(pil2tensor(img, dtype=np.float32))

    @classmethod
    def from_csv_custom(cls, path: PathOrStr, csv_name: str, imgIdx: int=1, header: str='infer', **kwargs) -> 'ItemList':
        df = pd.read_csv(Path(path) / csv_name, header=header)
        res = super().from_df(df, path=path, cols=0, **kwargs)
        res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
        return res

    @classmethod
    def from_df_custom(cls, path: PathOrStr, df: DataFrame, imgIdx: int=1, header: str='infer', **kwargs) -> 'ItemList':
        res = super().from_df(df, path=path, cols=0, **kwargs)
        res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
        return res

test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = (CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1)
    .split_by_rand_pct(0.2)
    .label_from_df(cols='label')
    .add_test(test, label=0)
    .transform(get_transforms(do_flip=False))
    .databunch(bs=128, num_workers=0)
    .normalize(imagenet_stats))
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001))
predictions, *_ = learn.get_preds(DatasetType.Test)
labels = np.argmax(predictions, 1)
submission_df = pd.DataFrame({'ImageId': list(range(1, len(labels) + 1)), 'Label': labels})
submission_df.to_csv('submission_orig.csv', index=False)
train_df = pd.read_csv(path + '/train.csv')
from sklearn.model_selection import train_test_split
train_df, val_df = train_test_split(train_df, test_size=0.2)
proportions = pd.DataFrame({0: [0.5], 1: [0.05], 2: [0.1], 3: [0.03], 4: [0.03], 5: [0.03], 6: [0.03], 7: [0.5], 8: [0.5], 9: [0.5]})
imbalanced_train_df = train_df.groupby('label').apply(lambda x: x.sample(frac=proportions[x.name]))
imbalanced_train_df['label'].hist(figsize=(10, 5))
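# Hedged side note (added comment, not from the original notebook): the
# groupby/apply above manufactures class imbalance by sampling each digit at
# its own rate; proportions[x.name] is a one-element column, so e.g. class 1
# keeps roughly 5% of its rows while class 0 keeps roughly 50%, which the
# histogram then visualizes.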
code
17136778/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

class CustomImageList(ImageList):

    def open(self, fn):
        img = fn.reshape(28, 28)
        img = np.stack((img,) * 3, axis=-1)  # replicate the grey channel to 3 channels
        return Image(pil2tensor(img, dtype=np.float32))

    @classmethod
    def from_csv_custom(cls, path: PathOrStr, csv_name: str, imgIdx: int=1, header: str='infer', **kwargs) -> 'ItemList':
        df = pd.read_csv(Path(path) / csv_name, header=header)
        res = super().from_df(df, path=path, cols=0, **kwargs)
        res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
        return res

    @classmethod
    def from_df_custom(cls, path: PathOrStr, df: DataFrame, imgIdx: int=1, header: str='infer', **kwargs) -> 'ItemList':
        res = super().from_df(df, path=path, cols=0, **kwargs)
        res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
        return res

test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = (CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1)
    .split_by_rand_pct(0.2)
    .label_from_df(cols='label')
    .add_test(test, label=0)
    .transform(get_transforms(do_flip=False))
    .databunch(bs=128, num_workers=0)
    .normalize(imagenet_stats))
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001))
predictions, *_ = learn.get_preds(DatasetType.Test)
labels = np.argmax(predictions, 1)
submission_df = pd.DataFrame({'ImageId': list(range(1, len(labels) + 1)), 'Label': labels})
submission_df.to_csv('submission_orig.csv', index=False)
train_df = pd.read_csv(path + '/train.csv')
from sklearn.model_selection import train_test_split
train_df, val_df = train_test_split(train_df, test_size=0.2)
proportions = pd.DataFrame({0: [0.5], 1: [0.05], 2: [0.1], 3: [0.03], 4: [0.03], 5: [0.03], 6: [0.03], 7: [0.5], 8: [0.5], 9: [0.5]})
imbalanced_train_df = train_df.groupby('label').apply(lambda x: x.sample(frac=proportions[x.name]))
df = pd.concat([imbalanced_train_df, val_df])
data = (CustomImageList.from_df_custom(df=df, path=path, imgIdx=1)
    .split_by_idx(range(len(imbalanced_train_df) - 1, len(df)))
    .label_from_df(cols='label')
    .add_test(test, label=0)
    .transform(get_transforms(do_flip=False))
    .databunch(bs=128, num_workers=0)
    .normalize(imagenet_stats))
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
learn.fit_one_cycle(4, max_lr=0.01)
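# Hedged side note (added comment, not from the original notebook):
# split_by_idx marks the listed indices as the validation set. Starting the
# range at len(imbalanced_train_df) - 1 also places the last training row in
# validation; if that off-by-one is unintended,
# range(len(imbalanced_train_df), len(df)) would be the exact boundary.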
code
17136778/cell_33
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

class CustomImageList(ImageList):

    def open(self, fn):
        img = fn.reshape(28, 28)
        img = np.stack((img,) * 3, axis=-1)  # replicate the grey channel to 3 channels
        return Image(pil2tensor(img, dtype=np.float32))

    @classmethod
    def from_csv_custom(cls, path: PathOrStr, csv_name: str, imgIdx: int=1, header: str='infer', **kwargs) -> 'ItemList':
        df = pd.read_csv(Path(path) / csv_name, header=header)
        res = super().from_df(df, path=path, cols=0, **kwargs)
        res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
        return res

    @classmethod
    def from_df_custom(cls, path: PathOrStr, df: DataFrame, imgIdx: int=1, header: str='infer', **kwargs) -> 'ItemList':
        res = super().from_df(df, path=path, cols=0, **kwargs)
        res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
        return res

test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = (CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1)
    .split_by_rand_pct(0.2)
    .label_from_df(cols='label')
    .add_test(test, label=0)
    .transform(get_transforms(do_flip=False))
    .databunch(bs=128, num_workers=0)
    .normalize(imagenet_stats))
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001))
predictions, *_ = learn.get_preds(DatasetType.Test)
labels = np.argmax(predictions, 1)
submission_df = pd.DataFrame({'ImageId': list(range(1, len(labels) + 1)), 'Label': labels})
submission_df.to_csv('submission_orig.csv', index=False)
train_df = pd.read_csv(path + '/train.csv')
from sklearn.model_selection import train_test_split
train_df, val_df = train_test_split(train_df, test_size=0.2)
proportions = pd.DataFrame({0: [0.5], 1: [0.05], 2: [0.1], 3: [0.03], 4: [0.03], 5: [0.03], 6: [0.03], 7: [0.5], 8: [0.5], 9: [0.5]})
imbalanced_train_df = train_df.groupby('label').apply(lambda x: x.sample(frac=proportions[x.name]))
df = pd.concat([imbalanced_train_df, val_df])
data = (CustomImageList.from_df_custom(df=df, path=path, imgIdx=1)
    .split_by_idx(range(len(imbalanced_train_df) - 1, len(df)))
    .label_from_df(cols='label')
    .add_test(test, label=0)
    .transform(get_transforms(do_flip=False))
    .databunch(bs=128, num_workers=0)
    .normalize(imagenet_stats))
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
learn.recorder.plot(suggestion=True)
code
17136778/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

class CustomImageList(ImageList):

    def open(self, fn):
        img = fn.reshape(28, 28)
        img = np.stack((img,) * 3, axis=-1)  # replicate the grey channel to 3 channels
        return Image(pil2tensor(img, dtype=np.float32))

    @classmethod
    def from_csv_custom(cls, path: PathOrStr, csv_name: str, imgIdx: int=1, header: str='infer', **kwargs) -> 'ItemList':
        df = pd.read_csv(Path(path) / csv_name, header=header)
        res = super().from_df(df, path=path, cols=0, **kwargs)
        res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
        return res

    @classmethod
    def from_df_custom(cls, path: PathOrStr, df: DataFrame, imgIdx: int=1, header: str='infer', **kwargs) -> 'ItemList':
        res = super().from_df(df, path=path, cols=0, **kwargs)
        res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
        return res

test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = (CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1)
    .split_by_rand_pct(0.2)
    .label_from_df(cols='label')
    .add_test(test, label=0)
    .transform(get_transforms(do_flip=False))
    .databunch(bs=128, num_workers=0)
    .normalize(imagenet_stats))
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001))
predictions, *_ = learn.get_preds(DatasetType.Test)
labels = np.argmax(predictions, 1)
submission_df = pd.DataFrame({'ImageId': list(range(1, len(labels) + 1)), 'Label': labels})
submission_df.to_csv('submission_orig.csv', index=False)
train_df = pd.read_csv(path + '/train.csv')
from sklearn.model_selection import train_test_split
train_df, val_df = train_test_split(train_df, test_size=0.2)
proportions = pd.DataFrame({0: [0.5], 1: [0.05], 2: [0.1], 3: [0.03], 4: [0.03], 5: [0.03], 6: [0.03], 7: [0.5], 8: [0.5], 9: [0.5]})
imbalanced_train_df = train_df.groupby('label').apply(lambda x: x.sample(frac=proportions[x.name]))
df = pd.concat([imbalanced_train_df, val_df])
data = (CustomImageList.from_df_custom(df=df, path=path, imgIdx=1)
    .split_by_idx(range(len(imbalanced_train_df) - 1, len(df)))
    .label_from_df(cols='label')
    .add_test(test, label=0)
    .transform(get_transforms(do_flip=False))
    .databunch(bs=128, num_workers=0)
    .normalize(imagenet_stats))
data.show_batch(rows=3, figsize=(5, 5))
code
17136778/cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
from fastai.vision import *
from fastai.metrics import *
import os

path = '../input'
print(os.listdir(path))
code