Dataset schema (one record per notebook cell; fields appear in this order for each record below):
- path: string, 13-17 chars (e.g. "18157731/cell_20")
- screenshot_names: sequence of strings, 1-873 items
- code: string, 0-40.4k chars
- cell_type: string, 1 distinct value ("code")
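For orientation, a minimal sketch of iterating over records with this schema, assuming the dump is serialized as JSON Lines (the file name cells.jsonl and the loader below are illustrative assumptions, not part of the dataset itself):

import json

# Hypothetical loader: one JSON object per line, with the four schema fields above.
with open('cells.jsonl', encoding='utf-8') as f:
    for line in f:
        row = json.loads(line)
        path = row['path']                     # e.g. '18157731/cell_20'
        screenshots = row['screenshot_names']  # list of rendered-output image names
        code = row['code']                     # source of the notebook cell
        cell_type = row['cell_type']           # always 'code' in this dump
        print(path, len(screenshots), cell_type)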
18157731/cell_20
[ "text_html_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, cnn_learner, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(50)
learn.model_dir = '/kaggle/working'
learn.save('resnet50_224', return_path=True)
learn.unfreeze()
learn.lr_find(start_lr=1e-06, end_lr=0.0001)
lr = 1e-05
learn.fit_one_cycle(5, max_lr=lr)
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
doc(interp.plot_top_losses)
interp.plot_confusion_matrix(figsize=(8, 8))
code
18157731/cell_11
[ "text_plain_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, cnn_learner, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
code
18157731/cell_19
[ "image_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, cnn_learner, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(50)
learn.model_dir = '/kaggle/working'
learn.save('resnet50_224', return_path=True)
learn.unfreeze()
learn.lr_find(start_lr=1e-06, end_lr=0.0001)
lr = 1e-05
learn.fit_one_cycle(5, max_lr=lr)
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
interp.plot_top_losses(9, figsize=(15, 11))
code
18157731/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
18157731/cell_18
[ "image_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, cnn_learner, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(50)
learn.model_dir = '/kaggle/working'
learn.save('resnet50_224', return_path=True)
learn.unfreeze()
learn.lr_find(start_lr=1e-06, end_lr=0.0001)
lr = 1e-05
learn.fit_one_cycle(5, max_lr=lr)
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
code
18157731/cell_8
[ "image_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, get_transforms, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
code
18157731/cell_15
[ "text_html_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, cnn_learner, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(50)
learn.model_dir = '/kaggle/working'
learn.save('resnet50_224', return_path=True)
learn.unfreeze()
learn.lr_find(start_lr=1e-06, end_lr=0.0001)
learn.recorder.plot()
code
18157731/cell_16
[ "text_plain_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, cnn_learner, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(50)
learn.model_dir = '/kaggle/working'
learn.save('resnet50_224', return_path=True)
learn.unfreeze()
learn.lr_find(start_lr=1e-06, end_lr=0.0001)
learn.recorder.plot_losses()
code
18157731/cell_17
[ "text_plain_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, cnn_learner, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(50)
learn.model_dir = '/kaggle/working'
learn.save('resnet50_224', return_path=True)
learn.unfreeze()
learn.lr_find(start_lr=1e-06, end_lr=0.0001)
lr = 1e-05
learn.fit_one_cycle(5, max_lr=lr)
code
18157731/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, cnn_learner, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(50)
learn.model_dir = '/kaggle/working'
learn.save('resnet50_224', return_path=True)
learn.unfreeze()
learn.lr_find(start_lr=1e-06, end_lr=0.0001)
code
18157731/cell_10
[ "text_plain_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, get_transforms, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
code
18157731/cell_12
[ "image_output_1.png" ]
from fastai.vision import *  # fastai v1 star import (Path, ImageDataBunch, cnn_learner, ...); assumed from the API in use
import numpy as np  # linear algebra

path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(50)
code
18157731/cell_5
[ "image_output_1.png" ]
from fastai.vision import *  # fastai v1 star import; provides Path with the .ls() helper (assumed from the API in use)

path = Path('../input/dataset')
path.ls()
code
74056828/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/california-housing-prices/housing.csv')
df1 = pd.DataFrame()
for location in df['ocean_proximity'].unique():
    df1 = df1.append(df.loc[df['ocean_proximity'] == location][:4])
df1 = df1.reset_index()
df = df1.drop(columns='index')
df
for i, c in zip(df.index, df.columns[:-1]):
    df.at[i, c] = df.at[i + len(df.columns) - 1, c] = np.nan
df
code
74056828/cell_11
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/california-housing-prices/housing.csv')
df1 = pd.DataFrame()
for location in df['ocean_proximity'].unique():
    df1 = df1.append(df.loc[df['ocean_proximity'] == location][:4])
df1 = df1.reset_index()
df = df1.drop(columns='index')
df
for i, c in zip(df.index, df.columns[:-1]):
    df.at[i, c] = df.at[i + len(df.columns) - 1, c] = np.nan
df

class Imputer_p:

    def __init__(self, strategy_='median', f=1):
        """
        Imputer_p (Pandas):
        strategy_: {"median", tbd}, default='median'
            Specifies the strategy to calculate the statistics for replacing NaNs
        f: int, default=1
            Factor to multiply the first column
        """
        self.strategy_ = strategy_
        self.factor = f

    def fit(self, df):
        if self.strategy_ == 'median':
            self.statistics_ = df.median()

    def transform(self, df_):
        df = df_.copy()
        if self.strategy_ == 'median':
            for column_ in df.columns:
                df.loc[df[column_].isna(), column_] = self.statistics_[column_]
        elif self.strategy_ == 'mult':
            df[df.columns[0]] *= self.factor
        return df

imput_p = Imputer_p()
imput_p.fit(df)
p1 = imput_p.transform(df[df.columns[:-1]])
p1

class Group_Imputer_p(Imputer_p):

    def __init__(self, strategy_='median', f=None):
        super().__init__(strategy_='median', f=None)

    def fit(self, df_, y=-1):
        """y: index of the column with dependent variables, default -1 (the last one)"""
        df = df_[df_.columns[:y]].copy()
        self.categories = df_[df_.columns[y]].copy()
        self.stack = []
        for category in self.categories.unique():
            self.stack.append(df.loc[self.categories == category])

    def transform(self):
        df = pd.DataFrame()
        for astack in self.stack:
            super().fit(astack)
            df = df.append(super().transform(astack))
        return df

group_imput_p = Group_Imputer_p(strategy_='median')
group_imput_p.fit(df)
p2 = group_imput_p.transform()
p2 - p1
code
74056828/cell_7
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/california-housing-prices/housing.csv')
df1 = pd.DataFrame()
for location in df['ocean_proximity'].unique():
    df1 = df1.append(df.loc[df['ocean_proximity'] == location][:4])
df1 = df1.reset_index()
df = df1.drop(columns='index')
df
for i, c in zip(df.index, df.columns[:-1]):
    df.at[i, c] = df.at[i + len(df.columns) - 1, c] = np.nan
df

class Imputer_p:

    def __init__(self, strategy_='median', f=1):
        """
        Imputer_p (Pandas):
        strategy_: {"median", tbd}, default='median'
            Specifies the strategy to calculate the statistics for replacing NaNs
        f: int, default=1
            Factor to multiply the first column
        """
        self.strategy_ = strategy_
        self.factor = f

    def fit(self, df):
        if self.strategy_ == 'median':
            self.statistics_ = df.median()

    def transform(self, df_):
        df = df_.copy()
        if self.strategy_ == 'median':
            for column_ in df.columns:
                df.loc[df[column_].isna(), column_] = self.statistics_[column_]
        elif self.strategy_ == 'mult':
            df[df.columns[0]] *= self.factor
        return df

imput_p = Imputer_p()
imput_p.fit(df)
p1 = imput_p.transform(df[df.columns[:-1]])
p1
code
74056828/cell_16
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/california-housing-prices/housing.csv')
df1 = pd.DataFrame()
for location in df['ocean_proximity'].unique():
    df1 = df1.append(df.loc[df['ocean_proximity'] == location][:4])
df1 = df1.reset_index()
df = df1.drop(columns='index')
df
for i, c in zip(df.index, df.columns[:-1]):
    df.at[i, c] = df.at[i + len(df.columns) - 1, c] = np.nan
df

class Imputer_p:

    def __init__(self, strategy_='median', f=1):
        """
        Imputer_p (Pandas):
        strategy_: {"median", tbd}, default='median'
            Specifies the strategy to calculate the statistics for replacing NaNs
        f: int, default=1
            Factor to multiply the first column
        """
        self.strategy_ = strategy_
        self.factor = f

    def fit(self, df):
        if self.strategy_ == 'median':
            self.statistics_ = df.median()

    def transform(self, df_):
        df = df_.copy()
        if self.strategy_ == 'median':
            for column_ in df.columns:
                df.loc[df[column_].isna(), column_] = self.statistics_[column_]
        elif self.strategy_ == 'mult':
            df[df.columns[0]] *= self.factor
        return df

imput_p = Imputer_p()
imput_p.fit(df)
p1 = imput_p.transform(df[df.columns[:-1]])
p1

class Group_Imputer_p(Imputer_p):

    def __init__(self, strategy_='median', f=None):
        super().__init__(strategy_='median', f=None)

    def fit(self, df_, y=-1):
        """y: index of the column with dependent variables, default -1 (the last one)"""
        df = df_[df_.columns[:y]].copy()
        self.categories = df_[df_.columns[y]].copy()
        self.stack = []
        for category in self.categories.unique():
            self.stack.append(df.loc[self.categories == category])

    def transform(self):
        df = pd.DataFrame()
        for astack in self.stack:
            super().fit(astack)
            df = df.append(super().transform(astack))
        return df

X = df[df.columns[:-1]].to_numpy()
y = df[df.columns[-1]].to_numpy()

class Imputer_n:

    def __init__(self, strategy_='median', f=1):
        self.strategy_ = strategy_
        self.factor = f

    def fit(self, X):
        if self.strategy_ == 'median':
            self.statistics_ = np.nanmedian(X, axis=0)

    def transform(self, X):
        X_ = X.copy()
        if self.strategy_ == 'median':
            X_ = np.where(np.isnan(X_), self.statistics_, X_)
        elif self.strategy_ == 'mult':
            X_[:, 0] *= self.factor
        return X_

imput_n = Imputer_n('median', 90)
imput_n.fit(X)
n1 = imput_n.transform(X)
n1 - p1.to_numpy()
code
74056828/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/california-housing-prices/housing.csv')
df1 = pd.DataFrame()
for location in df['ocean_proximity'].unique():
    df1 = df1.append(df.loc[df['ocean_proximity'] == location][:4])
df1 = df1.reset_index()
df = df1.drop(columns='index')
df
code
105200540/cell_25
[ "image_output_1.png" ]
from tensorflow.keras.utils import to_categorical
import h5py
import matplotlib.pyplot as plt
import numpy as np

INPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/inputs.hdf5'
OUTPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/outputs.hdf5'
MODEL_PATH = 'output/srcnn.model'
PLOT_PATH = 'output/plot.png'
BATCH_SIZE = 128
NUM_EPOCHS = 10
SCALE = 2.0
INPUT_DIM = 33
LABEL_SIZE = 21
PAD = int((INPUT_DIM - LABEL_SIZE) / 2.0)
STRIDE = 14

class HDF5DatasetGenerator:

    def __init__(self, db_path, batch_size, preprocessors=None, aug=None, binarize=True, classes=2):
        self.batch_size = batch_size
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes
        self.db = h5py.File(db_path, 'r')
        self.num_images = self.db['labels'].shape[0]

    def generator(self, passes=np.inf):
        epochs = 0
        while epochs < passes:
            for i in np.arange(0, self.num_images, self.batch_size):
                images = self.db['images'][i:i + self.batch_size]
                labels = self.db['labels'][i:i + self.batch_size]
                if self.binarize:
                    labels = to_categorical(labels, self.classes)
                if self.preprocessors is not None:
                    proc_images = []
                    for image in images:
                        for p in self.preprocessors:
                            image = p.preprocess(image)
                        proc_images.append(image)
                    images = np.array(proc_images)
                if self.aug is not None:
                    images, labels = next(self.aug.flow(images, labels, batch_size=self.batch_size))
                yield (images, labels)
            epochs += 1

    def close(self):
        self.db.close()

plt.style.use('ggplot')
plt.figure(figsize=(16, 10))
# H is the training History object produced by model.fit_generator in an earlier cell (not part of this record)
plt.plot(np.arange(0, NUM_EPOCHS), H.history['loss'], label='loss')
plt.title('Loss on super resolution training')
plt.xlabel('Epoch #')
plt.ylabel('Loss')
plt.legend()
plt.show()
code
105200540/cell_34
[ "text_plain_output_1.png" ]
from tensorflow.keras.utils import to_categorical
import cv2
import h5py
import numpy as np

INPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/inputs.hdf5'
OUTPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/outputs.hdf5'
MODEL_PATH = 'output/srcnn.model'
PLOT_PATH = 'output/plot.png'
BATCH_SIZE = 128
NUM_EPOCHS = 10
SCALE = 2.0
INPUT_DIM = 33
LABEL_SIZE = 21
PAD = int((INPUT_DIM - LABEL_SIZE) / 2.0)
STRIDE = 14

class HDF5DatasetGenerator:

    def __init__(self, db_path, batch_size, preprocessors=None, aug=None, binarize=True, classes=2):
        self.batch_size = batch_size
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes
        self.db = h5py.File(db_path, 'r')
        self.num_images = self.db['labels'].shape[0]

    def generator(self, passes=np.inf):
        epochs = 0
        while epochs < passes:
            for i in np.arange(0, self.num_images, self.batch_size):
                images = self.db['images'][i:i + self.batch_size]
                labels = self.db['labels'][i:i + self.batch_size]
                if self.binarize:
                    labels = to_categorical(labels, self.classes)
                if self.preprocessors is not None:
                    proc_images = []
                    for image in images:
                        for p in self.preprocessors:
                            image = p.preprocess(image)
                        proc_images.append(image)
                    images = np.array(proc_images)
                if self.aug is not None:
                    images, labels = next(self.aug.flow(images, labels, batch_size=self.batch_size))
                yield (images, labels)
            epochs += 1

    def close(self):
        self.db.close()

args = {'image': '../input/sample-images/jemma.png', 'baseline': 'output/baseline.png', 'output': 'output/output.png'}
print('[INFO] generating image...')
image = cv2.imread(args['image'])
h, w = image.shape[:2]
print('(h, w): ', (h, w))
w -= int(w % SCALE)
h -= int(h % SCALE)
image = image[0:h, 0:w]
print('image.shape: ', image.shape)
code
105200540/cell_20
[ "text_plain_output_1.png" ]
from tensorflow.keras import backend
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model

INPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/inputs.hdf5'
OUTPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/outputs.hdf5'
MODEL_PATH = 'output/srcnn.model'
PLOT_PATH = 'output/plot.png'
BATCH_SIZE = 128
NUM_EPOCHS = 10
SCALE = 2.0
INPUT_DIM = 33
LABEL_SIZE = 21
PAD = int((INPUT_DIM - LABEL_SIZE) / 2.0)
STRIDE = 14

class SRCNN:

    @staticmethod
    def build(width, height, depth):
        model = Sequential()
        input_shape = (height, width, depth)
        if backend.image_data_format() == 'channels_first':
            input_shape = (depth, height, width)
        model.add(Conv2D(64, (9, 9), kernel_initializer='he_normal', input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (1, 1), kernel_initializer='he_normal'))
        model.add(Activation('relu'))
        model.add(Conv2D(depth, (5, 5), kernel_initializer='he_normal'))
        model.add(Activation('relu'))
        return model

model = SRCNN.build(33, 33, 3)
print('[INFO] compiling model...')
opt = Adam(learning_rate=0.001, decay=0.001 / NUM_EPOCHS)
model = SRCNN.build(width=INPUT_DIM, height=INPUT_DIM, depth=3)
model.compile(loss='mse', optimizer=opt)
code
105200540/cell_40
[ "text_plain_output_1.png" ]
from PIL import Image
from tensorflow.keras.utils import to_categorical
import PIL
import cv2
import h5py
import matplotlib.pyplot as plt
import numpy as np

INPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/inputs.hdf5'
OUTPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/outputs.hdf5'
MODEL_PATH = 'output/srcnn.model'
PLOT_PATH = 'output/plot.png'
BATCH_SIZE = 128
NUM_EPOCHS = 10
SCALE = 2.0
INPUT_DIM = 33
LABEL_SIZE = 21
PAD = int((INPUT_DIM - LABEL_SIZE) / 2.0)
STRIDE = 14

class HDF5DatasetGenerator:

    def __init__(self, db_path, batch_size, preprocessors=None, aug=None, binarize=True, classes=2):
        self.batch_size = batch_size
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes
        self.db = h5py.File(db_path, 'r')
        self.num_images = self.db['labels'].shape[0]

    def generator(self, passes=np.inf):
        epochs = 0
        while epochs < passes:
            for i in np.arange(0, self.num_images, self.batch_size):
                images = self.db['images'][i:i + self.batch_size]
                labels = self.db['labels'][i:i + self.batch_size]
                if self.binarize:
                    labels = to_categorical(labels, self.classes)
                if self.preprocessors is not None:
                    proc_images = []
                    for image in images:
                        for p in self.preprocessors:
                            image = p.preprocess(image)
                        proc_images.append(image)
                    images = np.array(proc_images)
                if self.aug is not None:
                    images, labels = next(self.aug.flow(images, labels, batch_size=self.batch_size))
                yield (images, labels)
            epochs += 1

    def close(self):
        self.db.close()

plt.style.use('ggplot')
args = {'image': '../input/sample-images/jemma.png', 'baseline': 'output/baseline.png', 'output': 'output/output.png'}
image = cv2.imread(args['image'])
h, w = image.shape[:2]
w -= int(w % SCALE)
h -= int(h % SCALE)
image = image[0:h, 0:w]
lowW = int(w * (1.0 / SCALE))
lowH = int(h * (1.0 / SCALE))
highW = int(lowW * (SCALE / 1.0))
highH = int(lowH * (SCALE / 1.0))
scaled = np.array(Image.fromarray(image).resize((lowW, lowH), resample=PIL.Image.BICUBIC))
scaled = np.array(Image.fromarray(scaled).resize((highW, highH), resample=PIL.Image.BICUBIC))
cv2.imwrite(args['baseline'], scaled)
output = np.zeros(scaled.shape)
h, w = output.shape[:2]
output = output[PAD:h - (h % INPUT_DIM + PAD), PAD:w - (w % INPUT_DIM + PAD)]
output = np.clip(output, 0, 255).astype('uint8')
cv2.imwrite(args['output'], output)
code
105200540/cell_41
[ "image_output_1.png" ]
from tensorflow.keras.utils import to_categorical
from typing import List
import cv2
import h5py
import matplotlib.image as img
import matplotlib.pyplot as plt
import numpy as np

INPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/inputs.hdf5'
OUTPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/outputs.hdf5'
MODEL_PATH = 'output/srcnn.model'
PLOT_PATH = 'output/plot.png'
BATCH_SIZE = 128
NUM_EPOCHS = 10
SCALE = 2.0
INPUT_DIM = 33
LABEL_SIZE = 21
PAD = int((INPUT_DIM - LABEL_SIZE) / 2.0)
STRIDE = 14

class HDF5DatasetGenerator:

    def __init__(self, db_path, batch_size, preprocessors=None, aug=None, binarize=True, classes=2):
        self.batch_size = batch_size
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes
        self.db = h5py.File(db_path, 'r')
        self.num_images = self.db['labels'].shape[0]

    def generator(self, passes=np.inf):
        epochs = 0
        while epochs < passes:
            for i in np.arange(0, self.num_images, self.batch_size):
                images = self.db['images'][i:i + self.batch_size]
                labels = self.db['labels'][i:i + self.batch_size]
                if self.binarize:
                    labels = to_categorical(labels, self.classes)
                if self.preprocessors is not None:
                    proc_images = []
                    for image in images:
                        for p in self.preprocessors:
                            image = p.preprocess(image)
                        proc_images.append(image)
                    images = np.array(proc_images)
                if self.aug is not None:
                    images, labels = next(self.aug.flow(images, labels, batch_size=self.batch_size))
                yield (images, labels)
            epochs += 1

    def close(self):
        self.db.close()

plt.style.use('ggplot')
args = {'image': '../input/sample-images/jemma.png', 'baseline': 'output/baseline.png', 'output': 'output/output.png'}
image = cv2.imread(args['image'])
h, w = image.shape[:2]
w -= int(w % SCALE)
h -= int(h % SCALE)
image = image[0:h, 0:w]

images = {'origin': args['image'], 'baseline': args['baseline'], 'output': args['output']}
f: plt.Figure
axes: List[plt.Axes]
f, axes = plt.subplots(1, len(images))
f.set_size_inches((16, 9))
for i, (name, path) in enumerate(images.items()):
    image = img.imread(path)
    axes[i].axis('off')
    axes[i].set_title(name)
    axes[i].imshow(image)
plt.show()
code
105200540/cell_32
[ "text_plain_output_1.png" ]
from tensorflow.keras.models import load_model

INPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/inputs.hdf5'
OUTPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/outputs.hdf5'
MODEL_PATH = 'output/srcnn.model'
PLOT_PATH = 'output/plot.png'
BATCH_SIZE = 128
NUM_EPOCHS = 10
SCALE = 2.0
INPUT_DIM = 33
LABEL_SIZE = 21
PAD = int((INPUT_DIM - LABEL_SIZE) / 2.0)
STRIDE = 14

print('[INFO] loading model...')
model = load_model(MODEL_PATH)
code
105200540/cell_38
[ "text_plain_output_1.png" ]
for y in range(0, h - INPUT_DIM + 1, LABEL_SIZE):
    for x in range(0, w - INPUT_DIM + 1, LABEL_SIZE):
        crop = scaled[y:y + INPUT_DIM, x:x + INPUT_DIM].astype('float32')
        P = model.predict(np.expand_dims(crop, axis=0))
        P = P.reshape((LABEL_SIZE, LABEL_SIZE, 3))
        output[y + PAD:y + PAD + LABEL_SIZE, x + PAD:x + PAD + LABEL_SIZE] = P
code
105200540/cell_35
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from PIL import Image
from tensorflow.keras.utils import to_categorical
import PIL
import cv2
import h5py
import matplotlib.pyplot as plt
import numpy as np

INPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/inputs.hdf5'
OUTPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/outputs.hdf5'
MODEL_PATH = 'output/srcnn.model'
PLOT_PATH = 'output/plot.png'
BATCH_SIZE = 128
NUM_EPOCHS = 10
SCALE = 2.0
INPUT_DIM = 33
LABEL_SIZE = 21
PAD = int((INPUT_DIM - LABEL_SIZE) / 2.0)
STRIDE = 14

class HDF5DatasetGenerator:

    def __init__(self, db_path, batch_size, preprocessors=None, aug=None, binarize=True, classes=2):
        self.batch_size = batch_size
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes
        self.db = h5py.File(db_path, 'r')
        self.num_images = self.db['labels'].shape[0]

    def generator(self, passes=np.inf):
        epochs = 0
        while epochs < passes:
            for i in np.arange(0, self.num_images, self.batch_size):
                images = self.db['images'][i:i + self.batch_size]
                labels = self.db['labels'][i:i + self.batch_size]
                if self.binarize:
                    labels = to_categorical(labels, self.classes)
                if self.preprocessors is not None:
                    proc_images = []
                    for image in images:
                        for p in self.preprocessors:
                            image = p.preprocess(image)
                        proc_images.append(image)
                    images = np.array(proc_images)
                if self.aug is not None:
                    images, labels = next(self.aug.flow(images, labels, batch_size=self.batch_size))
                yield (images, labels)
            epochs += 1

    def close(self):
        self.db.close()

plt.style.use('ggplot')
args = {'image': '../input/sample-images/jemma.png', 'baseline': 'output/baseline.png', 'output': 'output/output.png'}
image = cv2.imread(args['image'])
h, w = image.shape[:2]
w -= int(w % SCALE)
h -= int(h % SCALE)
image = image[0:h, 0:w]
lowW = int(w * (1.0 / SCALE))
lowH = int(h * (1.0 / SCALE))
highW = int(lowW * (SCALE / 1.0))
highH = int(lowH * (SCALE / 1.0))
scaled = np.array(Image.fromarray(image).resize((lowW, lowH), resample=PIL.Image.BICUBIC))
scaled = np.array(Image.fromarray(scaled).resize((highW, highH), resample=PIL.Image.BICUBIC))
cv2.imwrite(args['baseline'], scaled)
code
105200540/cell_24
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from tensorflow.keras import backend
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model

INPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/inputs.hdf5'
OUTPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/outputs.hdf5'
MODEL_PATH = 'output/srcnn.model'
PLOT_PATH = 'output/plot.png'
BATCH_SIZE = 128
NUM_EPOCHS = 10
SCALE = 2.0
INPUT_DIM = 33
LABEL_SIZE = 21
PAD = int((INPUT_DIM - LABEL_SIZE) / 2.0)
STRIDE = 14

class SRCNN:

    @staticmethod
    def build(width, height, depth):
        model = Sequential()
        input_shape = (height, width, depth)
        if backend.image_data_format() == 'channels_first':
            input_shape = (depth, height, width)
        model.add(Conv2D(64, (9, 9), kernel_initializer='he_normal', input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (1, 1), kernel_initializer='he_normal'))
        model.add(Activation('relu'))
        model.add(Conv2D(depth, (5, 5), kernel_initializer='he_normal'))
        model.add(Activation('relu'))
        return model

model = SRCNN.build(33, 33, 3)
opt = Adam(learning_rate=0.001, decay=0.001 / NUM_EPOCHS)
model = SRCNN.build(width=INPUT_DIM, height=INPUT_DIM, depth=3)
model.compile(loss='mse', optimizer=opt)
print('[INFO] serializing model...')
model.save(MODEL_PATH, overwrite=True)
code
105200540/cell_22
[ "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
H = model.fit_generator(super_res_generator(inputs.generator(), targets.generator()), steps_per_epoch=inputs.num_images // BATCH_SIZE, epochs=NUM_EPOCHS, verbose=1)
code
105200540/cell_12
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from tensorflow.keras import backend
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model

class SRCNN:

    @staticmethod
    def build(width, height, depth):
        model = Sequential()
        input_shape = (height, width, depth)
        if backend.image_data_format() == 'channels_first':
            input_shape = (depth, height, width)
        model.add(Conv2D(64, (9, 9), kernel_initializer='he_normal', input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (1, 1), kernel_initializer='he_normal'))
        model.add(Activation('relu'))
        model.add(Conv2D(depth, (5, 5), kernel_initializer='he_normal'))
        model.add(Activation('relu'))
        return model

model = SRCNN.build(33, 33, 3)
plot_model(model, to_file='srcnn.png', show_shapes=True)
code
105200540/cell_36
[ "text_plain_output_1.png" ]
from PIL import Image
from tensorflow.keras.utils import to_categorical
import PIL
import cv2
import h5py
import matplotlib.pyplot as plt
import numpy as np

INPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/inputs.hdf5'
OUTPUTS_DB = '../input/ukbench100-patches-for-sr-hdf5/output/outputs.hdf5'
MODEL_PATH = 'output/srcnn.model'
PLOT_PATH = 'output/plot.png'
BATCH_SIZE = 128
NUM_EPOCHS = 10
SCALE = 2.0
INPUT_DIM = 33
LABEL_SIZE = 21
PAD = int((INPUT_DIM - LABEL_SIZE) / 2.0)
STRIDE = 14

class HDF5DatasetGenerator:

    def __init__(self, db_path, batch_size, preprocessors=None, aug=None, binarize=True, classes=2):
        self.batch_size = batch_size
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes
        self.db = h5py.File(db_path, 'r')
        self.num_images = self.db['labels'].shape[0]

    def generator(self, passes=np.inf):
        epochs = 0
        while epochs < passes:
            for i in np.arange(0, self.num_images, self.batch_size):
                images = self.db['images'][i:i + self.batch_size]
                labels = self.db['labels'][i:i + self.batch_size]
                if self.binarize:
                    labels = to_categorical(labels, self.classes)
                if self.preprocessors is not None:
                    proc_images = []
                    for image in images:
                        for p in self.preprocessors:
                            image = p.preprocess(image)
                        proc_images.append(image)
                    images = np.array(proc_images)
                if self.aug is not None:
                    images, labels = next(self.aug.flow(images, labels, batch_size=self.batch_size))
                yield (images, labels)
            epochs += 1

    def close(self):
        self.db.close()

plt.style.use('ggplot')
args = {'image': '../input/sample-images/jemma.png', 'baseline': 'output/baseline.png', 'output': 'output/output.png'}
image = cv2.imread(args['image'])
h, w = image.shape[:2]
w -= int(w % SCALE)
h -= int(h % SCALE)
image = image[0:h, 0:w]
lowW = int(w * (1.0 / SCALE))
lowH = int(h * (1.0 / SCALE))
highW = int(lowW * (SCALE / 1.0))
highH = int(lowH * (SCALE / 1.0))
scaled = np.array(Image.fromarray(image).resize((lowW, lowH), resample=PIL.Image.BICUBIC))
scaled = np.array(Image.fromarray(scaled).resize((highW, highH), resample=PIL.Image.BICUBIC))
cv2.imwrite(args['baseline'], scaled)
output = np.zeros(scaled.shape)
h, w = output.shape[:2]
print('(h, w): ', (h, w))
code
74070177/cell_2
[ "text_plain_output_1.png" ]
!apt install -y libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg
code
74070177/cell_1
[ "text_plain_output_1.png" ]
!git clone https://github.com/mcdermottLab/pycochleagram.git
code
74070177/cell_7
[ "text_plain_output_1.png" ]
from distutils.dir_util import copy_tree

copy_tree('../input/cochleagramfile', '../working/')
code
74070177/cell_3
[ "text_plain_output_1.png" ]
!pip install pqdm
code
74070177/cell_22
[ "text_plain_output_1.png" ]
from pqdm.processes import pqdm
from scipy import signal
import cochleagram as cgram
import cv2
import erbfilter as erb
import glob
import json  # assumed: needed for json.loads below; imported in an earlier cell of the notebook
import librosa
import numpy as np  # linear algebra
import os  # assumed: needed for os.chdir/os.path below; imported in an earlier cell of the notebook
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

os.chdir('/kaggle/working')
output_dir = './data'
if not os.path.isdir(output_dir):
    os.mkdir(output_dir)
    os.mkdir('./data/nocall')

path_to_json = '../input/freefield1010/freefield1010/*/*'
json_pattern = os.path.join(path_to_json, '*.json')
file_list = glob.glob(json_pattern)
dfs = []
for file in file_list:
    with open(file) as f:
        json_data = pd.json_normalize(json.loads(f.read()))
    dfs.append(json_data)
df = pd.concat(dfs, sort=False)
df['hasbird'] = df.apply(lambda x: 'bird' in x['tags'], axis=1).astype(int)
train_ff1010 = df.sort_values(by='id').reset_index(drop=True)
train_ff1010 = train_ff1010[train_ff1010.hasbird == 0].reset_index(drop=True)

def get_clip(path):
    clip, sr_native = librosa.load(path, sr=None, mono=True, dtype=np.float32)
    sr = 32000
    if sr_native != 0:
        clip = librosa.resample(clip, sr_native, sr, res_type='kaiser_best')
    return (clip, sr, sr_native)

class cochleagram:
    IMAGE_HEIGHT = 256
    IMAGE_WIDTH = 576
    SR = 32000
    hi_lim = SR // 2
    low_lim = 10
    n_filters = int(np.floor(erb.freq2erb(hi_lim) - erb.freq2erb(low_lim)) - 1)
    nonlinearity = 'db'
    ret_mode = ('envs',)
    sample_factor = 2

def cochlea(signal):
    human_co = cgram.human_cochleagram(signal, sr=cochleagram.SR, n=cochleagram.n_filters, low_lim=cochleagram.low_lim, hi_lim=cochleagram.hi_lim, sample_factor=cochleagram.sample_factor, nonlinearity=cochleagram.nonlinearity)
    return human_co

config_patch = {'use_inv_stem': True, 'use_patch': [True, 16]}

def inv_stem(x):
    x1 = x.transpose(0, 1).view(24, 24, 16, 16)
    y = torch.zeros(384, 384, dtype=x.dtype)
    for i in range(24):
        for j in range(24):
            y[i * 16:(i + 1) * 16, j * 16:(j + 1) * 16] = x1[i, j]
    return y

def image_patch(mel_spect):
    if config_patch.get('use_inv_stem'):
        spect = torch.from_numpy(mel_spect)
        spect = inv_stem(spect)
    elif config_patch.get('use_patch')[0]:
        patch_size = config_patch.get('use_patch')[1]
        spect = np.zeros((384, 384), dtype=np.float32)
        for i in range(0, 192, patch_size):
            spect[2 * i:2 * i + patch_size, :] = mel_spect[i:i + patch_size, :384]
            spect[2 * i + patch_size:2 * i + 2 * patch_size, :] = mel_spect[i:i + patch_size, 384:]
        mel_spect = spect
    return spect.cpu().detach().numpy()

def work_sub(path):
    output_path = './data/nocall'
    clip, sr, sr_native = get_clip(path)
    clip = clip.astype('float32')
    length = clip.shape[0]
    head, tail = os.path.split(path[:-4])
    spect = cochlea(clip)
    res = cv2.resize(np.flipud(spect), dsize=(576, 256), interpolation=cv2.INTER_CUBIC)
    filename = 'ff1010_%d_0.jpg' % int(tail)
    img_patch = image_patch(res)
    cv2.imwrite(os.path.join(output_path, filename), img_patch * 255)
    return (sr, sr_native, length)

path_to_wav = '../input/freefield1010/freefield1010/*/*'
json_pattern = os.path.join(path_to_wav, '*.wav')
file_list_wav = glob.glob(json_pattern)
check = []
for file in file_list_wav:
    head, tail = os.path.split(file[:-4])
    check.append(int(tail))
list_remove = file_list_wav.copy()
for file in file_list_wav:
    head, tail = os.path.split(file[:-4])
    if not int(tail) in list(train_ff1010['id']):
        list_remove.remove(file)
res = pqdm(list_remove, work_sub, n_jobs=8)
code
74070177/cell_12
[ "text_plain_output_1.png" ]
import glob
import json  # assumed: needed for json.loads below; imported in an earlier cell of the notebook
import os  # assumed: needed for os.chdir/os.path below; imported in an earlier cell of the notebook
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

os.chdir('/kaggle/working')
output_dir = './data'
if not os.path.isdir(output_dir):
    os.mkdir(output_dir)
    os.mkdir('./data/nocall')

path_to_json = '../input/freefield1010/freefield1010/*/*'
json_pattern = os.path.join(path_to_json, '*.json')
file_list = glob.glob(json_pattern)
dfs = []
for file in file_list:
    with open(file) as f:
        json_data = pd.json_normalize(json.loads(f.read()))
    dfs.append(json_data)
df = pd.concat(dfs, sort=False)
df['hasbird'] = df.apply(lambda x: 'bird' in x['tags'], axis=1).astype(int)
train_ff1010 = df.sort_values(by='id').reset_index(drop=True)
train_ff1010 = train_ff1010[train_ff1010.hasbird == 0].reset_index(drop=True)
train_ff1010.head(2)
code
74070177/cell_5
[ "text_html_output_1.png" ]
import os  # assumed: os is imported in an earlier cell of the notebook

print(os.getcwd())
os.chdir('./pycochleagram')
print(os.getcwd())
!python setup.py install
code
128016087/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)

# `train` is loaded in an earlier cell of the notebook (not included in this dump)
fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()

mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
sns.scatterplot(data=train, x='Duration', y='Calories_Burned', hue='Gender', hue_order=['male', 'female'])
code
128016087/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)

fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()

plt.figure(figsize=(10, 10))
mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
sns.heatmap(train.corr(), mask=mask, annot=True, cmap='Blues')
plt.show()
code
128016087/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)

fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()

mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True

def GenderEncoder(df):
    df.loc[df['Gender'] == 'male', 'Gender'] = 0
    df.loc[df['Gender'] == 'female', 'Gender'] = 1
    return df

train = GenderEncoder(train).reset_index(drop=True)
test = GenderEncoder(test).reset_index(drop=True)
train
code
128016087/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)

fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()

mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
sns.scatterplot(data=train, x='Height', y='Weight', hue='Gender', hue_order=['male', 'female'])
code
128016087/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

exercise = pd.read_csv('/kaggle/input/fmendesdat263xdemos/exercise.csv')
calories = pd.read_csv('/kaggle/input/fmendesdat263xdemos/calories.csv')
exercise['Calories_Burned'] = calories['Calories']
exercise = exercise.drop(['User_ID'], axis=1)
exercise
code
128016087/cell_2
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import warnings
import pandas as pd
import numpy as np
import random
import os
import gc
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Ridge
import matplotlib.pyplot as plt
import seaborn as sns

warnings.filterwarnings('ignore')
code
128016087/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns

fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()
code
128016087/cell_19
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)

fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()

mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
sns.scatterplot(data=train, x='Weight', y='Calories_Burned', hue='Gender', hue_order=['male', 'female'])
code
128016087/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)

fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()

mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True

plt.plot(train['Age'], train['Calories_Burned'], 'g*')
plt.title('Age vs Calories Burned')
plt.xlabel('Age')
plt.ylabel('Calories Burned')
plt.show()

plt.plot(train['Height'], train['Calories_Burned'], 'g*')
plt.title('Height vs Calories Burned')
plt.xlabel('Height')
plt.ylabel('Calories Burned')
plt.show()

plt.plot(train['Weight'], train['Calories_Burned'], 'g*')
plt.title('Weight vs Calories Burned')
plt.xlabel('Weight')
plt.ylabel('Calories Burned')
plt.show()
code
128016087/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)

fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()

mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True

plt.plot(train['Heart_Rate'], train['Calories_Burned'], 'go')
plt.title('Heart_Rate vs Calories Burned')
plt.xlabel('Heart_Rate')
plt.ylabel('Calories Burned')
plt.show()

plt.plot(train['Body_Temp'], train['Calories_Burned'], 'go')
plt.title('Body Temp vs Calories Burned')
plt.xlabel('Body Temp')
plt.ylabel('Calories Burned')
plt.show()

plt.plot(train['Duration'], train['Calories_Burned'], 'go')
plt.title('Duration vs Calories Burned')
plt.xlabel('Duration')
plt.ylabel('Calories Burned')
plt.show()
code
128016087/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)

fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()

mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True

def GenderEncoder(df):
    df.loc[df['Gender'] == 'male', 'Gender'] = 0
    df.loc[df['Gender'] == 'female', 'Gender'] = 1
    return df

train = GenderEncoder(train).reset_index(drop=True)
test = GenderEncoder(test).reset_index(drop=True)
train

train_x = train[['Exercise_Duration', 'Gender', 'BPM', 'Age', 'Weight(lb)']]
train_y = train['Calories_Burned']
code
128016087/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seed_everything(42)

fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show()

mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True

plt.figure(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, cmap='YlOrRd')
plt.show()
code
90120014/cell_4
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt

path = '../input/ham1000-segmentation-and-classification/images/ISIC_0024306.jpg'
img = cv2.imread(path)
img2 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = (img2[:, :, 0], img2[:, :, 1], img2[:, :, 2])
hist_h = cv2.calcHist([h], [0], None, [256], [0, 256])
hist_h = cv2.normalize(hist_h, hist_h)
hist_s = cv2.calcHist([s], [0], None, [256], [0, 256])
hist_s = cv2.normalize(hist_s, hist_s)
hist_v = cv2.calcHist([v], [0], None, [256], [0, 256])
hist_v = cv2.normalize(hist_v, hist_v)
plt.plot(hist_h, color='r', label='h')
plt.plot(hist_s, color='g', label='s')
plt.plot(hist_v, color='b', label='v')
plt.legend()
plt.show()
code
90120014/cell_14
[ "image_output_1.png" ]
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_labels = pd.read_csv('../input/ham1000-segmentation-and-classification/GroundTruth.csv')
df_labels['image'] = df_labels['image'] + '.jpg'
labels = ['MEL', 'NV', 'BCC', 'AKIEC', 'BKL', 'DF', 'VASC']
label_list = []
for i in range(len(df_labels)):
    row = list(df_labels.iloc[i])
    del row[0]
    index = np.argmax(row)
    label = labels[index]
    label_list.append(label)
df_labels['label'] = label_list
df_labels = df_labels.drop(labels, axis=1)

path = '../input/ham1000-segmentation-and-classification/images/ISIC_0024306.jpg'
img = cv2.imread(path)
img2 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = (img2[:, :, 0], img2[:, :, 1], img2[:, :, 2])
hist_h = cv2.calcHist([h], [0], None, [256], [0, 256])
hist_h = cv2.normalize(hist_h, hist_h)
hist_s = cv2.calcHist([s], [0], None, [256], [0, 256])
hist_s = cv2.normalize(hist_s, hist_s)
hist_v = cv2.calcHist([v], [0], None, [256], [0, 256])
hist_v = cv2.normalize(hist_v, hist_v)

class HSVHistogram:

    def __init__(self, bins):
        self.bins = bins

    def describe(self, image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        hist = cv2.calcHist([image], [0, 1, 2], None, self.bins, [0, 256, 0, 256, 0, 256])
        hist = cv2.normalize(hist, hist)
        return hist.flatten()

desc = HSVHistogram([8, 8, 8])
features_names = ['feature_' + str(i) for i in range(8 ** 3)]
df_features = pd.DataFrame(columns=features_names)
img_list = glob.glob('../input/ham1000-segmentation-and-classification/images/' + '/*.jpg')
for idx, img_path in enumerate(sorted(img_list[:1000])):
    k = img_path[img_path.rfind('/') + 1:]
    image = cv2.imread(img_path)
    features = desc.describe(image)
    df_features.loc[k, :] = features
np_features = df_features[features_names].values

query_path = '../input/ham1000-segmentation-and-classification/images/ISIC_0024306.jpg'
query_image = cv2.imread(query_path)
RGB_im = cv2.cvtColor(query_image, cv2.COLOR_BGR2RGB)
label = df_labels[df_labels['image'] == 'ISIC_0024306.jpg']['label'].iloc[0]
plt.imshow(RGB_im)
plt.title(label)
query_features = desc.describe(query_image)
query_features = np.array(query_features)

results = {}
for idx in range(0, len(np_features)):
    dist = np.linalg.norm(query_features - np_features[idx])
    results[idx] = dist
results = {k: v for k, v in sorted(results.items(), key=lambda item: item[1])}
top_10 = list(results.keys())[:10]
top_10_dist = list(results.values())[:10]

fig, axes = plt.subplots(2, 5, figsize=(14, 5), sharey=True, sharex=True)
results_img = df_features.iloc[top_10].index
for idx, img_path in enumerate(results_img):
    ax = axes.flat[idx]
    path = '../input/ham1000-segmentation-and-classification/images/' + img_path
    label = df_labels[df_labels['image'] == img_path]['label'].iloc[0]
    image = cv2.imread(path)
    RGB_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    ax.imshow(RGB_im)
    ax.set_title(str(round(top_10_dist[idx], 2)) + '(' + label + ')')
code
90120014/cell_10
[ "image_output_1.png" ]
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_labels = pd.read_csv('../input/ham1000-segmentation-and-classification/GroundTruth.csv')
df_labels['image'] = df_labels['image'] + '.jpg'
labels = ['MEL', 'NV', 'BCC', 'AKIEC', 'BKL', 'DF', 'VASC']
label_list = []
for i in range(len(df_labels)):
    row = list(df_labels.iloc[i])
    del row[0]
    index = np.argmax(row)
    label = labels[index]
    label_list.append(label)
df_labels['label'] = label_list
df_labels = df_labels.drop(labels, axis=1)

path = '../input/ham1000-segmentation-and-classification/images/ISIC_0024306.jpg'
img = cv2.imread(path)
img2 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = (img2[:, :, 0], img2[:, :, 1], img2[:, :, 2])
hist_h = cv2.calcHist([h], [0], None, [256], [0, 256])
hist_h = cv2.normalize(hist_h, hist_h)
hist_s = cv2.calcHist([s], [0], None, [256], [0, 256])
hist_s = cv2.normalize(hist_s, hist_s)
hist_v = cv2.calcHist([v], [0], None, [256], [0, 256])
hist_v = cv2.normalize(hist_v, hist_v)

class HSVHistogram:

    def __init__(self, bins):
        self.bins = bins

    def describe(self, image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        hist = cv2.calcHist([image], [0, 1, 2], None, self.bins, [0, 256, 0, 256, 0, 256])
        hist = cv2.normalize(hist, hist)
        return hist.flatten()

desc = HSVHistogram([8, 8, 8])
features_names = ['feature_' + str(i) for i in range(8 ** 3)]
df_features = pd.DataFrame(columns=features_names)
img_list = glob.glob('../input/ham1000-segmentation-and-classification/images/' + '/*.jpg')
for idx, img_path in enumerate(sorted(img_list[:1000])):
    k = img_path[img_path.rfind('/') + 1:]
    image = cv2.imread(img_path)
    features = desc.describe(image)
    df_features.loc[k, :] = features
np_features = df_features[features_names].values

query_path = '../input/ham1000-segmentation-and-classification/images/ISIC_0024306.jpg'
query_image = cv2.imread(query_path)
RGB_im = cv2.cvtColor(query_image, cv2.COLOR_BGR2RGB)
label = df_labels[df_labels['image'] == 'ISIC_0024306.jpg']['label'].iloc[0]
plt.imshow(RGB_im)
plt.title(label)
query_features = desc.describe(query_image)
query_features = np.array(query_features)
code
130005921/cell_9
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
def data_exploration(df):
    return

data_exploration(greeks)
data_exploration(train)
code
130005921/cell_6
[ "text_html_output_1.png", "text_plain_output_1.png" ]
def data_exploration(df):
    print(f'Shape of this set:', df.shape)
    print('*' * 90)
    print(f'Columns of this set:', df.columns)
    print('*' * 90)
    print(f'Missing rows in this set:', df.info())
    print('*' * 90)
    return df.head()

data_exploration(greeks)  # greeks is loaded in an earlier cell of the notebook (not included in this dump)
code
130005921/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
def data_exploration(df):
    return

data_exploration(greeks)
data_exploration(sample_submission)
code
130005921/cell_8
[ "text_plain_output_1.png" ]
def data_exploration(df):
    return

data_exploration(greeks)
data_exploration(test)
code
130005921/cell_14
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import seaborn as sns
from lazypredict.Supervised import LazyClassifier  # assumed import paths for the names used below;
from sklearn.model_selection import train_test_split  # these imports likely live in an earlier cell
from sklearn.preprocessing import StandardScaler

def data_exploration(df):
    return

data_exploration(greeks)

def impute_and_replace(df):
    numerical_columns = df.select_dtypes(include=[np.number]).columns
    df[numerical_columns] = df[numerical_columns].fillna(df[numerical_columns].mean())
    categorical_column = 'EJ'
    df[categorical_column] = df[categorical_column].replace({'A': 0, 'B': 1})
    return df

train1 = impute_and_replace(train.copy())
X = train1.drop(['Id', 'Class'], axis=1)
y = train1['Class']
scaler = StandardScaler()
X_train = scaler.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, shuffle=True, stratify=y)
clf = LazyClassifier(verbose=0, ignore_warnings=True, custom_metric=None)
models, predictions = clf.fit(X_train, X_test, y_train, y_test)
models
code
130005921/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import seaborn as sns

sns.heatmap(train.isnull(), cmap='cool')
code
130005921/cell_12
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import seaborn as sns

def data_exploration(df):
    return

data_exploration(greeks)

def impute_and_replace(df):
    numerical_columns = df.select_dtypes(include=[np.number]).columns
    df[numerical_columns] = df[numerical_columns].fillna(df[numerical_columns].mean())
    categorical_column = 'EJ'
    df[categorical_column] = df[categorical_column].replace({'A': 0, 'B': 1})
    return df

train1 = impute_and_replace(train.copy())
train1.info()
code
2041217/cell_13
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNetCV
from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
elnet = ElasticNetCV(cv=10)
elnet.fit(x, y)
coefs = list(zip(x.columns, elnet.coef_))
coefs
nonzero_coefs = [i[0] for i in coefs if i[1] != 0]
nonzero_coefs
x = df[nonzero_coefs]
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()

# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid)
fig = QQ.qqplot(alpha=0.5, markersize=5, line='s')
plt.title('QQ plot')

model_norm_resid = model.get_influence().resid_studentized_internal
model_norm_resid_abs_sqrt = np.sqrt(np.abs(model_norm_resid))
model_leverage = model.get_influence().hat_matrix_diag
plt.xlim(xmin=0, xmax=0.037)
model.summary()
code
2041217/cell_9
[ "image_output_1.png" ]
from sklearn.linear_model import ElasticNetCV
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
elnet = ElasticNetCV(cv=10)
elnet.fit(x, y)
coefs = list(zip(x.columns, elnet.coef_))
coefs
nonzero_coefs = [i[0] for i in coefs if i[1] != 0]
nonzero_coefs
x = df[nonzero_coefs]
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
sns.residplot(model.fittedvalues, df['Income'], lowess=True, line_kws={'color': 'r', 'lw': 1})
plt.title('Residual plot')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
code
2041217/cell_4
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
print('Number of data points: ', len(y))
print('Mean of our predicted value: ', y.mean())
code
2041217/cell_6
[ "text_html_output_1.png" ]
from sklearn.linear_model import ElasticNetCV
import pandas as pd

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
elnet = ElasticNetCV(cv=10)
elnet.fit(x, y)
print('Intercept: ', elnet.intercept_)
coefs = list(zip(x.columns, elnet.coef_))
coefs
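# --- Hedged aside (added, not from the original cell) ---
# ElasticNetCV chooses its regularization by cross-validation; the selected
# values explain why some coefficients above are shrunk exactly to zero.
print('Selected alpha: ', elnet.alpha_)
print('Selected l1_ratio: ', elnet.l1_ratio_)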
code
2041217/cell_11
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNetCV
from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
elnet = ElasticNetCV(cv=10)
elnet.fit(x, y)
coefs = list(zip(x.columns, elnet.coef_))
coefs
nonzero_coefs = [i[0] for i in coefs if i[1] != 0]
nonzero_coefs
x = df[nonzero_coefs]
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()

# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid)
fig = QQ.qqplot(alpha=0.5, markersize=5, line='s')
plt.title('QQ plot');

model_norm_resid = model.get_influence().resid_studentized_internal
model_norm_resid_abs_sqrt = np.sqrt(np.abs(model_norm_resid))
sns.regplot(model.fittedvalues, model_norm_resid_abs_sqrt, lowess=True, line_kws={'color': 'r', 'lw': 1})
plt.xlabel('Fitted values')
plt.ylabel('Sqrt abs standardized residuals')
plt.title('Scale-location')
code
2041217/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.graphics.gofplots import ProbPlot
from sklearn.linear_model import ElasticNetCV
code
2041217/cell_7
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNetCV
import pandas as pd

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
elnet = ElasticNetCV(cv=10)
elnet.fit(x, y)
coefs = list(zip(x.columns, elnet.coef_))
coefs
nonzero_coefs = [i[0] for i in coefs if i[1] != 0]
nonzero_coefs
code
2041217/cell_15
[ "image_output_1.png" ]
from sklearn.linear_model import ElasticNetCV
from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
elnet = ElasticNetCV(cv=10)
elnet.fit(x, y)
coefs = list(zip(x.columns, elnet.coef_))
coefs
nonzero_coefs = [i[0] for i in coefs if i[1] != 0]
nonzero_coefs
x = df[nonzero_coefs]
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()

# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid)
fig = QQ.qqplot(alpha=0.5, markersize=5, line='s')
plt.title('QQ plot');

model_norm_resid = model.get_influence().resid_studentized_internal
model_norm_resid_abs_sqrt = np.sqrt(np.abs(model_norm_resid))
model_leverage = model.get_influence().hat_matrix_diag
plt.xlim(xmin=0, xmax=0.037)
model.summary()
model2 = sm.GLM(y, x, family=sm.families.Gaussian()).fit()
fig = plt.figure(figsize=(12, 8))
fig = sm.graphics.plot_partregress_grid(model, fig=fig)
code
2041217/cell_3
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
df.head()
code
2041217/cell_14
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNetCV
import pandas as pd
import statsmodels.api as sm

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
elnet = ElasticNetCV(cv=10)
elnet.fit(x, y)
coefs = list(zip(x.columns, elnet.coef_))
coefs
nonzero_coefs = [i[0] for i in coefs if i[1] != 0]
nonzero_coefs
x = df[nonzero_coefs]
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
model2 = sm.GLM(y, x, family=sm.families.Gaussian()).fit()
print('Null deviance: {:.1f}'.format(model2.null_deviance))
print('Residual deviance: {:.1f}'.format(model2.deviance))
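# --- Hedged aside (added, not from the original cell) ---
# For a Gaussian GLM, the two deviances combine into a fraction of deviance
# explained, which plays the role of R-squared here.
print('Fraction of deviance explained: {:.3f}'.format(1 - model2.deviance / model2.null_deviance))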
code
2041217/cell_10
[ "text_html_output_1.png" ]
from sklearn.linear_model import ElasticNetCV
from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
elnet = ElasticNetCV(cv=10)
elnet.fit(x, y)
coefs = list(zip(x.columns, elnet.coef_))
coefs
nonzero_coefs = [i[0] for i in coefs if i[1] != 0]
nonzero_coefs
x = df[nonzero_coefs]
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
QQ = ProbPlot(model.resid)
fig = QQ.qqplot(alpha=0.5, markersize=5, line='s')
plt.title('QQ plot')
code
2041217/cell_12
[ "text_plain_output_1.png" ]
from sklearn.linear_model import ElasticNetCV
from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
elnet = ElasticNetCV(cv=10)
elnet.fit(x, y)
coefs = list(zip(x.columns, elnet.coef_))
coefs
nonzero_coefs = [i[0] for i in coefs if i[1] != 0]
nonzero_coefs
x = df[nonzero_coefs]
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()

# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid)
fig = QQ.qqplot(alpha=0.5, markersize=5, line='s')
plt.title('QQ plot');

model_norm_resid = model.get_influence().resid_studentized_internal
model_norm_resid_abs_sqrt = np.sqrt(np.abs(model_norm_resid))
model_leverage = model.get_influence().hat_matrix_diag
sns.regplot(model_leverage, model.resid_pearson, fit_reg=False)
plt.xlim(xmin=0, xmax=0.037)
plt.xlabel('Leverage')
plt.ylabel('Pearson residuals')
plt.title('Residuals vs leverage')
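# --- Hedged sketch (added, not from the original cell) ---
# The four diagnostics this notebook draws one by one (residuals vs fitted,
# QQ, scale-location, residuals vs leverage) can also be laid out in a single
# 2x2 grid for side-by-side reading; this reuses the quantities computed above
# and the same older positional seaborn API the notebook uses.
fig, axes = plt.subplots(2, 2, figsize=(12, 10))
sns.residplot(model.fittedvalues, y, lowess=True, ax=axes[0, 0], line_kws={'color': 'r', 'lw': 1})
axes[0, 0].set_title('Residuals vs fitted')
ProbPlot(model.resid).qqplot(alpha=0.5, markersize=5, line='s', ax=axes[0, 1])
axes[0, 1].set_title('QQ plot')
sns.regplot(model.fittedvalues, model_norm_resid_abs_sqrt, lowess=True, ax=axes[1, 0], line_kws={'color': 'r', 'lw': 1})
axes[1, 0].set_title('Scale-location')
sns.regplot(model_leverage, model.resid_pearson, fit_reg=False, ax=axes[1, 1])
axes[1, 1].set_title('Residuals vs leverage')
plt.tight_layout()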
code
2041217/cell_5
[ "image_output_1.png" ]
from sklearn.linear_model import ElasticNetCV
import pandas as pd

df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', usecols=['Gender', 'Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'HasDebt', 'HoursLearning', 'MonthsProgramming', 'Income'])
df.rename(columns={'Gender': 'IsWoman'}, inplace=True)
df['IsWoman'] = df['IsWoman'] == 'female'
df.dropna(inplace=True)
x = df[['Age', 'CommuteTime', 'HasChildren', 'AttendedBootcamp', 'IsWoman', 'HasDebt', 'HoursLearning', 'MonthsProgramming']]
y = df['Income']
elnet = ElasticNetCV(cv=10)
elnet.fit(x, y)
code
34136171/cell_42
[ "text_html_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
neg_age_idx = cleaned_data[cleaned_data['Age'] < 0].index.tolist()
cleaned_data.drop(neg_age_idx, inplace=True)
cleaned_data.reset_index(drop=True, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
code
34136171/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
code
34136171/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
code
34136171/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
neg_age_idx = cleaned_data[cleaned_data['Age'] < 0].index.tolist()
cleaned_data.drop(neg_age_idx, inplace=True)
cleaned_data.reset_index(drop=True, inplace=True)
cleaned_data[cleaned_data['Age'] < 0]
code
34136171/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
cleaned_data.head(2)
code
34136171/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34136171/cell_54
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
neg_age_idx = cleaned_data[cleaned_data['Age'] < 0].index.tolist()
cleaned_data.drop(neg_age_idx, inplace=True)
cleaned_data.reset_index(drop=True, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
cleaned_data.dtypes
appointment_df = cleaned_data[['AppointmentID', 'PatientId', 'ScheduledDay', 'AppointmentDay', 'SMS_received', 'No-show']]
patient_df = cleaned_data[['PatientId', 'Gender', 'Age', 'Neighbourhood', 'Scholarship', 'Hypertension', 'Diabetes', 'Alcoholism', 'Handcap']]
patient_df.duplicated().sum()
patient_df.drop_duplicates(inplace=True)
patient_df.reset_index(drop=True, inplace=True)
patient_df.duplicated().sum()
code
34136171/cell_60
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
neg_age_idx = cleaned_data[cleaned_data['Age'] < 0].index.tolist()
cleaned_data.drop(neg_age_idx, inplace=True)
cleaned_data.reset_index(drop=True, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
cleaned_data.dtypes
appointment_df = cleaned_data[['AppointmentID', 'PatientId', 'ScheduledDay', 'AppointmentDay', 'SMS_received', 'No-show']]
patient_df = cleaned_data[['PatientId', 'Gender', 'Age', 'Neighbourhood', 'Scholarship', 'Hypertension', 'Diabetes', 'Alcoholism', 'Handcap']]
patient_df.duplicated().sum()
patient_df.drop_duplicates(inplace=True)
patient_df.reset_index(drop=True, inplace=True)
patient_df.duplicated().sum()
appointment_df.to_csv('Appointment_Data.csv', index=False)
patient_df.to_csv('Patient_Data.csv', index=False)
patient_df['Age'].hist(bins=100)
plt.xlabel('Patient Age (years)')
plt.ylabel('Count')
plt.title("Histogram of Patients' Age")
plt.show()
code
34136171/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df['Neighbourhood'].unique()
code
34136171/cell_50
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
neg_age_idx = cleaned_data[cleaned_data['Age'] < 0].index.tolist()
cleaned_data.drop(neg_age_idx, inplace=True)
cleaned_data.reset_index(drop=True, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
cleaned_data.dtypes
appointment_df = cleaned_data[['AppointmentID', 'PatientId', 'ScheduledDay', 'AppointmentDay', 'SMS_received', 'No-show']]
patient_df = cleaned_data[['PatientId', 'Gender', 'Age', 'Neighbourhood', 'Scholarship', 'Hypertension', 'Diabetes', 'Alcoholism', 'Handcap']]
patient_df.duplicated().sum()
code
34136171/cell_52
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
neg_age_idx = cleaned_data[cleaned_data['Age'] < 0].index.tolist()
cleaned_data.drop(neg_age_idx, inplace=True)
cleaned_data.reset_index(drop=True, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
cleaned_data.dtypes
appointment_df = cleaned_data[['AppointmentID', 'PatientId', 'ScheduledDay', 'AppointmentDay', 'SMS_received', 'No-show']]
patient_df = cleaned_data[['PatientId', 'Gender', 'Age', 'Neighbourhood', 'Scholarship', 'Hypertension', 'Diabetes', 'Alcoholism', 'Handcap']]
patient_df.duplicated().sum()
patient_df.drop_duplicates(inplace=True)
patient_df.reset_index(drop=True, inplace=True)
code
34136171/cell_7
[ "image_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.head(2)
code
34136171/cell_62
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

base_color = sns.color_palette()[0]
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
neg_age_idx = cleaned_data[cleaned_data['Age'] < 0].index.tolist()
cleaned_data.drop(neg_age_idx, inplace=True)
cleaned_data.reset_index(drop=True, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
cleaned_data.dtypes
appointment_df = cleaned_data[['AppointmentID', 'PatientId', 'ScheduledDay', 'AppointmentDay', 'SMS_received', 'No-show']]
patient_df = cleaned_data[['PatientId', 'Gender', 'Age', 'Neighbourhood', 'Scholarship', 'Hypertension', 'Diabetes', 'Alcoholism', 'Handcap']]
patient_df.duplicated().sum()
patient_df.drop_duplicates(inplace=True)
patient_df.reset_index(drop=True, inplace=True)
patient_df.duplicated().sum()
appointment_df.to_csv('Appointment_Data.csv', index=False)
patient_df.to_csv('Patient_Data.csv', index=False)
sns.catplot(x='Gender', data=patient_df, kind='count', color=base_color)
plt.xlabel("Patient's Gender")
plt.ylabel('Count')
plt.title("Patient's Gender Distribution")
plt.show()
code
34136171/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df['Age'].describe()
code
34136171/cell_38
[ "text_html_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
neg_age_idx = cleaned_data[cleaned_data['Age'] < 0].index.tolist()
cleaned_data.drop(neg_age_idx, inplace=True)
cleaned_data.reset_index(drop=True, inplace=True)
cleaned_data.dtypes
code
34136171/cell_46
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
neg_age_idx = cleaned_data[cleaned_data['Age'] < 0].index.tolist()
cleaned_data.drop(neg_age_idx, inplace=True)
cleaned_data.reset_index(drop=True, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
cleaned_data.dtypes
code
34136171/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
code
72080001/cell_9
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
df_train
df_train.describe()
code
72080001/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
print(submission.shape)
submission
code
72080001/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72080001/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
print(df_train.shape)
df_train
code
72080001/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
df_train
df_train.info()
code
72080001/cell_15
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from lightgbm import LGBMRegressor
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
df_train
X_train = df_train.copy().drop('target', axis=1)
y_train = df_train['target'].copy()
df_cols = [col for col in X_train.columns if 'cat' in col]
lgbm_model = LGBMRegressor()
lgbm_model.fit(X_train, y_train)
y_pred = lgbm_model.predict(df_test)
y_pred
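# --- Hedged aside (added, not from the original cell) ---
# df_cols collects the cat* columns but is never used, and those columns hold
# string categories, which LGBMRegressor.fit would normally reject on a raw
# frame. A minimal fix under that assumption is to encode them before fitting;
# the names X_enc, test_enc, lgbm_enc below are illustrative.
from sklearn.preprocessing import OrdinalEncoder
enc = OrdinalEncoder()
X_enc = X_train.copy()
test_enc = df_test.copy()
X_enc[df_cols] = enc.fit_transform(X_enc[df_cols])    # learn the category mapping on train
test_enc[df_cols] = enc.transform(test_enc[df_cols])  # apply the same mapping to test
lgbm_enc = LGBMRegressor().fit(X_enc, y_train)        # refit on the encoded features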
code
72080001/cell_16
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from lightgbm import LGBMRegressor
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
submission
df_train
X_train = df_train.copy().drop('target', axis=1)
y_train = df_train['target'].copy()
df_cols = [col for col in X_train.columns if 'cat' in col]
lgbm_model = LGBMRegressor()
lgbm_model.fit(X_train, y_train)
y_pred = lgbm_model.predict(df_test)
y_pred
submission['target'] = y_pred
submission.to_csv('submission01.csv', index=False)
submission
code
72080001/cell_3
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from lightgbm import LGBMRegressor
code
32063423/cell_21
[ "text_plain_output_1.png" ]
from xgboost import XGBRegressor

model2 = XGBRegressor(n_estimators=1000)
model2.fit(X_train, y_train[:, 1])
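# --- Hedged sketch (added, not from the original cell) ---
# y_train here appears to be a 2-column array of this competition's two
# targets (ConfirmedCases and Fatalities), with column 1 used for this second
# model; under that assumption, the pattern generalizes to one regressor per
# target column. target_models is an illustrative name.
target_models = []
for target_idx in range(y_train.shape[1]):
    m = XGBRegressor(n_estimators=1000)
    m.fit(X_train, y_train[:, target_idx])  # one independent model per target column
    target_models.append(m)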
code
32063423/cell_13
[ "text_plain_output_1.png" ]
from fastai.tabular import add_datepart
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_train['Date'] = pd.to_datetime(df_train['Date'], format='%Y-%m-%d')
df_test['Date'] = pd.to_datetime(df_test['Date'], format='%Y-%m-%d')

def categoricalToInteger(df):
    df.Province_State.fillna('NaN', inplace=True)
    oe = OrdinalEncoder()
    df[['Province_State', 'Country_Region']] = oe.fit_transform(df.loc[:, ['Province_State', 'Country_Region']])
    return df

add_datepart(df_train, 'Date', drop=False)
df_train.drop('Elapsed', axis=1, inplace=True)
df_train = categoricalToInteger(df_train)

def lag_feature(df, lags, col):
    tmp = df[['Dayofyear', 'Country_Region', 'Province_State', col]]
    for i in lags:
        shifted = tmp.copy()
        shifted.columns = ['Dayofyear', 'Country_Region', 'Province_State', col + '_lag_' + str(i)]
        shifted['Dayofyear'] += i
        df = pd.merge(df, shifted, on=['Dayofyear', 'Country_Region', 'Province_State'], how='left')
    return df

df_train = lag_feature(df_train, [1, 2, 3, 6, 11], 'ConfirmedCases')
df_train = lag_feature(df_train, [1, 2, 3, 6, 11], 'Fatalities')
df_train.columns
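# --- Illustrative demo (added, not from the original cell) ---
# lag_feature shifts Dayofyear forward by each lag i before merging back, so a
# row ends up seeing the value recorded i days earlier for the same region.
# The toy frame below (made-up numbers) makes the mechanics visible.
toy = pd.DataFrame({'Dayofyear': [1, 2, 3], 'Country_Region': [0, 0, 0],
                    'Province_State': [0, 0, 0], 'ConfirmedCases': [5, 8, 13]})
print(lag_feature(toy, [1], 'ConfirmedCases'))
# the Dayofyear=2 row gets ConfirmedCases_lag_1 = 5, i.e. the previous day's value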
code
32063423/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_date_min = df_train['Date'].min()
train_date_max = df_train['Date'].max()
print('Minimum date from training set: {}'.format(train_date_min))
print('Maximum date from training set: {}'.format(train_date_max))
code