Dataset schema — path: string (length 13 to 17) · screenshot_names: sequence (length 1 to 873) · code: string (length 0 to 40.4k) · cell_type: string (1 class: "code")
17109150/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
from torchvision import datasets, models, transforms

model = models.resnet152(pretrained=True)
# Freeze the pretrained backbone so its weights are not updated
for param in model.parameters():
    param.requires_grad = False
print(model)
code
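Freezing every parameter, as in the cell above, is the usual prelude to transfer learning. A minimal sketch of the follow-up step the notebook presumably takes elsewhere, replacing the classifier head; num_classes is a placeholder, since the actual class count is not shown in this cell:

import torch.nn as nn
from torchvision import models

model = models.resnet152(pretrained=True)
for param in model.parameters():
    param.requires_grad = False  # keep the pretrained backbone fixed

# Swap the final fully connected layer for a fresh, trainable head.
num_classes = 5005  # placeholder; not taken from the notebook
model.fc = nn.Linear(model.fc.in_features, num_classes)

# Only the new head's parameters remain trainable.
trainable = [p for p in model.parameters() if p.requires_grad]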
17109150/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

labels = pd.read_csv('../input/train.csv')
labels.head()
print(type(labels))
code
17109150/cell_14
[ "text_plain_output_1.png" ]
from PIL import Image
from io import BytesIO
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

train_on_gpu = torch.cuda.is_available()

labels = pd.read_csv('../input/train.csv')
data_dir = '../input'
train_dir = data_dir + '/train'
test_dir = data_dir + '/test'

pil2tensor = transforms.ToTensor()
tensor2pil = transforms.ToPILImage()
pil_image = Image.open(train_dir + '/0a750c2e8.jpg')
rgb_image = pil2tensor(pil_image)

def plot_image(tensor):
    pass

import IPython.display

r_image = rgb_image[0]
g_image = rgb_image[1]
b_image = rgb_image[2]

def show_grayscale_image(tensor):
    f = BytesIO()
    a = np.uint8(tensor.mul(255).numpy())
    Image.fromarray(a).save(f, 'png')

# ImageNet normalisation statistics
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

train_data = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomResizedCrop(224),
    transforms.RandomRotation(25),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0),
    transforms.RandomAffine(degrees=4, translate=None, scale=None, shear=None, resample=False, fillcolor=0),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)])
test_data = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)])

def encode_labels(y):
    # Integer-encode the whale ids, then one-hot encode them
    values = np.array(y)
    label_encoder = LabelEncoder()
    integer_encoded = label_encoder.fit_transform(values)
    onehot_encoder = OneHotEncoder(sparse=False)
    integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
    onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
    y = onehot_encoded
    return (y, label_encoder)

y, label_encoder = encode_labels(labels['Id'])

# WhaleTailDataset is a custom Dataset defined in an earlier cell of this notebook
image_datasets = dict()
image_datasets['train'] = WhaleTailDataset(image_folder=train_dir, data_type='train', df=labels, transform=train_data, y=y)
image_datasets['test'] = WhaleTailDataset(image_folder=test_dir, data_type='test', transform=test_data)

train_size = 512
test_size = 32
num_workers = 0
dataloaders = dict()
dataloaders['train'] = torch.utils.data.DataLoader(image_datasets['train'], batch_size=train_size, num_workers=num_workers)
dataloaders['test'] = torch.utils.data.DataLoader(image_datasets['test'], batch_size=test_size, num_workers=num_workers)

dataiter = iter(dataloaders['train'])
images, labels = next(dataiter)
print('Batch shape: ', images.size())
code
33104188/cell_13
[ "text_html_output_1.png" ]
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, StratifiedKFold
import concurrent.futures
import lightgbm as lgb
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/electrical-consumption/train_6BJx641.csv')
test = pd.read_csv('/kaggle/input/electrical-consumption/test_pavJagI.csv')
train['datetime'] = pd.to_datetime(train['datetime'])
test['datetime'] = pd.to_datetime(test['datetime'])

def booleancon(x):
    # Map True/False flags to 1/0
    return 1 if x == True else 0

def var2(x):
    # Ordinal-encode the categorical var2 column
    if x == 'A':
        return 1
    elif x == 'B':
        return 2
    else:
        return 3

def time_pr(train):
    # add_datepart is fastai's date-feature expander, imported in an earlier cell
    train = add_datepart(train, 'datetime', drop=True, time=True)
    train['datetimeIs_month_end'] = train['datetimeIs_month_end'].apply(booleancon)
    train['datetimeIs_month_start'] = train['datetimeIs_month_start'].apply(booleancon)
    train['datetimeIs_quarter_start'] = train['datetimeIs_quarter_start'].apply(booleancon)
    train['datetimeIs_quarter_end'] = train['datetimeIs_quarter_end'].apply(booleancon)
    train['datetimeIs_year_start'] = train['datetimeIs_year_start'].apply(booleancon)
    train['datetimeIs_year_end'] = train['datetimeIs_year_end'].apply(booleancon)
    train['var2'] = train['var2'].apply(var2)
    return train

pd.set_option('display.max_columns', 1000)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_colwidth', 199)

train = time_pr(train)
test = time_pr(test)

col = [i for i in train.columns if i not in ('electricity_consumption', 'ID', 'datetime')]
X = train[col].values
Y = train['electricity_consumption'].values

from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix

# The target is numeric; plain KFold is the usual splitter for regression.
# StratifiedKFold is kept here as in the original notebook.
kf = StratifiedKFold(n_splits=10, random_state=None, shuffle=True)
index = []
for train_index, test_index in kf.split(X, Y):
    index.append([train_index, test_index])

LGB_PARAMS = {'application': 'root_mean_squared_error', 'num_boost_round': 800,
              'learning_rate': 0.01, 'boosting_type': 'gbdt', 'metric': 'rmse',
              'sub_feature': 0.833, 'num_leaves': 207, 'min_split_gain': 0.05,
              'min_child_weight': 27, 'max_depth': -1, 'num_threads': 10,
              'max_bin': 50, 'lambda_l2': 0.1, 'lambda_l1': 0.3,
              'feature_fraction': 0.833, 'bagging_fraction': 0.979}

def model(train_index, test_index, XX, YY):
    X_train, X_test = (XX[train_index], XX[test_index])
    Y_train, Y_test = (YY[train_index], YY[test_index])
    d_train = lgb.Dataset(X_train, label=Y_train)
    clf = lgb.train(LGB_PARAMS, d_train, 1000)
    pred = clf.predict(X_test)
    # Root mean squared error on the held-out fold
    return sqrt(mean_squared_error(pred, Y_test))

# Calling future.result() right after each submit() makes the folds run one
# after another; see the sketch after this cell for a concurrent variant.
with concurrent.futures.ThreadPoolExecutor() as executor:
    rmse_per_fold = []
    for train_index, test_index in index:
        future = executor.submit(model, train_index, test_index, X, Y)
        rmse_per_fold.append(future.result())

# Retrain on the full training set with the same parameters
d_train = lgb.Dataset(X, label=Y)
clf = lgb.train(LGB_PARAMS, d_train, 1000)
code
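Because the cell above collects each future's result immediately after submitting it, the ten fold evaluations still run sequentially. A sketch of how the same work could actually overlap, submitting everything first and gathering afterwards (model, index, X and Y as defined in the cell above):

import concurrent.futures

# Submit all folds first, then gather; the executor can now run them concurrently.
with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = [executor.submit(model, tr, te, X, Y) for tr, te in index]
    rmse_per_fold = [f.result() for f in futures]

print(sum(rmse_per_fold) / len(rmse_per_fold))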
33104188/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/electrical-consumption/train_6BJx641.csv')
test = pd.read_csv('/kaggle/input/electrical-consumption/test_pavJagI.csv')
train.head()
code
33104188/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33104188/cell_18
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/electrical-consumption/train_6BJx641.csv')
test = pd.read_csv('/kaggle/input/electrical-consumption/test_pavJagI.csv')
train['datetime'] = pd.to_datetime(train['datetime'])
test['datetime'] = pd.to_datetime(test['datetime'])

def booleancon(x):
    # Map True/False flags to 1/0
    return 1 if x == True else 0

def var2(x):
    # Ordinal-encode the categorical var2 column
    if x == 'A':
        return 1
    elif x == 'B':
        return 2
    else:
        return 3

def time_pr(train):
    # add_datepart is fastai's date-feature expander, imported in an earlier cell
    train = add_datepart(train, 'datetime', drop=True, time=True)
    train['datetimeIs_month_end'] = train['datetimeIs_month_end'].apply(booleancon)
    train['datetimeIs_month_start'] = train['datetimeIs_month_start'].apply(booleancon)
    train['datetimeIs_quarter_start'] = train['datetimeIs_quarter_start'].apply(booleancon)
    train['datetimeIs_quarter_end'] = train['datetimeIs_quarter_end'].apply(booleancon)
    train['datetimeIs_year_start'] = train['datetimeIs_year_start'].apply(booleancon)
    train['datetimeIs_year_end'] = train['datetimeIs_year_end'].apply(booleancon)
    train['var2'] = train['var2'].apply(var2)
    return train

pd.set_option('display.max_columns', 1000)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_colwidth', 199)

train = time_pr(train)
test = time_pr(test)

col = [i for i in train.columns if i not in ('electricity_consumption', 'ID', 'datetime')]
col
code
33104188/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/electrical-consumption/train_6BJx641.csv')
test = pd.read_csv('/kaggle/input/electrical-consumption/test_pavJagI.csv')
train['datetime'] = pd.to_datetime(train['datetime'])
test['datetime'] = pd.to_datetime(test['datetime'])

def booleancon(x):
    # Map True/False flags to 1/0
    return 1 if x == True else 0

def var2(x):
    # Ordinal-encode the categorical var2 column
    if x == 'A':
        return 1
    elif x == 'B':
        return 2
    else:
        return 3

def time_pr(train):
    # add_datepart is fastai's date-feature expander, imported in an earlier cell
    train = add_datepart(train, 'datetime', drop=True, time=True)
    train['datetimeIs_month_end'] = train['datetimeIs_month_end'].apply(booleancon)
    train['datetimeIs_month_start'] = train['datetimeIs_month_start'].apply(booleancon)
    train['datetimeIs_quarter_start'] = train['datetimeIs_quarter_start'].apply(booleancon)
    train['datetimeIs_quarter_end'] = train['datetimeIs_quarter_end'].apply(booleancon)
    train['datetimeIs_year_start'] = train['datetimeIs_year_start'].apply(booleancon)
    train['datetimeIs_year_end'] = train['datetimeIs_year_end'].apply(booleancon)
    train['var2'] = train['var2'].apply(var2)
    return train

pd.set_option('display.max_columns', 1000)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_colwidth', 199)

train = time_pr(train)
test = time_pr(test)
train
code
33104188/cell_12
[ "text_html_output_1.png" ]
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, StratifiedKFold
import concurrent.futures
import lightgbm as lgb
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/electrical-consumption/train_6BJx641.csv')
test = pd.read_csv('/kaggle/input/electrical-consumption/test_pavJagI.csv')
train['datetime'] = pd.to_datetime(train['datetime'])
test['datetime'] = pd.to_datetime(test['datetime'])

def booleancon(x):
    # Map True/False flags to 1/0
    return 1 if x == True else 0

def var2(x):
    # Ordinal-encode the categorical var2 column
    if x == 'A':
        return 1
    elif x == 'B':
        return 2
    else:
        return 3

def time_pr(train):
    # add_datepart is fastai's date-feature expander, imported in an earlier cell
    train = add_datepart(train, 'datetime', drop=True, time=True)
    train['datetimeIs_month_end'] = train['datetimeIs_month_end'].apply(booleancon)
    train['datetimeIs_month_start'] = train['datetimeIs_month_start'].apply(booleancon)
    train['datetimeIs_quarter_start'] = train['datetimeIs_quarter_start'].apply(booleancon)
    train['datetimeIs_quarter_end'] = train['datetimeIs_quarter_end'].apply(booleancon)
    train['datetimeIs_year_start'] = train['datetimeIs_year_start'].apply(booleancon)
    train['datetimeIs_year_end'] = train['datetimeIs_year_end'].apply(booleancon)
    train['var2'] = train['var2'].apply(var2)
    return train

pd.set_option('display.max_columns', 1000)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_colwidth', 199)

train = time_pr(train)
test = time_pr(test)

col = [i for i in train.columns if i not in ('electricity_consumption', 'ID', 'datetime')]
X = train[col].values
Y = train['electricity_consumption'].values

from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix

# The target is numeric; plain KFold is the usual splitter for regression.
# StratifiedKFold is kept here as in the original notebook.
kf = StratifiedKFold(n_splits=10, random_state=None, shuffle=True)
index = []
for train_index, test_index in kf.split(X, Y):
    index.append([train_index, test_index])

LGB_PARAMS = {'application': 'root_mean_squared_error', 'num_boost_round': 800,
              'learning_rate': 0.01, 'boosting_type': 'gbdt', 'metric': 'rmse',
              'sub_feature': 0.833, 'num_leaves': 207, 'min_split_gain': 0.05,
              'min_child_weight': 27, 'max_depth': -1, 'num_threads': 10,
              'max_bin': 50, 'lambda_l2': 0.1, 'lambda_l1': 0.3,
              'feature_fraction': 0.833, 'bagging_fraction': 0.979}

def model(train_index, test_index, XX, YY):
    X_train, X_test = (XX[train_index], XX[test_index])
    Y_train, Y_test = (YY[train_index], YY[test_index])
    d_train = lgb.Dataset(X_train, label=Y_train)
    clf = lgb.train(LGB_PARAMS, d_train, 1000)
    pred = clf.predict(X_test)
    # Root mean squared error on the held-out fold
    return sqrt(mean_squared_error(pred, Y_test))

# Each future's result is collected immediately, so the folds run sequentially
with concurrent.futures.ThreadPoolExecutor() as executor:
    rmse_per_fold = []
    for train_index, test_index in index:
        future = executor.submit(model, train_index, test_index, X, Y)
        rmse_per_fold.append(future.result())

# Mean cross-validated RMSE
print(sum(rmse_per_fold) / len(rmse_per_fold))
code
74067618/cell_9
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

netflix = pd.read_csv('../input/tcs-share/TCS.NS (1).csv')
netflix = netflix[['Date', 'Close']]
netflix.index = pd.DatetimeIndex(netflix['Date'])
netflix.drop(['Date'], axis=1, inplace=True)
netflix = netflix.asfreq('d')
netflix.index
netflix = netflix.fillna(method='ffill')
netflix.head()
code
74067618/cell_4
[ "text_html_output_1.png" ]
!pip install pycaret-ts-alpha
code
74067618/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

netflix = pd.read_csv('../input/tcs-share/TCS.NS (1).csv')
netflix = netflix[['Date', 'Close']]
netflix.index = pd.DatetimeIndex(netflix['Date'])
netflix.drop(['Date'], axis=1, inplace=True)
netflix.head()
code
74067618/cell_2
[ "text_html_output_1.png" ]
import pandas as pd

netflix = pd.read_csv('../input/tcs-share/TCS.NS (1).csv')
netflix = netflix[['Date', 'Close']]
netflix.head()
code
74067618/cell_11
[ "text_plain_output_1.png" ]
from sktime.utils.plotting import plot_series
import matplotlib.pyplot as plt
import pandas as pd

netflix = pd.read_csv('../input/tcs-share/TCS.NS (1).csv')
netflix = netflix[['Date', 'Close']]
netflix.index = pd.DatetimeIndex(netflix['Date'])
netflix.drop(['Date'], axis=1, inplace=True)
netflix = netflix.asfreq('d')
netflix.index
netflix = netflix.fillna(method='ffill')
data = netflix.Close
data

_ = plot_series(data)
plt.xticks(rotation=90)
plt.grid()
code
74067618/cell_1
[ "text_html_output_1.png" ]
import pandas as pd

netflix = pd.read_csv('../input/tcs-share/TCS.NS (1).csv')
netflix.head()
code
74067618/cell_7
[ "image_output_1.png" ]
import pandas as pd

netflix = pd.read_csv('../input/tcs-share/TCS.NS (1).csv')
netflix = netflix[['Date', 'Close']]
netflix.index = pd.DatetimeIndex(netflix['Date'])
netflix.drop(['Date'], axis=1, inplace=True)
netflix = netflix.asfreq('d')
netflix.index
code
74067618/cell_8
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

netflix = pd.read_csv('../input/tcs-share/TCS.NS (1).csv')
netflix = netflix[['Date', 'Close']]
netflix.index = pd.DatetimeIndex(netflix['Date'])
netflix.drop(['Date'], axis=1, inplace=True)
netflix = netflix.asfreq('d')
netflix.index
netflix.head()
code
74067618/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

netflix = pd.read_csv('../input/tcs-share/TCS.NS (1).csv')
netflix = netflix[['Date', 'Close']]
netflix.info()
code
74067618/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

netflix = pd.read_csv('../input/tcs-share/TCS.NS (1).csv')
netflix = netflix[['Date', 'Close']]
netflix.index = pd.DatetimeIndex(netflix['Date'])
netflix.drop(['Date'], axis=1, inplace=True)
netflix = netflix.asfreq('d')
netflix.index
netflix = netflix.fillna(method='ffill')
data = netflix.Close
data
code
74067618/cell_12
[ "text_plain_output_1.png" ]
from pycaret.internal.pycaret_experiment import TimeSeriesExperiment
import pandas as pd

netflix = pd.read_csv('../input/tcs-share/TCS.NS (1).csv')
netflix = netflix[['Date', 'Close']]
netflix.index = pd.DatetimeIndex(netflix['Date'])
netflix.drop(['Date'], axis=1, inplace=True)
netflix = netflix.asfreq('d')
netflix.index
netflix = netflix.fillna(method='ffill')
data = netflix.Close
data

exp = TimeSeriesExperiment()
exp.setup(data=data, session_id=42, fh=365)
code
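For comparison with the pycaret setup above, a minimal sktime baseline on the same daily series is sketched below; the 365-step horizon mirrors the fh=365 used in the experiment, and the notebook itself does not include this step:

import numpy as np
from sktime.forecasting.naive import NaiveForecaster

# `data` is the forward-filled daily close series built above
forecaster = NaiveForecaster(strategy='last')  # repeat the last observed value
forecaster.fit(data)
y_pred = forecaster.predict(fh=np.arange(1, 366))  # one year ahead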
74067618/cell_5
[ "text_html_output_1.png" ]
from pycaret.internal.pycaret_experiment import TimeSeriesExperiment
from sktime.utils.plotting import plot_series
code
18140030/cell_13
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.image as Image  # matplotlib.image aliased as Image; Image.imread returns a numpy array
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms

data_dir = '../input'
train_dir = data_dir + '/train/train'
test_dir = data_dir + '/test/test'
labels = pd.read_csv(data_dir + '/train.csv')

train, valid = train_test_split(labels, stratify=labels.has_cactus, test_size=0.2)

num_epochs = 25
num_classes = 2
batch_size = 128
learning_rate = 0.0001
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device

class cactData(Dataset):

    def __init__(self, split_data, data_root='./', transform=None):
        super().__init__()
        self.df = split_data.values
        self.data_root = data_root
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        img_name, label = self.df[index]
        img_path = os.path.join(self.data_root, img_name)
        image = Image.imread(img_path)
        if self.transform is not None:
            image = self.transform(image)
        return (image, label)

mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
train_transf = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
valid_transf = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])

train_data = cactData(train, train_dir, train_transf)
valid_data = cactData(valid, train_dir, valid_transf)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True, num_workers=0)
valid_loader = DataLoader(dataset=valid_data, batch_size=batch_size // 2, shuffle=False, num_workers=0)

class CactCNN(nn.Module):

    def __init__(self):
        super(CactCNN, self).__init__()
        # Four stride-2, padding-0 convolutions take the 32x32 input down to 1x1
        self.conv1 = nn.Sequential(nn.Conv2d(3, 32, 4, 2, 0), nn.BatchNorm2d(32), nn.ReLU())
        self.conv2 = nn.Sequential(nn.Conv2d(32, 64, 3, 2, 0), nn.BatchNorm2d(64), nn.ReLU())
        self.conv3 = nn.Sequential(nn.Conv2d(64, 128, 3, 2, 0), nn.BatchNorm2d(128), nn.ReLU())
        self.conv4 = nn.Sequential(nn.Conv2d(128, 256, 3, 2, 0), nn.BatchNorm2d(256), nn.ReLU())
        self.fc = nn.Sequential(nn.Linear(256 * 1 * 1, 1024), nn.ReLU(), nn.Dropout(p=0.2), nn.Linear(1024, 2))

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = x.view(x.shape[0], -1)
        x = self.fc(x)
        return x

model = CactCNN().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        out = model(images)
        loss = criterion(out, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('Epoch: {}/{}, Loss: {}'.format(epoch + 1, num_epochs, loss.item()))
code
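The 256 * 1 * 1 input size of the fully connected layer follows from the stride-2, padding-0 convolutions applied to the 32x32 cactus tiles. A quick check of the arithmetic, using the standard output-size formula:

def conv_out(n, k, s, p=0):
    # Standard conv output size: floor((n + 2p - k) / s) + 1
    return (n + 2 * p - k) // s + 1

n = 32                  # input height/width of the cactus images
for k in (4, 3, 3, 3):  # kernel sizes of conv1..conv4 (stride 2, padding 0)
    n = conv_out(n, k, 2)
    print(n)            # 15, 7, 3, 1 -> matches nn.Linear(256 * 1 * 1, 1024)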
18140030/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_dir = '../input'
train_dir = data_dir + '/train/train'
test_dir = data_dir + '/test/test'
labels = pd.read_csv(data_dir + '/train.csv')
balance = labels['has_cactus'].value_counts()
balance
code
18140030/cell_6
[ "text_plain_output_1.png" ]
import torch

num_epochs = 25
num_classes = 2
batch_size = 128
learning_rate = 0.0001
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device
code
18140030/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as Image

print(os.listdir('../input'))

from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
code
18140030/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_dir = '../input'
train_dir = data_dir + '/train/train'
test_dir = data_dir + '/test/test'
labels = pd.read_csv(data_dir + '/train.csv')
labels.head()
code
18140030/cell_14
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.image as Image
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms

data_dir = '../input'
train_dir = data_dir + '/train/train'
test_dir = data_dir + '/test/test'
labels = pd.read_csv(data_dir + '/train.csv')

train, valid = train_test_split(labels, stratify=labels.has_cactus, test_size=0.2)

num_epochs = 25
num_classes = 2
batch_size = 128
learning_rate = 0.0001
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device

class cactData(Dataset):

    def __init__(self, split_data, data_root='./', transform=None):
        super().__init__()
        self.df = split_data.values
        self.data_root = data_root
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        img_name, label = self.df[index]
        img_path = os.path.join(self.data_root, img_name)
        image = Image.imread(img_path)
        if self.transform is not None:
            image = self.transform(image)
        return (image, label)

mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
train_transf = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
valid_transf = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])

train_data = cactData(train, train_dir, train_transf)
valid_data = cactData(valid, train_dir, valid_transf)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True, num_workers=0)
valid_loader = DataLoader(dataset=valid_data, batch_size=batch_size // 2, shuffle=False, num_workers=0)

class CactCNN(nn.Module):

    def __init__(self):
        super(CactCNN, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(3, 32, 4, 2, 0), nn.BatchNorm2d(32), nn.ReLU())
        self.conv2 = nn.Sequential(nn.Conv2d(32, 64, 3, 2, 0), nn.BatchNorm2d(64), nn.ReLU())
        self.conv3 = nn.Sequential(nn.Conv2d(64, 128, 3, 2, 0), nn.BatchNorm2d(128), nn.ReLU())
        self.conv4 = nn.Sequential(nn.Conv2d(128, 256, 3, 2, 0), nn.BatchNorm2d(256), nn.ReLU())
        self.fc = nn.Sequential(nn.Linear(256 * 1 * 1, 1024), nn.ReLU(), nn.Dropout(p=0.2), nn.Linear(1024, 2))

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = x.view(x.shape[0], -1)
        x = self.fc(x)
        return x

model = CactCNN().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        out = model(images)
        loss = criterion(out, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

# Evaluate on the held-out validation split
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in valid_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Test Accuracy: {} %'.format(100 * correct / total))
code
18140030/cell_12
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as Image
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms

num_epochs = 25
num_classes = 2
batch_size = 128
learning_rate = 0.0001
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device

class cactData(Dataset):

    def __init__(self, split_data, data_root='./', transform=None):
        super().__init__()
        self.df = split_data.values
        self.data_root = data_root
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        img_name, label = self.df[index]
        img_path = os.path.join(self.data_root, img_name)
        image = Image.imread(img_path)
        if self.transform is not None:
            image = self.transform(image)
        return (image, label)

class CactCNN(nn.Module):

    def __init__(self):
        super(CactCNN, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(3, 32, 4, 2, 0), nn.BatchNorm2d(32), nn.ReLU())
        self.conv2 = nn.Sequential(nn.Conv2d(32, 64, 3, 2, 0), nn.BatchNorm2d(64), nn.ReLU())
        self.conv3 = nn.Sequential(nn.Conv2d(64, 128, 3, 2, 0), nn.BatchNorm2d(128), nn.ReLU())
        self.conv4 = nn.Sequential(nn.Conv2d(128, 256, 3, 2, 0), nn.BatchNorm2d(256), nn.ReLU())
        self.fc = nn.Sequential(nn.Linear(256 * 1 * 1, 1024), nn.ReLU(), nn.Dropout(p=0.2), nn.Linear(1024, 2))

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = x.view(x.shape[0], -1)
        x = self.fc(x)
        return x

model = CactCNN().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
print(device)
code
122256475/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox
Bbox.rename(columns={'Finding Label': 'Diagnosis'}, inplace=True)

path = '/kaggle/input/data/'
file = path + 'Data_Entry_2017.csv'
Data_entry = pd.read_csv(file)
Data_entry = Data_entry.drop(columns=['Unnamed: 11'])
Data_entry
Data_entry.rename(columns={'Finding Labels': 'Specific Diagnosis'}, inplace=True)

chart = sns.countplot(x='Diagnosis', data=Bbox)
labels = chart.get_xticklabels()
chart.set_xticklabels(labels, rotation=45, horizontalalignment='right')

sns.pairplot(Data_entry)
code
122256475/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox

path = '/kaggle/input/data/'
file = path + 'Data_Entry_2017.csv'
Data_entry = pd.read_csv(file)
Data_entry = Data_entry.drop(columns=['Unnamed: 11'])
Data_entry
code
122256475/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox
Bbox.rename(columns={'Finding Label': 'Diagnosis'}, inplace=True)

chart = sns.countplot(x='Diagnosis', data=Bbox)
labels = chart.get_xticklabels()
chart.set_xticklabels(labels, rotation=45, horizontalalignment='right')
code
122256475/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
print(file)
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox
code
122256475/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox
Bbox.rename(columns={'Finding Label': 'Diagnosis'}, inplace=True)

path = '/kaggle/input/data/'
file = path + 'Data_Entry_2017.csv'
Data_entry = pd.read_csv(file)
Data_entry = Data_entry.drop(columns=['Unnamed: 11'])
Data_entry
Data_entry.rename(columns={'Finding Labels': 'Specific Diagnosis'}, inplace=True)

chart = sns.countplot(x='Diagnosis', data=Bbox)
labels = chart.get_xticklabels()
chart.set_xticklabels(labels, rotation=45, horizontalalignment='right')

sns.violinplot(x='Patient Gender', y='Patient Age', data=Data_entry)
code
122256475/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox
Bbox.rename(columns={'Finding Label': 'Diagnosis'}, inplace=True)

path = '/kaggle/input/data/'
file = path + 'Data_Entry_2017.csv'
Data_entry = pd.read_csv(file)
Data_entry = Data_entry.drop(columns=['Unnamed: 11'])
Data_entry
Data_entry.rename(columns={'Finding Labels': 'Specific Diagnosis'}, inplace=True)

chart = sns.countplot(x='Diagnosis', data=Bbox)
labels = chart.get_xticklabels()
chart.set_xticklabels(labels, rotation=45, horizontalalignment='right')

sns.violinplot(x='Patient Gender', y='Patient Age', data=Data_entry)
code
122256475/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox

path = '/kaggle/input/data/'
file = path + 'Data_Entry_2017.csv'
Data_entry = pd.read_csv(file)
Data_entry = Data_entry.drop(columns=['Unnamed: 11'])
Data_entry
Data_entry.rename(columns={'Finding Labels': 'Specific Diagnosis'}, inplace=True)

Data_entry[Data_entry['Patient Age'] > 120]
code
122256475/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox
Bbox.rename(columns={'Finding Label': 'Diagnosis'}, inplace=True)
Bbox.head(5)
code
122256475/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox

path = '/kaggle/input/data/'
file = path + 'Data_Entry_2017.csv'
Data_entry = pd.read_csv(file)
Data_entry = Data_entry.drop(columns=['Unnamed: 11'])
Data_entry
Data_entry.rename(columns={'Finding Labels': 'Specific Diagnosis'}, inplace=True)

Data_entry[Data_entry['Patient Age'] > 120]
code
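The query above surfaces patients with recorded ages above 120, which are almost certainly data-entry errors. One way such rows could be dropped before the age plots (a sketch, not a step the notebook takes; the 120 cutoff is an assumption):

# Keep only physiologically plausible ages
Data_entry = Data_entry[Data_entry['Patient Age'] <= 120]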
122256475/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox
Bbox.rename(columns={'Finding Label': 'Diagnosis'}, inplace=True)

path = '/kaggle/input/data/'
file = path + 'Data_Entry_2017.csv'
Data_entry = pd.read_csv(file)
Data_entry = Data_entry.drop(columns=['Unnamed: 11'])
Data_entry
Data_entry.rename(columns={'Finding Labels': 'Specific Diagnosis'}, inplace=True)

chart = sns.countplot(x='Diagnosis', data=Bbox)
labels = chart.get_xticklabels()
chart.set_xticklabels(labels, rotation=45, horizontalalignment='right')

sns.pairplot(Bbox)
code
122256475/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/data/'
file = path + 'BBox_List_2017.csv'
Bbox = pd.read_csv(file)
Bbox = Bbox.drop(columns=['Unnamed: 6', 'Unnamed: 7', 'Unnamed: 8'])
Bbox

path = '/kaggle/input/data/'
file = path + 'Data_Entry_2017.csv'
Data_entry = pd.read_csv(file)
Data_entry = Data_entry.drop(columns=['Unnamed: 11'])
Data_entry
Data_entry.rename(columns={'Finding Labels': 'Specific Diagnosis'}, inplace=True)
Data_entry.head(5)
code
89142178/cell_4
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns

articles = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
train_data = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')

print(customers['FN'].unique())
print(customers['Active'].unique())
print(customers['club_member_status'].unique())
print(customers['fashion_news_frequency'].unique())
print(customers['age'].unique())
code
89142178/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns

articles = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
train_data = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')

print('Articles:')
print(articles.shape)
print(articles.keys())
print(articles.head())
print('')
print('Customers:')
print(customers.head())
print(customers.shape)
print(customers.keys())
print('')
print('Train_data:')
print(train_data.shape)
print(train_data.keys())
print(train_data.head())
print('')
code
89142178/cell_3
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns

articles = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
train_data = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')

print('Articles:')
print(articles.isnull().sum())
print('')
print('Customers:')
print(customers.isnull().sum())
print('')
print('Train_data:')
print(train_data.isnull().sum())
code
89142178/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns

articles = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
train_data = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')

# Work on a copy so the raw frame stays untouched
customers_clean = customers.copy()
customers_clean['FN'].fillna(0, inplace=True)
customers_clean['Active'].fillna(0, inplace=True)
# Map the 'NONE' frequency label to 'None' in that column only
customers_clean.loc[customers_clean['fashion_news_frequency'] == 'NONE', 'fashion_news_frequency'] = 'None'

customers_age_compare = customers_clean.dropna(subset=['age'])
customers_age_compare = customers_age_compare[customers_age_compare['age'] != 'None']

print(customers_age_compare['age'].unique())
print('')
print(customers_clean['FN'].unique())
print(customers_clean['Active'].unique())
print(customers_clean['club_member_status'].unique())
print(customers_clean['fashion_news_frequency'].unique())
print(customers_clean['age'].unique())
print('')
print(customers_clean.head())
code
128042254/cell_21
[ "text_html_output_1.png" ]
from sklearn.ensemble import GradientBoostingRegressor, VotingRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df.isnull().sum()
df.isnull().sum()

def model(model, x_train, y_train, x_test, y_test):
    # Fit the given estimator and tabulate predictions against the test targets
    mod = model
    mod_fit = mod.fit(x_train, y_train)
    y_pred = mod_fit.predict(x_test)
    df_model = pd.DataFrame({'Test': y_test, 'Predict': y_pred})

# X_train/X_test/y_train/y_test come from a train_test_split in an earlier cell
model(GradientBoostingRegressor(), X_train, y_train, X_test, y_test)
code
128042254/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df.isnull().sum()
df.isnull().sum()
code
128042254/cell_20
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LassoCV, RidgeCV, LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df.isnull().sum()
df.isnull().sum()

def model(model, x_train, y_train, x_test, y_test):
    # Fit the given estimator and tabulate predictions against the test targets
    mod = model
    mod_fit = mod.fit(x_train, y_train)
    y_pred = mod_fit.predict(x_test)
    df_model = pd.DataFrame({'Test': y_test, 'Predict': y_pred})

# X_train/X_test/y_train/y_test come from a train_test_split in an earlier cell
model(LinearRegression(), X_train, y_train, X_test, y_test)
code
128042254/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df.info()
code
128042254/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df.isnull().sum()
df.isnull().sum()

plt.figure(figsize=(16, 16))
sns.heatmap(df.corr(), annot=True, fmt='.1f')
code
128042254/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, RepeatedKFold
from sklearn.ensemble import GradientBoostingRegressor, VotingRegressor
from sklearn.linear_model import LassoCV, RidgeCV, LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

sns.set(rc={'figure.figsize': [10, 10]}, font_scale=1.2)

import warnings
warnings.filterwarnings('ignore')
code
128042254/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df.isnull().sum()
code
128042254/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df
code
128042254/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df.isnull().sum()
df.isnull().sum()
df.columns
code
128042254/cell_22
[ "image_output_1.png" ]
from sklearn.linear_model import LassoCV, RidgeCV, LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split, RepeatedKFold
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df.isnull().sum()
df.isnull().sum()

def model(model, x_train, y_train, x_test, y_test):
    # Fit the given estimator and tabulate predictions against the test targets
    mod = model
    mod_fit = mod.fit(x_train, y_train)
    y_pred = mod_fit.predict(x_test)
    df_model = pd.DataFrame({'Test': y_test, 'Predict': y_pred})

# X_train/X_test/y_train/y_test come from a train_test_split in an earlier cell
model(LassoCV(alphas=np.arange(0, 1, 0.01),
              cv=RepeatedKFold(n_splits=10, n_repeats=3, random_state=1),
              n_jobs=-1),
      X_train, y_train, X_test, y_test)
code
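RidgeCV is imported in cell_1 of this notebook but never used; its call pattern mirrors the LassoCV line above. A sketch with the same assumed X_train/y_train split:

from sklearn.linear_model import RidgeCV
import numpy as np

# alphas must be positive for ridge, so 0 is excluded
ridge = RidgeCV(alphas=np.arange(0.01, 1, 0.01))
ridge.fit(X_train, y_train)
print(ridge.alpha_)  # regularisation strength selected by the built-in CV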
128042254/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df.isnull().sum()
df.isnull().sum()
df.describe()
code
128042254/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
df.isnull().sum()
df.isnull().sum()
sns.barplot(df)
code
128042254/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
for col in df.columns:
    print(col + '\n------')
    print(df[col].value_counts())
    print('---------------------')
code
73069551/cell_19
[ "text_plain_output_1.png" ]
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import plotly.express as px

class CustomLinearRegression:

    def __init__(self, learning_rate=0.1, n_iters=100, regularization=None, regularization_alpha=0.1):
        self.lr = learning_rate
        self.n_iters = n_iters
        self.weights = None
        self.bias = None
        self.regularization = regularization
        self.regularization_alpha = regularization_alpha

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        self.bias = 0
        for _ in range(self.n_iters):
            y_predicted = self.predict(X)
            # Gradients of the mean-squared-error loss
            dw = (X.T * (y_predicted - y)).T.mean(axis=0)
            db = (y_predicted - y).mean(axis=0)
            if self.regularization == 'L1':
                if self.weights.any():
                    # sign(w) term of the L1 penalty (skipped while all weights are zero)
                    dw += self.regularization_alpha * (self.weights / np.absolute(self.weights))
            elif self.regularization == 'L2':
                dw += self.regularization_alpha * (2 * self.weights)
            self.weights -= self.lr * dw
            self.bias -= self.lr * db

    def predict(self, X):
        return X @ self.weights + self.bias

X, y = datasets.load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Collect rows in a list and build the frame once (DataFrame.append was removed in pandas 2.0)
rows = []
for n_iters in range(1, 51):
    for reg, alpha, label in [(None, 0.1, 'No'),
                              ('L1', 1, 'L1, alpha: 1'),
                              ('L2', 1, 'L2, alpha: 1'),
                              ('L2', 11, 'L2, alpha: 11')]:
        model = CustomLinearRegression(n_iters=n_iters, regularization=reg, regularization_alpha=alpha)
        model.fit(X_train, y_train)
        rows.append({'Number of iterations': n_iters,
                     'MSE': mean_squared_error(y_test, model.predict(X_test)),
                     'Regularization': label,
                     'Average Weights': model.weights.mean()})
df = pd.DataFrame(rows)

average_weight = round(model.weights[1:].mean(), 2)
fig = px.line(df, x='Number of iterations', y='MSE', color='Regularization',
              title='Linear Regression models with regularization VS without regularization')

X, y = datasets.load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

model = CustomLinearRegression(n_iters=10000)
model.fit(X_train, y_train)
MSE = mean_squared_error(y_test, model.predict(X_test))
print('MSE for our model: {}'.format(MSE))

model = LinearRegression()
model.fit(X_train, y_train)
MSE = mean_squared_error(y_test, model.predict(X_test))
print('MSE for Sklearn model: {}'.format(MSE))
code
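The gradient step coded in CustomLinearRegression.fit above corresponds to the following derivatives of the penalised mean-squared-error loss, with $\alpha$ the regularization_alpha used in the cell (a restatement of the cell's logic, not an addition to it):

\[
\frac{\partial L}{\partial w} = \frac{1}{n} X^\top (\hat{y} - y) + \alpha\,\operatorname{sign}(w) \quad \text{(L1)}, \qquad
\frac{\partial L}{\partial w} = \frac{1}{n} X^\top (\hat{y} - y) + 2\alpha w \quad \text{(L2)},
\]

with the bias updated by the unpenalised mean residual $\frac{1}{n}\sum_{i=1}^{n} (\hat{y}_i - y_i)$.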
73069551/cell_15
[ "text_html_output_2.png" ]
from sklearn.metrics import mean_squared_error
import pandas as pd
import plotly.express as px

# CustomLinearRegression and the X_train/X_test/y_train/y_test split are defined in earlier cells.
# Rows are collected in a list and the frame is built once (DataFrame.append was removed in pandas 2.0).
rows = []
for n_iters in range(1, 51):
    for reg, alpha, label in [(None, 0.1, 'No'),
                              ('L1', 1, 'L1, alpha: 1'),
                              ('L2', 1, 'L2, alpha: 1'),
                              ('L2', 11, 'L2, alpha: 11')]:
        model = CustomLinearRegression(n_iters=n_iters, regularization=reg, regularization_alpha=alpha)
        model.fit(X_train, y_train)
        rows.append({'Number of iterations': n_iters,
                     'MSE': mean_squared_error(y_test, model.predict(X_test)),
                     'Regularization': label,
                     'Average Weights': model.weights.mean()})
df = pd.DataFrame(rows)

average_weight = round(model.weights[1:].mean(), 2)
fig = px.line(df, x='Number of iterations', y='MSE', color='Regularization',
              title='Linear Regression models with regularization VS without regularization')
fig.show()
code
73069551/cell_17
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error
import pandas as pd
import plotly.express as px

# CustomLinearRegression and the X_train/X_test/y_train/y_test split are defined in earlier cells.
# Rows are collected in a list and the frame is built once (DataFrame.append was removed in pandas 2.0).
rows = []
for n_iters in range(1, 51):
    for reg, alpha, label in [(None, 0.1, 'No'),
                              ('L1', 1, 'L1, alpha: 1'),
                              ('L2', 1, 'L2, alpha: 1'),
                              ('L2', 11, 'L2, alpha: 11')]:
        model = CustomLinearRegression(n_iters=n_iters, regularization=reg, regularization_alpha=alpha)
        model.fit(X_train, y_train)
        rows.append({'Number of iterations': n_iters,
                     'MSE': mean_squared_error(y_test, model.predict(X_test)),
                     'Regularization': label,
                     'Average Weights': model.weights.mean()})
df = pd.DataFrame(rows)

average_weight = round(model.weights[1:].mean(), 2)
fig = px.line(df, x='Number of iterations', y='MSE', color='Regularization',
              title='Linear Regression models with regularization VS without regularization')
df.tail(4)
code
72117495/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from functools import partial
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data_x = train.loc[:, [col for col in train.columns if col not in ['Survived']]]
data_y = train.loc[:, ['Survived']]
test_x = test.loc[:, [col for col in train.columns if col not in ['Survived']]]
all_cols = list(data_x.columns)

# Exploratory Data Analysis
# Column-wise univariate profiling
# train_x / train_y come from a train-validation split in an earlier cell
univ_numeric = train_x.describe().T
numeric_cols = univ_numeric.index.to_list()
cat_cols = [col for col in all_cols if col not in numeric_cols]
univ_numeric['missing_perc'] = 1 - univ_numeric['count'] / train_x.shape[0]
univ_numeric['cov'] = univ_numeric['std'] / univ_numeric['mean']  # coefficient of variation
univ_numeric['skew'] = train_x.loc[:, numeric_cols].apply(func=lambda x: x.skew(), axis=0)
univ_numeric['kurtosis'] = train_x.loc[:, numeric_cols].apply(func=lambda x: x.kurtosis(), axis=0)

def return_outlier(ser, threshold=3, return_mask=False):
    # Flag values more than `threshold` standard deviations from the mean
    not_missing = ~pd.isna(ser)
    z = (ser - ser.mean()) / ser.std()
    out_mask = z.apply(lambda x: not -threshold < x < +threshold)
    if not return_mask:
        out_mask = out_mask * not_missing
        return out_mask.sum() / not_missing.sum()
    else:
        return out_mask

univ_numeric['outlier_perc'] = train_x.loc[:, numeric_cols].apply(func=return_outlier, axis=0)
out_mask = train_x.loc[:, numeric_cols].apply(func=partial(return_outlier, return_mask=True), axis=0)

univ_categorical = train_x.loc[:, cat_cols].describe().T
univ_categorical['missing_perc'] = 1 - univ_categorical['count'] / train_x.shape[0]
univ_categorical['mode_freq_perc'] = univ_categorical['freq'] / univ_categorical['count']

def profile_importance_numeric(ser):
    # p-value of a univariate F-test of the numeric feature against the target
    non_missing_mask = ~pd.isna(ser)
    freg = f_regression(X=ser.loc[non_missing_mask].values.reshape(-1, 1),
                        y=train_y.loc[non_missing_mask].values.ravel(), center=True)
    return freg[1][0]

def profile_importance_categorical(ser):
    # p-value of an ANOVA F-test after one-hot encoding the categorical feature
    non_missing_mask = ~pd.isna(ser)
    x = OneHotEncoder().fit_transform(ser.loc[non_missing_mask].values.reshape(-1, 1))
    y = train_y.loc[non_missing_mask].values.ravel()
    freg = f_classif(X=x, y=y)
    return freg[1][0]

bivariate_importance_numeric = {col: profile_importance_numeric(train_x[col]) for col in numeric_cols}
bivariate_importance_categorical = {col: profile_importance_categorical(train_x[col]) for col in cat_cols}

correlation = train_x.corr()

def numeric_categorical(cat, num):
    # F-test p-value between a one-hot encoded categorical column and a numeric column
    non_missing_cat = ~pd.isna(cat)
    non_missing_num = ~pd.isna(num)
    non_missing_mask = non_missing_cat * non_missing_num
    x = OneHotEncoder(sparse=False).fit_transform(cat.loc[non_missing_mask].values.reshape(-1, 1))
    y = num.loc[non_missing_mask].values.ravel()
    freg = f_classif(X=x, y=y)
    return freg[1][0]

bivariate_importance_categorical_xxs = pd.DataFrame(
    {cat_col: [numeric_categorical(cat=train_x[cat_col], num=train_x[numeric_col]) for numeric_col in numeric_cols]
     for cat_col in cat_cols},
    index=numeric_cols)
bivariate_importance_categorical_xxs
code
72117495/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from functools import partial
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data_x = train.loc[:, [col for col in train.columns if col not in ['Survived']]]
data_y = train.loc[:, ['Survived']]
test_x = test.loc[:, [col for col in train.columns if col not in ['Survived']]]
all_cols = list(data_x.columns)

# Exploratory Data Analysis
# Column-wise univariate profiling
# train_x / train_y come from a train-validation split in an earlier cell
univ_numeric = train_x.describe().T
numeric_cols = univ_numeric.index.to_list()
cat_cols = [col for col in all_cols if col not in numeric_cols]
univ_numeric['missing_perc'] = 1 - univ_numeric['count'] / train_x.shape[0]
univ_numeric['cov'] = univ_numeric['std'] / univ_numeric['mean']
univ_numeric['skew'] = train_x.loc[:, numeric_cols].apply(func=lambda x: x.skew(), axis=0)
univ_numeric['kurtosis'] = train_x.loc[:, numeric_cols].apply(func=lambda x: x.kurtosis(), axis=0)

def return_outlier(ser, threshold=3, return_mask=False):
    # Flag values more than `threshold` standard deviations from the mean
    not_missing = ~pd.isna(ser)
    z = (ser - ser.mean()) / ser.std()
    out_mask = z.apply(lambda x: not -threshold < x < +threshold)
    if not return_mask:
        out_mask = out_mask * not_missing
        return out_mask.sum() / not_missing.sum()
    else:
        return out_mask

univ_numeric['outlier_perc'] = train_x.loc[:, numeric_cols].apply(func=return_outlier, axis=0)
out_mask = train_x.loc[:, numeric_cols].apply(func=partial(return_outlier, return_mask=True), axis=0)

univ_categorical = train_x.loc[:, cat_cols].describe().T
univ_categorical['missing_perc'] = 1 - univ_categorical['count'] / train_x.shape[0]
univ_categorical['mode_freq_perc'] = univ_categorical['freq'] / univ_categorical['count']

def profile_importance_numeric(ser):
    # p-value of a univariate F-test of the numeric feature against the target
    non_missing_mask = ~pd.isna(ser)
    freg = f_regression(X=ser.loc[non_missing_mask].values.reshape(-1, 1),
                        y=train_y.loc[non_missing_mask].values.ravel(), center=True)
    return freg[1][0]

def profile_importance_categorical(ser):
    # p-value of an ANOVA F-test after one-hot encoding the categorical feature
    non_missing_mask = ~pd.isna(ser)
    x = OneHotEncoder().fit_transform(ser.loc[non_missing_mask].values.reshape(-1, 1))
    y = train_y.loc[non_missing_mask].values.ravel()
    freg = f_classif(X=x, y=y)
    return freg[1][0]

bivariate_importance_numeric = {col: profile_importance_numeric(train_x[col]) for col in numeric_cols}
bivariate_importance_categorical = {col: profile_importance_categorical(train_x[col]) for col in cat_cols}
bivariate_importance_categorical
code
72117495/cell_17
[ "text_plain_output_1.png" ]
from functools import partial
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data_x = train.loc[:, [col for col in train.columns if col not in ['Survived']]]
data_y = train.loc[:, ['Survived']]
test_x = test.loc[:, [col for col in train.columns if col not in ['Survived']]]
# The code below references train_x/train_y; alias them to the full training
# frame (the original split, if any, was elided from this cell).
train_x, train_y = data_x, data_y
all_cols = list(data_x.columns)
# Exploratory Data Analysis
# Col-wise
univ_numeric = train_x.describe().T
numeric_cols = univ_numeric.index.to_list()
cat_cols = [col for col in all_cols if col not in numeric_cols]
univ_numeric['missing_perc'] = 1 - univ_numeric['count'] / train_x.shape[0]
univ_numeric['cov'] = univ_numeric['std'] / univ_numeric['mean']  # coefficient of variation
univ_numeric['skew'] = train_x.loc[:, numeric_cols].apply(func=lambda x: x.skew(), axis=0)
univ_numeric['kurtosis'] = train_x.loc[:, numeric_cols].apply(func=lambda x: x.kurtosis(), axis=0)

def return_outlier(ser, threshold=3, return_mask=False):
    # Flag values whose z-score falls outside +/- threshold; missing values
    # are excluded from both the mask and the denominator.
    not_missing = ~pd.isna(ser)
    z = (ser - ser.mean()) / ser.std()
    out_mask = z.apply(lambda x: not -threshold < x < +threshold) & not_missing
    if not return_mask:
        return out_mask.sum() / not_missing.sum()
    else:
        return out_mask

univ_numeric['outlier_perc'] = train_x.loc[:, numeric_cols].apply(func=return_outlier, axis=0)
out_mask = train_x.loc[:, numeric_cols].apply(func=partial(return_outlier, return_mask=True), axis=0)
univ_categorical = train_x.loc[:, cat_cols].describe().T
univ_categorical['missing_perc'] = 1 - univ_categorical['count'] / train_x.shape[0]
univ_categorical['mode_freq_perc'] = univ_categorical['freq'] / univ_categorical['count']

def profile_importance_numeric(ser):
    non_missing_mask = ~pd.isna(ser)
    freg = f_regression(X=ser.loc[non_missing_mask].values.reshape(-1, 1), y=train_y.loc[non_missing_mask].values.ravel(), center=True)
    return freg[1][0]

def profile_importance_categorical(ser):
    non_missing_mask = ~pd.isna(ser)
    x = OneHotEncoder().fit_transform(ser.loc[non_missing_mask].values.reshape(-1, 1))
    y = train_y.loc[non_missing_mask].values.ravel()
    freg = f_classif(X=x, y=y)
    # Report the most significant level rather than only the first one-hot column.
    return freg[1].min()

bivariate_importance_numeric = {col: profile_importance_numeric(train_x[col]) for col in numeric_cols}
bivariate_importance_categorical = {col: profile_importance_categorical(train_x[col]) for col in cat_cols}
correlation = train_x.corr()

def numeric_categorical(cat, num):
    # ANOVA F-test of the numeric column across the categorical levels
    # (f_classif expects class labels as y, so cat goes in y and num in X).
    non_missing_mask = ~pd.isna(cat) & ~pd.isna(num)
    x = num.loc[non_missing_mask].values.reshape(-1, 1)
    y = cat.loc[non_missing_mask].values.ravel()
    freg = f_classif(X=x, y=y)
    return freg[1][0]

bivariate_importance_categorical_xxs = pd.DataFrame({cat_col: [numeric_categorical(cat=train_x[cat_col], num=train_x[numeric_col]) for numeric_col in numeric_cols] for cat_col in cat_cols}, index=numeric_cols)
code
72117495/cell_12
[ "text_plain_output_1.png" ]
from functools import partial
import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data_x = train.loc[:, [col for col in train.columns if col not in ['Survived']]]
data_y = train.loc[:, ['Survived']]
test_x = test.loc[:, [col for col in train.columns if col not in ['Survived']]]
# The code below references train_x/train_y; alias them to the full training
# frame (the original split, if any, was elided from this cell).
train_x, train_y = data_x, data_y
all_cols = list(data_x.columns)
# Exploratory Data Analysis
# Col-wise
univ_numeric = train_x.describe().T
numeric_cols = univ_numeric.index.to_list()
cat_cols = [col for col in all_cols if col not in numeric_cols]
univ_numeric['missing_perc'] = 1 - univ_numeric['count'] / train_x.shape[0]
univ_numeric['cov'] = univ_numeric['std'] / univ_numeric['mean']  # coefficient of variation
univ_numeric['skew'] = train_x.loc[:, numeric_cols].apply(func=lambda x: x.skew(), axis=0)
univ_numeric['kurtosis'] = train_x.loc[:, numeric_cols].apply(func=lambda x: x.kurtosis(), axis=0)

def return_outlier(ser, threshold=3, return_mask=False):
    # Flag values whose z-score falls outside +/- threshold; missing values
    # are excluded from both the mask and the denominator.
    not_missing = ~pd.isna(ser)
    z = (ser - ser.mean()) / ser.std()
    out_mask = z.apply(lambda x: not -threshold < x < +threshold) & not_missing
    if not return_mask:
        return out_mask.sum() / not_missing.sum()
    else:
        return out_mask

univ_numeric['outlier_perc'] = train_x.loc[:, numeric_cols].apply(func=return_outlier, axis=0)
out_mask = train_x.loc[:, numeric_cols].apply(func=partial(return_outlier, return_mask=True), axis=0)
code
72117495/cell_5
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/titanic/train.csv') test = pd.read_csv('../input/titanic/test.csv') data_x = train.loc[:, [col for col in train.columns if col not in ['Survived']]] data_y = train.loc[:, ['Survived']] test_x = test.loc[:, [col for col in train.columns if col not in ['Survived']]] data_x.info()
code
129007856/cell_9
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Input, Embedding, Dot, Flatten, Dense, Dropout
from tensorflow.keras.models import Model
import numpy as np
import pandas as pd
import tensorflow as tf
data = pd.read_csv('/kaggle/input/flicktime/rating.csv')
n_users = data.userId.nunique()
n_movies = data.movieId.nunique()
user_ids = data.userId.astype('category').cat.codes.values
movie_ids = data.movieId.astype('category').cat.codes.values
ratings = data.rating.values
user_item_matrix = np.zeros((n_users, n_movies))
for i in range(len(ratings)):
    user_item_matrix[user_ids[i], movie_ids[i]] = ratings[i]
embedding_size = 64
user_input = Input(shape=(1,))
user_embed = Embedding(n_users, embedding_size)(user_input)
user_embed = Flatten()(user_embed)
movie_input = Input(shape=(1,))
movie_embed = Embedding(n_movies, embedding_size)(movie_input)
movie_embed = Flatten()(movie_embed)
dot_product = Dot(axes=1)([user_embed, movie_embed])
dense1 = Dense(64, activation='relu')(dot_product)
dropout1 = Dropout(0.2)(dense1)
dense2 = Dense(32, activation='relu')(dropout1)
dropout2 = Dropout(0.2)(dense2)
output = Dense(1)(dropout2)
model = Model(inputs=[user_input, movie_input], outputs=output)
model.compile(loss='mse', optimizer='adam')
user_ids_test = np.random.choice(n_users, 10)
movie_ids_test = np.random.choice(n_movies, 10)
predictions = model.predict([user_ids_test, movie_ids_test]).flatten()
print(predictions)
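# Hedged follow-up (illustrative, not part of the original cell): map the
# sampled integer codes back to the original userId/movieId values so the raw
# predictions are readable. The category orderings below mirror the
# .astype('category').cat.codes encoding used above.
user_categories = data.userId.astype('category').cat.categories
movie_categories = data.movieId.astype('category').cat.categories
for u, m, p in zip(user_ids_test, movie_ids_test, predictions):
    print(f'user {user_categories[u]} x movie {movie_categories[m]} -> predicted rating {p:.2f}')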
code
129007856/cell_4
[ "text_plain_output_1.png" ]
history = model.fit([user_ids, movie_ids], ratings, batch_size=128, epochs=5, validation_split=0.2)
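# Hedged sketch (not in the original run): an EarlyStopping callback would halt
# training once val_loss stops improving; the patience value and restore flag
# are illustrative choices. Passing callbacks=[early_stop] to the model.fit
# call above is all that is needed to enable it.
from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)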
code
129007856/cell_6
[ "text_plain_output_1.png" ]
import gc
gc.collect()
code
129007856/cell_11
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Input, Embedding, Dot, Flatten, Dense, Dropout
from tensorflow.keras.models import Model
import numpy as np
import pandas as pd
import tensorflow as tf
import pickle
data = pd.read_csv('/kaggle/input/flicktime/rating.csv')
n_users = data.userId.nunique()
n_movies = data.movieId.nunique()
user_ids = data.userId.astype('category').cat.codes.values
movie_ids = data.movieId.astype('category').cat.codes.values
ratings = data.rating.values
user_item_matrix = np.zeros((n_users, n_movies))
for i in range(len(ratings)):
    user_item_matrix[user_ids[i], movie_ids[i]] = ratings[i]
embedding_size = 64
user_input = Input(shape=(1,))
user_embed = Embedding(n_users, embedding_size)(user_input)
user_embed = Flatten()(user_embed)
movie_input = Input(shape=(1,))
movie_embed = Embedding(n_movies, embedding_size)(movie_input)
movie_embed = Flatten()(movie_embed)
dot_product = Dot(axes=1)([user_embed, movie_embed])
dense1 = Dense(64, activation='relu')(dot_product)
dropout1 = Dropout(0.2)(dense1)
dense2 = Dense(32, activation='relu')(dropout1)
dropout2 = Dropout(0.2)(dense2)
output = Dense(1)(dropout2)
model = Model(inputs=[user_input, movie_input], outputs=output)
model.compile(loss='mse', optimizer='adam')
user_ids_test = np.random.choice(n_users, 10)
movie_ids_test = np.random.choice(n_movies, 10)
predictions = model.predict([user_ids_test, movie_ids_test]).flatten()
pickle.dump(model, open('model_rating.pkl', 'wb'))
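# Hedged alternative (not in the original cell): Keras models are more reliably
# persisted with the framework's native save format than with pickle, which can
# fail on model internals.
model.save('model_rating.h5')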
code
129007856/cell_7
[ "image_output_1.png" ]
import ctypes
libc = ctypes.CDLL('libc.so.6')
libc.malloc_trim(0)
code
129007856/cell_3
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Input, Embedding, Dot, Flatten, Dense, Dropout
from tensorflow.keras.models import Model
import numpy as np
import pandas as pd
import tensorflow as tf
data = pd.read_csv('/kaggle/input/flicktime/rating.csv')
n_users = data.userId.nunique()
n_movies = data.movieId.nunique()
user_ids = data.userId.astype('category').cat.codes.values
movie_ids = data.movieId.astype('category').cat.codes.values
ratings = data.rating.values
user_item_matrix = np.zeros((n_users, n_movies))
for i in range(len(ratings)):
    user_item_matrix[user_ids[i], movie_ids[i]] = ratings[i]
embedding_size = 64
user_input = Input(shape=(1,))
user_embed = Embedding(n_users, embedding_size)(user_input)
user_embed = Flatten()(user_embed)
movie_input = Input(shape=(1,))
movie_embed = Embedding(n_movies, embedding_size)(movie_input)
movie_embed = Flatten()(movie_embed)
dot_product = Dot(axes=1)([user_embed, movie_embed])
dense1 = Dense(64, activation='relu')(dot_product)
dropout1 = Dropout(0.2)(dense1)
dense2 = Dense(32, activation='relu')(dropout1)
dropout2 = Dropout(0.2)(dense2)
output = Dense(1)(dropout2)
model = Model(inputs=[user_input, movie_input], outputs=output)
model.compile(loss='mse', optimizer='adam')
code
129007856/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
plt.plot(history.history['loss'], label='train_loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.title('Model Learning Curve')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
code
49117653/cell_42
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) df['rpt_day'].nunique()
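# Hedged sketch (added for illustration): with the date components verified
# above, a report-delay feature can be derived. The renames only satisfy
# pd.to_datetime's expected year/month/day keys, and errors='coerce' guards
# against impossible component combinations.
occur_date = pd.to_datetime(df[['occur_year', 'occur_month', 'occur_day']].rename(
    columns={'occur_year': 'year', 'occur_month': 'month', 'occur_day': 'day'}), errors='coerce')
rpt_date = pd.to_datetime(df[['rpt_year', 'rpt_month', 'rpt_day']].rename(
    columns={'rpt_year': 'year', 'rpt_month': 'month', 'rpt_day': 'day'}), errors='coerce')
report_delay_days = (rpt_date - occur_date).dt.days
print(report_delay_days.describe())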
code
49117653/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv')
code
49117653/cell_55
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) r_hours = df.groupby('occur_hour').count() r_hours r_months = df.groupby('occur_month').count() r_months r_neighbors = df.groupby('neighborhood').count() r_neighbors rn = r_neighbors['offense_id'].reset_index() rn rn = rn.sort_values(by=['offense_id'], ascending=False) rn = rn.head(15) rn
code
49117653/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1
code
49117653/cell_40
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) df['rpt_year'].nunique()
code
49117653/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) df['occur_hour'].nunique()
code
49117653/cell_48
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv')
df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv')
df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv')
df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number'])
df2 = df2.drop(columns=['ibr_code'])
df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code'])
df3.columns = df1.columns
df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())]
df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())]
keys = df1.columns
df1 = df1.dropna(subset=keys).reset_index(drop=True)
df2 = df2.dropna(subset=keys).reset_index(drop=True)
df3 = df3.dropna(subset=keys).reset_index(drop=True)
df = pd.concat([df1, df2, df3]).reset_index(drop=True)
df.to_csv('df.csv', index=False)
df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'})
r_hours = df.groupby('occur_hour').count()
r_hours
rh = r_hours['offense_id'].reset_index()
plt.plot(rh['occur_hour'], rh['offense_id'], 'b-.')
plt.title('# of Crimes Occurring by Hour')
plt.xlabel('Hour of Day')
plt.ylabel('# of Crimes')
plt.xticks(rh['occur_hour'], size=11)
plt.yticks(size=11)
plt.show()
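# Hedged follow-up (illustrative): pull the peak hour out of the aggregated
# counts rather than reading it off the chart.
peak = rh.loc[rh['offense_id'].idxmax()]
print(f"Peak hour: {int(peak['occur_hour'])}:00 with {int(peak['offense_id'])} recorded offenses")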
code
49117653/cell_41
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) df['rpt_month'].nunique()
code
49117653/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import folium from folium.plugins import MarkerCluster import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
49117653/cell_50
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) r_hours = df.groupby('occur_hour').count() r_hours r_months = df.groupby('occur_month').count() r_months rm = r_months['offense_id'].reset_index() rm
code
49117653/cell_52
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) r_hours = df.groupby('occur_hour').count() r_hours r_months = df.groupby('occur_month').count() r_months r_neighbors = df.groupby('neighborhood').count() r_neighbors
code
49117653/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df2
code
49117653/cell_45
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) r_hours = df.groupby('occur_hour').count() r_hours
code
49117653/cell_49
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) r_hours = df.groupby('occur_hour').count() r_hours r_months = df.groupby('occur_month').count() r_months
code
49117653/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3['occur_time'].value_counts()
code
49117653/cell_32
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df.info()
code
49117653/cell_51
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv')
df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv')
df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv')
df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number'])
df2 = df2.drop(columns=['ibr_code'])
df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code'])
df3.columns = df1.columns
df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())]
df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())]
keys = df1.columns
df1 = df1.dropna(subset=keys).reset_index(drop=True)
df2 = df2.dropna(subset=keys).reset_index(drop=True)
df3 = df3.dropna(subset=keys).reset_index(drop=True)
df = pd.concat([df1, df2, df3]).reset_index(drop=True)
df.to_csv('df.csv', index=False)
df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'})
r_hours = df.groupby('occur_hour').count()
r_hours
rh = r_hours['offense_id'].reset_index()
r_months = df.groupby('occur_month').count()
r_months
rm = r_months['offense_id'].reset_index()
rm
plt.plot(rm['occur_month'], rm['offense_id'], 'b--')
plt.title('# of Crimes Occurring by Month')
plt.xlabel('Month')
plt.ylabel('# of Crimes')
plt.xticks(rm['occur_month'], size=11)
plt.yticks(size=11)
plt.show()
code
49117653/cell_8
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df3
code
49117653/cell_38
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) df['occur_day'].nunique()
code
49117653/cell_47
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) r_hours = df.groupby('occur_hour').count() r_hours rh = r_hours['offense_id'].reset_index() rh
code
49117653/cell_35
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) df['occur_year'].value_counts()
code
49117653/cell_43
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) df['neighborhood'].value_counts()
code
49117653/cell_53
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) r_hours = df.groupby('occur_hour').count() r_hours r_months = df.groupby('occur_month').count() r_months r_neighbors = df.groupby('neighborhood').count() r_neighbors rn = r_neighbors['offense_id'].reset_index() rn
code
49117653/cell_27
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df
code
49117653/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020-OldRMS-09292020 (Corrected 11_25_20)/COBRA-2020-OldRMS-09292020.csv') df2 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2020 (Updated 12_10_2020)/COBRA-2020.csv') df3 = pd.read_csv('../input/atlanta-crime-data2020/COBRA-2009-2019 (Updated 1_9_2020)/COBRA-2009-2019.csv') df1 = df1.drop(columns=['apartment_office_prefix', 'apartment_number', 'watch', 'location_type', 'UCR_Number']) df2 = df2.drop(columns=['ibr_code']) df3 = df3.drop(columns=['Apartment Office Prefix', 'Apartment Number', 'Shift Occurence', 'Location Type', 'UCR #', 'IBR Code']) df3.columns = df1.columns df3 = df3[df3['occur_time'].apply(lambda x: str(x).isdecimal())] df3 = df3[df3['occur_hour'].apply(lambda x: str(x).isnumeric())] keys = df1.columns df1 = df1.dropna(subset=keys).reset_index(drop=True) df2 = df2.dropna(subset=keys).reset_index(drop=True) df3 = df3.dropna(subset=keys).reset_index(drop=True) df = pd.concat([df1, df2, df3]).reset_index(drop=True) df.to_csv('df.csv', index=False) df = df.astype({'occur_year': 'int32', 'occur_month': 'int32', 'occur_day': 'int32', 'occur_hour': 'int32', 'rpt_year': 'int32', 'rpt_month': 'int32', 'rpt_day': 'int32'}) df['occur_month'].nunique()
code
73082264/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/broadband-customers-base-churn-analysis/bbs_cust_base_scfy_20200210.csv')
df = data.copy()
df
df.drop('Unnamed: 19', axis=1, inplace=True)
df.churn.replace('N', '0', inplace=True)
df.churn.replace('Y', '1', inplace=True)
df.current_mth_churn.replace('N', '0', inplace=True)
df.current_mth_churn.replace('Y', '1', inplace=True)
# Cast both churn flags to int, each from its own column.
df.churn = df.churn.astype(int)
df.current_mth_churn = df.current_mth_churn.astype(int)
df.describe().T
code
73082264/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/broadband-customers-base-churn-analysis/bbs_cust_base_scfy_20200210.csv') df = data.copy() df df.drop('Unnamed: 19', axis=1, inplace=True) df.info()
code
73082264/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/broadband-customers-base-churn-analysis/bbs_cust_base_scfy_20200210.csv')
df = data.copy()
df
df.drop('Unnamed: 19', axis=1, inplace=True)
df.churn.replace('N', '0', inplace=True)
df.churn.replace('Y', '1', inplace=True)
df.current_mth_churn.replace('N', '0', inplace=True)
df.current_mth_churn.replace('Y', '1', inplace=True)
# Cast both churn flags to int, each from its own column.
df.churn = df.churn.astype(int)
df.current_mth_churn = df.current_mth_churn.astype(int)
df.describe().T

def summary(df):
    # Per-column overview: dtype, non-null count, distinct values, null count.
    Types = df.dtypes
    Counts = df.apply(lambda x: x.count())
    Uniques = df.apply(lambda x: x.unique().shape[0])
    Nulls = df.apply(lambda x: x.isnull().sum())
    cols = ['Types', 'Counts', 'Uniques', 'Nulls']
    report = pd.concat([Types, Counts, Uniques, Nulls], axis=1, sort=True)
    report.columns = cols
    return report

summary(df)
col = df.columns.to_list()
catcol = [c for c in col if df[c].nunique() < 30]
termination_reason_code = df.term_reas_code.unique()
termination_reason_description = df.term_reas_desc.unique()
termination_reason = dict(zip(termination_reason_code, termination_reason_description))
termination_reason
df.drop(columns=['bill_cycl', 'serv_type', 'serv_code', 'term_reas_desc'], inplace=True)
miss = df.isnull().sum().sort_values(ascending=False)
miss_per = miss / len(df) * 100
pd.DataFrame({'Missing_Records': miss, 'Percentage of Missing_Data': miss_per.values})
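# Hedged sketch (the threshold is illustrative, not from the original analysis):
# flag columns whose missing share exceeds 40% as candidates for dropping or
# targeted imputation.
high_missing = miss_per[miss_per > 40].index.tolist()
print('Columns above the 40% missing threshold:', high_missing)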
code
73082264/cell_1
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import missingno as msno import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import warnings warnings.filterwarnings('ignore') import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code