Columns (one row per notebook cell):
path: string, 13 to 17 characters
screenshot_names: sequence of strings, 1 to 873 items
code: string, 0 to 40.4k characters
cell_type: string, 1 distinct value ("code")
72111100/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/car-price/train set.csv')
# Extract the brand (first word of the car name) into its own column
names = [x.split(' ')[0] for x in list(train['name'])]
train.insert(0, 'brand', names)
train = train.drop(['name', 'seller_type', 'owner', 'torque', 'fuel'], axis=1)
train.head(2)
code
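Side note, not from the notebook: the same brand extraction can be done without a Python-level loop via pandas' string accessor. A minimal sketch, assuming the same CSV layout:

import pandas as pd

train = pd.read_csv('../input/car-price/train set.csv')
train.insert(0, 'brand', train['name'].str.split(' ').str[0])  # first word of 'name'
train = train.drop(['name', 'seller_type', 'owner', 'torque', 'fuel'], axis=1)
train.head(2)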
72111100/cell_18
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/car-price/train set.csv')
names = [x.split(' ')[0] for x in list(train['name'])]
train.insert(0, 'brand', names)
train = train.drop(['name', 'seller_type', 'owner', 'torque', 'fuel'], axis=1)
# Keep only the numeric part of each value (the raw strings carry unit suffixes)
train['engine'] = [int(x.split(' ')[0]) for x in list(train['engine'])]
train['mileage'] = [float(x.split(' ')[0]) for x in list(train['mileage'])]
train['max_power'] = [float(x.split(' ')[0]) for x in list(train['max_power'])]
num_features = [x for x in train.columns if type(train[x][0]) is not str]
cat_features = [x for x in train.columns if x not in num_features]
train.head(3)
code
72111100/cell_32
[ "text_plain_output_1.png" ]
from math import sqrt

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.linear_model import LinearRegression as LR, Perceptron
from sklearn.metrics import make_scorer, mean_absolute_error, mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor as KNR, RadiusNeighborsRegressor as RNR
from sklearn.neural_network import MLPRegressor as MLPR
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

train = pd.read_csv('../input/car-price/train set.csv')
names = [x.split(' ')[0] for x in list(train['name'])]
train.insert(0, 'brand', names)
train = train.drop(['name', 'seller_type', 'owner', 'torque', 'fuel'], axis=1)
train['engine'] = [int(x.split(' ')[0]) for x in list(train['engine'])]
train['mileage'] = [float(x.split(' ')[0]) for x in list(train['mileage'])]
train['max_power'] = [float(x.split(' ')[0]) for x in list(train['max_power'])]
num_features = [x for x in train.columns if type(train[x][0]) is not str]
cat_features = [x for x in train.columns if x not in num_features]

# Categorical columns ('brand', 'transmission') are assumed to be numerically
# encoded by earlier cells that were not captured in this extract.
X_train = train.drop('selling_price', axis=1).values[0:6850]
y_train = train['selling_price'].values[0:6850]
scaler = StandardScaler()
scaled_X_train = scaler.fit_transform(X_train)

mae_scorer = make_scorer(mean_absolute_error)

def evaluate_model(model):
    scores = cross_val_score(model, scaled_X_train, y_train, scoring=mae_scorer, cv=5, n_jobs=-1)
    return scores.mean()

models = [KNR(), RNR(), LR(), RFR(n_estimators=300), Perceptron(), SVR(), MLPR()]
models_names = ['K_neighbors', 'radius_neighbors', 'linear_regression',
                'random_forest_regressor', 'perceptron', 'SVR', 'MLP_Regression']
scores = list()
for clf, clf_name in zip(models, models_names):
    k_mean = evaluate_model(clf)
    scores.append(k_mean)

model = models[3]  # the random forest
model.fit(scaled_X_train, y_train)
train_prediction = model.predict(scaled_X_train)
train_pred = [int(x) for x in train_prediction.round()]
train_prediction = np.array(train_pred)

def transform(df):
    brand = [x.split(' ')[0] for x in list(df['name'])]
    df.insert(0, 'brand', brand)
    df.drop(['name', 'seller_type', 'owner', 'torque', 'fuel'], axis=1, inplace=True)
    df['engine'] = [int(x.split(' ')[0]) for x in list(df['engine'])]
    df['mileage'] = [float(x.split(' ')[0]) for x in list(df['mileage'])]
    df['max_power'] = [float(x.split(' ')[0]) for x in list(df['max_power'])]
    df['transmission'] = [0 if x == 'Manual' else 1 for x in df['transmission']]
    # Note: this overwrites 'brand' with a bucket derived from the target price
    df['brand'] = [0 if x <= 1000000 else 1 if x <= 2000000 else 2 if x <= 4000000 else 3
                   for x in df['selling_price']]
    X = df.drop('selling_price', axis=1).values
    y = df['selling_price'].values
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    return (X, y)

# Load the held-out test set (same file as in cell_5 of this notebook) and apply the transform
test = pd.read_csv('../input/car-price/test set.csv')
X_test, y_test = transform(test)

test_prediction = model.predict(X_test)
test_pred = [int(x) for x in test_prediction.round()]
test_prediction = np.array(test_pred)
print('Test R.M.S.E : ', sqrt(mean_squared_error(y_test, test_prediction)))
code
72111100/cell_28
[ "text_plain_output_1.png" ]
from math import sqrt

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.linear_model import LinearRegression as LR, Perceptron
from sklearn.metrics import make_scorer, mean_absolute_error, mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor as KNR, RadiusNeighborsRegressor as RNR
from sklearn.neural_network import MLPRegressor as MLPR
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

train = pd.read_csv('../input/car-price/train set.csv')
names = [x.split(' ')[0] for x in list(train['name'])]
train.insert(0, 'brand', names)
train = train.drop(['name', 'seller_type', 'owner', 'torque', 'fuel'], axis=1)
train['engine'] = [int(x.split(' ')[0]) for x in list(train['engine'])]
train['mileage'] = [float(x.split(' ')[0]) for x in list(train['mileage'])]
train['max_power'] = [float(x.split(' ')[0]) for x in list(train['max_power'])]
num_features = [x for x in train.columns if type(train[x][0]) is not str]
cat_features = [x for x in train.columns if x not in num_features]

X_train = train.drop('selling_price', axis=1).values[0:6850]
y_train = train['selling_price'].values[0:6850]
scaler = StandardScaler()
scaled_X_train = scaler.fit_transform(X_train)

mae_scorer = make_scorer(mean_absolute_error)

def evaluate_model(model):
    scores = cross_val_score(model, scaled_X_train, y_train, scoring=mae_scorer, cv=5, n_jobs=-1)
    return scores.mean()

models = [KNR(), RNR(), LR(), RFR(n_estimators=300), Perceptron(), SVR(), MLPR()]
models_names = ['K_neighbors', 'radius_neighbors', 'linear_regression',
                'random_forest_regressor', 'perceptron', 'SVR', 'MLP_Regression']
scores = list()
for clf, clf_name in zip(models, models_names):
    k_mean = evaluate_model(clf)
    scores.append(k_mean)

model = models[3]  # the random forest
model.fit(scaled_X_train, y_train)
train_prediction = model.predict(scaled_X_train)
train_pred = [int(x) for x in train_prediction.round()]
train_prediction = np.array(train_pred)
print('Train R.M.S.E : ', sqrt(mean_squared_error(y_train, train_prediction)))
code
72111100/cell_16
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/car-price/train set.csv')
names = [x.split(' ')[0] for x in list(train['name'])]
train.insert(0, 'brand', names)
train = train.drop(['name', 'seller_type', 'owner', 'torque', 'fuel'], axis=1)
train['engine'] = [int(x.split(' ')[0]) for x in list(train['engine'])]
train['mileage'] = [float(x.split(' ')[0]) for x in list(train['mileage'])]
train['max_power'] = [float(x.split(' ')[0]) for x in list(train['max_power'])]
num_features = [x for x in train.columns if type(train[x][0]) is not str]
cat_features = [x for x in train.columns if x not in num_features]
train.head(3)
code
72111100/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/car-price/train set.csv')
train.head(2)
code
72111100/cell_24
[ "text_html_output_1.png" ]
import pandas as pd
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.linear_model import LinearRegression as LR, Perceptron
from sklearn.metrics import make_scorer, mean_absolute_error
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor as KNR, RadiusNeighborsRegressor as RNR
from sklearn.neural_network import MLPRegressor as MLPR
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

train = pd.read_csv('../input/car-price/train set.csv')
names = [x.split(' ')[0] for x in list(train['name'])]
train.insert(0, 'brand', names)
train = train.drop(['name', 'seller_type', 'owner', 'torque', 'fuel'], axis=1)
train['engine'] = [int(x.split(' ')[0]) for x in list(train['engine'])]
train['mileage'] = [float(x.split(' ')[0]) for x in list(train['mileage'])]
train['max_power'] = [float(x.split(' ')[0]) for x in list(train['max_power'])]
num_features = [x for x in train.columns if type(train[x][0]) is not str]
cat_features = [x for x in train.columns if x not in num_features]

X_train = train.drop('selling_price', axis=1).values[0:6850]
y_train = train['selling_price'].values[0:6850]
scaler = StandardScaler()
scaled_X_train = scaler.fit_transform(X_train)

mae_scorer = make_scorer(mean_absolute_error)

def evaluate_model(model):
    scores = cross_val_score(model, scaled_X_train, y_train, scoring=mae_scorer, cv=5, n_jobs=-1)
    return scores.mean()

models = [KNR(), RNR(), LR(), RFR(n_estimators=300), Perceptron(), SVR(), MLPR()]
models_names = ['K_neighbors', 'radius_neighbors', 'linear_regression',
                'random_forest_regressor', 'perceptron', 'SVR', 'MLP_Regression']
scores = list()
for clf, clf_name in zip(models, models_names):
    k_mean = evaluate_model(clf)
    print(f'score of {clf_name} : ', round(k_mean, 3))
    scores.append(k_mean)
code
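The cell above fits the scaler on the full training set before cross-validating, so each fold's scaler has already seen its validation rows. Not part of the original notebook: a sketch (reusing X_train and y_train from the cell above) that keeps the scaling inside each fold via a Pipeline:

from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

pipe = make_pipeline(StandardScaler(), RandomForestRegressor(n_estimators=300))
# the scaler is re-fit on the training folds only, inside each CV split
scores = cross_val_score(pipe, X_train, y_train, scoring='neg_mean_absolute_error', cv=5, n_jobs=-1)
print('CV MAE:', -scores.mean())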
72111100/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/car-price/train set.csv')
names = [x.split(' ')[0] for x in list(train['name'])]
train.insert(0, 'brand', names)
train = train.drop(['name', 'seller_type', 'owner', 'torque', 'fuel'], axis=1)
train['engine'] = [int(x.split(' ')[0]) for x in list(train['engine'])]
train['mileage'] = [float(x.split(' ')[0]) for x in list(train['mileage'])]
train['max_power'] = [float(x.split(' ')[0]) for x in list(train['max_power'])]
num_features = [x for x in train.columns if type(train[x][0]) is not str]
cat_features = [x for x in train.columns if x not in num_features]

sns.barplot(x=train['transmission'], y=train['selling_price'])
plt.show()
code
72111100/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/car-price/train set.csv')
test = pd.read_csv('../input/car-price/test set.csv')
test.head(2)
code
74067093/cell_7
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.layers import Input, GlobalAveragePooling2D, Dense, Dropout
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.metrics import AUC
from tensorflow.keras.models import Model, Sequential

# Center-crop the 299x299 inputs to the 224x224 resolution expected by EfficientNetB0
img_augmentation = Sequential([preprocessing.CenterCrop(224, 224)], name='img_augmentation')

auc = AUC()
inputs = Input(shape=(299, 299, 3))
x = img_augmentation(inputs)
x = EfficientNetB0(weights='imagenet', include_top=False)(x)
x = GlobalAveragePooling2D()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(rate=0.25)(x)
outputs = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[auc])
model.summary()
code
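Not part of the original cell: when fine-tuning on a small dataset it is common to freeze the pretrained backbone before training the new head. A sketch under that assumption:

from tensorflow.keras.applications import EfficientNetB0

base = EfficientNetB0(weights='imagenet', include_top=False)
base.trainable = False  # train only the new head first; unfreeze later if needed
# ...then build the same head as in the cell above, calling base(x) in place of
# the inline EfficientNetB0(...)(x)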
17138373/cell_4
[ "image_output_1.png" ]
import pandas as pd

dfBlackFriday = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
dfBlackFriday.head()
code
17138373/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

dfBlackFriday = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
dfBlackFriday.isnull().sum()
code
17138373/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

dfBlackFriday = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
dfBlackFriday.isnull().sum()
# Ten most purchased products
produtosmaisComprados = dfBlackFriday['Product_ID'].value_counts().head(10)
produtosmaisComprados.plot(kind='bar', title='10 Most Purchased Products')
plt.xlabel('Products')
plt.ylabel('Quantity')
code
17138373/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

dfBlackFriday = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
dfBlackFriday.isnull().sum()
sns.violinplot(x=dfBlackFriday['Age'].sort_values(), y=dfBlackFriday['Purchase'], data=dfBlackFriday)
plt.show()
code
17138373/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

dfBlackFriday = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
dfBlackFriday.isnull().sum()
# Restrict to higher-value purchases before comparing occupation by marital status
dfBlackFridayCons = dfBlackFriday.query('Purchase > 9000')
sns.violinplot(x=dfBlackFridayCons['Marital_Status'], y=dfBlackFridayCons['Occupation'], data=dfBlackFridayCons)
code
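As a cross-check on the violin plots, the underlying group means are one line of pandas. A minimal sketch, assuming the same BlackFriday.csv:

import pandas as pd

dfBlackFriday = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
# mean purchase value per age bracket, in bracket order
print(dfBlackFriday.groupby('Age')['Purchase'].mean().sort_index())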
17138373/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

dfBlackFriday = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
dfBlackFriday.isnull().sum()
dfBlackFriday['Product_ID'].value_counts()
code
17138373/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

dfBlackFriday = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
dfBlackFriday.describe()
code
122252864/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
death = pd.read_csv('/kaggle/input/character-deathscsv/character-deaths.csv')
death.shape
code
122252864/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.shape
battle.rename(columns={'attacker_1': 'primary_attacker'}, inplace=True)
battle.rename(columns={'defender_1': 'primary_defender'}, inplace=True)
sns.set(rc={'figure.figsize': (13, 5)})
sns.barplot(x='attacker_king', y='attacker_size', data=battle)
plt.show()
code
122252864/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.shape
code
122252864/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.shape
battle.rename(columns={'attacker_1': 'primary_attacker'}, inplace=True)
battle.rename(columns={'defender_1': 'primary_defender'}, inplace=True)
battle.head()
code
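A small aside, not from the notebook: the two rename calls above can be merged into a single mapping with identical behaviour:

import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle = battle.rename(columns={'attacker_1': 'primary_attacker',
                                'defender_1': 'primary_defender'})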
122252864/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.shape
battle.rename(columns={'attacker_1': 'primary_attacker'}, inplace=True)
battle.rename(columns={'defender_1': 'primary_defender'}, inplace=True)
sns.set(rc={'figure.figsize': (13, 5)})
sns.countplot(x=battle['attacker_king'], hue=battle['battle_type'])
plt.show()
code
122252864/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.shape
battle.rename(columns={'attacker_1': 'primary_attacker'}, inplace=True)
battle.rename(columns={'defender_1': 'primary_defender'}, inplace=True)
battle['attacker_king'].value_counts()
code
122252864/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.shape
battle.rename(columns={'attacker_1': 'primary_attacker'}, inplace=True)
battle.rename(columns={'defender_1': 'primary_defender'}, inplace=True)
death = pd.read_csv('/kaggle/input/character-deathscsv/character-deaths.csv')
death.shape
sns.set(rc={'figure.figsize': (30, 10)})
sns.countplot(data=death, x='Allegiances', width=0.8)
plt.show()
code
122252864/cell_8
[ "image_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.shape
battle.rename(columns={'attacker_1': 'primary_attacker'}, inplace=True)
battle.rename(columns={'defender_1': 'primary_defender'}, inplace=True)
battle['location'].value_counts()
code
122252864/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
death = pd.read_csv('/kaggle/input/character-deathscsv/character-deaths.csv')
death.shape
death['Nobility'].value_counts()
code
122252864/cell_16
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
death = pd.read_csv('/kaggle/input/character-deathscsv/character-deaths.csv')
death.shape
death['Death Year'].value_counts()
code
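Not in the original cell: value_counts silently drops NaN, and 'Death Year' is likely missing for characters who are still alive, so counting the missing values explicitly can be informative. A sketch:

import pandas as pd

death = pd.read_csv('/kaggle/input/character-deathscsv/character-deaths.csv')
# include characters with no recorded death year in the counts
print(death['Death Year'].value_counts(dropna=False))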
122252864/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.head()
code
122252864/cell_17
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.shape
battle.rename(columns={'attacker_1': 'primary_attacker'}, inplace=True)
battle.rename(columns={'defender_1': 'primary_defender'}, inplace=True)
sns.set(rc={'figure.figsize': (13, 5)})
death = pd.read_csv('/kaggle/input/character-deathscsv/character-deaths.csv')
death.shape
sns.countplot(data=death, x='Death Year')
plt.show()
code
122252864/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
death = pd.read_csv('/kaggle/input/character-deathscsv/character-deaths.csv')
death.shape
death['Gender'].value_counts()
code
122252864/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.shape
battle.rename(columns={'attacker_1': 'primary_attacker'}, inplace=True)
battle.rename(columns={'defender_1': 'primary_defender'}, inplace=True)
sns.set(rc={'figure.figsize': (13, 5)})
sns.barplot(x='defender_king', y='defender_size', data=battle)
plt.show()
code
122252864/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
death = pd.read_csv('/kaggle/input/character-deathscsv/character-deaths.csv')
death.head()
code
122252864/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

battle = pd.read_csv('/kaggle/input/games-of-thrones/battles.csv')
battle.shape
battle.rename(columns={'attacker_1': 'primary_attacker'}, inplace=True)
battle.head()
code
129020918/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
from PIL import Image
from matplotlib import patches, patheffects
from torch.utils.data import ConcatDataset, DataLoader, Dataset, Subset
from torchvision import datasets, transforms
from torchvision.transforms.functional import to_tensor
from tqdm.notebook import trange, tqdm
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.models as models
import xml.etree.ElementTree as ET

imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

def one_epoch(net, loss, dl, opt=None, metric=None):
    if opt:
        net.train()  # only affects some layers
    else:
        net.eval()
    L, M = [], []
    dl_it = iter(dl)
    for xb, yb in tqdm(dl_it, leave=False):
        xb = xb.cuda()
        if not isinstance(yb, list):
            yb = [yb]  # this is new(!)
        yb = [yb_.cuda() for yb_ in yb]
        if opt:
            y_ = net(xb)
            l = loss(y_, yb)
            opt.zero_grad()
            l.backward()
            opt.step()
        else:
            with tc.no_grad():
                y_ = net(xb)
                l = loss(y_, yb)
        L.append(l.detach().cpu().numpy())
        if isinstance(metric, list):
            for m in metric:
                M.append(m(tc.sigmoid(y_), yb[0]))
        elif metric:
            M.append(metric(tc.sigmoid(y_), yb[0]))
    return L, M

def fit(net, tr_dl, val_dl, loss=nn.CrossEntropyLoss(), epochs=3, lr=3e-3, wd=1e-3, plot=True):
    opt = optim.Adam(net.parameters(), lr=lr, weight_decay=wd)
    Ltr_hist, Lval_hist = [], []
    for epoch in trange(epochs):
        Ltr, _ = one_epoch(net, loss, tr_dl, opt)
        Lval, Aval = one_epoch(net, loss, val_dl, None, batch_iou)
        Ltr_hist.append(np.mean(Ltr))
        Lval_hist.append(np.mean(Lval))
        print(f'epoch: {epoch}\ttraining loss: {np.mean(Ltr):0.4f}\tvalidation loss: {np.mean(Lval):0.4f}, overlap accuracy: {np.array(Aval).mean():0.2f}')
    # plot the losses
    if plot:
        _, ax = plt.subplots(1, 1, figsize=(16, 4))
        ax.plot(1 + np.arange(len(Ltr_hist)), Ltr_hist)
        ax.plot(1 + np.arange(len(Lval_hist)), Lval_hist)
        ax.grid('on')
        ax.set_xlim(left=1, right=len(Ltr_hist))
        ax.legend(['training loss', 'validation loss'])
    return Ltr_hist, Lval_hist

def denorm(x, stats=imagenet_stats):
    return x * tc.Tensor(stats[1])[:, None, None] + tc.Tensor(stats[0])[:, None, None]

transform = transforms.Compose([
    transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1)], p=1),
    transforms.ToTensor()])

def draw_rect(ax, xy, w, h):
    patch = ax.add_patch(patches.Rectangle(xy, w, h, fill=False, edgecolor='yellow', lw=2))
    patch.set_path_effects([patheffects.Stroke(linewidth=6, foreground='black'), patheffects.Normal()])

def _freeze(md, fr=True):
    ch = list(md.children())
    for c in ch:
        _freeze(c, fr)
    if not ch and not isinstance(md, tc.nn.modules.batchnorm.BatchNorm2d):  # not freezing the BatchNorm layers!
        for p in md.parameters():
            p.requires_grad = not fr

def freeze_to(md, ix=-1):
    ch_all = list(md.children())
    for ch in ch_all[:ix]:
        _freeze(ch, True)

def unfreeze_to(md, ix=-1):
    ch_all = list(md.children())
    for ch in ch_all[:ix]:
        _freeze(ch, False)

def calculate_iou(box_true, box_pred):
    # boxes are (x, y, w, h)
    x1_tr, y1_tr, w_tr, h_tr = box_true
    x1_pr, y1_pr, w_pr, h_pr = box_pred
    intersection_x1 = max(x1_tr, x1_pr)
    intersection_y1 = max(y1_tr, y1_pr)
    intersection_x2 = min(x1_tr + w_tr, x1_pr + w_pr)
    intersection_y2 = min(y1_tr + h_tr, y1_pr + h_pr)
    intersection_w = max(0, intersection_x2 - intersection_x1)
    intersection_h = max(0, intersection_y2 - intersection_y1)
    intersection_area = intersection_w * intersection_h
    area_true = w_tr * h_tr
    area_pred = w_pr * h_pr
    union_area = area_true + area_pred - intersection_area
    iou = intersection_area / union_area
    return iou

def batch_iou(box_true_batch, box_pred_batch):
    iou_list = []
    for box_true, box_pred in zip(box_true_batch, box_pred_batch):
        iou = calculate_iou(box_true, box_pred)
        iou_list.append(iou.item())
    iou_array = np.array(iou_list).mean()
    return iou_array

class Data2tensor(Dataset):
    def __init__(self, data_dir, transforms=None):
        self.data_dir = data_dir
        self.transforms = transforms
        self.img_paths = []
        self.xml_paths = []
        for filename in os.listdir(data_dir):
            name, ext = os.path.splitext(filename)
            if ext == '.jpg':
                img_path = os.path.join(data_dir, filename)
                xml_path = os.path.join(data_dir, name + '.xml')
                if os.path.isfile(xml_path):
                    self.img_paths.append(img_path)
                    self.xml_paths.append(xml_path)

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        img_path = self.img_paths[idx]
        xml_path = self.xml_paths[idx]
        img = Image.open(img_path).convert('RGB')
        w, h = img.size
        tree = ET.parse(xml_path)
        root = tree.getroot()
        bboxes = []
        for obj in root.findall('object'):
            bbox = obj.find('bndbox')
            xmin = int(bbox.find('xmin').text)
            ymin = int(bbox.find('ymin').text)
            xmax = int(bbox.find('xmax').text)
            ymax = int(bbox.find('ymax').text)
            y_bbox = np.array([xmin, ymin, xmax, ymax])
            y_bbox = y_bbox / np.array([w, h, w, h])  # normalise to [0, 1]
            y_bbox = [*y_bbox[:2], *y_bbox[2:] - y_bbox[:2]]  # convert to (x, y, w, h)
            bboxes.append(y_bbox)
        if self.transforms:
            img = self.transforms(img)
        else:
            img = to_tensor(img)
        bboxes = [tc.tensor(bbox, dtype=tc.float32) for bbox in bboxes]
        return (img, bboxes)

def ds_train_val(train_dir, val_dir, p, transforms):
    ds_full = Data2tensor(train_dir, transforms)
    num_data = len(ds_full)
    num_train = math.ceil(num_data * p)
    train_indices = list(range(num_train))
    ds_tr = Subset(ds_full, train_indices)
    ds_val = Data2tensor(val_dir, None)
    return (ds_tr, ds_val)

train_dir = '/kaggle/input/spot250/train'
val_dir = '/kaggle/input/spot250/valid'
ds_tr0, ds_val = ds_train_val(train_dir, val_dir, p=1, transforms=transform)

transform1 = transforms.Compose([
    transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1)], p=1),
    transforms.ToTensor()])
transform2 = transforms.Compose([
    transforms.RandomApply([transforms.ColorJitter(brightness=0.4, contrast=0.5, saturation=0.2, hue=0.1)], p=1),
    transforms.ToTensor()])

ds_tr, _ = ds_train_val(train_dir, val_dir, p=1, transforms=None)
transf_ds1, _ = ds_train_val(train_dir, val_dir, p=0.4, transforms=transform1)
ds_tr1 = ConcatDataset([ds_tr, transf_ds1])
transf_ds2, _ = ds_train_val(train_dir, val_dir, p=0.4, transforms=transform2)
transformed_ds_tr = ConcatDataset([ds_tr1, transf_ds2])

bs = 500
train_and_transformed = DataLoader(transformed_ds_tr, batch_size=bs, shuffle=True, num_workers=0)
train_dl = DataLoader(ds_tr, batch_size=bs, shuffle=True, num_workers=0)
val_dl = DataLoader(ds_val, batch_size=2 * bs, shuffle=True, num_workers=0)
xt, yt = next(iter(train_and_transformed))
xb, yb = next(iter(train_dl))
x_v, y_v = next(iter(val_dl))

# ResNet-34 backbone with a 4-unit head predicting (x, y, w, h)
md_full = models.resnet34()
num_ftrs = md_full.fc.in_features
md_full.fc = nn.Linear(num_ftrs, 4)

def myloss(y, y_b, reduction='mean'):
    inp_reg = y
    tar_reg = y_b[0]
    # squash predictions into (-0.1, 1.1) before the MSE against normalised boxes
    loss_reg = F.mse_loss(1.2 * tc.sigmoid(inp_reg) - 0.1, tar_reg, reduction=reduction)
    if reduction == 'none':
        loss_reg = loss_reg.mean(dim=-1)
    return loss_reg

bs = 32
train_and_transformed = DataLoader(transformed_ds_tr, batch_size=bs, shuffle=True, num_workers=0)
train_dl = DataLoader(ds_tr0, batch_size=bs, shuffle=True, num_workers=0)
val_dl = DataLoader(ds_val, batch_size=bs, shuffle=True, num_workers=0)

fit(md_full.cuda(), train_dl, val_dl, loss=myloss, epochs=5, wd=0.001, lr=0.003)
code
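A quick sanity check for calculate_iou from the cell above (hypothetical values; assumes that cell has been run): two unit squares offset by half a side overlap in 0.5 of 1.5 units of total area, so the IoU should be 1/3.

import torch as tc

box_a = tc.tensor([0.0, 0.0, 1.0, 1.0])  # (x, y, w, h)
box_b = tc.tensor([0.5, 0.0, 1.0, 1.0])
print(calculate_iou(box_a, box_b))  # expected: tensor(0.3333)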
129020918/cell_11
[ "image_output_1.png" ]
from PIL import Image
from matplotlib import patches, patheffects
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms
from torchvision.transforms.functional import to_tensor
from tqdm.notebook import trange, tqdm
import matplotlib.pyplot as plt
import numpy as np
import os
import torch as tc
import torch.nn as nn
import torch.optim as optim
import xml.etree.ElementTree as ET

imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

def one_epoch(net, loss, dl, opt=None, metric=None):
    if opt:
        net.train()  # only affects some layers
    else:
        net.eval()
    L, M = [], []
    dl_it = iter(dl)
    for xb, yb in tqdm(dl_it, leave=False):
        xb = xb.cuda()
        if not isinstance(yb, list):
            yb = [yb]  # this is new(!)
        yb = [yb_.cuda() for yb_ in yb]
        if opt:
            y_ = net(xb)
            l = loss(y_, yb)
            opt.zero_grad()
            l.backward()
            opt.step()
        else:
            with tc.no_grad():
                y_ = net(xb)
                l = loss(y_, yb)
        L.append(l.detach().cpu().numpy())
        if isinstance(metric, list):
            for m in metric:
                M.append(m(tc.sigmoid(y_), yb[0]))
        elif metric:
            M.append(metric(tc.sigmoid(y_), yb[0]))
    return L, M

def fit(net, tr_dl, val_dl, loss=nn.CrossEntropyLoss(), epochs=3, lr=3e-3, wd=1e-3, plot=True):
    opt = optim.Adam(net.parameters(), lr=lr, weight_decay=wd)
    Ltr_hist, Lval_hist = [], []
    for epoch in trange(epochs):
        Ltr, _ = one_epoch(net, loss, tr_dl, opt)
        Lval, Aval = one_epoch(net, loss, val_dl, None, batch_iou)
        Ltr_hist.append(np.mean(Ltr))
        Lval_hist.append(np.mean(Lval))
        print(f'epoch: {epoch}\ttraining loss: {np.mean(Ltr):0.4f}\tvalidation loss: {np.mean(Lval):0.4f}, overlap accuracy: {np.array(Aval).mean():0.2f}')
    # plot the losses
    if plot:
        _, ax = plt.subplots(1, 1, figsize=(16, 4))
        ax.plot(1 + np.arange(len(Ltr_hist)), Ltr_hist)
        ax.plot(1 + np.arange(len(Lval_hist)), Lval_hist)
        ax.grid('on')
        ax.set_xlim(left=1, right=len(Ltr_hist))
        ax.legend(['training loss', 'validation loss'])
    return Ltr_hist, Lval_hist

def denorm(x, stats=imagenet_stats):
    return x * tc.Tensor(stats[1])[:, None, None] + tc.Tensor(stats[0])[:, None, None]

transform = transforms.Compose([
    transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1)], p=1),
    transforms.ToTensor()])

def draw_rect(ax, xy, w, h):
    patch = ax.add_patch(patches.Rectangle(xy, w, h, fill=False, edgecolor='yellow', lw=2))
    patch.set_path_effects([patheffects.Stroke(linewidth=6, foreground='black'), patheffects.Normal()])

def _freeze(md, fr=True):
    ch = list(md.children())
    for c in ch:
        _freeze(c, fr)
    if not ch and not isinstance(md, tc.nn.modules.batchnorm.BatchNorm2d):  # not freezing the BatchNorm layers!
        for p in md.parameters():
            p.requires_grad = not fr

def freeze_to(md, ix=-1):
    ch_all = list(md.children())
    for ch in ch_all[:ix]:
        _freeze(ch, True)

def unfreeze_to(md, ix=-1):
    ch_all = list(md.children())
    for ch in ch_all[:ix]:
        _freeze(ch, False)

def calculate_iou(box_true, box_pred):
    # boxes are (x, y, w, h)
    x1_tr, y1_tr, w_tr, h_tr = box_true
    x1_pr, y1_pr, w_pr, h_pr = box_pred
    intersection_x1 = max(x1_tr, x1_pr)
    intersection_y1 = max(y1_tr, y1_pr)
    intersection_x2 = min(x1_tr + w_tr, x1_pr + w_pr)
    intersection_y2 = min(y1_tr + h_tr, y1_pr + h_pr)
    intersection_w = max(0, intersection_x2 - intersection_x1)
    intersection_h = max(0, intersection_y2 - intersection_y1)
    intersection_area = intersection_w * intersection_h
    area_true = w_tr * h_tr
    area_pred = w_pr * h_pr
    union_area = area_true + area_pred - intersection_area
    iou = intersection_area / union_area
    return iou

def batch_iou(box_true_batch, box_pred_batch):
    iou_list = []
    for box_true, box_pred in zip(box_true_batch, box_pred_batch):
        iou = calculate_iou(box_true, box_pred)
        iou_list.append(iou.item())
    iou_array = np.array(iou_list).mean()
    return iou_array

class Data2tensor(Dataset):
    def __init__(self, data_dir, transforms=None):
        self.data_dir = data_dir
        self.transforms = transforms
        self.img_paths = []
        self.xml_paths = []
        for filename in os.listdir(data_dir):
            name, ext = os.path.splitext(filename)
            if ext == '.jpg':
                img_path = os.path.join(data_dir, filename)
                xml_path = os.path.join(data_dir, name + '.xml')
                if os.path.isfile(xml_path):
                    self.img_paths.append(img_path)
                    self.xml_paths.append(xml_path)

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        img_path = self.img_paths[idx]
        xml_path = self.xml_paths[idx]
        img = Image.open(img_path).convert('RGB')
        w, h = img.size
        tree = ET.parse(xml_path)
        root = tree.getroot()
        bboxes = []
        for obj in root.findall('object'):
            bbox = obj.find('bndbox')
            xmin = int(bbox.find('xmin').text)
            ymin = int(bbox.find('ymin').text)
            xmax = int(bbox.find('xmax').text)
            ymax = int(bbox.find('ymax').text)
            y_bbox = np.array([xmin, ymin, xmax, ymax])
            y_bbox = y_bbox / np.array([w, h, w, h])  # normalise to [0, 1]
            y_bbox = [*y_bbox[:2], *y_bbox[2:] - y_bbox[:2]]  # convert to (x, y, w, h)
            bboxes.append(y_bbox)
        if self.transforms:
            img = self.transforms(img)
        else:
            img = to_tensor(img)
        bboxes = [tc.tensor(bbox, dtype=tc.float32) for bbox in bboxes]
        return (img, bboxes)

def show_img_and_bbox(x, y, ax=None):
    # plot the image:
    if not ax:
        _, ax = plt.subplots(1, 1, figsize=(5, 5))
    if len(x.shape) == 3:
        H, W = x.shape[1:]
        x = x.numpy().transpose(1, 2, 0)
    ax.imshow(x)
    ax.axis('off')
    # bbox is already (x, y, w, h) normalised to [0, 1]; scale to pixels
    # (equivalent to the notebook's original min/max arithmetic, simplified)
    bbox = y
    x, y, w, h = bbox[0], bbox[1], bbox[2], bbox[3]
    draw_rect(ax, [x * W, y * H], w * W, h * H)

# ds_tr0 is built in a later cell via ds_train_val(...); for p=1 it is simply the
# full training set with the ColorJitter transform applied:
train_dir = '/kaggle/input/spot250/train'
ds_tr0 = Data2tensor(train_dir, transform)

x, y = ds_tr0[3]
show_img_and_bbox(x, y[0])
code
129020918/cell_15
[ "text_plain_output_1.png" ]
from matplotlib import patches, patheffects
from PIL import Image
from torch.utils.data import ConcatDataset, DataLoader, Dataset, Subset
from torchvision import datasets, transforms
from torchvision.transforms.functional import to_tensor
from tqdm.notebook import trange, tqdm
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import torch as tc
import torch.nn as nn
import torch.optim as optim
import xml.etree.ElementTree as ET

imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

def one_epoch(net, loss, dl, opt=None, metric=None):
    if opt:
        net.train()  # only affects some layers
    else:
        net.eval()
    L, M = [], []
    dl_it = iter(dl)
    for xb, yb in tqdm(dl_it, leave=False):
        xb = xb.cuda()
        if not isinstance(yb, list):
            yb = [yb]  # this is new(!)
        yb = [yb_.cuda() for yb_ in yb]
        if opt:
            y_ = net(xb)
            l = loss(y_, yb)
            opt.zero_grad()
            l.backward()
            opt.step()
        else:
            with tc.no_grad():
                y_ = net(xb)
                l = loss(y_, yb)
        L.append(l.detach().cpu().numpy())
        if isinstance(metric, list):
            for m in metric:
                M.append(m(tc.sigmoid(y_), yb[0]))
        elif metric:
            M.append(metric(tc.sigmoid(y_), yb[0]))
    return L, M

def fit(net, tr_dl, val_dl, loss=nn.CrossEntropyLoss(), epochs=3, lr=3e-3, wd=1e-3, plot=True):
    # batch_iou is defined in an earlier cell of the notebook
    opt = optim.Adam(net.parameters(), lr=lr, weight_decay=wd)
    Ltr_hist, Lval_hist = [], []
    for epoch in trange(epochs):
        Ltr, _ = one_epoch(net, loss, tr_dl, opt)
        Lval, Aval = one_epoch(net, loss, val_dl, None, batch_iou)
        Ltr_hist.append(np.mean(Ltr))
        Lval_hist.append(np.mean(Lval))
        print(f'epoch: {epoch}\ttraining loss: {np.mean(Ltr):0.4f}\tvalidation loss: {np.mean(Lval):0.4f}, overlap accuracy: {np.array(Aval).mean():0.2f}')
    # plot the losses
    if plot:
        _, ax = plt.subplots(1, 1, figsize=(16, 4))
        ax.plot(1 + np.arange(len(Ltr_hist)), Ltr_hist)
        ax.plot(1 + np.arange(len(Lval_hist)), Lval_hist)
        ax.grid('on')
        ax.set_xlim(left=1, right=len(Ltr_hist))
        ax.legend(['training loss', 'validation loss'])
    return Ltr_hist, Lval_hist

def denorm(x, stats=imagenet_stats):
    return x * tc.Tensor(stats[1])[:, None, None] + tc.Tensor(stats[0])[:, None, None]

transform = transforms.Compose([
    transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1)], p=1),
    transforms.ToTensor()])

def draw_rect(ax, xy, w, h):
    patch = ax.add_patch(patches.Rectangle(xy, w, h, fill=False, edgecolor='yellow', lw=2))
    patch.set_path_effects([patheffects.Stroke(linewidth=6, foreground='black'), patheffects.Normal()])

def _freeze(md, fr=True):
    ch = list(md.children())
    for c in ch:
        _freeze(c, fr)
    if not ch and not isinstance(md, tc.nn.modules.batchnorm.BatchNorm2d):  # not freezing the BatchNorm layers!
        for p in md.parameters():
            p.requires_grad = not fr

def freeze_to(md, ix=-1):
    ch_all = list(md.children())
    for ch in ch_all[:ix]:
        _freeze(ch, True)

def unfreeze_to(md, ix=-1):
    ch_all = list(md.children())
    for ch in ch_all[:ix]:
        _freeze(ch, False)

# Data2tensor (image + Pascal-VOC bbox pairs), as defined earlier in the notebook:
class Data2tensor(Dataset):
    def __init__(self, data_dir, transforms=None):
        self.data_dir = data_dir
        self.transforms = transforms
        self.img_paths = []
        self.xml_paths = []
        for filename in os.listdir(data_dir):
            name, ext = os.path.splitext(filename)
            if ext == '.jpg':
                img_path = os.path.join(data_dir, filename)
                xml_path = os.path.join(data_dir, name + '.xml')
                if os.path.isfile(xml_path):
                    self.img_paths.append(img_path)
                    self.xml_paths.append(xml_path)

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        img_path = self.img_paths[idx]
        xml_path = self.xml_paths[idx]
        img = Image.open(img_path).convert('RGB')
        w, h = img.size
        tree = ET.parse(xml_path)
        root = tree.getroot()
        bboxes = []
        for obj in root.findall('object'):
            bbox = obj.find('bndbox')
            xmin = int(bbox.find('xmin').text)
            ymin = int(bbox.find('ymin').text)
            xmax = int(bbox.find('xmax').text)
            ymax = int(bbox.find('ymax').text)
            y_bbox = np.array([xmin, ymin, xmax, ymax])
            y_bbox = y_bbox / np.array([w, h, w, h])  # normalise to [0, 1]
            y_bbox = [*y_bbox[:2], *y_bbox[2:] - y_bbox[:2]]  # convert to (x, y, w, h)
            bboxes.append(y_bbox)
        if self.transforms:
            img = self.transforms(img)
        else:
            img = to_tensor(img)
        bboxes = [tc.tensor(bbox, dtype=tc.float32) for bbox in bboxes]
        return (img, bboxes)

def ds_train_val(train_dir, val_dir, p, transforms):
    ds_full = Data2tensor(train_dir, transforms)
    num_data = len(ds_full)
    num_train = math.ceil(num_data * p)
    train_indices = list(range(num_train))
    ds_tr = Subset(ds_full, train_indices)
    ds_val = Data2tensor(val_dir, None)
    return (ds_tr, ds_val)

train_dir = '/kaggle/input/spot250/train'
val_dir = '/kaggle/input/spot250/valid'
ds_tr0, ds_val = ds_train_val(train_dir, val_dir, p=1, transforms=transform)

transform1 = transforms.Compose([
    transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1)], p=1),
    transforms.ToTensor()])
transform2 = transforms.Compose([
    transforms.RandomApply([transforms.ColorJitter(brightness=0.4, contrast=0.5, saturation=0.2, hue=0.1)], p=1),
    transforms.ToTensor()])

ds_tr, _ = ds_train_val(train_dir, val_dir, p=1, transforms=None)
transf_ds1, _ = ds_train_val(train_dir, val_dir, p=0.4, transforms=transform1)
ds_tr1 = ConcatDataset([ds_tr, transf_ds1])
transf_ds2, _ = ds_train_val(train_dir, val_dir, p=0.4, transforms=transform2)
transformed_ds_tr = ConcatDataset([ds_tr1, transf_ds2])

bs = 500
train_and_transformed = DataLoader(transformed_ds_tr, batch_size=bs, shuffle=True, num_workers=0)
train_dl = DataLoader(ds_tr, batch_size=bs, shuffle=True, num_workers=0)
val_dl = DataLoader(ds_val, batch_size=2 * bs, shuffle=True, num_workers=0)
xt, yt = next(iter(train_and_transformed))
xb, yb = next(iter(train_dl))
x_v, y_v = next(iter(val_dl))
print(f'Train and transformed data: x shape = {xt.shape}, bbox shape = {yt[0].shape} \n')
print(f'Training data: image shape = {xb.shape}, bbox shape = {yb[0].shape} \n')
print(f'Validation data: image shape = {x_v.shape}, bbox shape = {y_v[0].shape}')
code
90136510/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
from sqlalchemy import create_engine

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle',
                        'Salaries Reported': 'SalariesReported'}, inplace=False)
engine = create_engine('sqlite://', echo=False)
df.to_sql('SalaryDataset', con=engine)

sql = "Select * from SalaryDataset limit 5"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select * from SalaryDataset where CompanyName='IBM'"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select distinct(jobtitle) as job_title from SalaryDataset"
df_sql = pd.read_sql_query(sql, con=engine)

sql = """
Select
jobtitle,
count(salariesreported) as count_of_reports
from SalaryDataset
group by jobtitle
order by count(salariesreported) desc
"""
df_sql = pd.read_sql_query(sql, con=engine)

sql = """
Select
location,
sum(case when JobTitle='Data Scientist' then 1 else 0 end) as Data_Scientist,
sum(case when JobTitle='Data Analyst' then 1 else 0 end) as Data_Analyst,
sum(case when JobTitle='Data Engineer' then 1 else 0 end) as Data_Engineer,
sum(case when JobTitle='Machine Learning Engineer' then 1 else 0 end) as Machine_Learning_Engineer,
sum(case when JobTitle='Junior Data Scientist' then 1 else 0 end) as Junior_Data_Scientist,
sum(case when JobTitle='Senior Machine Learning Engineer' then 1 else 0 end) as Senior_Machine_Learning_Engineer,
sum(case when JobTitle='Lead Data Scientist' then 1 else 0 end) as Lead_Data_Scientist,
sum(case when JobTitle='Software Engineer- Machine learning' then 1 else 0 end) as Software_Engineer_Machine_learning,
sum(case when JobTitle='Machine Learning Scientist' then 1 else 0 end) as Machine_Learning_Scientist,
sum(case when JobTitle='Machine Learning Developer' then 1 else 0 end) as Machine_Learning_Developer,
sum(case when JobTitle='Machine Learning Consultant' then 1 else 0 end) as Machine_Learning_Consultant,
sum(case when JobTitle='Machine Learning Software Engineer' then 1 else 0 end) as Machine_Learning_Software_Engineer,
sum(case when JobTitle='Machine Learning Engineer/Data Scientist' then 1 else 0 end) as Machine_Learning_Engineer_Data_Scientist,
sum(case when JobTitle='Machine Learning Data Associate II' then 1 else 0 end) as Machine_Learning_Data_Associate_II,
sum(case when JobTitle='Machine Learning Data Associate I' then 1 else 0 end) as Machine_Learning_Data_Associate_I,
sum(case when JobTitle='Machine Learning Data Associate' then 1 else 0 end) as Machine_Learning_Data_Associate,
sum(case when JobTitle='Machine Learning Data Analyst' then 1 else 0 end) as Machine_Learning_Data_Analyst,
sum(case when JobTitle='Machine Learning Associate' then 1 else 0 end) as Machine_Learning_Associate,
sum(case when JobTitle='Data Scientist - Trainee' then 1 else 0 end) as Data_Scientist_Trainee,
sum(case when JobTitle='Data Science Manager' then 1 else 0 end) as Data_Science_Manager,
sum(case when JobTitle='Data Science Lead' then 1 else 0 end) as Data_Science_Lead,
sum(case when JobTitle='Data Science Associate' then 1 else 0 end) as Data_Science_Associate,
sum(case when JobTitle='Associate Machine Learning Engineer' then 1 else 0 end) as Associate_Machine_Learning_Engineer,
sum(case when JobTitle='Data Science Consultant' then 1 else 0 end) as Data_Science_Consultant,
sum(case when JobTitle='Senior Data Scientist' then 1 else 0 end) as Senior_Data_Scientist
from SalaryDataset
group by 1
"""
df_sql = pd.read_sql_query(sql, con=engine)

sql = """
select *,
case
when a.count_of_branches>3 then "big"
when a.count_of_branches>1 and a.count_of_branches<=3 then "medium"
when a.count_of_branches=1 then "small"
else 'no size' end as size_of_the_company
from
(Select
companyname,
count(distinct location) as count_of_branches
from SalaryDataset
where companyname !='None'
group by 1
order by count(distinct location) desc
) a
"""
df_sql = pd.read_sql_query(sql, con=engine)

sql = "select distinct companyname from salarydataset"
df_sql = pd.read_sql_query(sql, con=engine)
df_sql.head(100)
df_sql.to_csv('company.csv', index=False)
print('run')
code
90136510/cell_9
[ "text_html_output_1.png" ]
import pandas as pd
from sqlalchemy import create_engine

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle',
                        'Salaries Reported': 'SalariesReported'}, inplace=False)
engine = create_engine('sqlite://', echo=False)
df.to_sql('SalaryDataset', con=engine)

sql = "Select * from SalaryDataset limit 5"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select * from SalaryDataset where CompanyName='IBM'"
df_sql = pd.read_sql_query(sql, con=engine)

# Every distinct job title in the dataset
sql = "Select distinct(jobtitle) as job_title from SalaryDataset"
df_sql = pd.read_sql_query(sql, con=engine)
df_sql.head()
code
90136510/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle',
                        'Salaries Reported': 'SalariesReported'}, inplace=False)
df.head()
code
90136510/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
from sqlalchemy import create_engine

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle',
                        'Salaries Reported': 'SalariesReported'}, inplace=False)
engine = create_engine('sqlite://', echo=False)
df.to_sql('SalaryDataset', con=engine)

sql = "Select * from SalaryDataset limit 5"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select * from SalaryDataset where CompanyName='IBM'"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select distinct(jobtitle) as job_title from SalaryDataset"
df_sql = pd.read_sql_query(sql, con=engine)

sql = """
Select
jobtitle,
count(salariesreported) as count_of_reports
from SalaryDataset
group by jobtitle
order by count(salariesreported) desc
"""
df_sql = pd.read_sql_query(sql, con=engine)

# Pivot: count of reports per job title, one column per title, one row per location
sql = """
Select
location,
sum(case when JobTitle='Data Scientist' then 1 else 0 end) as Data_Scientist,
sum(case when JobTitle='Data Analyst' then 1 else 0 end) as Data_Analyst,
sum(case when JobTitle='Data Engineer' then 1 else 0 end) as Data_Engineer,
sum(case when JobTitle='Machine Learning Engineer' then 1 else 0 end) as Machine_Learning_Engineer,
sum(case when JobTitle='Junior Data Scientist' then 1 else 0 end) as Junior_Data_Scientist,
sum(case when JobTitle='Senior Machine Learning Engineer' then 1 else 0 end) as Senior_Machine_Learning_Engineer,
sum(case when JobTitle='Lead Data Scientist' then 1 else 0 end) as Lead_Data_Scientist,
sum(case when JobTitle='Software Engineer- Machine learning' then 1 else 0 end) as Software_Engineer_Machine_learning,
sum(case when JobTitle='Machine Learning Scientist' then 1 else 0 end) as Machine_Learning_Scientist,
sum(case when JobTitle='Machine Learning Developer' then 1 else 0 end) as Machine_Learning_Developer,
sum(case when JobTitle='Machine Learning Consultant' then 1 else 0 end) as Machine_Learning_Consultant,
sum(case when JobTitle='Machine Learning Software Engineer' then 1 else 0 end) as Machine_Learning_Software_Engineer,
sum(case when JobTitle='Machine Learning Engineer/Data Scientist' then 1 else 0 end) as Machine_Learning_Engineer_Data_Scientist,
sum(case when JobTitle='Machine Learning Data Associate II' then 1 else 0 end) as Machine_Learning_Data_Associate_II,
sum(case when JobTitle='Machine Learning Data Associate I' then 1 else 0 end) as Machine_Learning_Data_Associate_I,
sum(case when JobTitle='Machine Learning Data Associate' then 1 else 0 end) as Machine_Learning_Data_Associate,
sum(case when JobTitle='Machine Learning Data Analyst' then 1 else 0 end) as Machine_Learning_Data_Analyst,
sum(case when JobTitle='Machine Learning Associate' then 1 else 0 end) as Machine_Learning_Associate,
sum(case when JobTitle='Data Scientist - Trainee' then 1 else 0 end) as Data_Scientist_Trainee,
sum(case when JobTitle='Data Science Manager' then 1 else 0 end) as Data_Science_Manager,
sum(case when JobTitle='Data Science Lead' then 1 else 0 end) as Data_Science_Lead,
sum(case when JobTitle='Data Science Associate' then 1 else 0 end) as Data_Science_Associate,
sum(case when JobTitle='Associate Machine Learning Engineer' then 1 else 0 end) as Associate_Machine_Learning_Engineer,
sum(case when JobTitle='Data Science Consultant' then 1 else 0 end) as Data_Science_Consultant,
sum(case when JobTitle='Senior Data Scientist' then 1 else 0 end) as Senior_Data_Scientist
from SalaryDataset
group by 1
"""
df_sql = pd.read_sql_query(sql, con=engine)
df_sql.head(100)
code
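Not in the notebook: the 26-column CASE pivot above is what pandas computes directly with crosstab, except that crosstab counts every job title rather than a hand-picked subset. A sketch, assuming the raw CSV's location column is named 'Location':

import pandas as pd

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle',
                        'Salaries Reported': 'SalariesReported'})
pivot = pd.crosstab(df['Location'], df['JobTitle'])  # rows: location, columns: job title
print(pivot.head())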
90136510/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90136510/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
from sqlalchemy import create_engine

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle',
                        'Salaries Reported': 'SalariesReported'}, inplace=False)
# Load the dataframe into an in-memory SQLite database so it can be queried with SQL
engine = create_engine('sqlite://', echo=False)
df.to_sql('SalaryDataset', con=engine)

sql = "Select * from SalaryDataset limit 5"
df_sql = pd.read_sql_query(sql, con=engine)
df_sql.head()
code
90136510/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
from sqlalchemy import create_engine

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle',
                        'Salaries Reported': 'SalariesReported'}, inplace=False)
engine = create_engine('sqlite://', echo=False)
df.to_sql('SalaryDataset', con=engine)

sql = "Select * from SalaryDataset limit 5"
df_sql = pd.read_sql_query(sql, con=engine)

# All rows reported for IBM
sql = "Select * from SalaryDataset where CompanyName='IBM'"
df_sql = pd.read_sql_query(sql, con=engine)
df_sql.head()
code
90136510/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
from sqlalchemy import create_engine

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle',
                        'Salaries Reported': 'SalariesReported'}, inplace=False)
engine = create_engine('sqlite://', echo=False)
df.to_sql('SalaryDataset', con=engine)

sql = "Select * from SalaryDataset limit 5"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select * from SalaryDataset where CompanyName='IBM'"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select distinct(jobtitle) as job_title from SalaryDataset"
df_sql = pd.read_sql_query(sql, con=engine)

sql = """
Select
jobtitle,
count(salariesreported) as count_of_reports
from SalaryDataset
group by jobtitle
order by count(salariesreported) desc
"""
df_sql = pd.read_sql_query(sql, con=engine)

sql = """
Select
location,
sum(case when JobTitle='Data Scientist' then 1 else 0 end) as Data_Scientist,
sum(case when JobTitle='Data Analyst' then 1 else 0 end) as Data_Analyst,
sum(case when JobTitle='Data Engineer' then 1 else 0 end) as Data_Engineer,
sum(case when JobTitle='Machine Learning Engineer' then 1 else 0 end) as Machine_Learning_Engineer,
sum(case when JobTitle='Junior Data Scientist' then 1 else 0 end) as Junior_Data_Scientist,
sum(case when JobTitle='Senior Machine Learning Engineer' then 1 else 0 end) as Senior_Machine_Learning_Engineer,
sum(case when JobTitle='Lead Data Scientist' then 1 else 0 end) as Lead_Data_Scientist,
sum(case when JobTitle='Software Engineer- Machine learning' then 1 else 0 end) as Software_Engineer_Machine_learning,
sum(case when JobTitle='Machine Learning Scientist' then 1 else 0 end) as Machine_Learning_Scientist,
sum(case when JobTitle='Machine Learning Developer' then 1 else 0 end) as Machine_Learning_Developer,
sum(case when JobTitle='Machine Learning Consultant' then 1 else 0 end) as Machine_Learning_Consultant,
sum(case when JobTitle='Machine Learning Software Engineer' then 1 else 0 end) as Machine_Learning_Software_Engineer,
sum(case when JobTitle='Machine Learning Engineer/Data Scientist' then 1 else 0 end) as Machine_Learning_Engineer_Data_Scientist,
sum(case when JobTitle='Machine Learning Data Associate II' then 1 else 0 end) as Machine_Learning_Data_Associate_II,
sum(case when JobTitle='Machine Learning Data Associate I' then 1 else 0 end) as Machine_Learning_Data_Associate_I,
sum(case when JobTitle='Machine Learning Data Associate' then 1 else 0 end) as Machine_Learning_Data_Associate,
sum(case when JobTitle='Machine Learning Data Analyst' then 1 else 0 end) as Machine_Learning_Data_Analyst,
sum(case when JobTitle='Machine Learning Associate' then 1 else 0 end) as Machine_Learning_Associate,
sum(case when JobTitle='Data Scientist - Trainee' then 1 else 0 end) as Data_Scientist_Trainee,
sum(case when JobTitle='Data Science Manager' then 1 else 0 end) as Data_Science_Manager,
sum(case when JobTitle='Data Science Lead' then 1 else 0 end) as Data_Science_Lead,
sum(case when JobTitle='Data Science Associate' then 1 else 0 end) as Data_Science_Associate,
sum(case when JobTitle='Associate Machine Learning Engineer' then 1 else 0 end) as Associate_Machine_Learning_Engineer,
sum(case when JobTitle='Data Science Consultant' then 1 else 0 end) as Data_Science_Consultant,
sum(case when JobTitle='Senior Data Scientist' then 1 else 0 end) as Senior_Data_Scientist
from SalaryDataset
group by 1
"""
df_sql = pd.read_sql_query(sql, con=engine)

sql = """
select *,
case
when a.count_of_branches>3 then "big"
when a.count_of_branches>1 and a.count_of_branches<=3 then "medium"
when a.count_of_branches=1 then "small"
else 'no size' end as size_of_the_company
from
(Select
companyname,
count(distinct location) as count_of_branches
from SalaryDataset
where companyname !='None'
group by 1
order by count(distinct location) desc
) a
"""
df_sql = pd.read_sql_query(sql, con=engine)

sql = "select distinct companyname from salarydataset"
df_sql = pd.read_sql_query(sql, con=engine)
df_sql.to_csv('company.csv', index=False)

# Branch counts per location for a selection of large employers
sql = """
select
a.location,
sum(case when a.companyname='Mu Sigma' then 1 else 0 end) as Mu_Sigma,
sum(case when a.companyname='IBM' then 1 else 0 end) as IBM,
sum(case when a.companyname='Tata Consultancy Services' then 1 else 0 end) as Tata_Consultancy_Services,
sum(case when a.companyname='Impact Analytics' then 1 else 0 end) as Impact_Analytics,
sum(case when a.companyname='Accenture' then 1 else 0 end) as Accenture,
sum(case when a.companyname='Infosys' then 1 else 0 end) as Infosys,
sum(case when a.companyname='Capgemini' then 1 else 0 end) as Capgemini,
sum(case when a.companyname='Cognizant Technology Solutions' then 1 else 0 end) as Cognizant_Technology_Solutions
from
(Select
companyname,
location,
count(distinct location) as count_of_branches
from SalaryDataset
where companyname!='None'
group by 1,2
order by count(distinct location) desc) a
group by 1
"""
df_sql = pd.read_sql_query(sql, con=engine)
df_sql.head(100)
code
90136510/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.head()
code
90136510/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
from sqlalchemy import create_engine

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle',
                        'Salaries Reported': 'SalariesReported'}, inplace=False)
engine = create_engine('sqlite://', echo=False)
df.to_sql('SalaryDataset', con=engine)

sql = "Select * from SalaryDataset limit 5"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select * from SalaryDataset where CompanyName='IBM'"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select distinct(jobtitle) as job_title from SalaryDataset"
df_sql = pd.read_sql_query(sql, con=engine)

# Number of salary reports per job title, most reported first
sql = """
Select
jobtitle,
count(salariesreported) as count_of_reports
from SalaryDataset
group by jobtitle
order by count(salariesreported) desc
"""
df_sql = pd.read_sql_query(sql, con=engine)
df_sql.head()
code
90136510/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
from sqlalchemy import create_engine

df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle',
                        'Salaries Reported': 'SalariesReported'}, inplace=False)
engine = create_engine('sqlite://', echo=False)
df.to_sql('SalaryDataset', con=engine)

sql = "Select * from SalaryDataset limit 5"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select * from SalaryDataset where CompanyName='IBM'"
df_sql = pd.read_sql_query(sql, con=engine)

sql = "Select distinct(jobtitle) as job_title from SalaryDataset"
df_sql = pd.read_sql_query(sql, con=engine)

sql = """
Select
jobtitle,
count(salariesreported) as count_of_reports
from SalaryDataset
group by jobtitle
order by count(salariesreported) desc
"""
df_sql = pd.read_sql_query(sql, con=engine)

sql = """
Select
location,
sum(case when JobTitle='Data Scientist' then 1 else 0 end) as Data_Scientist,
sum(case when JobTitle='Data Analyst' then 1 else 0 end) as Data_Analyst,
sum(case when JobTitle='Data Engineer' then 1 else 0 end) as Data_Engineer,
sum(case when JobTitle='Machine Learning Engineer' then 1 else 0 end) as Machine_Learning_Engineer,
sum(case when JobTitle='Junior Data Scientist' then 1 else 0 end) as Junior_Data_Scientist,
sum(case when JobTitle='Senior Machine Learning Engineer' then 1 else 0 end) as Senior_Machine_Learning_Engineer,
sum(case when JobTitle='Lead Data Scientist' then 1 else 0 end) as Lead_Data_Scientist,
sum(case when JobTitle='Software Engineer- Machine learning' then 1 else 0 end) as Software_Engineer_Machine_learning,
sum(case when JobTitle='Machine Learning Scientist' then 1 else 0 end) as Machine_Learning_Scientist,
sum(case when JobTitle='Machine Learning Developer' then 1 else 0 end) as Machine_Learning_Developer,
sum(case when JobTitle='Machine Learning Consultant' then 1 else 0 end) as Machine_Learning_Consultant,
sum(case when JobTitle='Machine Learning Software Engineer' then 1 else 0 end) as Machine_Learning_Software_Engineer,
sum(case when JobTitle='Machine Learning Engineer/Data Scientist' then 1 else 0 end) as Machine_Learning_Engineer_Data_Scientist,
sum(case when JobTitle='Machine Learning Data Associate II' then 1 else 0 end) as Machine_Learning_Data_Associate_II,
sum(case when JobTitle='Machine Learning Data Associate I' then 1 else 0 end) as Machine_Learning_Data_Associate_I,
sum(case when JobTitle='Machine Learning Data Associate' then 1 else 0 end) as Machine_Learning_Data_Associate,
sum(case when JobTitle='Machine Learning Data Analyst' then 1 else 0 end) as Machine_Learning_Data_Analyst,
sum(case when JobTitle='Machine Learning Associate' then 1 else 0 end) as Machine_Learning_Associate,
sum(case when JobTitle='Data Scientist - Trainee' then 1 else 0 end) as Data_Scientist_Trainee,
sum(case when JobTitle='Data Science Manager' then 1 else 0 end) as Data_Science_Manager,
sum(case when JobTitle='Data Science Lead' then 1 else 0 end) as Data_Science_Lead,
sum(case when JobTitle='Data Science Associate' then 1 else 0 end) as Data_Science_Associate,
sum(case when JobTitle='Associate Machine Learning Engineer' then 1 else 0 end) as Associate_Machine_Learning_Engineer,
sum(case when JobTitle='Data Science Consultant' then 1 else 0 end) as Data_Science_Consultant,
sum(case when JobTitle='Senior Data Scientist' then 1 else 0 end) as Senior_Data_Scientist
from SalaryDataset
group by 1
"""
df_sql = pd.read_sql_query(sql, con=engine)

# Classify companies by how many distinct locations they report
sql = """
select *,
case
when a.count_of_branches>3 then "big"
when a.count_of_branches>1 and a.count_of_branches<=3 then "medium"
when a.count_of_branches=1 then "small"
else 'no size' end as size_of_the_company
from
(Select
companyname,
count(distinct location) as count_of_branches
from SalaryDataset
where companyname !='None'
group by 1
order by count(distinct location) desc
) a
"""
df_sql = pd.read_sql_query(sql, con=engine)
df_sql.head(100)
code
90136510/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # import the pandas library for data processing and CSV file I/O
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df = df.rename(columns={'Company Name': 'CompanyName', 'Job Title': 'JobTitle', 'Salaries Reported': 'SalariesReported'}, inplace=False)
print('The columns of the dataset are: ', df.columns)
code
90144122/cell_13
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
import pandas as pd
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
X = np.array([e[0] for e in data]).astype('float32')
y = np.array([e[1] for e in data])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = np.resize(X_train, (len(X_train), 50, 50, 1))
X_test = np.resize(X_test, (len(X_test), 50, 50, 1))
X_train /= 255.0
X_test /= 255.0
model1 = Sequential()
model1.add(Conv2D(64, kernel_size=(3, 3), input_shape=X_train[0].shape))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2, 2)))
model1.add(Dropout(0.5))
model1.add(Conv2D(64, kernel_size=(3, 3)))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2, 2)))
model1.add(Flatten())
model1.add(Dense(64))
model1.add(Activation('relu'))
model1.add(Dropout(0.3))
model1.add(Dense(1))
model1.add(Activation('sigmoid'))
model1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history1 = model1.fit(X_train, y_train, batch_size=32, epochs=10, validation_split=0.2)
history_df1 = pd.DataFrame(history1.history)
model2 = Sequential()
model2.add(Conv2D(128, kernel_size=(3, 3), input_shape=X_train[0].shape))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Flatten())
model2.add(Dense(128))
model2.add(Activation('relu'))
model2.add(Dropout(0.3))
model2.add(Dense(1))
model2.add(Activation('sigmoid'))
model2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history2 = model2.fit(X_train, y_train, batch_size=64, epochs=40, validation_split=0.2)
history_df2 = pd.DataFrame(history2.history)
model3 = Sequential()
model3.add(Conv2D(256, kernel_size=(3, 2), input_shape=X_train[0].shape))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Dropout(0.3))
model3.add(Conv2D(256, kernel_size=(2, 3)))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Dropout(0.5))
model3.add(Conv2D(256, kernel_size=(2, 2)))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Dropout(0.5))
model3.add(Conv2D(256, kernel_size=(2, 2)))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Flatten())
model3.add(Dense(128))
model3.add(Activation('relu'))
model3.add(Dropout(0.5))
model3.add(Dense(1))
model3.add(Activation('sigmoid'))
model3.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history3 = model3.fit(X_train, y_train, batch_size=256, epochs=50, validation_split=0.2)
history_df3 = pd.DataFrame(history3.history)
history_df3.loc[:, ['loss', 'val_loss']].plot(title='Loss')
history_df3.loc[:, ['accuracy', 'val_accuracy']].plot(title='Accuracy')
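# Note: validation_split in Keras holds out the last 20% of X_train (taken
# before any shuffling) as the validation set for each of the fit() calls above.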
code
90144122/cell_9
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
X = np.array([e[0] for e in data]).astype('float32')
y = np.array([e[1] for e in data])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = np.resize(X_train, (len(X_train), 50, 50, 1))
X_test = np.resize(X_test, (len(X_test), 50, 50, 1))
X_train /= 255.0
X_test /= 255.0
model2 = Sequential()
model2.add(Conv2D(128, kernel_size=(3, 3), input_shape=X_train[0].shape))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Flatten())
model2.add(Dense(128))
model2.add(Activation('relu'))
model2.add(Dropout(0.3))
model2.add(Dense(1))
model2.add(Activation('sigmoid'))
model2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history2 = model2.fit(X_train, y_train, batch_size=64, epochs=40, validation_split=0.2)
code
90144122/cell_6
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
X = np.array([e[0] for e in data]).astype('float32')
y = np.array([e[1] for e in data])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = np.resize(X_train, (len(X_train), 50, 50, 1))
X_test = np.resize(X_test, (len(X_test), 50, 50, 1))
X_train /= 255.0
X_test /= 255.0
model1 = Sequential()
model1.add(Conv2D(64, kernel_size=(3, 3), input_shape=X_train[0].shape))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2, 2)))
model1.add(Dropout(0.5))
model1.add(Conv2D(64, kernel_size=(3, 3)))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2, 2)))
model1.add(Flatten())
model1.add(Dense(64))
model1.add(Activation('relu'))
model1.add(Dropout(0.3))
model1.add(Dense(1))
model1.add(Activation('sigmoid'))
model1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history1 = model1.fit(X_train, y_train, batch_size=32, epochs=10, validation_split=0.2)
code
90144122/cell_11
[ "text_plain_output_1.png" ]
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
X = np.array([e[0] for e in data]).astype('float32')
y = np.array([e[1] for e in data])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = np.resize(X_train, (len(X_train), 50, 50, 1))
X_test = np.resize(X_test, (len(X_test), 50, 50, 1))
X_train /= 255.0
X_test /= 255.0
model2 = Sequential()
model2.add(Conv2D(128, kernel_size=(3, 3), input_shape=X_train[0].shape))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Flatten())
model2.add(Dense(128))
model2.add(Activation('relu'))
model2.add(Dropout(0.3))
model2.add(Dense(1))
model2.add(Activation('sigmoid'))
model2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history2 = model2.fit(X_train, y_train, batch_size=64, epochs=40, validation_split=0.2)
y_pred2 = model2.predict(X_test) >= 0.5
print('Confusion matrix:\n', confusion_matrix(y_test, y_pred2))
print('Classification report:\n', classification_report(y_test, y_pred2))
code
90144122/cell_7
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
import pandas as pd
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
X = np.array([e[0] for e in data]).astype('float32')
y = np.array([e[1] for e in data])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = np.resize(X_train, (len(X_train), 50, 50, 1))
X_test = np.resize(X_test, (len(X_test), 50, 50, 1))
X_train /= 255.0
X_test /= 255.0
model1 = Sequential()
model1.add(Conv2D(64, kernel_size=(3, 3), input_shape=X_train[0].shape))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2, 2)))
model1.add(Dropout(0.5))
model1.add(Conv2D(64, kernel_size=(3, 3)))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2, 2)))
model1.add(Flatten())
model1.add(Dense(64))
model1.add(Activation('relu'))
model1.add(Dropout(0.3))
model1.add(Dense(1))
model1.add(Activation('sigmoid'))
model1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history1 = model1.fit(X_train, y_train, batch_size=32, epochs=10, validation_split=0.2)
history_df1 = pd.DataFrame(history1.history)
history_df1.loc[:, ['loss', 'val_loss']].plot(title='Loss')
history_df1.loc[:, ['accuracy', 'val_accuracy']].plot(title='Accuracy')
code
90144122/cell_8
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
X = np.array([e[0] for e in data]).astype('float32')
y = np.array([e[1] for e in data])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = np.resize(X_train, (len(X_train), 50, 50, 1))
X_test = np.resize(X_test, (len(X_test), 50, 50, 1))
X_train /= 255.0
X_test /= 255.0
model1 = Sequential()
model1.add(Conv2D(64, kernel_size=(3, 3), input_shape=X_train[0].shape))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2, 2)))
model1.add(Dropout(0.5))
model1.add(Conv2D(64, kernel_size=(3, 3)))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2, 2)))
model1.add(Flatten())
model1.add(Dense(64))
model1.add(Activation('relu'))
model1.add(Dropout(0.3))
model1.add(Dense(1))
model1.add(Activation('sigmoid'))
model1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history1 = model1.fit(X_train, y_train, batch_size=32, epochs=10, validation_split=0.2)
y_pred1 = model1.predict(X_test) >= 0.5
print('Confusion matrix:\n', confusion_matrix(y_test, y_pred1))
print('Classification report:\n', classification_report(y_test, y_pred1))
code
90144122/cell_3
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
fig, axes = plt.subplots(3, 3, figsize=(15, 15))
index = 75
for i in range(3):
    for j in range(3):
        axes[i, j].imshow(data[index][0], cmap='gray')
        index += 1
plt.show()
code
90144122/cell_14
[ "text_plain_output_1.png" ]
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
X = np.array([e[0] for e in data]).astype('float32')
y = np.array([e[1] for e in data])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = np.resize(X_train, (len(X_train), 50, 50, 1))
X_test = np.resize(X_test, (len(X_test), 50, 50, 1))
X_train /= 255.0
X_test /= 255.0
model3 = Sequential()
model3.add(Conv2D(256, kernel_size=(3, 2), input_shape=X_train[0].shape))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Dropout(0.3))
model3.add(Conv2D(256, kernel_size=(2, 3)))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Dropout(0.5))
model3.add(Conv2D(256, kernel_size=(2, 2)))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Dropout(0.5))
model3.add(Conv2D(256, kernel_size=(2, 2)))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Flatten())
model3.add(Dense(128))
model3.add(Activation('relu'))
model3.add(Dropout(0.5))
model3.add(Dense(1))
model3.add(Activation('sigmoid'))
model3.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history3 = model3.fit(X_train, y_train, batch_size=256, epochs=50, validation_split=0.2)
y_pred3 = model3.predict(X_test) >= 0.5
print('Confusion matrix:\n', confusion_matrix(y_test, y_pred3))
print('Classification report:\n', classification_report(y_test, y_pred3))
code
90144122/cell_10
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
import pandas as pd
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
X = np.array([e[0] for e in data]).astype('float32')
y = np.array([e[1] for e in data])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = np.resize(X_train, (len(X_train), 50, 50, 1))
X_test = np.resize(X_test, (len(X_test), 50, 50, 1))
X_train /= 255.0
X_test /= 255.0
model1 = Sequential()
model1.add(Conv2D(64, kernel_size=(3, 3), input_shape=X_train[0].shape))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2, 2)))
model1.add(Dropout(0.5))
model1.add(Conv2D(64, kernel_size=(3, 3)))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2, 2)))
model1.add(Flatten())
model1.add(Dense(64))
model1.add(Activation('relu'))
model1.add(Dropout(0.3))
model1.add(Dense(1))
model1.add(Activation('sigmoid'))
model1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history1 = model1.fit(X_train, y_train, batch_size=32, epochs=10, validation_split=0.2)
history_df1 = pd.DataFrame(history1.history)
model2 = Sequential()
model2.add(Conv2D(128, kernel_size=(3, 3), input_shape=X_train[0].shape))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Conv2D(128, kernel_size=(3, 3)))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Flatten())
model2.add(Dense(128))
model2.add(Activation('relu'))
model2.add(Dropout(0.3))
model2.add(Dense(1))
model2.add(Activation('sigmoid'))
model2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history2 = model2.fit(X_train, y_train, batch_size=64, epochs=40, validation_split=0.2)
history_df2 = pd.DataFrame(history2.history)
history_df2.loc[:, ['loss', 'val_loss']].plot(title='Loss')
history_df2.loc[:, ['accuracy', 'val_accuracy']].plot(title='Accuracy')
code
90144122/cell_12
[ "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
X = np.array([e[0] for e in data]).astype('float32')
y = np.array([e[1] for e in data])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = np.resize(X_train, (len(X_train), 50, 50, 1))
X_test = np.resize(X_test, (len(X_test), 50, 50, 1))
X_train /= 255.0
X_test /= 255.0
model3 = Sequential()
model3.add(Conv2D(256, kernel_size=(3, 2), input_shape=X_train[0].shape))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Dropout(0.3))
model3.add(Conv2D(256, kernel_size=(2, 3)))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Dropout(0.5))
model3.add(Conv2D(256, kernel_size=(2, 2)))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Dropout(0.5))
model3.add(Conv2D(256, kernel_size=(2, 2)))
model3.add(Activation('relu'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
model3.add(Flatten())
model3.add(Dense(128))
model3.add(Activation('relu'))
model3.add(Dropout(0.5))
model3.add(Dense(1))
model3.add(Activation('sigmoid'))
model3.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history3 = model3.fit(X_train, y_train, batch_size=256, epochs=50, validation_split=0.2)
code
90144122/cell_5
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np
import pickle
data = pickle.load(open('../input/dog-and-cat/DOGnCAT50x50.pickle', 'rb'))
X = np.array([e[0] for e in data]).astype('float32')
y = np.array([e[1] for e in data])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = np.resize(X_train, (len(X_train), 50, 50, 1))
X_test = np.resize(X_test, (len(X_test), 50, 50, 1))
X_train /= 255.0
X_test /= 255.0
print('X_train shape: ', X_train.shape)
print('y_train shape: ', y_train.shape)
print('X_test shape: ', X_test.shape)
print('y_test shape: ', y_test.shape)
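# Note: np.resize fills by repeating data when the requested size differs from
# the input; each sample here is already 50x50, so reshaping to
# (len, 50, 50, 1) would be the stricter equivalent of the calls above.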
code
90102125/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/adult-census-income/adult.csv')
df.head()
code
90102125/cell_24
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
df = pd.read_csv('../input/adult-census-income/adult.csv')
low = '<=50K'
y = df['income'].apply(lambda x: 0 if x == low else 1)
X = df.drop(['income'], axis=1)
X = pd.get_dummies(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
clf = MLPClassifier(random_state=1, max_iter=3000)
parameter_grid = {'hidden_layer_sizes': [3, (3, 3)]}
gs = GridSearchCV(clf, parameter_grid, cv=5)
gs = gs.fit(X_train, y_train)
clf_best = gs.best_estimator_
clf_best = clf_best.fit(X_train, y_train)
y_pred = clf_best.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
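# Note: in scikit-learn, hidden_layer_sizes=3 means a single hidden layer of
# 3 units, while (3, 3) means two hidden layers of 3 units each, so this grid
# compares network depth at a fixed layer width.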
code
90102125/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/adult-census-income/adult.csv')
X = df.drop(['income'], axis=1)
X = pd.get_dummies(X)
X.head(5)
code
90102125/cell_22
[ "text_html_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
df = pd.read_csv('../input/adult-census-income/adult.csv')
low = '<=50K'
y = df['income'].apply(lambda x: 0 if x == low else 1)
X = df.drop(['income'], axis=1)
X = pd.get_dummies(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
clf = MLPClassifier(random_state=1, max_iter=3000)
parameter_grid = {'hidden_layer_sizes': [3, (3, 3)]}
gs = GridSearchCV(clf, parameter_grid, cv=5)
gs = gs.fit(X_train, y_train)
clf_best = gs.best_estimator_
print('best model:', clf_best.get_params())
clf_best = clf_best.fit(X_train, y_train)
code
90102125/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/adult-census-income/adult.csv')
low = '<=50K'
y = df['income'].apply(lambda x: 0 if x == low else 1)
y.head(5)
code
90102125/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/adult-census-income/adult.csv')
X = df.drop(['income'], axis=1)
X.head(5)
code
34137153/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
miss_values = train.isna().sum().sort_values(ascending=False).head(22).reset_index()
miss_values.rename(columns={'index': 'x', 0: 'y'}, inplace=True)
miss_values
code
34137153/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34137153/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.describe()
code
34137153/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
plt.figure(figsize=(4, 7))
color = sns.dark_palette('deeppink', reverse=True, n_colors=18)
ax = sns.barplot(x='y', y='x', data=miss_values, palette=color, orient='h')
plt.xticks(rotation=90)
plt.title('Columns Missing Values')
sns.despine()
plt.show()
code
74052153/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
print(train.columns)
code
74052153/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
train.dtypes.unique()
list(train.select_dtypes(include=['int64']).columns)
list(train.select_dtypes(include=['float64']).columns)
list(train.select_dtypes(include=['O']).columns)
train.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
test.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
train.isnull().mean().round(4).mul(100).sort_values(ascending=False)
train = train.loc[:, train.isnull().mean() < 0.4]
test = test.loc[:, test.isnull().mean() < 0.4]
train.shape
print(train.isnull().sum().sort_values(ascending=False))
code
74052153/cell_40
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
train.dtypes.unique()
list(train.select_dtypes(include=['int64']).columns)
list(train.select_dtypes(include=['float64']).columns)
list(train.select_dtypes(include=['O']).columns)
train.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
test.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
train.isnull().mean().round(4).mul(100).sort_values(ascending=False)
train = train.loc[:, train.isnull().mean() < 0.4]
test = test.loc[:, test.isnull().mean() < 0.4]
train.shape
numerical_transformer = SimpleImputer(missing_values=np.nan, strategy='mean')
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
categorical_cols = [cname for cname in X_train.columns if X_train[cname].nunique() < 10 and X_train[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train.columns if X_train[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train[my_cols].copy()
X_test = X_test[my_cols].copy()
test = test[my_cols].copy()
preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols)])
X_train = preprocessor.fit_transform(X_train)
# fit the preprocessing on the training data only, then reuse it on the test sets
X_test = preprocessor.transform(X_test)
test = preprocessor.transform(test)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
params = {'max_depth': 6, 'min_child_weight': 1, 'eta': 0.3, 'subsample': 1, 'colsample_bytree': 1, 'objective': 'reg:linear'}
num_boost_round = 999
model = xgb.train(params, dtrain, num_boost_round=num_boost_round, evals=[(dtest, 'Test')], early_stopping_rounds=10)
cv_results = xgb.cv(params, dtrain, num_boost_round=num_boost_round, seed=42, nfold=5, metrics={'mae'}, early_stopping_rounds=10)
cv_results
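# Note: the 'reg:linear' objective used above is deprecated in recent XGBoost
# releases; 'reg:squarederror' is the equivalent modern name.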
code
74052153/cell_39
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
train.dtypes.unique()
list(train.select_dtypes(include=['int64']).columns)
list(train.select_dtypes(include=['float64']).columns)
list(train.select_dtypes(include=['O']).columns)
train.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
test.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
train.isnull().mean().round(4).mul(100).sort_values(ascending=False)
train = train.loc[:, train.isnull().mean() < 0.4]
test = test.loc[:, test.isnull().mean() < 0.4]
train.shape
numerical_transformer = SimpleImputer(missing_values=np.nan, strategy='mean')
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
categorical_cols = [cname for cname in X_train.columns if X_train[cname].nunique() < 10 and X_train[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train.columns if X_train[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train[my_cols].copy()
X_test = X_test[my_cols].copy()
test = test[my_cols].copy()
preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols)])
X_train = preprocessor.fit_transform(X_train)
# fit the preprocessing on the training data only, then reuse it on the test sets
X_test = preprocessor.transform(X_test)
test = preprocessor.transform(test)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
params = {'max_depth': 6, 'min_child_weight': 1, 'eta': 0.3, 'subsample': 1, 'colsample_bytree': 1, 'objective': 'reg:linear'}
num_boost_round = 999
model = xgb.train(params, dtrain, num_boost_round=num_boost_round, evals=[(dtest, 'Test')], early_stopping_rounds=10)
print('Best MAE: {:.2f} with {} rounds'.format(model.best_score, model.best_iteration + 1))
code
74052153/cell_41
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
train.dtypes.unique()
list(train.select_dtypes(include=['int64']).columns)
list(train.select_dtypes(include=['float64']).columns)
list(train.select_dtypes(include=['O']).columns)
train.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
test.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
train.isnull().mean().round(4).mul(100).sort_values(ascending=False)
train = train.loc[:, train.isnull().mean() < 0.4]
test = test.loc[:, test.isnull().mean() < 0.4]
train.shape
numerical_transformer = SimpleImputer(missing_values=np.nan, strategy='mean')
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
categorical_cols = [cname for cname in X_train.columns if X_train[cname].nunique() < 10 and X_train[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train.columns if X_train[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train[my_cols].copy()
X_test = X_test[my_cols].copy()
test = test[my_cols].copy()
preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols)])
X_train = preprocessor.fit_transform(X_train)
# fit the preprocessing on the training data only, then reuse it on the test sets
X_test = preprocessor.transform(X_test)
test = preprocessor.transform(test)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
params = {'max_depth': 6, 'min_child_weight': 1, 'eta': 0.3, 'subsample': 1, 'colsample_bytree': 1, 'objective': 'reg:linear'}
num_boost_round = 999
model = xgb.train(params, dtrain, num_boost_round=num_boost_round, evals=[(dtest, 'Test')], early_stopping_rounds=10)
cv_results = xgb.cv(params, dtrain, num_boost_round=num_boost_round, seed=42, nfold=5, metrics={'mae'}, early_stopping_rounds=10)
cv_results
cv_results['test-mae-mean'].min()
code
74052153/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
print(train.info())
code
74052153/cell_7
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.head()
code
74052153/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
train.dtypes.unique()
list(train.select_dtypes(include=['int64']).columns)
list(train.select_dtypes(include=['float64']).columns)
list(train.select_dtypes(include=['O']).columns)
code
74052153/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
print('The train dataset has the shape', train.shape)
print('The test dataset has the shape', test.shape)
code
74052153/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
train.dtypes.unique()
list(train.select_dtypes(include=['int64']).columns)
list(train.select_dtypes(include=['float64']).columns)
code
74052153/cell_35
[ "text_plain_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
import numpy as np
numerical_transformer = SimpleImputer(missing_values=np.nan, strategy='mean')
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
# Baseline: predict the mean training price for every test sample
mean_train = np.mean(y_train)
baseline_predictions = np.ones(y_test.shape) * mean_train
mae_baseline = mean_absolute_error(y_test, baseline_predictions)
print('Baseline MAE is {:.2f}'.format(mae_baseline))
code
74052153/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
train.dtypes.unique()
list(train.select_dtypes(include=['int64']).columns)
list(train.select_dtypes(include=['float64']).columns)
list(train.select_dtypes(include=['O']).columns)
train.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
test.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
train.isnull().mean().round(4).mul(100).sort_values(ascending=False)
train = train.loc[:, train.isnull().mean() < 0.4]
test = test.loc[:, test.isnull().mean() < 0.4]
train.shape
code
74052153/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
train.dtypes.unique()
list(train.select_dtypes(include=['int64']).columns)
code
74052153/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
train.dtypes.unique()
list(train.select_dtypes(include=['int64']).columns)
list(train.select_dtypes(include=['float64']).columns)
list(train.select_dtypes(include=['O']).columns)
train.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
test.drop(['Id', 'YrSold', 'MoSold'], inplace=True, axis=1)
train.isnull().mean().round(4).mul(100).sort_values(ascending=False)
code
74052153/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
code
74052153/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/home-data-for-ml-course/train.csv')
test = pd.read_csv('../input/home-data-for-ml-course/test.csv')
id = test['Id']
train.describe()
train.dtypes.unique()
code
49124211/cell_9
[ "text_plain_output_1.png" ]
import cv2
import numpy as np
import os
import pandas as pd
df = pd.read_csv('../input/drive-and-act/iccv_activities_3s/activities_3s/kinect_color/tasklevel.chunks_90.split_0.train.csv')
root_path = '../input/drive-and-act/kinect_color/kinect_color/'
sample_rate = 5
for j in range(1):
    file_names = []
    labels = []
    length = []
    for i in range(df.shape[0]):
        if i > 0:
            if root_path + df.iloc[i - 1, 1] + '.mp4' != root_path + df.iloc[i, 1] + '.mp4':
                path_video = root_path + df.iloc[i, 1] + '.mp4'
                cap = cv2.VideoCapture(path_video)
                count_frame = 0
        else:
            path_video = root_path + df.iloc[i, 1] + '.mp4'
            cap = cv2.VideoCapture(path_video)
            count_frame = 0
        frame_start = df.iloc[i, 3]
        frame_end = df.iloc[i, 4]
        label = df.iloc[i, 5]
        length.append(frame_end - frame_start)
        if frame_end - frame_start > 39:
            root_save = '/kaggle/working/Trainsplit_0/' + df.iloc[i, 1] + '/' + df.iloc[i, 1].split('/')[-1] + '-' + str(frame_start) + '-' + str(frame_end - frame_start)
            file_names.append(root_save)
            labels.append(label)
            lenn = 0
            while True:
                ret, frame = cap.read()
                if count_frame >= frame_start and count_frame <= frame_end and ret and ((count_frame - frame_start) % sample_rate == 0) and (lenn < 9):
                    lenn += 1
                    frame = cv2.resize(frame, (256, 256))
                    if not os.path.exists(root_save):
                        os.makedirs(root_save)
                    save_path = df.iloc[i, 1].split('/')[-1] + str(count_frame) + '.jpg'
                    final_path = root_save + '/' + save_path
                    cv2.imwrite(final_path, frame)
                count_frame += 1
                if count_frame >= frame_end or not ret:
                    break
np.save('/kaggle/working/Trainsplit_0_file_names.npy', file_names)
np.save('/kaggle/working/Trainsplit_0_labels.npy', labels)
len(labels)
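# Note: a new VideoCapture is opened (and count_frame reset) only when the
# source video changes between consecutive rows; frame counting therefore
# continues across activity chunks cut from the same video file.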
code
49124211/cell_4
[ "text_html_output_1.png" ]
! nvidia-smi
code
49124211/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/drive-and-act/iccv_activities_3s/activities_3s/kinect_color/tasklevel.chunks_90.split_0.train.csv')
df.head()
code
49124211/cell_2
[ "text_plain_output_1.png" ]
import psutil

def get_size(bytes, suffix='B'):
    factor = 1024
    for unit in ['', 'K', 'M', 'G', 'T', 'P']:
        if bytes < factor:
            return f'{bytes:.2f}{unit}{suffix}'
        bytes /= factor

print('=' * 40, 'Memory Information', '=' * 40)
svmem = psutil.virtual_memory()
print(f'Total: {get_size(svmem.total)}')
print(f'Available: {get_size(svmem.available)}')
print(f'Used: {get_size(svmem.used)}')
print(f'Percentage: {svmem.percent}%')
code
49124211/cell_8
[ "text_plain_output_1.png" ]
import cv2
import numpy as np
import os
import pandas as pd
df = pd.read_csv('../input/drive-and-act/iccv_activities_3s/activities_3s/kinect_color/tasklevel.chunks_90.split_0.train.csv')
root_path = '../input/drive-and-act/kinect_color/kinect_color/'
sample_rate = 5
for j in range(1):
    file_names = []
    labels = []
    length = []
    for i in range(df.shape[0]):
        if i > 0:
            if root_path + df.iloc[i - 1, 1] + '.mp4' != root_path + df.iloc[i, 1] + '.mp4':
                path_video = root_path + df.iloc[i, 1] + '.mp4'
                cap = cv2.VideoCapture(path_video)
                count_frame = 0
        else:
            path_video = root_path + df.iloc[i, 1] + '.mp4'
            cap = cv2.VideoCapture(path_video)
            count_frame = 0
        frame_start = df.iloc[i, 3]
        frame_end = df.iloc[i, 4]
        label = df.iloc[i, 5]
        length.append(frame_end - frame_start)
        if frame_end - frame_start > 39:
            root_save = '/kaggle/working/Trainsplit_0/' + df.iloc[i, 1] + '/' + df.iloc[i, 1].split('/')[-1] + '-' + str(frame_start) + '-' + str(frame_end - frame_start)
            file_names.append(root_save)
            labels.append(label)
            lenn = 0
            while True:
                ret, frame = cap.read()
                if count_frame >= frame_start and count_frame <= frame_end and ret and ((count_frame - frame_start) % sample_rate == 0) and (lenn < 9):
                    lenn += 1
                    frame = cv2.resize(frame, (256, 256))
                    if not os.path.exists(root_save):
                        os.makedirs(root_save)
                    save_path = df.iloc[i, 1].split('/')[-1] + str(count_frame) + '.jpg'
                    final_path = root_save + '/' + save_path
                    cv2.imwrite(final_path, frame)
                count_frame += 1
                if count_frame >= frame_end or not ret:
                    break
np.save('/kaggle/working/Trainsplit_0_file_names.npy', file_names)
np.save('/kaggle/working/Trainsplit_0_labels.npy', labels)
np.unique(length, return_counts=True)
code
49124211/cell_10
[ "text_plain_output_1.png" ]
import cv2
import numpy as np
import os
import pandas as pd
df = pd.read_csv('../input/drive-and-act/iccv_activities_3s/activities_3s/kinect_color/tasklevel.chunks_90.split_0.train.csv')
root_path = '../input/drive-and-act/kinect_color/kinect_color/'
sample_rate = 5
for j in range(1):
    file_names = []
    labels = []
    length = []
    for i in range(df.shape[0]):
        if i > 0:
            if root_path + df.iloc[i - 1, 1] + '.mp4' != root_path + df.iloc[i, 1] + '.mp4':
                path_video = root_path + df.iloc[i, 1] + '.mp4'
                cap = cv2.VideoCapture(path_video)
                count_frame = 0
        else:
            path_video = root_path + df.iloc[i, 1] + '.mp4'
            cap = cv2.VideoCapture(path_video)
            count_frame = 0
        frame_start = df.iloc[i, 3]
        frame_end = df.iloc[i, 4]
        label = df.iloc[i, 5]
        length.append(frame_end - frame_start)
        if frame_end - frame_start > 39:
            root_save = '/kaggle/working/Trainsplit_0/' + df.iloc[i, 1] + '/' + df.iloc[i, 1].split('/')[-1] + '-' + str(frame_start) + '-' + str(frame_end - frame_start)
            file_names.append(root_save)
            labels.append(label)
            lenn = 0
            while True:
                ret, frame = cap.read()
                if count_frame >= frame_start and count_frame <= frame_end and ret and ((count_frame - frame_start) % sample_rate == 0) and (lenn < 9):
                    lenn += 1
                    frame = cv2.resize(frame, (256, 256))
                    if not os.path.exists(root_save):
                        os.makedirs(root_save)
                    save_path = df.iloc[i, 1].split('/')[-1] + str(count_frame) + '.jpg'
                    final_path = root_save + '/' + save_path
                    cv2.imwrite(final_path, frame)
                count_frame += 1
                if count_frame >= frame_end or not ret:
                    break
np.save('/kaggle/working/Trainsplit_0_file_names.npy', file_names)
np.save('/kaggle/working/Trainsplit_0_labels.npy', labels)
len(file_names)
code
104119293/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

def theta_m2_m1(n2=1, n1=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = (a / mu_1) ** n1 * ((a + b + c) / (mu_1 + mu_2)) ** n2
    return p

def theta_m1_m2(n2=1, n1=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = (b / mu_2) ** n2 * ((a + b + c) / (mu_1 + mu_2)) ** n1
    return p

def theta_m2_0m1(n2=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = mu_1 / mu_2 * ((a + b + c) / (mu_1 + mu_2)) ** n2
    return p

def theta_m1_0m2(n1=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = mu_1 / mu_2 * ((a + b + c) / (mu_1 + mu_2)) ** n1
    return p

def theta_m2(n2=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = (mu_1 + mu_2) / (a + c) * (b / mu_2) ** n2
    return p

def theta_m1(n1=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = (mu_1 + mu_2) / (b + c) * (a / mu_1) ** n1
    return p

def theta_0s(a=1, b=1, c=1, mu_1=1, mu_2=1):
    A = np.array([[0, a + b + c + mu_2, -(b + c / 2)], [a + b + c + mu_1, 0, -(a + c / 2)], [-mu_1, -mu_2, a + b + c]])
    b = np.array([[(mu_1 + mu_2) * b / (a + c) + 2 * mu_1 ** 2 / mu_2], [(mu_1 + mu_2) * a / (b + c) + 2 * mu_1], [0]])
    A_inv = np.linalg.inv(A)
    ps = np.matmul(A_inv, b)
    return ps

def calc_visschers(a=1, b=1, c=1, mu_1=1, mu_2=1, k=100):
    p_list = []
    t0 = theta_0s(a, b, c, mu_1, mu_2)[2][0]
    p_list.append(t0)
    t1busy = theta_0s(a, b, c, mu_1, mu_2)[0][0]
    p_list.append(t1busy)
    t2busy = theta_0s(a, b, c, mu_1, mu_2)[1][0]
    p_list.append(t2busy)
    tbothbusy = theta_m2_0m1(n2=0, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2) + theta_m1_0m2(n1=0, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
    p_list.append(tbothbusy)
    for n in range(1, k):
        for m in range(1, k):
            t1 = theta_m2_m1(n2=m, n1=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
            t2 = theta_m1_m2(n2=m, n1=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
            p_list.append(t1)
            p_list.append(t2)
        t3 = theta_m2_0m1(n2=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
        p_list.append(t3)
        t4 = theta_m1_0m2(n1=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
        p_list.append(t4)
        t5 = theta_m2(n2=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
        p_list.append(t5)
        t6 = theta_m1(n1=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
        p_list.append(t6)
    x = np.array(p_list)
    x = x / x.sum()
    probs_array = x[0:4]
    return x

rlist = []
for k in range(100, 3000, 10):
    r = calc_visschers(a=4.75, b=4.75, c=0, mu_1=10, mu_2=10, k=k)
    rlist.append(r[0])
r = np.linspace(0, 1)
min_p = np.argmin(avg_wait)
min_val = r[min_p]
print(min_val)

def line(m, x, b):
    return m * x + b

for i in range(min_p - 10, min_p):
    x1 = r[i]
    y1 = avg_wait[i]
    c = 0
    for j in range(min_p + 1, min_p + 10):
        x2 = r[j]
        y2 = avg_wait[j]
        m = (y1 - y2) / (x1 - x2)
        b = (x1 * y2 - x2 * y1) / (x1 - x2)
        lines = []
        true_func = []
        for p in range(i + 1, j):
            c += 1
            x = r[p]
            y_line = line(m=m, x=x, b=b)
            lines.append(y_line)
            y_true = avg_wait[p]
            true_func.append(y_true)
            if y_true > y_line:
                print('non-convex between x1=' + str(x1) + ' and x2=' + str(x2) + ' at x=' + str(x) + ' with line value ' + str(y_line) + ' and average wait value ' + str(y_true))
        if c % 10 == 0:
            plt.plot(r[i + 1:j], lines)
plt.plot(r, avg_wait)
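# Note: avg_wait is assumed to be computed in an earlier cell of this
# notebook; it is not defined here, so this cell cannot run standalone.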
code
104119293/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

def theta_m2_m1(n2=1, n1=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = (a / mu_1) ** n1 * ((a + b + c) / (mu_1 + mu_2)) ** n2
    return p

def theta_m1_m2(n2=1, n1=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = (b / mu_2) ** n2 * ((a + b + c) / (mu_1 + mu_2)) ** n1
    return p

def theta_m2_0m1(n2=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = mu_1 / mu_2 * ((a + b + c) / (mu_1 + mu_2)) ** n2
    return p

def theta_m1_0m2(n1=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = mu_1 / mu_2 * ((a + b + c) / (mu_1 + mu_2)) ** n1
    return p

def theta_m2(n2=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = (mu_1 + mu_2) / (a + c) * (b / mu_2) ** n2
    return p

def theta_m1(n1=1, a=1, b=1, c=1, mu_1=1, mu_2=1):
    p = (mu_1 + mu_2) / (b + c) * (a / mu_1) ** n1
    return p

def theta_0s(a=1, b=1, c=1, mu_1=1, mu_2=1):
    A = np.array([[0, a + b + c + mu_2, -(b + c / 2)], [a + b + c + mu_1, 0, -(a + c / 2)], [-mu_1, -mu_2, a + b + c]])
    b = np.array([[(mu_1 + mu_2) * b / (a + c) + 2 * mu_1 ** 2 / mu_2], [(mu_1 + mu_2) * a / (b + c) + 2 * mu_1], [0]])
    A_inv = np.linalg.inv(A)
    ps = np.matmul(A_inv, b)
    return ps

def calc_visschers(a=1, b=1, c=1, mu_1=1, mu_2=1, k=100):
    p_list = []
    t0 = theta_0s(a, b, c, mu_1, mu_2)[2][0]
    p_list.append(t0)
    t1busy = theta_0s(a, b, c, mu_1, mu_2)[0][0]
    p_list.append(t1busy)
    t2busy = theta_0s(a, b, c, mu_1, mu_2)[1][0]
    p_list.append(t2busy)
    tbothbusy = theta_m2_0m1(n2=0, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2) + theta_m1_0m2(n1=0, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
    p_list.append(tbothbusy)
    for n in range(1, k):
        for m in range(1, k):
            t1 = theta_m2_m1(n2=m, n1=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
            t2 = theta_m1_m2(n2=m, n1=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
            p_list.append(t1)
            p_list.append(t2)
        t3 = theta_m2_0m1(n2=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
        p_list.append(t3)
        t4 = theta_m1_0m2(n1=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
        p_list.append(t4)
        t5 = theta_m2(n2=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
        p_list.append(t5)
        t6 = theta_m1(n1=n, a=a, b=b, c=c, mu_1=mu_1, mu_2=mu_2)
        p_list.append(t6)
    x = np.array(p_list)
    x = x / x.sum()
    probs_array = x[0:4]
    return x

rlist = []
for k in range(100, 3000, 10):
    r = calc_visschers(a=4.75, b=4.75, c=0, mu_1=10, mu_2=10, k=k)
    rlist.append(r[0])
plt.plot(range(100, 3000, 10), rlist)
code
73075873/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60)
plt.xticks(rotation=60)
data['Age_group'] = pd.qcut(data.Age, 5)
g = sns.FacetGrid(data=data, row='House_Ownership', col='Married/Single', height=5, aspect=1.5)
g.map_dataframe(sns.barplot, x='Age_group', y='Risk_Flag', ci=None)
g.set_xticklabels(rotation=60)
data.corr().Risk_Flag.drop(['Risk_Flag', 'Id']).plot.bar()
code
73075873/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
data.head()
code
73075873/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60)
plt.xticks(rotation=60)
data['Age_group'] = pd.qcut(data.Age, 5)
g = sns.FacetGrid(data=data, row='House_Ownership', col='Married/Single', height=5, aspect=1.5)
g.map_dataframe(sns.barplot, x='Age_group', y='Risk_Flag', ci=None)
g.set_xticklabels(rotation=60)
dummies = pd.get_dummies(data[['STATE', 'Profession']])
dummies.drop(dummies.columns[[0, -1]], axis=1, inplace=True)
features = ['Income', 'Age', 'Experience', 'CURRENT_JOB_YRS', 'CURRENT_HOUSE_YRS', 'Married/Single', 'House_Ownership', 'Car_Ownership']
X = pd.concat([data[features], dummies], axis=1)
y = data['Risk_Flag']
X.head()
code
73075873/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.over_sampling import ADASYN
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score, plot_roc_curve, plot_confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60)
plt.xticks(rotation=60)
brf = BalancedRandomForestClassifier().fit(X_train, y_train)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
ax1.set_title('Confusion matrix (Balanced RF)')
ax2.set_title('ROC curve (Balanced RF)')
ax2.plot([0, 1], [0, 1], 'g--', alpha=0.25)
plot_confusion_matrix(brf, X_test, y_test, cmap=plt.cm.Blues, normalize='true', ax=ax1)
plot_roc_curve(brf, X_test, y_test, ax=ax2)
y_pred = brf.predict(X_test)
acc_brf = accuracy_score(y_test, y_pred)
f1_brf = f1_score(y_test, y_pred)
roc_brf = roc_auc_score(y_test, y_pred)
print('Roc_Auc score: %.3f' % roc_brf)
ada = ADASYN(random_state=42)
X_ada, y_ada = ada.fit_resample(X_train, y_train)
rf_ada = RandomForestClassifier().fit(X_ada, y_ada)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
ax1.set_title('Confusion matrix (RF and ADASYN)')
ax2.set_title('ROC curve (RF and ADASYN)')
ax2.plot([0, 1], [0, 1], 'g--', alpha=0.25)
plot_confusion_matrix(rf_ada, X_test, y_test, cmap=plt.cm.Blues, normalize='true', ax=ax1)
plot_roc_curve(rf_ada, X_test, y_test, ax=ax2)
y_pred = rf_ada.predict(X_test)
acc_ada = accuracy_score(y_test, y_pred)
f1_ada = f1_score(y_test, y_pred)
roc_ada = roc_auc_score(y_test, y_pred)
print('Roc_Auc score: %.3f' % roc_ada)
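# Note: plot_confusion_matrix and plot_roc_curve were removed in
# scikit-learn 1.2; ConfusionMatrixDisplay.from_estimator and
# RocCurveDisplay.from_estimator are the current replacements.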
code
73075873/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
from imblearn.combine import SMOTETomek
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.over_sampling import ADASYN
from imblearn.under_sampling import TomekLinks
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score, plot_roc_curve, plot_confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/loan-prediction-based-on-customer-behavior/Training Data.csv')
total = list(data.Risk_Flag.value_counts())
Flag0 = total[0]
Flag1 = total[1]
g = sns.catplot(x='STATE', data=data, height=12, aspect=1.5, kind='count', palette='deep')
g.set_xticklabels(rotation=60)
plt.xticks(rotation=60)
brf = BalancedRandomForestClassifier().fit(X_train, y_train)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
ax1.set_title('Confusion matrix (Balanced RF)')
ax2.set_title('ROC curve (Balanced RF)')
ax2.plot([0, 1], [0, 1], 'g--', alpha=0.25)
plot_confusion_matrix(brf, X_test, y_test, cmap=plt.cm.Blues, normalize='true', ax=ax1)
plot_roc_curve(brf, X_test, y_test, ax=ax2)
y_pred = brf.predict(X_test)
acc_brf = accuracy_score(y_test, y_pred)
f1_brf = f1_score(y_test, y_pred)
roc_brf = roc_auc_score(y_test, y_pred)
print('Roc_Auc score: %.3f' % roc_brf)
ada = ADASYN(random_state=42)
X_ada, y_ada = ada.fit_resample(X_train, y_train)
rf_ada = RandomForestClassifier().fit(X_ada, y_ada)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
ax1.set_title('Confusion matrix (RF and ADASYN)')
ax2.set_title('ROC curve (RF and ADASYN)')
ax2.plot([0, 1], [0, 1], 'g--', alpha=0.25)
plot_confusion_matrix(rf_ada, X_test, y_test, cmap=plt.cm.Blues, normalize='true', ax=ax1)
plot_roc_curve(rf_ada, X_test, y_test, ax=ax2)
y_pred = rf_ada.predict(X_test)
acc_ada = accuracy_score(y_test, y_pred)
f1_ada = f1_score(y_test, y_pred)
roc_ada = roc_auc_score(y_test, y_pred)
print('Roc_Auc score: %.3f' % roc_ada)
smt = SMOTETomek(tomek=TomekLinks(sampling_strategy='majority'))
X_smt, y_smt = smt.fit_resample(X_train, y_train)
rf_smt = RandomForestClassifier().fit(X_smt, y_smt)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
ax1.set_title('Confusion matrix (RF and SMOTETomek)')
ax2.set_title('ROC curve (RF and SMOTETomek)')
ax2.plot([0, 1], [0, 1], 'g--', alpha=0.25)
plot_confusion_matrix(rf_smt, X_test, y_test, cmap=plt.cm.Blues, normalize='true', ax=ax1)
plot_roc_curve(rf_smt, X_test, y_test, ax=ax2)
y_pred = rf_smt.predict(X_test)
acc_smt = accuracy_score(y_test, y_pred)
f1_smt = f1_score(y_test, y_pred)
roc_smt = roc_auc_score(y_test, y_pred)
print('Roc_Auc score: %.3f' % roc_smt)
code
73075873/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
from imblearn.over_sampling import ADASYN
print('Initial size:', X_train.shape)
ada = ADASYN(random_state=42)
X_ada, y_ada = ada.fit_resample(X_train, y_train)
print('Resampled size:', X_ada.shape)
code
73075873/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code