path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 value) |
---|---|---|---|
88086039/cell_46 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv(dirname + '/train.csv')  # 'dirname' is assumed to be set in an earlier cell (Kaggle input path)
test = pd.read_csv(dirname + '/test.csv')
pid_test = test['PassengerId']
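# Percent missing per column: null count divided by total row count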
pct_missing = round(train.isnull().sum() / train.isnull().count() * 100, 1)
pct_missing.sort_values(ascending=False).head()
plt.figure(figsize=(16, 4))
sns.countplot(x='Embarked', data=train, hue='Pclass')
plt.legend(loc=1) | code |
88086039/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv(dirname + '/train.csv')  # 'dirname' is assumed to be set in an earlier cell
test = pd.read_csv(dirname + '/test.csv')
pid_test = test['PassengerId']
pct_missing = round(train.isnull().sum() / train.isnull().count() * 100, 1)
pct_missing.sort_values(ascending=False).head()
plt.figure(figsize=(16, 6))
sns.heatmap(data=train.isnull()) | code |
88086039/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv(dirname + '/train.csv')  # 'dirname' is assumed to be set in an earlier cell
test = pd.read_csv(dirname + '/test.csv')
pid_test = test['PassengerId']
train.head(2) | code |
88086039/cell_22 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv(dirname + '/train.csv')  # 'dirname' is assumed to be set in an earlier cell
test = pd.read_csv(dirname + '/test.csv')
pid_test = test['PassengerId']
print('<< % of missing data >>')
pct_missing = round(train.isnull().sum() / train.isnull().count() * 100, 1)
pct_missing.sort_values(ascending=False).head() | code |
88086039/cell_36 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv(dirname + '/train.csv')  # 'dirname' is assumed to be set in an earlier cell
test = pd.read_csv(dirname + '/test.csv')
pid_test = test['PassengerId']
pct_missing = round(train.isnull().sum() / train.isnull().count() * 100, 1)
pct_missing.sort_values(ascending=False).head()
train['Pclass'].unique() | code |
106205745/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
b = np.array([1, 2, 3])
b
d = np.array([[1], [0], [1]])
d
e = np.array([1, 2, 3])
e
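# e[:, np.newaxis] below reshapes e from shape (3,) into a (3, 1) column vector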
f = e[:, np.newaxis]
f
e + f | code |
106205745/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
b = np.array([1, 2, 3])
b | code |
106205745/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
b = np.array([1, 2, 3])
b
d = np.array([[1], [0], [1]])
d | code |
106205745/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
b = np.array([1, 2, 3])
b
d = np.array([[1], [0], [1]])
d
e = np.array([1, 2, 3])
e
f = e[:, np.newaxis]
f
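# Broadcasting a (3,) row against a (3, 1) column produces a (3, 3) outer-sum matrix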
h = np.array([1, 1, 0])
g = np.array([[1], [2], [1]])
h + g | code |
106205745/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
b = np.array([1, 2, 3])
b
d = np.array([[1], [0], [1]])
d
e = np.array([1, 2, 3])
e | code |
106205745/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
b = np.array([1, 2, 3])
b
d = np.array([[1], [0], [1]])
d
e = np.array([1, 2, 3])
e
f = e[:, np.newaxis]
f | code |
106205745/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a | code |
106205745/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
b = np.array([1, 2, 3])
b
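# b (shape (3,)) is broadcast across every row of a (shape (3, 3))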
c = a + b
c | code |
90118434/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import warnings
sns.set()
warnings.filterwarnings('ignore')
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T
x1 = raw_data.drop(['price', 'date', 'street', 'city', 'statezip', 'country'], axis=1)
y = raw_data['price']
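# add_constant appends an intercept column so the OLS fit includes a bias term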
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit()
results.summary()
raw_data.isnull().sum()
zero_price = raw_data[raw_data['price'] == 0]
zero_price.describe().T
# To decide what to do with the zero values in price,
# it is helpful to know the correlations between the features.
corr_matrix = raw_data.corr()
fig, ax = plt.subplots(figsize = (15,6))
sns.heatmap(corr_matrix, annot = True)
low_price_data = raw_data[(raw_data['sqft_living'] < zero_price['sqft_living'].median()) & (raw_data['bathrooms'] < zero_price['bathrooms'].median()) & (raw_data['sqft_above'] < zero_price['sqft_above'].median())]
low_price = low_price_data.price.median()
high_price_data = raw_data[(raw_data['sqft_living'] > zero_price['sqft_living'].median()) & (raw_data['bathrooms'] > zero_price['bathrooms'].median()) & (raw_data['sqft_above'] > zero_price['sqft_above'].median())]
high_price = high_price_data.price.median()
data_prc = raw_data.copy()
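# Impute zero prices: rows above the zero-price sqft_living median get the high median, the rest get the low one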
data_prc['price'] = np.where((data_prc['price'] == 0) & (data_prc['sqft_living'] > zero_price['sqft_living'].median()), high_price, data_prc.price)
data_prc['price'] = np.where((data_prc['price'] == 0) & (data_prc['sqft_living'] <= zero_price['sqft_living'].median()), low_price, data_prc.price)
data_prc.price[data_prc.price == 0].count() | code |
90118434/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T | code |
90118434/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import warnings
sns.set()
warnings.filterwarnings('ignore')
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T
x1 = raw_data.drop(['price', 'date', 'street', 'city', 'statezip', 'country'], axis=1)
y = raw_data['price']
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit()
results.summary()
raw_data.isnull().sum()
zero_price = raw_data[raw_data['price'] == 0]
zero_price.describe().T
# To decide what to do with the zero values in price,
# it is helpful to know the correlations between the features.
corr_matrix = raw_data.corr()
fig, ax = plt.subplots(figsize = (15,6))
sns.heatmap(corr_matrix, annot = True)
low_price_data = raw_data[(raw_data['sqft_living'] < zero_price['sqft_living'].median()) & (raw_data['bathrooms'] < zero_price['bathrooms'].median()) & (raw_data['sqft_above'] < zero_price['sqft_above'].median())]
low_price = low_price_data.price.median()
high_price_data = raw_data[(raw_data['sqft_living'] > zero_price['sqft_living'].median()) & (raw_data['bathrooms'] > zero_price['bathrooms'].median()) & (raw_data['sqft_above'] > zero_price['sqft_above'].median())]
high_price = high_price_data.price.median()
data_prc = raw_data.copy()
data_prc['price'] = np.where((data_prc['price'] == 0) & (data_prc['sqft_living'] > zero_price['sqft_living'].median()), high_price, data_prc.price)
data_prc['price'] = np.where((data_prc['price'] == 0) & (data_prc['sqft_living'] <= zero_price['sqft_living'].median()), low_price, data_prc.price)
data_prc.price[data_prc.price == 0].count()
# I will plot the distributions to decide which method to use
# to fill in the unknown zero values in the bedrooms and bathrooms columns.
# As you may notice, there is some skewness that would affect the mean of both features,
# so I will use median imputation to replace the zero values.
fig, ax = plt.subplots(1,2, figsize = (20,6))
sns.distplot(ax = ax[0], x= data_prc.bedrooms, color='darkmagenta')
ax[0].set_title('Bedrooms', size = 18)
sns.distplot(ax = ax[1], x = data_prc.bathrooms, color='darkmagenta')
ax[1].set_title('Bathrooms', size = 18)
data_prc['bedrooms'] = data_prc['bedrooms'].replace(0, np.NaN)
data_prc['bedrooms'] = data_prc['bedrooms'].fillna(data_prc.bedrooms.median())
data_prc.bedrooms[data_prc.bedrooms == 0].count() | code |
90118434/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import warnings
sns.set()
warnings.filterwarnings('ignore')
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T
x1 = raw_data.drop(['price', 'date', 'street', 'city', 'statezip', 'country'], axis=1)
y = raw_data['price']
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit()
results.summary()
raw_data.isnull().sum()
corr_matrix = raw_data.corr()
fig, ax = plt.subplots(figsize=(15, 6))
sns.heatmap(corr_matrix, annot=True) | code |
90118434/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import warnings
sns.set()
warnings.filterwarnings('ignore')
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T
x1 = raw_data.drop(['price', 'date', 'street', 'city', 'statezip', 'country'], axis=1)
y = raw_data['price']
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit()
results.summary()
raw_data.isnull().sum()
zero_price = raw_data[raw_data['price'] == 0]
zero_price.describe().T
# To decide what to do with the zero values in price,
# it is helpful to know the correlations between the features.
corr_matrix = raw_data.corr()
fig, ax = plt.subplots(figsize = (15,6))
sns.heatmap(corr_matrix, annot = True)
low_price_data = raw_data[(raw_data['sqft_living'] < zero_price['sqft_living'].median()) & (raw_data['bathrooms'] < zero_price['bathrooms'].median()) & (raw_data['sqft_above'] < zero_price['sqft_above'].median())]
low_price = low_price_data.price.median()
high_price_data = raw_data[(raw_data['sqft_living'] > zero_price['sqft_living'].median()) & (raw_data['bathrooms'] > zero_price['bathrooms'].median()) & (raw_data['sqft_above'] > zero_price['sqft_above'].median())]
high_price = high_price_data.price.median()
data_prc = raw_data.copy()
data_prc['price'] = np.where((data_prc['price'] == 0) & (data_prc['sqft_living'] > zero_price['sqft_living'].median()), high_price, data_prc.price)
data_prc['price'] = np.where((data_prc['price'] == 0) & (data_prc['sqft_living'] <= zero_price['sqft_living'].median()), low_price, data_prc.price)
data_prc.price[data_prc.price == 0].count()
# I will plot the distributions to decide which method to use
# to fill in the unknown zero values in the bedrooms and bathrooms columns.
# As you may notice, there is some skewness that would affect the mean of both features,
# so I will use median imputation to replace the zero values.
fig, ax = plt.subplots(1,2, figsize = (20,6))
sns.distplot(ax = ax[0], x= data_prc.bedrooms, color='darkmagenta')
ax[0].set_title('Bedrooms', size = 18)
sns.distplot(ax = ax[1], x = data_prc.bathrooms, color='darkmagenta')
ax[1].set_title('Bathrooms', size = 18)
data_prc['bedrooms'] = data_prc['bedrooms'].replace(0, np.NaN)
data_prc['bedrooms'] = data_prc['bedrooms'].fillna(data_prc.bedrooms.median())
data_prc.bedrooms[data_prc.bedrooms == 0].count()
data_prc['bathrooms'].replace(to_replace=0, value=data_prc.bathrooms.median(), inplace=True)
data_prc.bathrooms[data_prc.bathrooms == 0].count()
sns.catplot(x='price', data=data_prc, kind='box', height=3, aspect=3) | code |
90118434/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.head() | code |
90118434/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd
import statsmodels.api as sm
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T
x1 = raw_data.drop(['price', 'date', 'street', 'city', 'statezip', 'country'], axis=1)
y = raw_data['price']
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit()
results.summary()
raw_data.isnull().sum()
zero_price = raw_data[raw_data['price'] == 0]
zero_price.describe().T | code |
90118434/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import statsmodels.api as sm
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T
x1 = raw_data.drop(['price', 'date', 'street', 'city', 'statezip', 'country'], axis=1)
y = raw_data['price']
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit()
results.summary()
raw_data.isnull().sum()
raw_data[raw_data == 0].count() | code |
90118434/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import statsmodels.api as sm
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T
x1 = raw_data.drop(['price', 'date', 'street', 'city', 'statezip', 'country'], axis=1)
y = raw_data['price']
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit()
results.summary()
raw_data.isnull().sum() | code |
90118434/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import warnings
sns.set()
warnings.filterwarnings('ignore')
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T
x1 = raw_data.drop(['price', 'date', 'street', 'city', 'statezip', 'country'], axis=1)
y = raw_data['price']
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit()
results.summary()
raw_data.isnull().sum()
zero_price = raw_data[raw_data['price'] == 0]
zero_price.describe().T
# To decide what to do with the zero values in price,
# it is helpful to know the correlations between the features.
corr_matrix = raw_data.corr()
fig, ax = plt.subplots(figsize = (15,6))
sns.heatmap(corr_matrix, annot = True)
low_price_data = raw_data[(raw_data['sqft_living'] < zero_price['sqft_living'].median()) & (raw_data['bathrooms'] < zero_price['bathrooms'].median()) & (raw_data['sqft_above'] < zero_price['sqft_above'].median())]
low_price = low_price_data.price.median()
high_price_data = raw_data[(raw_data['sqft_living'] > zero_price['sqft_living'].median()) & (raw_data['bathrooms'] > zero_price['bathrooms'].median()) & (raw_data['sqft_above'] > zero_price['sqft_above'].median())]
high_price = high_price_data.price.median()
data_prc = raw_data.copy()
data_prc['price'] = np.where((data_prc['price'] == 0) & (data_prc['sqft_living'] > zero_price['sqft_living'].median()), high_price, data_prc.price)
data_prc['price'] = np.where((data_prc['price'] == 0) & (data_prc['sqft_living'] <= zero_price['sqft_living'].median()), low_price, data_prc.price)
data_prc.price[data_prc.price == 0].count()
# I will plot the distributions to decide which method to use
# to fill in the unknown zero values in the bedrooms and bathrooms columns.
# As you may notice, there is some skewness that would affect the mean of both features,
# so I will use median imputation to replace the zero values.
fig, ax = plt.subplots(1,2, figsize = (20,6))
sns.distplot(ax = ax[0], x= data_prc.bedrooms, color='darkmagenta')
ax[0].set_title('Bedrooms', size = 18)
sns.distplot(ax = ax[1], x = data_prc.bathrooms, color='darkmagenta')
ax[1].set_title('Bathrooms', size = 18)
data_prc['bedrooms'] = data_prc['bedrooms'].replace(0, np.NaN)
data_prc['bedrooms'] = data_prc['bedrooms'].fillna(data_prc.bedrooms.median())
data_prc.bedrooms[data_prc.bedrooms == 0].count()
data_prc['bathrooms'].replace(to_replace=0, value=data_prc.bathrooms.median(), inplace=True)
data_prc.bathrooms[data_prc.bathrooms == 0].count() | code |
90118434/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
import statsmodels.api as sm
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T
x1 = raw_data.drop(['price', 'date', 'street', 'city', 'statezip', 'country'], axis=1)
y = raw_data['price']
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit()
results.summary() | code |
90118434/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import warnings
sns.set()
warnings.filterwarnings('ignore')
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.describe(include='all').T
x1 = raw_data.drop(['price', 'date', 'street', 'city', 'statezip', 'country'], axis=1)
y = raw_data['price']
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit()
results.summary()
raw_data.isnull().sum()
zero_price = raw_data[raw_data['price'] == 0]
zero_price.describe().T
# To decide what to do with the zero values in price,
# it is helpful to know the correlations between the features.
corr_matrix = raw_data.corr()
fig, ax = plt.subplots(figsize = (15,6))
sns.heatmap(corr_matrix, annot = True)
low_price_data = raw_data[(raw_data['sqft_living'] < zero_price['sqft_living'].median()) & (raw_data['bathrooms'] < zero_price['bathrooms'].median()) & (raw_data['sqft_above'] < zero_price['sqft_above'].median())]
low_price = low_price_data.price.median()
high_price_data = raw_data[(raw_data['sqft_living'] > zero_price['sqft_living'].median()) & (raw_data['bathrooms'] > zero_price['bathrooms'].median()) & (raw_data['sqft_above'] > zero_price['sqft_above'].median())]
high_price = high_price_data.price.median()
data_prc = raw_data.copy()
data_prc['price'] = np.where((data_prc['price'] == 0) & (data_prc['sqft_living'] > zero_price['sqft_living'].median()), high_price, data_prc.price)
data_prc['price'] = np.where((data_prc['price'] == 0) & (data_prc['sqft_living'] <= zero_price['sqft_living'].median()), low_price, data_prc.price)
data_prc.price[data_prc.price == 0].count()
fig, ax = plt.subplots(1, 2, figsize=(20, 6))
sns.distplot(ax=ax[0], x=data_prc.bedrooms, color='darkmagenta')
ax[0].set_title('Bedrooms', size=18)
sns.distplot(ax=ax[1], x=data_prc.bathrooms, color='darkmagenta')
ax[1].set_title('Bathrooms', size=18) | code |
90118434/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
raw_data = pd.read_csv('../input/housedata/data.csv')
raw_data.info() | code |
74052486/cell_6 | [
"text_plain_output_1.png"
] | from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.transforms import Compose, CenterCrop, ToTensor,ColorJitter
import glob
import os
import shutil
train_data_dir = '/kaggle/input/1056lab-covid19-chest-xray-recognit/train'
working_dir = '/kaggle/working'
os.makedirs(working_dir + '/train')
os.makedirs(working_dir + '/train/COVID/')
os.makedirs(working_dir + '/train/Not_COVID/')
path_list = glob.glob(train_data_dir + '/COVID/*.png')
for path in path_list:
shutil.copy(path, working_dir + '/train/COVID/')
for dir in ['Lung_Opacity', 'Normal', 'Viral_Pneumonia']:
path_list = glob.glob(train_data_dir + '/' + dir + '/*.png')
for path in path_list:
shutil.copy(path, working_dir + '/train/Not_COVID/')
train_data_dir = '/kaggle/working/train'
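# 224x224 center crops match the input size expected by the EfficientNet-B0 used below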
transform = Compose([CenterCrop(224), ToTensor()])
transforms = ColorJitter(contrast=1)
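# NOTE: this ColorJitter is created but never added to the pipeline, so it has no effect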
train_data = ImageFolder(train_data_dir, transform=transform)
train_loader = DataLoader(train_data, batch_size=32, shuffle=True)
class_names = train_data.classes
print(class_names) | code |
74052486/cell_2 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import torch
use_cuda = torch.cuda.is_available()
print('Use CUDA:', use_cuda)
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu') | code |
74052486/cell_8 | [
"text_html_output_1.png"
] | !pip install efficientnet_pytorch
from efficientnet_pytorch import EfficientNet
from torch import nn, optim
model_ft = EfficientNet.from_pretrained('efficientnet-b0', num_classes=len(class_names))
print("======== Fine-funing netowrk architecutre ========\n")
print(model_ft)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
criterion = criterion.to(device)
optimizer = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
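# StepLR decays the learning rate by a factor of 10 (gamma) every 7 epochs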
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1) | code |
74052486/cell_16 | [
"text_plain_output_1.png"
] | from time import time
from torch.nn.functional import softmax
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.transforms import Compose, CenterCrop, ToTensor, ColorJitter
import glob
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import shutil
import torch
use_cuda = torch.cuda.is_available()
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
train_data_dir = '/kaggle/input/1056lab-covid19-chest-xray-recognit/train'
working_dir = '/kaggle/working'
os.makedirs(working_dir + '/train')
os.makedirs(working_dir + '/train/COVID/')
os.makedirs(working_dir + '/train/Not_COVID/')
path_list = glob.glob(train_data_dir + '/COVID/*.png')
for path in path_list:
shutil.copy(path, working_dir + '/train/COVID/')
for dir in ['Lung_Opacity', 'Normal', 'Viral_Pneumonia']:
path_list = glob.glob(train_data_dir + '/' + dir + '/*.png')
for path in path_list:
shutil.copy(path, working_dir + '/train/Not_COVID/')
train_data_dir = '/kaggle/working/train'
transform = Compose([CenterCrop(224), ToTensor()])
transforms = ColorJitter(contrast=1)
train_data = ImageFolder(train_data_dir, transform=transform)
train_loader = DataLoader(train_data, batch_size=32, shuffle=True)
class_names = train_data.classes
from time import time
epoch_num = 10
start = time()
for epoch in range(1, epoch_num + 1):
    model_ft.train()
    sum_loss = 0.0
    count = 0
    for image, label in train_loader:
        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
        y = model_ft(image)
        _, preds = torch.max(y, 1)
        loss = criterion(y, label)
        model_ft.zero_grad()
        loss.backward()
        optimizer.step()
        sum_loss += loss.item() * image.size(0)
        count += torch.sum(preds == label.data)
    scheduler.step()  # step the LR schedule once per epoch, after the optimizer updates
    train_loss = sum_loss / len(train_data)
    train_acc = float(count) / len(train_data)
t = time() - start
test_data_dir = '/kaggle/input/1056lab-covid19-chest-xray-recognit/test'
working_dir = '/kaggle/working'
os.makedirs(working_dir + '/test')
os.makedirs(working_dir + '/test/COVID/')
os.makedirs(working_dir + '/test/Not_COVID/')
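# ImageFolder requires class subfolders, so all unlabeled test images go into one dummy 'COVID' folder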
path_list = glob.glob(test_data_dir + '/*.png')
for path in path_list:
shutil.copy(path, working_dir + '/test/COVID/')
test_data_dir = '/kaggle/working/test'
test_data = ImageFolder(test_data_dir, transform=transform)
test_loader = DataLoader(test_data, batch_size=32, shuffle=False)
from torch.nn.functional import softmax
model_ft.eval()  # switch to inference mode (affects dropout/batch-norm)
y_pred = []
with torch.no_grad():  # no_grad() only takes effect when used as a context manager
    for image, label in test_loader:
        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
        y = model_ft(image)
        y = softmax(y, dim=1).to('cpu')
        y_pred = np.concatenate([y_pred, y.numpy()[:, 0]])  # probability of class 0 ('COVID')
submit_df = pd.read_csv('/kaggle/input/1056lab-covid19-chest-xray-recognit/sampleSubmission.csv', index_col=0)
submit_df['COVID'] = y_pred
submit_df.to_csv('submission.csv')
submit_df | code |
74052486/cell_10 | [
"text_plain_output_1.png"
] | from time import time
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.transforms import Compose, CenterCrop, ToTensor,ColorJitter
import glob
import os
import shutil
import torch
use_cuda = torch.cuda.is_available()
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
train_data_dir = '/kaggle/input/1056lab-covid19-chest-xray-recognit/train'
working_dir = '/kaggle/working'
os.makedirs(working_dir + '/train')
os.makedirs(working_dir + '/train/COVID/')
os.makedirs(working_dir + '/train/Not_COVID/')
path_list = glob.glob(train_data_dir + '/COVID/*.png')
for path in path_list:
shutil.copy(path, working_dir + '/train/COVID/')
for dir in ['Lung_Opacity', 'Normal', 'Viral_Pneumonia']:
path_list = glob.glob(train_data_dir + '/' + dir + '/*.png')
for path in path_list:
shutil.copy(path, working_dir + '/train/Not_COVID/')
from torchvision.transforms import Compose, CenterCrop, ToTensor, ColorJitter
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
train_data_dir = '/kaggle/working/train'
transform = Compose([CenterCrop(224), ToTensor()])
transforms = ColorJitter(contrast=1)
train_data = ImageFolder(train_data_dir, transform=transform)
train_loader = DataLoader(train_data, batch_size=32, shuffle=True)
class_names = train_data.classes
from time import time
epoch_num = 10
start = time()
for epoch in range(1, epoch_num + 1):
    model_ft.train()
    sum_loss = 0.0
    count = 0
    for image, label in train_loader:
        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
        y = model_ft(image)
        _, preds = torch.max(y, 1)
        loss = criterion(y, label)
        model_ft.zero_grad()
        loss.backward()
        optimizer.step()
        sum_loss += loss.item() * image.size(0)
        count += torch.sum(preds == label.data)
    scheduler.step()  # step the LR schedule once per epoch, after the optimizer updates
    train_loss = sum_loss / len(train_data)
    train_acc = float(count) / len(train_data)
t = time() - start
print(f'epoch: {epoch}, mean loss: {train_loss:.4f}, train accuracy: {train_acc:.4f}, elapsed_time :{t:.4f}') | code |
105182335/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/salary-prediction-polynomial-linear-regression/Position_Salaries.csv')
X = data.iloc[:, 1:2].values
y = data.iloc[:, 2].values
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
lin_reg.predict([[6.5]]) | code |
105182335/cell_4 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/salary-prediction-polynomial-linear-regression/Position_Salaries.csv')
X = data.iloc[:, 1:2].values
y = data.iloc[:, 2].values
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y) | code |
105182335/cell_6 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/salary-prediction-polynomial-linear-regression/Position_Salaries.csv')
X = data.iloc[:, 1:2].values
y = data.iloc[:, 2].values
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
plt.scatter(X, y, color='red')
plt.plot(X, lin_reg.predict(X), color='blue')
plt.title('Truth or Bluff (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() | code |
105182335/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105182335/cell_7 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/salary-prediction-polynomial-linear-regression/Position_Salaries.csv')
X = data.iloc[:, 1:2].values
y = data.iloc[:, 2].values
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
from sklearn.preprocessing import PolynomialFeatures
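# Degree-4 expansion maps x to [1, x, x^2, x^3, x^4] before the linear fit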
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
poly_reg.fit(X_poly, y)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y)
plt.scatter(X, y, color='red')
plt.plot(X, lin_reg_2.predict(poly_reg.fit_transform(X)), color='blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() | code |
105182335/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/salary-prediction-polynomial-linear-regression/Position_Salaries.csv')
X = data.iloc[:, 1:2].values
y = data.iloc[:, 2].values
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
poly_reg.fit(X_poly, y)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y)
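# A dense 0.1-step grid gives a smooth prediction curve instead of straight segments between the 10 data points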
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color='blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() | code |
105182335/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/salary-prediction-polynomial-linear-regression/Position_Salaries.csv')
print(data)
X = data.iloc[:, 1:2].values
y = data.iloc[:, 2].values | code |
105182335/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/salary-prediction-polynomial-linear-regression/Position_Salaries.csv')
X = data.iloc[:, 1:2].values
y = data.iloc[:, 2].values
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
poly_reg.fit(X_poly, y)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
lin_reg_2.predict(poly_reg.fit_transform([[6.5]])) | code |
105182335/cell_5 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/salary-prediction-polynomial-linear-regression/Position_Salaries.csv')
X = data.iloc[:, 1:2].values
y = data.iloc[:, 2].values
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
poly_reg.fit(X_poly, y)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y) | code |
34133814/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns
df.isnull().sum()
plt.rcParams['figure.figsize'] = (10, 5)
labels = ['1', '2', '3', '4', '5']
sizes = df['International Reputation'].value_counts()
colors = plt.cm.copper(np.linspace(0, 1, 5))
explode = [0.1, 0.1, 0.2, 0.5, 0.9]
plt.rcParams['figure.figsize'] = (9, 9)
p = sns.countplot(x='Position', data=df)
_ = plt.setp(p.get_xticklabels(), rotation=90) | code |
34133814/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns
def club(x):
return df[df['Club'] == x][['Name', 'Position', 'Overall', 'Nationality', 'Age']]
club('Tottenham Hotspur')
x = club('Tottenham Hotspur')
x.shape | code |
34133814/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.info() | code |
34133814/cell_23 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns
def club(x):
return df[df['Club'] == x][['Name', 'Position', 'Overall', 'Nationality', 'Age']]
club('Tottenham Hotspur')
x = club('Tottenham Hotspur')
x.shape
df.isnull().sum()
plt.rcParams['figure.figsize'] = (10, 5)
labels = ['1', '2', '3', '4', '5']
sizes = df['International Reputation'].value_counts()
colors = plt.cm.copper(np.linspace(0, 1, 5))
explode = [0.1, 0.1, 0.2, 0.5, 0.9]
plt.rcParams['figure.figsize'] = (9, 9)
# Use a countplot to display the data
p = sns.countplot(x='Position', data=df)
_ = plt.setp(p.get_xticklabels(), rotation=90)
# Use a countplot to display the data
fig = plt.figure(figsize=(25, 10))
p = sns.countplot(x='Nationality', data=df)
_ = plt.setp(p.get_xticklabels(), rotation=90)
x = df.Age
plt.figure(figsize=(15, 8))
ax = sns.distplot(x, bins=58, kde=False, color='g')
ax.set_xlabel(xlabel='Player age', fontsize=16)
ax.set_ylabel(ylabel='Number of players', fontsize=16)
ax.set_title(label='Histogram of player ages', fontsize=20)
plt.show() | code |
34133814/cell_20 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns
df.isnull().sum()
plt.rcParams['figure.figsize'] = (10, 5)
labels = ['1', '2', '3', '4', '5']
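# value_counts returns counts sorted by frequency; for this data that matches reputation levels 1-5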
sizes = df['International Reputation'].value_counts()
colors = plt.cm.copper(np.linspace(0, 1, 5))
explode = [0.1, 0.1, 0.2, 0.5, 0.9]
plt.rcParams['figure.figsize'] = (9, 9)
plt.pie(sizes, labels=labels, colors=colors, explode=explode, shadow=True)
plt.title('International reputation of players', fontsize=20)
plt.legend()
plt.show() | code |
34133814/cell_6 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.head(10) | code |
34133814/cell_2 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34133814/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns
def country(x):
return df[df['Nationality'] == x][['Name', 'Overall', 'Potential', 'Position']]
country('Russia') | code |
34133814/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns
df.isnull().sum()
plt.rcParams['figure.figsize'] = (10, 5)
sns.countplot(df['Preferred Foot'], palette='Reds')
plt.title("Player's preferred foot", fontsize=20)
plt.show() | code |
34133814/cell_8 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape | code |
34133814/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns
df.isnull().sum() | code |
34133814/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns
def club(x):
return df[df['Club'] == x][['Name', 'Position', 'Overall', 'Nationality', 'Age']]
club('Tottenham Hotspur')
x = club('Tottenham Hotspur')
x.shape
df.isnull().sum()
plt.rcParams['figure.figsize'] = (10, 5)
labels = ['1', '2', '3', '4', '5']
sizes = df['International Reputation'].value_counts()
colors = plt.cm.copper(np.linspace(0, 1, 5))
explode = [0.1, 0.1, 0.2, 0.5, 0.9]
plt.rcParams['figure.figsize'] = (9, 9)
# Use a countplot to display the data
p = sns.countplot(x='Position', data=df)
_ = plt.setp(p.get_xticklabels(), rotation=90)
# Use a countplot to display the data
fig = plt.figure(figsize=(25, 10))
p = sns.countplot(x='Nationality', data=df)
_ = plt.setp(p.get_xticklabels(), rotation=90)
# Show that many players share the same age
# Histogram: number of players per age
x = df.Age
plt.figure(figsize = (15,8))
ax = sns.distplot(x, bins = 58, kde = False, color = 'g')
ax.set_xlabel(xlabel = "Возраст футболистов", fontsize = 16)
ax.set_ylabel(ylabel = 'Количество футболистов', fontsize = 16)
ax.set_title(label = 'Гистограмма возраста футболистов', fontsize = 20)
plt.show()
x = df.Potential
plt.figure(figsize=(12, 8))
plt.style.use('seaborn-paper')
ax = sns.distplot(x, bins=58, kde=False, color='y')
ax.set_xlabel(xlabel='Player potential rating', fontsize=16)
ax.set_ylabel(ylabel='Number of players', fontsize=16)
ax.set_title(label='Histogram of player potential ratings', fontsize=20)
plt.show() | code |
34133814/cell_22 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns
df.isnull().sum()
plt.rcParams['figure.figsize'] = (10, 5)
labels = ['1', '2', '3', '4', '5']
sizes = df['International Reputation'].value_counts()
colors = plt.cm.copper(np.linspace(0, 1, 5))
explode = [0.1, 0.1, 0.2, 0.5, 0.9]
plt.rcParams['figure.figsize'] = (9, 9)
# Use a countplot to display the data
p = sns.countplot(x='Position', data=df)
_ = plt.setp(p.get_xticklabels(), rotation=90)
fig = plt.figure(figsize=(25, 10))
p = sns.countplot(x='Nationality', data=df)
_ = plt.setp(p.get_xticklabels(), rotation=90) | code |
34133814/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns | code |
34133814/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.drop(df.columns.difference(['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Position', 'Overall', 'Potential', 'Club', 'Composure', 'Dribbling', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'Joined', 'Wage', 'Preferred Foot', 'International Reputation']), 1, inplace=True)
df.shape
df.columns
def club(x):
return df[df['Club'] == x][['Name', 'Position', 'Overall', 'Nationality', 'Age']]
club('Tottenham Hotspur') | code |
34133814/cell_5 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.head(10) | code |
34148707/cell_15 | [
"text_html_output_1.png"
] | val_fold | code |
88085268/cell_20 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from matplotlib import pyplot as plt
from mlxtend.preprocessing import minmax_scaling
from sklearn.cluster import KMeans
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from xgboost import XGBRegressor
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
data.dropna(subset=['SalePrice'], inplace=True)
y = data.SalePrice
X = data.drop('SalePrice', axis=1)
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
X_test = test_data.drop('Id', axis=1)
test_id = test_data['Id']
def proc_data_ordinal(X, test_X=None):
numerical_data = X[X.select_dtypes(exclude=['object']).columns]
categorical_data = X[X.select_dtypes('object').columns]
if test_X is not None:
t_numerical_data = test_X[test_X.select_dtypes(exclude=['object']).columns]
t_categorical_data = test_X[test_X.select_dtypes('object').columns]
imputer = SimpleImputer(strategy='constant')
imputed_num = pd.DataFrame(imputer.fit_transform(numerical_data))
imputed_num.columns = numerical_data.columns
if test_X is not None:
t_imputed_num = pd.DataFrame(imputer.transform(t_numerical_data))
t_imputed_num.columns = t_numerical_data.columns
imputer = SimpleImputer(strategy='most_frequent')
imputed_cat = pd.DataFrame(imputer.fit_transform(categorical_data))
imputed_cat.columns = categorical_data.columns
if test_X is not None:
t_imputed_cat = pd.DataFrame(imputer.transform(t_categorical_data))
t_imputed_cat.columns = t_categorical_data.columns
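    # Ordinal-encode the categoricals: fit the mapping on train and reuse it on the test set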
ordinal_encoder = OrdinalEncoder()
ord_X_train = imputed_cat.copy()
ord_X_train[imputed_cat.columns] = ordinal_encoder.fit_transform(imputed_cat)
if test_X is not None:
t_ord_X_train = t_imputed_cat.copy()
t_ord_X_train[t_imputed_cat.columns] = ordinal_encoder.transform(t_imputed_cat)
processed_X = pd.concat([imputed_num, ord_X_train], axis=1)
t_processed_X = None
if test_X is not None:
t_processed_X = pd.concat([t_imputed_num, t_ord_X_train], axis=1)
return (processed_X, t_processed_X)
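# Moderately regularized XGBoost: small learning rate with many rounds, plus row/column subsampling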
xgb_params = dict(max_depth=6, learning_rate=0.01, n_estimators=5000, min_child_weight=1, colsample_bytree=0.7, subsample=0.7, reg_alpha=0.5, reg_lambda=1.0, num_parallel_tree=1)
baseline_X, _ = proc_data_ordinal(X, X_test)
baseline_model = XGBRegressor(**xgb_params, random_state=1722)
previous_scores = -1 * cross_val_score(baseline_model, baseline_X, y, cv=5, scoring='neg_mean_absolute_error')
previous_scores.mean()
neighborhood_averages = data.groupby('Neighborhood')['SalePrice'].transform('mean')
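# Target (mean) encoding: each Neighborhood is replaced by the average SalePrice observed in it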
neighborhood_map = pd.DataFrame({'Neighborhood': data['Neighborhood'], 'avg': neighborhood_averages})
data['Neighborhood'] = neighborhood_averages
neighborhood_map.groupby(['Neighborhood', 'avg']).size()
nbd_dict = {'Blmngtn': 194870.882353, 'Blueste': 137500.0, 'BrDale': 104493.75, 'BrkSide': 124834.051724, 'ClearCr': 212565.428571, 'CollgCr': 197965.773333, 'Crawfor': 210624.72549, 'Edwards': 128219.7, 'Gilbert': 192854.506329, 'IDOTRR': 100123.783784, 'MeadowV': 98576.470588, 'Mitchel': 156270.122449, 'NAmes': 145847.08, 'NPkVill': 142694.444444, 'NWAmes': 189050.068493, 'NoRidge': 335295.317073, 'NridgHt': 316270.623377, 'OldTown': 128225.300885, 'SWISU': 142591.36, 'Sawyer': 136793.135135, 'SawyerW': 186555.79661, 'Somerst': 225379.837209, 'StoneBr': 310499.0, 'Timber': 242247.447368, 'Veenker': 238772.727273}
y = data.SalePrice
X = data.drop('SalePrice', axis=1)
new_X, _ = proc_data_ordinal(X, None)
new_model = XGBRegressor(**xgb_params, random_state=1722)
previous_scores = -1 * cross_val_score(new_model, new_X, y, cv=5, scoring='neg_mean_absolute_error')
previous_scores.mean()
test_data['Neighborhood'] = test_data.Neighborhood.map(lambda n: nbd_dict[n])
y = data.SalePrice
X = data.drop('SalePrice', axis=1)
new_X, test_X = proc_data_ordinal(X, test_data.drop('Id', axis=1))
new_model = XGBRegressor(**xgb_params, random_state=1722)
new_model.fit(new_X, y)
preds = new_model.predict(test_X)
output = pd.DataFrame({'Id': test_data.Id, 'SalePrice': preds})
output.to_csv('submission.csv', index=False)
from sklearn.cluster import KMeans
from mlxtend.preprocessing import minmax_scaling
g = pd.DataFrame(data.GrLivArea)
d = minmax_scaling(g, columns=['GrLivArea'])
data['GrLivArea_scaled'] = d * 10
comparative_features = ['OverallCond', 'OverallQual']
nbs = list(data.Neighborhood.unique())
ks = dict()
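# Fit a per-neighborhood KMeans on quality/condition; neighborhoods with 40 or fewer rows get a single cluster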
for n in nbs:
cluster_data = data.loc[data.Neighborhood == n][comparative_features]
if len(cluster_data) > 40:
k = KMeans(n_clusters=3, n_init=10)
cluster_data['nbd_comp_label'] = k.fit_predict(cluster_data)
else:
k = KMeans(n_clusters=1, n_init=10)
cluster_data['nbd_comp_label'] = k.fit_predict(cluster_data)
ks[n] = k | code |
88085268/cell_16 | [
"text_plain_output_1.png"
] | from matplotlib import pyplot as plt
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from xgboost import XGBRegressor
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
data.dropna(subset=['SalePrice'], inplace=True)
y = data.SalePrice
X = data.drop('SalePrice', axis=1)
test_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
X_test = test_data.drop('Id', axis=1)
test_id = test_data['Id']
def proc_data_ordinal(X, test_X=None):
numerical_data = X[X.select_dtypes(exclude=['object']).columns]
categorical_data = X[X.select_dtypes('object').columns]
if test_X is not None:
t_numerical_data = test_X[test_X.select_dtypes(exclude=['object']).columns]
t_categorical_data = test_X[test_X.select_dtypes('object').columns]
imputer = SimpleImputer(strategy='constant')
imputed_num = pd.DataFrame(imputer.fit_transform(numerical_data))
imputed_num.columns = numerical_data.columns
if test_X is not None:
t_imputed_num = pd.DataFrame(imputer.transform(t_numerical_data))
t_imputed_num.columns = t_numerical_data.columns
imputer = SimpleImputer(strategy='most_frequent')
imputed_cat = pd.DataFrame(imputer.fit_transform(categorical_data))
imputed_cat.columns = categorical_data.columns
if test_X is not None:
t_imputed_cat = pd.DataFrame(imputer.transform(t_categorical_data))
t_imputed_cat.columns = t_categorical_data.columns
ordinal_encoder = OrdinalEncoder()
ord_X_train = imputed_cat.copy()
ord_X_train[imputed_cat.columns] = ordinal_encoder.fit_transform(imputed_cat)
if test_X is not None:
t_ord_X_train = t_imputed_cat.copy()
t_ord_X_train[t_imputed_cat.columns] = ordinal_encoder.transform(t_imputed_cat)
processed_X = pd.concat([imputed_num, ord_X_train], axis=1)
t_processed_X = None
if test_X is not None:
t_processed_X = pd.concat([t_imputed_num, t_ord_X_train], axis=1)
return (processed_X, t_processed_X)
xgb_params = dict(max_depth=6, learning_rate=0.01, n_estimators=5000, min_child_weight=1, colsample_bytree=0.7, subsample=0.7, reg_alpha=0.5, reg_lambda=1.0, num_parallel_tree=1)
baseline_X, _ = proc_data_ordinal(X, X_test)
baseline_model = XGBRegressor(**xgb_params, random_state=1722)
previous_scores = -1 * cross_val_score(baseline_model, baseline_X, y, cv=5, scoring='neg_mean_absolute_error')
previous_scores.mean()
neighborhood_averages = data.groupby('Neighborhood')['SalePrice'].transform('mean')
neighborhood_map = pd.DataFrame({'Neighborhood': data['Neighborhood'], 'avg': neighborhood_averages})
data['Neighborhood'] = neighborhood_averages
neighborhood_map.groupby(['Neighborhood', 'avg']).size()
nbd_dict = {'Blmngtn': 194870.882353, 'Blueste': 137500.0, 'BrDale': 104493.75, 'BrkSide': 124834.051724, 'ClearCr': 212565.428571, 'CollgCr': 197965.773333, 'Crawfor': 210624.72549, 'Edwards': 128219.7, 'Gilbert': 192854.506329, 'IDOTRR': 100123.783784, 'MeadowV': 98576.470588, 'Mitchel': 156270.122449, 'NAmes': 145847.08, 'NPkVill': 142694.444444, 'NWAmes': 189050.068493, 'NoRidge': 335295.317073, 'NridgHt': 316270.623377, 'OldTown': 128225.300885, 'SWISU': 142591.36, 'Sawyer': 136793.135135, 'SawyerW': 186555.79661, 'Somerst': 225379.837209, 'StoneBr': 310499.0, 'Timber': 242247.447368, 'Veenker': 238772.727273}
y = data.SalePrice
X = data.drop('SalePrice', axis=1)
new_X, _ = proc_data_ordinal(X, None)
new_model = XGBRegressor(**xgb_params, random_state=1722)
previous_scores = -1 * cross_val_score(new_model, new_X, y, cv=5, scoring='neg_mean_absolute_error')
previous_scores.mean()
test_data['Neighborhood'] = test_data.Neighborhood.map(lambda n: nbd_dict[n])
y = data.SalePrice
X = data.drop('SalePrice', axis=1)
new_X, test_X = proc_data_ordinal(X, test_data.drop('Id', axis=1))
new_model = XGBRegressor(**xgb_params, random_state=1722)
new_model.fit(new_X, y) | code |
16158815/cell_13 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
heart = pd.read_csv('../input/heart.csv')
heart.nunique()
heart.columns = ['Age', 'Gender', 'ChestPain', 'RestingBloodPressure', 'Cholestrol', 'FastingBloodSugar', 'RestingECG', 'MaxHeartRateAchivied', 'ExerciseIndusedAngina', 'Oldpeak', 'Slope', 'MajorVessels', 'Thalassemia', 'Target']
bg_color = (0.25, 0.25, 0.25)
sns.set(rc={'font.style': 'normal', 'axes.facecolor': bg_color, 'figure.facecolor': bg_color, 'text.color': 'white', 'xtick.color': 'white', 'ytick.color': 'white', 'axes.labelcolor': 'white', 'axes.grid': False, 'axes.labelsize': 25, 'figure.figsize': (10.0, 5.0), 'xtick.labelsize': 15, 'ytick.labelsize': 15})
result = []
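# Map the numeric cp codes (0-3) onto readable chest-pain type labels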
for i in heart['ChestPain']:
if i == 0:
result.append('Typical Angina')
if i == 1:
result.append('Atypical Angina')
if i == 2:
result.append('Non-Anginal')
if i == 3:
result.append('Asymptomatic')
heart['ChestPainType'] = result
ax = sns.countplot(hue=result, x='Gender', data=heart, palette='husl')
plt.title('Chest Pain Type Vs Gender')
plt.ylabel('')
plt.yticks([])
plt.xlabel('')
ax.set_xticklabels(['Female', 'Male'])
print(ax.patches) | code |
16158815/cell_9 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
heart = pd.read_csv('../input/heart.csv')
heart.nunique()
heart.columns = ['Age', 'Gender', 'ChestPain', 'RestingBloodPressure', 'Cholestrol', 'FastingBloodSugar', 'RestingECG', 'MaxHeartRateAchivied', 'ExerciseIndusedAngina', 'Oldpeak', 'Slope', 'MajorVessels', 'Thalassemia', 'Target']
bg_color = (0.25, 0.25, 0.25)
sns.set(rc={'font.style': 'normal', 'axes.facecolor': bg_color, 'figure.facecolor': bg_color, 'text.color': 'white', 'xtick.color': 'white', 'ytick.color': 'white', 'axes.labelcolor': 'white', 'axes.grid': False, 'axes.labelsize': 25, 'figure.figsize': (10.0, 5.0), 'xtick.labelsize': 15, 'ytick.labelsize': 15})
sns.swarmplot(heart['Age']) | code |
16158815/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
heart = pd.read_csv('../input/heart.csv')
print('unique entries in each column')
heart.nunique() | code |
16158815/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
heart = pd.read_csv('../input/heart.csv')
print(heart.shape)
heart.head() | code |
16158815/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
heart = pd.read_csv('../input/heart.csv')
heart.nunique()
heart.columns = ['Age', 'Gender', 'ChestPain', 'RestingBloodPressure', 'Cholestrol', 'FastingBloodSugar', 'RestingECG', 'MaxHeartRateAchivied', 'ExerciseIndusedAngina', 'Oldpeak', 'Slope', 'MajorVessels', 'Thalassemia', 'Target']
heart.head() | code |
16158815/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
print(os.listdir('../input')) | code |
16158815/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
heart = pd.read_csv('../input/heart.csv')
heart.nunique()
heart.columns = ['Age', 'Gender', 'ChestPain', 'RestingBloodPressure', 'Cholestrol', 'FastingBloodSugar', 'RestingECG', 'MaxHeartRateAchivied', 'ExerciseIndusedAngina', 'Oldpeak', 'Slope', 'MajorVessels', 'Thalassemia', 'Target']
heart['Age'].hist(grid=False) | code |
16158815/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
heart = pd.read_csv('../input/heart.csv')
heart.info() | code |
16158815/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
heart = pd.read_csv('../input/heart.csv')
heart.nunique()
heart.columns = ['Age', 'Gender', 'ChestPain', 'RestingBloodPressure', 'Cholestrol', 'FastingBloodSugar', 'RestingECG', 'MaxHeartRateAchivied', 'ExerciseIndusedAngina', 'Oldpeak', 'Slope', 'MajorVessels', 'Thalassemia', 'Target']
bg_color = (0.25, 0.25, 0.25)
sns.set(rc={'font.style': 'normal', 'axes.facecolor': bg_color, 'figure.facecolor': bg_color, 'text.color': 'white', 'xtick.color': 'white', 'ytick.color': 'white', 'axes.labelcolor': 'white', 'axes.grid': False, 'axes.labelsize': 25, 'figure.figsize': (10.0, 5.0), 'xtick.labelsize': 15, 'ytick.labelsize': 15})
result = []
for i in heart['ChestPain']:
if i == 0:
result.append('Typical Angina')
if i == 1:
result.append('Atypical Angina')
if i == 2:
result.append('Non-Anginal')
if i == 3:
result.append('Asymptomatic')
heart['ChestPainType'] = result
# do a gender comparison
ax = sns.countplot(hue=result, x='Gender', data=heart, palette='husl')
plt.title('Chest Pain Type Vs Gender')
plt.ylabel('')
plt.yticks([])
plt.xlabel('')
ax.set_xticklabels(['Female', 'Male'])
print(ax.patches)
ax = sns.regplot(x='RestingBloodPressure', y='Cholestrol', data=heart, color='g') | code |
16158815/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
heart = pd.read_csv('../input/heart.csv')
heart.nunique()
heart.columns = ['Age', 'Gender', 'ChestPain', 'RestingBloodPressure', 'Cholestrol', 'FastingBloodSugar', 'RestingECG', 'MaxHeartRateAchivied', 'ExerciseIndusedAngina', 'Oldpeak', 'Slope', 'MajorVessels', 'Thalassemia', 'Target']
bg_color = (0.25, 0.25, 0.25)
sns.set(rc={'font.style': 'normal', 'axes.facecolor': bg_color, 'figure.facecolor': bg_color, 'text.color': 'white', 'xtick.color': 'white', 'ytick.color': 'white', 'axes.labelcolor': 'white', 'axes.grid': False, 'axes.labelsize': 25, 'figure.figsize': (10.0, 5.0), 'xtick.labelsize': 15, 'ytick.labelsize': 15})
result = []
for i in heart['ChestPain']:
if i == 0:
result.append('Typical Angina')
if i == 1:
result.append('Atypical Angina')
if i == 2:
result.append('Non-Anginal')
if i == 3:
result.append('Asymptomatic')
heart['ChestPainType'] = result
sns.swarmplot(x='ChestPainType', y='Age', data=heart) | code |
128049382/cell_4 | [
"text_html_output_4.png",
"text_html_output_6.png",
"text_html_output_2.png",
"text_html_output_5.png",
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png",
"text_html_output_3.png"
] | from torchsummary import summary
import torch
from torchvision import models
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
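# RN_BCNN is assumed to be defined in an earlier notebook cell that this extracted snippet does not include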
model = RN_BCNN().to(device)
summary(model, (3, 224, 224)) | code |
128049382/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, cohen_kappa_score
from torchsummary import summary
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import tqdm
import wandb
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.downsample = nn.Sequential()
if stride != 1 or in_channels != out_channels:
self.downsample = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(out_channels))
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += self.downsample(identity)
out = self.relu(out)
return out
class RN_BCNN(nn.Module):
def __init__(self):
super(RN_BCNN, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.res_block1 = self._make_res_block(64, 32, 128, 3)
self.res_block2 = self._make_res_block(128, 64, 256, 4)
self.res_block3 = self._make_res_block(256, 128, 512, 6)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512, 5)
self.softmax = nn.Softmax(dim=1)
def _make_res_block(self, in_channels, mid_channels, out_channels, num_blocks):
layers = []
layers.append(ResidualBlock(in_channels, mid_channels, stride=1))
for i in range(num_blocks - 1):
layers.append(ResidualBlock(mid_channels, mid_channels, stride=1))
layers.append(ResidualBlock(mid_channels, out_channels, stride=2))
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.res_block1(out)
out = self.res_block2(out)
out = self.res_block3(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
out = self.softmax(out)
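# note: nn.CrossEntropyLoss (used later for training) applies log-softmax
# internally, so returning softmax probabilities here double-applies it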
return out
from torchvision import models
from torchsummary import summary
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = RN_BCNN().to(device)
summary(model, (3, 224, 224))
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
transform = transforms.Compose([transforms.ToPILImage(), transforms.Resize(size=(224, 224)), transforms.ToTensor()])
path_data = '/kaggle/input/img-process/img_process/'
train_file = '/kaggle/input/vindr-birads/train_data_final.csv'
test_file = '/kaggle/input/vindr-birads/test_data_final.csv'
import cv2
from torch.utils.data import DataLoader
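# CustomImageDataset and Datamodule are assumed to be defined in earlier
# notebook cells that this extracted snippet does not include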
data_breast = {'train': CustomImageDataset(train_file, path_data, transform), 'test': CustomImageDataset(test_file, path_data, transform)}
dm = Datamodule(16, data_breast['train'], data_breast['test'])
import torchvision.models as models
dataloaders = {'train': torch.utils.data.DataLoader(data_breast['train'], batch_size=16, shuffle=True, num_workers=0), 'test': torch.utils.data.DataLoader(data_breast['test'], batch_size=16, shuffle=True, num_workers=0)}
from tqdm import tqdm
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, cohen_kappa_score
def train_model(model, criterion, optimizer, num_epochs=3):
for epoch in range(num_epochs):
for phase in ['train', 'test']:
if phase == 'train':
model.train()
else:
model.eval()
y_label = []
y_predict = []
running_loss = 0.0
running_corrects = 0
for inputs, labels in tqdm(dataloaders[phase]):
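# forward pass and loss on each mini-batch; gradients and the optimizer
# step only run in the train phase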
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
loss = criterion(outputs, labels)
if phase == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
_, preds = torch.max(outputs, 1)
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
y_label.extend(labels.cpu().numpy())
y_predict.extend(np.squeeze(preds.cpu().numpy().T))
epoch_loss = running_loss / len(data_breast[phase])
epoch_acc = running_corrects.double() / len(data_breast[phase])
confusion_mtx = confusion_matrix(y_label, y_predict)
f1_scores = f1_score(y_label, y_predict, average=None)
precision_scores = precision_score(y_label, y_predict, average=None)
recall_scores = recall_score(y_label, y_predict, average=None)
kappas = cohen_kappa_score(y_label, y_predict)
wandb.log({'epoch': epoch, phase + 'loss': epoch_loss, phase + 'acc': epoch_acc, 'f1_score_0': f1_scores[0], 'f1_score_1': f1_scores[1], 'f1_score_2': f1_scores[2], 'f1_score_3': f1_scores[3], 'f1_score_4': f1_scores[4], 'precision_score_0': precision_scores[0], 'precision_score_1': precision_scores[1], 'precision_score_2': precision_scores[2], 'precision_score_3': precision_scores[3], 'precision_score_4': precision_scores[4], 'recall_0': recall_scores[0], 'recall_1': recall_scores[1], 'recall_2': recall_scores[2], 'recall_3': recall_scores[3], 'recall_4': recall_scores[4], 'kappa': kappas})
return model
import wandb
run = wandb.init(project='Breast-density-classification', reinit=True)
wandb.run.name = 'RN-BCNN'
model = RN_BCNN().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.05)
train_model(model, criterion, optimizer, num_epochs=100)
run.finish() | code |
128049382/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_1.png"
] | from torchsummary import summary
import torch
from torchvision import models
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
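# RN_BCNN is assumed to be defined in an earlier notebook cell that this extracted snippet does not include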
model = RN_BCNN().to(device)
summary(model, (3, 224, 224))
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device) | code |
128049382/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, cohen_kappa_score
from torchsummary import summary
from tqdm import tqdm
import numpy as np
import torch
import torchvision.transforms as transforms
import tqdm
import wandb
from torchvision import models
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
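# RN_BCNN is assumed to be defined in an earlier notebook cell that this extracted snippet does not include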
model = RN_BCNN().to(device)
summary(model, (3, 224, 224))
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
transform = transforms.Compose([transforms.ToPILImage(), transforms.Resize(size=(224, 224)), transforms.ToTensor()])
path_data = '/kaggle/input/img-process/img_process/'
train_file = '/kaggle/input/vindr-birads/train_data_final.csv'
test_file = '/kaggle/input/vindr-birads/test_data_final.csv'
import cv2
from torch.utils.data import DataLoader
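# CustomImageDataset and Datamodule are assumed to be defined in earlier
# notebook cells that this extracted snippet does not include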
data_breast = {'train': CustomImageDataset(train_file, path_data, transform), 'test': CustomImageDataset(test_file, path_data, transform)}
dm = Datamodule(16, data_breast['train'], data_breast['test'])
import torchvision.models as models
dataloaders = {'train': torch.utils.data.DataLoader(data_breast['train'], batch_size=16, shuffle=True, num_workers=0), 'test': torch.utils.data.DataLoader(data_breast['test'], batch_size=16, shuffle=True, num_workers=0)}
from tqdm import tqdm
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, cohen_kappa_score
def train_model(model, criterion, optimizer, num_epochs=3):
for epoch in range(num_epochs):
for phase in ['train', 'test']:
if phase == 'train':
model.train()
else:
model.eval()
y_label = []
y_predict = []
running_loss = 0.0
running_corrects = 0
for inputs, labels in tqdm(dataloaders[phase]):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
loss = criterion(outputs, labels)
if phase == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
_, preds = torch.max(outputs, 1)
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
y_label.extend(labels.cpu().numpy())
y_predict.extend(np.squeeze(preds.cpu().numpy().T))
epoch_loss = running_loss / len(data_breast[phase])
epoch_acc = running_corrects.double() / len(data_breast[phase])
confusion_mtx = confusion_matrix(y_label, y_predict)
f1_scores = f1_score(y_label, y_predict, average=None)
precision_scores = precision_score(y_label, y_predict, average=None)
recall_scores = recall_score(y_label, y_predict, average=None)
kappas = cohen_kappa_score(y_label, y_predict)
wandb.log({'epoch': epoch, phase + 'loss': epoch_loss, phase + 'acc': epoch_acc, 'f1_score_0': f1_scores[0], 'f1_score_1': f1_scores[1], 'f1_score_2': f1_scores[2], 'f1_score_3': f1_scores[3], 'f1_score_4': f1_scores[4], 'precision_score_0': precision_scores[0], 'precision_score_1': precision_scores[1], 'precision_score_2': precision_scores[2], 'precision_score_3': precision_scores[3], 'precision_score_4': precision_scores[4], 'recall_0': recall_scores[0], 'recall_1': recall_scores[1], 'recall_2': recall_scores[2], 'recall_3': recall_scores[3], 'recall_4': recall_scores[4], 'kappa': kappas})
return model
import wandb
run = wandb.init(project='Breast-density-classification', reinit=True)
wandb.run.name = 'RN-BCNN' | code |
128049382/cell_3 | [
"text_plain_output_1.png"
] | pip install torchsummary | code |
1008057/cell_4 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_num = train.shape[0]
full = train.append(test, ignore_index=True)
titanic = full[:train_num]
from sklearn.preprocessing import LabelEncoder
sex = pd.Series(np.where(full.Sex == 'male', 1, 0), name='Sex')
embarked = pd.get_dummies(full.Embarked, prefix='Embarked')
le = LabelEncoder()
imputed = pd.DataFrame()
imputed['Age'] = full.Age.fillna(full.Age.mean())
imputed['Fare'] = full.Fare.fillna(full.Fare.mean())
imputed['Parch'] = full.Parch.fillna(full.Parch.mean())
imputed['SibSp'] = full.SibSp.fillna(full.SibSp.mean())
title = pd.DataFrame()
title['Title'] = full['Name'].map(lambda name: name.split(',')[1].split('.')[0].strip())
Title_Dictionary = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
title['Title'] = title.Title.map(Title_Dictionary)
title = pd.get_dummies(title.Title)
cabin = pd.DataFrame()
cabin['Cabin'] = full.Cabin.fillna('U')
cabin['Cabin'] = cabin.Cabin.map(lambda c: c[0])
cabin = pd.get_dummies(cabin.Cabin, prefix='Cabin')
from sklearn.model_selection import train_test_split
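# note: the title features engineered above are not included in the final feature matrix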
full_X = pd.concat([imputed, embarked, cabin, sex], axis=1)
train_valid_X = full_X[:train_num]
train_valid_y = titanic.Survived
test_X = full_X[train_num:]
train_X, valid_X, train_y, valid_y = train_test_split(train_valid_X, train_valid_y, train_size=0.8)
print(full_X.shape, train_X.shape, valid_X.shape, train_y.shape, valid_y.shape, test_X.shape) | code |
1008057/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_num = train.shape[0]
full = train.append(test, ignore_index=True)
titanic = full[:train_num]
from sklearn.preprocessing import LabelEncoder
sex = pd.Series(np.where(full.Sex == 'male', 1, 0), name='Sex')
embarked = pd.get_dummies(full.Embarked, prefix='Embarked')
le = LabelEncoder()
imputed = pd.DataFrame()
imputed['Age'] = full.Age.fillna(full.Age.mean())
imputed['Fare'] = full.Fare.fillna(full.Fare.mean())
imputed['Parch'] = full.Parch.fillna(full.Parch.mean())
imputed['SibSp'] = full.SibSp.fillna(full.SibSp.mean())
title = pd.DataFrame()
title['Title'] = full['Name'].map(lambda name: name.split(',')[1].split('.')[0].strip())
Title_Dictionary = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
title['Title'] = title.Title.map(Title_Dictionary)
title = pd.get_dummies(title.Title)
cabin = pd.DataFrame()
cabin['Cabin'] = full.Cabin.fillna('U')
cabin['Cabin'] = cabin.Cabin.map(lambda c: c[0])
cabin = pd.get_dummies(cabin.Cabin, prefix='Cabin')
from sklearn.model_selection import train_test_split
full_X = pd.concat([imputed, embarked, cabin, sex], axis=1)
train_valid_X = full_X[:train_num]
train_valid_y = titanic.Survived
test_X = full_X[train_num:]
train_X, valid_X, train_y, valid_y = train_test_split(train_valid_X, train_valid_y, train_size=0.8)
from sklearn.ensemble import GradientBoostingClassifier
import matplotlib.pyplot as plt
train_X, valid_X, train_y, valid_y = train_test_split(train_valid_X, train_valid_y, train_size=0.8)
tree_num = 20
train_score_list = []
valid_score_list = []
x_range = range(1, tree_num)
for trees in x_range:
model = GradientBoostingClassifier(n_estimators=trees, max_depth=10, min_samples_split=2, learning_rate=0.1, subsample=0.8, max_features=0.8)
model.fit(train_X, train_y)
train_score = model.score(train_X, train_y)
valid_score = model.score(valid_X, valid_y)
train_score_list.append(train_score)
valid_score_list.append(valid_score)
from sklearn.ensemble import VotingClassifier, GradientBoostingClassifier
import numpy as np
train_loop_num = 100
model_list = []
weight = []
for idx in range(0, train_loop_num):
train_X, valid_X, train_y, valid_y = train_test_split(train_valid_X, train_valid_y, train_size=0.8)
model = GradientBoostingClassifier(n_estimators=10, max_depth=10, min_samples_split=2, learning_rate=0.1, subsample=0.8, max_features=0.8)
model.fit(train_X, train_y)
train_score = model.score(train_X, train_y)
valid_score = model.score(valid_X, valid_y)
model_name = 'model_' + str(idx)
print('%s, train_score:%f, valid_score:%f' % (model_name, train_score, valid_score))
weight.append(valid_score)
model_list.append((model_name, model))
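# normalize the validation scores so they can act as ensemble voting weights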
sum_of_weight = sum(weight)
weight = [x / sum_of_weight for x in weight]
pred = []
for name, model in model_list:
test_Y = [int(x) for x in model.predict(test_X)]
pred.append(test_Y)
test_Y = []
for idx in range(0, test_X.shape[0]):
pred_result = [x[idx] for x in pred]
# int() truncates the weighted average, so the ensemble would output 1 only
# when every model agrees; rounding implements the intended weighted vote
test_Y.append(int(round(np.dot(pred_result, weight))))
print(test_Y) | code |
1008057/cell_2 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
print(check_output(['ls', '../input']).decode('utf8'))
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_num = train.shape[0]
full = train.append(test, ignore_index=True)
titanic = full[:train_num]
print('train_size = %s, test_size=%s' % (train.shape, test.shape))
full.head(10) | code |
1008057/cell_7 | [
"text_plain_output_1.png"
] | """
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier , GradientBoostingClassifier, AdaBoostClassifier, VotingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
model_list = {
"random_forest": RandomForestClassifier(n_estimators=100, max_depth=10,min_samples_split=2),
"SVC": SVC(),
"GBDT": GradientBoostingClassifier(n_estimators=10, learning_rate=0.3, max_depth=4, random_state=0),
"KNN": KNeighborsClassifier(n_neighbors = 3),
"GaussianNB": GaussianNB(),
"LR": LogisticRegression()
}.items()
weight = []
for (name, model) in model_list:
model.fit(train_X, train_y)
train_score = model.score( train_X , train_y )
valid_score = model.score( valid_X , valid_y )
weight.append(valid_score)
print ("%s, train_score:%f, valid_score:%f" % (name, train_score , valid_score) )
# voting ensemble
eclf = VotingClassifier(estimators=model_list, weights=weight)
eclf.fit(train_X, train_y)
print (eclf.score(train_X, train_y))
print (eclf.score(valid_X, valid_y))
test_Y = [int(x) for x in eclf.predict(test_X)]
print (len(test_Y))
""" | code |
1008057/cell_3 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_num = train.shape[0]
full = train.append(test, ignore_index=True)
titanic = full[:train_num]
from sklearn.preprocessing import LabelEncoder
sex = pd.Series(np.where(full.Sex == 'male', 1, 0), name='Sex')
embarked = pd.get_dummies(full.Embarked, prefix='Embarked')
le = LabelEncoder()
imputed = pd.DataFrame()
imputed['Age'] = full.Age.fillna(full.Age.mean())
imputed['Fare'] = full.Fare.fillna(full.Fare.mean())
imputed['Parch'] = full.Parch.fillna(full.Parch.mean())
imputed['SibSp'] = full.SibSp.fillna(full.SibSp.mean())
title = pd.DataFrame()
title['Title'] = full['Name'].map(lambda name: name.split(',')[1].split('.')[0].strip())
Title_Dictionary = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
title['Title'] = title.Title.map(Title_Dictionary)
title = pd.get_dummies(title.Title)
cabin = pd.DataFrame()
cabin['Cabin'] = full.Cabin.fillna('U')
cabin['Cabin'] = cabin.Cabin.map(lambda c: c[0])
cabin = pd.get_dummies(cabin.Cabin, prefix='Cabin')
cabin.head() | code |
1008057/cell_5 | [
"image_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_num = train.shape[0]
full = train.append(test, ignore_index=True)
titanic = full[:train_num]
from sklearn.preprocessing import LabelEncoder
sex = pd.Series(np.where(full.Sex == 'male', 1, 0), name='Sex')
embarked = pd.get_dummies(full.Embarked, prefix='Embarked')
le = LabelEncoder()
imputed = pd.DataFrame()
imputed['Age'] = full.Age.fillna(full.Age.mean())
imputed['Fare'] = full.Fare.fillna(full.Fare.mean())
imputed['Parch'] = full.Parch.fillna(full.Parch.mean())
imputed['SibSp'] = full.SibSp.fillna(full.SibSp.mean())
title = pd.DataFrame()
title['Title'] = full['Name'].map(lambda name: name.split(',')[1].split('.')[0].strip())
Title_Dictionary = {'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Dr': 'Officer', 'Rev': 'Officer', 'the Countess': 'Royalty', 'Dona': 'Royalty', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Royalty'}
title['Title'] = title.Title.map(Title_Dictionary)
title = pd.get_dummies(title.Title)
cabin = pd.DataFrame()
cabin['Cabin'] = full.Cabin.fillna('U')
cabin['Cabin'] = cabin.Cabin.map(lambda c: c[0])
cabin = pd.get_dummies(cabin.Cabin, prefix='Cabin')
from sklearn.model_selection import train_test_split
full_X = pd.concat([imputed, embarked, cabin, sex], axis=1)
train_valid_X = full_X[:train_num]
train_valid_y = titanic.Survived
test_X = full_X[train_num:]
train_X, valid_X, train_y, valid_y = train_test_split(train_valid_X, train_valid_y, train_size=0.8)
from sklearn.ensemble import GradientBoostingClassifier
import matplotlib.pyplot as plt
train_X, valid_X, train_y, valid_y = train_test_split(train_valid_X, train_valid_y, train_size=0.8)
tree_num = 20
train_score_list = []
valid_score_list = []
x_range = range(1, tree_num)
for trees in x_range:
model = GradientBoostingClassifier(n_estimators=trees, max_depth=10, min_samples_split=2, learning_rate=0.1, subsample=0.8, max_features=0.8)
model.fit(train_X, train_y)
train_score = model.score(train_X, train_y)
valid_score = model.score(valid_X, valid_y)
train_score_list.append(train_score)
valid_score_list.append(valid_score)
plt.plot(x_range, train_score_list, '-r')
plt.plot(x_range, valid_score_list, '-b')
plt.show() | code |
1009991/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import os
type_1 = os.listdir('../input/train/Type_1')
# os.listdir returns a plain Python list, which has no .shape attribute; use len() to count files
len(type_1) | code
1009991/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
print(check_output(['ls', '../input/train/']).decode('utf8')) | code |
34144217/cell_4 | [
"image_output_4.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib as plt
import matplotlib.pyplot  # 'import matplotlib' alone does not load pyplot, which the plt.pyplot calls below need
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dates = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/calendar.csv')
data = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sales_train_validation.csv')
sale_data = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sell_prices.csv')
submission = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sample_submission.csv')
last_date = 1913
original_features = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']
total_sales = data.mean()
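# column labels look like 'd_123'; x[2:] strips the 'd_' prefix to recover day numbers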
x_data = np.array([int(x[2:]) for x in total_sales.index])
y_data = np.array(total_sales.array)
m, b = np.polyfit(x_data, y_data, 1)
plt.pyplot.scatter(x_data, total_sales, s=1)
plt.pyplot.plot([0, last_date], [b, m * last_date + b], linewidth=3)
print('Gradient: ', m, 'Intercept:', b)
plt.pyplot.figure()
plt.pyplot.scatter(x_data[400:1250], total_sales.iloc[400:1250], s=5) | code |
34144217/cell_6 | [
"image_output_4.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib as plt
import matplotlib.pyplot  # 'import matplotlib' alone does not load pyplot, which the plt.pyplot calls below need
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dates = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/calendar.csv')
data = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sales_train_validation.csv')
sale_data = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sell_prices.csv')
submission = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sample_submission.csv')
last_date = 1913
original_features = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']
total_sales = data.mean()
x_data = np.array([int(x[2:]) for x in total_sales.index])
y_data = np.array(total_sales.array)
m, b = np.polyfit(x_data, y_data, 1)
state_groups = data.groupby('state_id')
state_data = state_groups.mean()
print(state_data)
x_data = range(1, last_date + 1)
for i, g in enumerate(state_groups.groups.keys()):
y_data = [state_data['d_' + str(x)].iloc[i] for x in x_data]
plt.pyplot.scatter(x_data, y_data, s=5, alpha=0.3)
m, b = np.polyfit(x_data, y_data, 1)
print('Group ' + str(i) + ':', g, 'Gradient:', m, 'Intercept:', b)
plt.pyplot.plot([0, last_date], [b, m * last_date + b], linewidth=4, label=g)
plt.pyplot.legend(loc='upper left') | code |
34144217/cell_2 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dates = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/calendar.csv')
data = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sales_train_validation.csv')
sale_data = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sell_prices.csv')
submission = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sample_submission.csv')
print(dates.head())
print(data.head())
print(sale_data.head())
print(submission.head()) | code |
34144217/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib as plt
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34144217/cell_8 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib as plt
import matplotlib.pyplot  # 'import matplotlib' alone does not load pyplot, which the plt.pyplot calls below need
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dates = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/calendar.csv')
data = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sales_train_validation.csv')
sale_data = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sell_prices.csv')
submission = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sample_submission.csv')
last_date = 1913
original_features = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']
total_sales = data.mean()
x_data = np.array([int(x[2:]) for x in total_sales.index])
y_data = np.array(total_sales.array)
m, b = np.polyfit(x_data, y_data, 1)
state_groups = data.groupby('state_id')
state_data = state_groups.mean()
x_data = range(1, last_date + 1)
for i, g in enumerate(state_groups.groups.keys()):
y_data = [state_data['d_' + str(x)].iloc[i] for x in x_data]
m, b = np.polyfit(x_data, y_data, 1)
category_groups = data.groupby('cat_id')
category_data = category_groups.mean()
x_data = range(1, last_date + 1)
for i, c in enumerate(category_groups.groups.keys()):
y_data = [category_data['d_' + str(x)].iloc[i] for x in x_data]
plt.pyplot.scatter(x_data, y_data, s=5, alpha=0.3)
m, b = np.polyfit(x_data, y_data, 1)
print('Category ', c, 'Gradient:', m, 'Intercept:', b)
plt.pyplot.plot([0, last_date], [b, m * last_date + b], linewidth=4, label=c)
plt.pyplot.legend(loc='upper left')
cs_group = data.groupby(['state_id', 'cat_id']).mean().reset_index()
colours = ['blue', 'darkorange', 'green']
for state in state_groups.groups.keys():
plt.pyplot.figure(figsize=(20, 12))
for i, c in enumerate(category_groups.groups.keys()):
plt.pyplot.subplot(int('33' + str(i + 1)))
y_data = cs_group[(cs_group.state_id == state) & (cs_group.cat_id == c)].iloc[0].tail(last_date).to_list()
plt.pyplot.scatter(x_data, y_data, s=5, alpha=0.3, c=colours[i])
m, b = np.polyfit(x_data, y_data, 1)
print('State: ' + state, 'Category: ' + c, 'Gradient:', m, 'Intercept:', b)
plt.pyplot.plot([0, last_date], [b, m * last_date + b], linewidth=4, label=c, c=colours[i])
plt.pyplot.legend(loc='upper left')
plt.pyplot.title(state) | code |
34144217/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib as plt
import matplotlib.pyplot  # 'import matplotlib' alone does not load pyplot, which the plt.pyplot calls below need
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dates = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/calendar.csv')
data = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sales_train_validation.csv')
sale_data = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sell_prices.csv')
submission = pd.read_csv('/kaggle/input/m5-forecasting-accuracy/sample_submission.csv')
last_date = 1913
original_features = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']
total_sales = data.mean()
x_data = np.array([int(x[2:]) for x in total_sales.index])
y_data = np.array(total_sales.array)
m, b = np.polyfit(x_data, y_data, 1)
state_groups = data.groupby('state_id')
state_data = state_groups.mean()
x_data = range(1, last_date + 1)
for i, g in enumerate(state_groups.groups.keys()):
y_data = [state_data['d_' + str(x)].iloc[i] for x in x_data]
m, b = np.polyfit(x_data, y_data, 1)
category_groups = data.groupby('cat_id')
category_data = category_groups.mean()
x_data = range(1, last_date + 1)
for i, c in enumerate(category_groups.groups.keys()):
y_data = [category_data['d_' + str(x)].iloc[i] for x in x_data]
plt.pyplot.scatter(x_data, y_data, s=5, alpha=0.3)
m, b = np.polyfit(x_data, y_data, 1)
print('Category ', c, 'Gradient:', m, 'Intercept:', b)
plt.pyplot.plot([0, last_date], [b, m * last_date + b], linewidth=4, label=c)
plt.pyplot.legend(loc='upper left')
cs_group = data.groupby(['state_id', 'cat_id']).mean().reset_index()
colours = ['blue', 'darkorange', 'green']
for state in state_groups.groups.keys():
plt.pyplot.figure(figsize=(20, 12))
for i, c in enumerate(category_groups.groups.keys()):
plt.pyplot.subplot(int('33' + str(i + 1)))
y_data = cs_group[(cs_group.state_id == state) & (cs_group.cat_id == c)].iloc[0].tail(last_date).to_list()
plt.pyplot.scatter(x_data, y_data, s=5, alpha=0.3, c=colours[i])
m, b = np.polyfit(x_data, y_data, 1)
print('State: ' + state, 'Category: ' + c, 'Gradient:', m, 'Intercept:', b)
plt.pyplot.plot([0, last_date], [b, m * last_date + b], linewidth=4, label=c, c=colours[i])
plt.pyplot.legend(loc='upper left')
plt.pyplot.title(state)
ss_group = data.groupby(['state_id', 'store_id']).mean().reset_index()
for state in state_groups.groups.keys():
store_group = ss_group[ss_group.state_id == state].groupby('store_id')
plt.pyplot.figure(figsize=(16, 4))
for i, s in enumerate(store_group.groups.keys()):
y_data = ss_group[ss_group.store_id == s].iloc[0].tail(last_date).to_list()
plt.pyplot.scatter(x_data, y_data, s=6, alpha=0.4)
m, b = np.polyfit(x_data, y_data, 1)
print('State: ' + state, 'Store: ' + c, 'Gradient:', m, 'Intercept:', b)
plt.pyplot.plot([0, last_date], [b, m * last_date + b], linewidth=4, label=s)
plt.pyplot.legend(loc='upper left')
plt.pyplot.title(state)
plt.pyplot.figure(figsize=(16, 4))
plt.pyplot.subplot(121)
y_data = ss_group[ss_group.store_id == 'WI_1'].iloc[0].tail(last_date).to_list()
plt.pyplot.scatter(x_data, y_data, s=6, alpha=0.4, label='WI_1')
plt.pyplot.legend(loc='upper left')
plt.pyplot.title('WI_1 Anomaly at day 700')
plt.pyplot.subplot(122)
y_data = ss_group[ss_group.store_id == 'WI_2'].iloc[0].tail(last_date).to_list()
plt.pyplot.scatter(x_data, y_data, s=6, alpha=0.4, label='WI_2', c='darkorange')
plt.pyplot.legend(loc='upper left')
plt.pyplot.title('WI_2 Anomaly at day 500') | code |
106213616/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/split-experiment-aggregated-data/ab_test_results_aggregated_views_clicks_5.csv')
df.info() | code |
106213616/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/split-experiment-aggregated-data/ab_test_results_aggregated_views_clicks_5.csv')
df.shape | code |
106213616/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/split-experiment-aggregated-data/ab_test_results_aggregated_views_clicks_5.csv')
df.head() | code |
106213616/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/split-experiment-aggregated-data/ab_test_results_aggregated_views_clicks_5.csv')
df.shape
df.isnull().sum() | code |
106213616/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/split-experiment-aggregated-data/ab_test_results_aggregated_views_clicks_5.csv')
df.shape
df.isnull().sum()
df.describe() | code |
106213616/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/split-experiment-aggregated-data/ab_test_results_aggregated_views_clicks_5.csv')
df['clicks'].hist() | code |
106213616/cell_5 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/split-experiment-aggregated-data/ab_test_results_aggregated_views_clicks_5.csv')
print('First 3 rows of data:\n')
df.head() | code |
32063375/cell_8 | [
"image_output_1.png"
] | import pandas as pd
# assumed aliases: SS and NN appear to be StandardScaler and KNeighborsRegressor,
# imported here so this extracted cell runs standalone
from sklearn.preprocessing import StandardScaler as SS
from sklearn.neighbors import KNeighborsRegressor as NN
hp = pd.read_csv('../input/london-house-prices/hpdemo.csv')
hp
scaler = SS()
scaler.fit(hp[['east', 'north', 'fl_area']])
hp_sc = scaler.transform(hp[['east', 'north', 'fl_area']])
mod1 = NN(n_neighbors=6, weights='uniform', p=2)
price = hp['price'] / 1000.0
mod1.fit(hp_sc, price) | code |
32063375/cell_15 | [
"text_plain_output_1.png"
] | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plot
import numpy as np
import pandas as pd
import sklearn as sk
# explicit submodule imports so sk.metrics and sk.model_selection resolve;
# SS and NN are assumed aliases for StandardScaler and KNeighborsRegressor
import sklearn.metrics
import sklearn.model_selection
from sklearn.preprocessing import StandardScaler as SS
from sklearn.neighbors import KNeighborsRegressor as NN
def print_summary(opt_reg_object):
params = opt_reg_object.best_estimator_.get_params()
score = -opt_reg_object.best_score_
return
hp = pd.read_csv('../input/london-house-prices/hpdemo.csv')
hp
scaler = SS()
scaler.fit(hp[['east', 'north', 'fl_area']])
hp_sc = scaler.transform(hp[['east', 'north', 'fl_area']])
mod1 = NN(n_neighbors=6, weights='uniform', p=2)
price = hp['price'] / 1000.0
mod1.fit(hp_sc, price)
mae = sk.metrics.make_scorer(sk.metrics.mean_absolute_error, greater_is_better=False)
mod_list = sk.model_selection.GridSearchCV(estimator=NN(), scoring=mae, param_grid={'n_neighbors': range(1, 35), 'weights': ['uniform', 'distance'], 'p': [1, 2]})
mod_list.fit(hp[['east', 'north', 'fl_area']], price)
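# build a regular 100x100 grid over the easting/northing extent so the fitted
# model can predict a price surface at fixed floor-area values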
east_mesh, north_mesh = np.meshgrid(np.linspace(505000, 555800, 100), np.linspace(158400, 199900, 100))
fl_mesh = np.zeros_like(east_mesh)
fl_mesh2 = np.zeros_like(east_mesh)
fl_mesh3 = np.zeros_like(east_mesh)
fl_mesh[:, :] = np.mean(hp['fl_area'])
fl_mesh2[:, :] = 75
fl_mesh3[:, :] = 125
regressor_df = np.array([np.ravel(east_mesh), np.ravel(north_mesh), np.ravel(fl_mesh)]).T
regressor_df2 = np.array([np.ravel(east_mesh), np.ravel(north_mesh), np.ravel(fl_mesh2)]).T
regressor_df3 = np.array([np.ravel(east_mesh), np.ravel(north_mesh), np.ravel(fl_mesh3)]).T
hp_pred = mod_list.predict(regressor_df)
hp_pred2 = mod_list.predict(regressor_df2)
hp_pred3 = mod_list.predict(regressor_df3)
hp_mesh = hp_pred.reshape(east_mesh.shape)
hp_mesh2 = hp_pred2.reshape(east_mesh.shape)
hp_mesh3 = hp_pred3.reshape(east_mesh.shape)
# Plot 1
fig = plot.figure()
ax = Axes3D(fig)
ax.plot_surface(east_mesh, north_mesh, hp_mesh, rstride=1, cstride=1, cmap='YlOrBr', lw=0.01)
plot.title('London House Prices')
ax.set_xlabel('Easting')
ax.set_ylabel('Northing')
ax.set_zlabel('Price at Mean Floor Area')
plot.show()
fig = plot.figure()
ax = Axes3D(fig)
ax.plot_surface(east_mesh, north_mesh, hp_mesh2, rstride=1, cstride=1, cmap='YlOrBr', lw=0.01)
plot.title('London House Prices')
ax.set_xlabel('Easting')
ax.set_ylabel('Northing')
ax.set_zlabel('Price at 75m Floor Area')
plot.show() | code |
32063375/cell_16 | [
"image_output_1.png"
] | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plot
import numpy as np
import pandas as pd
import sklearn as sk
# explicit submodule imports so sk.metrics and sk.model_selection resolve;
# SS and NN are assumed aliases for StandardScaler and KNeighborsRegressor
import sklearn.metrics
import sklearn.model_selection
from sklearn.preprocessing import StandardScaler as SS
from sklearn.neighbors import KNeighborsRegressor as NN
def print_summary(opt_reg_object):
params = opt_reg_object.best_estimator_.get_params()
score = -opt_reg_object.best_score_
return
hp = pd.read_csv('../input/london-house-prices/hpdemo.csv')
hp
scaler = SS()
scaler.fit(hp[['east', 'north', 'fl_area']])
hp_sc = scaler.transform(hp[['east', 'north', 'fl_area']])
mod1 = NN(n_neighbors=6, weights='uniform', p=2)
price = hp['price'] / 1000.0
mod1.fit(hp_sc, price)
mae = sk.metrics.make_scorer(sk.metrics.mean_absolute_error, greater_is_better=False)
mod_list = sk.model_selection.GridSearchCV(estimator=NN(), scoring=mae, param_grid={'n_neighbors': range(1, 35), 'weights': ['uniform', 'distance'], 'p': [1, 2]})
mod_list.fit(hp[['east', 'north', 'fl_area']], price)
east_mesh, north_mesh = np.meshgrid(np.linspace(505000, 555800, 100), np.linspace(158400, 199900, 100))
fl_mesh = np.zeros_like(east_mesh)
fl_mesh2 = np.zeros_like(east_mesh)
fl_mesh3 = np.zeros_like(east_mesh)
fl_mesh[:, :] = np.mean(hp['fl_area'])
fl_mesh2[:, :] = 75
fl_mesh3[:, :] = 125
regressor_df = np.array([np.ravel(east_mesh), np.ravel(north_mesh), np.ravel(fl_mesh)]).T
regressor_df2 = np.array([np.ravel(east_mesh), np.ravel(north_mesh), np.ravel(fl_mesh2)]).T
regressor_df3 = np.array([np.ravel(east_mesh), np.ravel(north_mesh), np.ravel(fl_mesh3)]).T
hp_pred = mod_list.predict(regressor_df)
hp_pred2 = mod_list.predict(regressor_df2)
hp_pred3 = mod_list.predict(regressor_df3)
hp_mesh = hp_pred.reshape(east_mesh.shape)
hp_mesh2 = hp_pred2.reshape(east_mesh.shape)
hp_mesh3 = hp_pred3.reshape(east_mesh.shape)
# Plot 1
fig = plot.figure()
ax = Axes3D(fig)
ax.plot_surface(east_mesh, north_mesh, hp_mesh, rstride=1, cstride=1, cmap='YlOrBr', lw=0.01)
plot.title('London House Prices')
ax.set_xlabel('Easting')
ax.set_ylabel('Northing')
ax.set_zlabel('Price at Mean Floor Area')
plot.show()
# Plot 2
fig = plot.figure()
ax = Axes3D(fig)
ax.plot_surface(east_mesh, north_mesh, hp_mesh2, rstride=1, cstride=1, cmap='YlOrBr', lw=0.01)
plot.title('London House Prices')
ax.set_xlabel('Easting')
ax.set_ylabel('Northing')
ax.set_zlabel('Price at 75m Floor Area')
plot.show()
fig = plot.figure()
ax = Axes3D(fig)
ax.plot_surface(east_mesh, north_mesh, hp_mesh3, rstride=1, cstride=1, cmap='YlOrBr', lw=0.01)
plot.title('London House Prices')
ax.set_xlabel('Easting')
ax.set_ylabel('Northing')
ax.set_zlabel('Price at 125m Floor Area')
plot.show() | code |
32063375/cell_14 | [
"text_plain_output_1.png"
] | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plot
import numpy as np
import pandas as pd
import sklearn as sk
# explicit submodule imports so sk.metrics and sk.model_selection resolve;
# SS and NN are assumed aliases for StandardScaler and KNeighborsRegressor
import sklearn.metrics
import sklearn.model_selection
from sklearn.preprocessing import StandardScaler as SS
from sklearn.neighbors import KNeighborsRegressor as NN
def print_summary(opt_reg_object):
params = opt_reg_object.best_estimator_.get_params()
score = -opt_reg_object.best_score_
return
hp = pd.read_csv('../input/london-house-prices/hpdemo.csv')
hp
scaler = SS()
scaler.fit(hp[['east', 'north', 'fl_area']])
hp_sc = scaler.transform(hp[['east', 'north', 'fl_area']])
mod1 = NN(n_neighbors=6, weights='uniform', p=2)
price = hp['price'] / 1000.0
mod1.fit(hp_sc, price)
mae = sk.metrics.make_scorer(sk.metrics.mean_absolute_error, greater_is_better=False)
mod_list = sk.model_selection.GridSearchCV(estimator=NN(), scoring=mae, param_grid={'n_neighbors': range(1, 35), 'weights': ['uniform', 'distance'], 'p': [1, 2]})
mod_list.fit(hp[['east', 'north', 'fl_area']], price)
east_mesh, north_mesh = np.meshgrid(np.linspace(505000, 555800, 100), np.linspace(158400, 199900, 100))
fl_mesh = np.zeros_like(east_mesh)
fl_mesh2 = np.zeros_like(east_mesh)
fl_mesh3 = np.zeros_like(east_mesh)
fl_mesh[:, :] = np.mean(hp['fl_area'])
fl_mesh2[:, :] = 75
fl_mesh3[:, :] = 125
regressor_df = np.array([np.ravel(east_mesh), np.ravel(north_mesh), np.ravel(fl_mesh)]).T
regressor_df2 = np.array([np.ravel(east_mesh), np.ravel(north_mesh), np.ravel(fl_mesh2)]).T
regressor_df3 = np.array([np.ravel(east_mesh), np.ravel(north_mesh), np.ravel(fl_mesh3)]).T
hp_pred = mod_list.predict(regressor_df)
hp_pred2 = mod_list.predict(regressor_df2)
hp_pred3 = mod_list.predict(regressor_df3)
hp_mesh = hp_pred.reshape(east_mesh.shape)
hp_mesh2 = hp_pred2.reshape(east_mesh.shape)
hp_mesh3 = hp_pred3.reshape(east_mesh.shape)
fig = plot.figure()
ax = Axes3D(fig)
ax.plot_surface(east_mesh, north_mesh, hp_mesh, rstride=1, cstride=1, cmap='YlOrBr', lw=0.01)
plot.title('London House Prices')
ax.set_xlabel('Easting')
ax.set_ylabel('Northing')
ax.set_zlabel('Price at Mean Floor Area')
plot.show() | code |
32063375/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import sklearn as sk
# explicit submodule imports so sk.metrics and sk.model_selection resolve;
# SS and NN are assumed aliases for StandardScaler and KNeighborsRegressor
import sklearn.metrics
import sklearn.model_selection
from sklearn.preprocessing import StandardScaler as SS
from sklearn.neighbors import KNeighborsRegressor as NN
def print_summary(opt_reg_object):
params = opt_reg_object.best_estimator_.get_params()
score = -opt_reg_object.best_score_
return
hp = pd.read_csv('../input/london-house-prices/hpdemo.csv')
hp
scaler = SS()
scaler.fit(hp[['east', 'north', 'fl_area']])
hp_sc = scaler.transform(hp[['east', 'north', 'fl_area']])
mod1 = NN(n_neighbors=6, weights='uniform', p=2)
price = hp['price'] / 1000.0
mod1.fit(hp_sc, price)
mae = sk.metrics.make_scorer(sk.metrics.mean_absolute_error, greater_is_better=False)
mod_list = sk.model_selection.GridSearchCV(estimator=NN(), scoring=mae, param_grid={'n_neighbors': range(1, 35), 'weights': ['uniform', 'distance'], 'p': [1, 2]})
mod_list.fit(hp[['east', 'north', 'fl_area']], price)
print_summary(mod_list) | code |