path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
17145266/cell_18 | [
"text_plain_output_1.png"
] | (df_train.shape, df_valid.shape)
path = Path('../input/')
path.ls()
src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
data = src.label_from_df(cols=2).databunch(bs=48)
data.vocab.itos[:10]
data.train_ds[0][0]
data.train_ds[0][0].data[:10] | code |
17145266/cell_32 | [
"text_html_output_1.png"
] | (df_train.shape, df_valid.shape)
path = Path('../input/')
path.ls()
src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
bs = 48
src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
data_lm = src_lm.label_for_lm().databunch(bs=bs)
data_lm.vocab.itos[:20]
data_lm.train_ds[0][0].data[:10]
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3, model_dir='/temp/model/')
learn.lr_find()
learn.fit_one_cycle(4, 0.05, moms=(0.8, 0.7))
learn.save('fit_head')
learn.load('fit_head')
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(12, max_lr=slice(1e-05, 0.001), moms=(0.8, 0.7))
learn.save('fine_tuned')
learn.load('fine_tuned') | code |
17145266/cell_28 | [
"text_html_output_1.png"
] | (df_train.shape, df_valid.shape)
path = Path('../input/')
path.ls()
src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
bs = 48
src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
data_lm = src_lm.label_for_lm().databunch(bs=bs)
data_lm.vocab.itos[:20]
data_lm.train_ds[0][0].data[:10]
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3, model_dir='/temp/model/')
learn.lr_find()
learn.fit_one_cycle(4, 0.05, moms=(0.8, 0.7)) | code |
17145266/cell_8 | [
"text_plain_output_1.png"
] | (df_train.shape, df_valid.shape) | code |
17145266/cell_15 | [
"text_html_output_1.png"
] | (df_train.shape, df_valid.shape)
path = Path('../input/')
path.ls()
src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
data = src.label_from_df(cols=2).databunch(bs=48)
data.show_batch() | code |
17145266/cell_16 | [
"text_plain_output_1.png"
] | (df_train.shape, df_valid.shape)
path = Path('../input/')
path.ls()
src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
data = src.label_from_df(cols=2).databunch(bs=48)
data.vocab.itos[:10] | code |
17145266/cell_17 | [
"text_plain_output_1.png"
] | (df_train.shape, df_valid.shape)
path = Path('../input/')
path.ls()
src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
data = src.label_from_df(cols=2).databunch(bs=48)
data.vocab.itos[:10]
data.train_ds[0][0] | code |
17145266/cell_35 | [
"text_plain_output_1.png"
] | (df_train.shape, df_valid.shape)
path = Path('../input/')
path.ls()
src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
bs = 48
src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
data_lm = src_lm.label_for_lm().databunch(bs=bs)
data_lm.vocab.itos[:20]
data_lm.train_ds[0][0].data[:10]
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3, model_dir='/temp/model/')
learn.lr_find()
learn.fit_one_cycle(4, 0.05, moms=(0.8, 0.7))
learn.save('fit_head')
learn.load('fit_head')
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(12, max_lr=slice(1e-05, 0.001), moms=(0.8, 0.7))
learn.save('fine_tuned')
learn.load('fine_tuned')
learn.save_encoder('fine_tuned_enc')
TEXT = 'He screamed like'
N_WORDS = 10
N_SENTENCES = 2
print('\n'.join((learn.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))) | code |
17145266/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | (df_train.shape, df_valid.shape)
path = Path('../input/')
path.ls()
src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
bs = 48
src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
data_lm = src_lm.label_for_lm().databunch(bs=bs)
data_lm.vocab.itos[:20]
data_lm.train_ds[0][0].data[:10]
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3, model_dir='/temp/model/')
learn.lr_find()
learn.fit_one_cycle(4, 0.05, moms=(0.8, 0.7))
learn.save('fit_head')
learn.load('fit_head')
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(12, max_lr=slice(1e-05, 0.001), moms=(0.8, 0.7)) | code |
17145266/cell_24 | [
"text_plain_output_1.png"
] | (df_train.shape, df_valid.shape)
path = Path('../input/')
path.ls()
src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
bs = 48
src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
data_lm = src_lm.label_for_lm().databunch(bs=bs)
data_lm.vocab.itos[:20] | code |
17145266/cell_27 | [
"text_plain_output_1.png"
] | (df_train.shape, df_valid.shape)
path = Path('../input/')
path.ls()
src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
bs = 48
src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1))
data_lm = src_lm.label_for_lm().databunch(bs=bs)
data_lm.vocab.itos[:20]
data_lm.train_ds[0][0].data[:10]
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3, model_dir='/temp/model/')
learn.lr_find()
learn.recorder.plot(suggestion=True) | code |
17145266/cell_12 | [
"text_plain_output_1.png"
] | path = Path('../input/')
path.ls() | code |
17145266/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
df = pd.read_json('../input/Sarcasm_Headlines_Dataset_v2.json', lines=True)
df.shape | code |
128044967/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import ultralytics
ultralytics.checks() | code |
128044967/cell_1 | [
"text_plain_output_1.png"
] | import cv2
import os
import shutil
import warnings # попытка поймать сообщения: libpng warning: iCCP: known incorrect sRGB profile
import numpy as np
import pandas as pd
import cv2
from PIL import Image
import warnings
warnings.filterwarnings('error')
import os
import shutil
for dirname, _, filenames in os.walk('/kaggle/input/fruit-and-vegetable-image-recognition'):
print(dirname, 'Count:', len(filenames))
for filename in filenames:
try:
img = cv2.imread(os.path.join(dirname, filename))
except:
print('libpng warning: iCCP: known incorrect sRGB profile:', os.path.join(dirname, filename))
try:
if len(img.shape) != 3:
print(os.path.join(dirname, filename), 'img.shape', img.shape)
else:
patchNew = dirname.replace('input', 'working')
if not os.path.exists(patchNew):
os.makedirs(patchNew)
shutil.copy(os.path.join(dirname, filename), patchNew)
except AttributeError:
print('Ошибочный файл:', os.path.join(dirname, filename))
del img | code |
128044967/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from ultralytics import YOLO
from ultralytics import YOLO
model = YOLO('yolov8n-cls.pt')
model.train(data='/kaggle/working/fruit-and-vegetable-image-recognition/', epochs=3) | code |
16133438/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum()
df[df.duplicated()]
df = df.drop_duplicates()
df.shape
df.corr()
df.corr()['count']
df.windspeed.median()
x = df.drop(columns=['count'])
x.columns
y = df[['count']]
y.columns | code |
16133438/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum()
df[df.duplicated()]
df = df.drop_duplicates()
df.shape
df.windspeed.plot(kind='box') | code |
16133438/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum() | code |
16133438/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape | code |
16133438/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error,mean_squared_error,r2_score
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_x, train_y)
predict_train = model.predict(train_x)
predict_test = model.predict(test_x)
r2_train = r2_score(train_y, predict_train)
r2_test = r2_score(test_y, predict_test)
print('r2_train: ', r2_train)
print('r2_test: ', r2_test) | code |
16133438/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_x, train_y) | code |
16133438/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum()
df[df.duplicated()]
df = df.drop_duplicates()
df.shape
df.corr()
df.corr()['count']
df.windspeed.median()
x = df.drop(columns=['count'])
x.columns | code |
16133438/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns | code |
16133438/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error,mean_squared_error,r2_score
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_x, train_y)
predict_train = model.predict(train_x)
predict_test = model.predict(test_x)
train_MAE = mean_absolute_error(train_y, predict_train)
test_MAE = mean_absolute_error(test_y, predict_test)
print('train_MAE: ', train_MAE)
print('test_MAE: ', test_MAE) | code |
16133438/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum()
df[df.duplicated()]
df = df.drop_duplicates()
df.shape
df.corr()
df.corr()['count']
df.windspeed.median() | code |
16133438/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16133438/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.info() | code |
16133438/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
plt.figure(figsize=(10, 3))
corr = df.corr()
sns.heatmap(corr, annot=True) | code |
16133438/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error,mean_squared_error,r2_score
import numpy as np # linear algebra
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_x, train_y)
predict_train = model.predict(train_x)
predict_test = model.predict(test_x)
train_MSE = mean_squared_error(train_y, predict_train)
test_MSE = mean_squared_error(test_y, predict_test)
train_RMSE = np.sqrt(train_MSE)
test_RMSE = np.sqrt(test_MSE)
train_MAPE = np.mean(np.abs(train_y, predict_train))
test_MAPE = np.mean(np.abs(test_y, predict_test))
print('train_MAPE: ', train_MAPE)
print('test_MAPE: ', test_MAPE) | code |
16133438/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum() | code |
16133438/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum()
df[df.duplicated()]
df = df.drop_duplicates()
df.shape
df.registered.plot(kind='box') | code |
16133438/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum()
df[df.duplicated()]
df = df.drop_duplicates()
df.shape
df.corr() | code |
16133438/cell_3 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.head() | code |
16133438/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum()
df[df.duplicated()]
df = df.drop_duplicates()
df.shape
df.corr()
df.corr()['count'] | code |
16133438/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum()
df[df.duplicated()]
df = df.drop_duplicates()
df.shape
df.casual.plot(kind='box') | code |
16133438/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum()
df[df.duplicated()] | code |
16133438/cell_27 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error,mean_squared_error,r2_score
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_x, train_y)
predict_train = model.predict(train_x)
predict_test = model.predict(test_x)
train_MSE = mean_squared_error(train_y, predict_train)
test_MSE = mean_squared_error(test_y, predict_test)
print('train_MSE: ', train_MSE)
print('test_MSE: ', test_MSE) | code |
16133438/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T
df.columns
df.isna().sum()
df.duplicated().sum()
df[df.duplicated()]
df = df.drop_duplicates()
df.shape | code |
16133438/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bike_share.csv')
df.shape
df.describe().T | code |
2026131/cell_13 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist() | code |
2026131/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.columns[test.isnull().any()].tolist() | code |
2026131/cell_57 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn import linear_model
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainCat.columns[trainCat.isnull().any()].tolist()
trainCat1 = trainCat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
trainNum['MSSubClass'] = le.fit_transform(trainNum['MSSubClass'].values)
trainNum['OverallQual'] = le.fit_transform(trainNum['OverallQual'].values)
trainNum['OverallCond'] = le.fit_transform(trainNum['OverallCond'].values)
trainNum['YearBuilt'] = le.fit_transform(trainNum['YearBuilt'].values)
trainNum['YearRemodAdd'] = le.fit_transform(trainNum['YearRemodAdd'].values)
trainNum['GarageYrBlt'] = le.fit_transform(trainNum['GarageYrBlt'].values)
trainNum['YrSold'] = le.fit_transform(trainNum['YrSold'].values)
trainCatNormalized = trainCat1.apply(le.fit_transform)
trainFinal = pd.concat([trainNum, trainCatNormalized], axis=1)
from sklearn import linear_model
LR = linear_model.LinearRegression()
X = trainFinal.drop(['Id', 'SalePrice'], axis=1)
y = trainFinal['SalePrice']
LR.fit(X, y)
LR.score(X, y)
test.columns[test.isnull().any()].tolist()
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
testNum.columns[testNum.isnull().any()].tolist()
testCat.columns[testCat.isnull().any()].tolist()
testCat1 = testCat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
testNum['MSSubClass'] = le.fit_transform(testNum['MSSubClass'].astype(str))
testNum['OverallQual'] = le.fit_transform(testNum['OverallQual'].astype(str))
testNum['OverallCond'] = le.fit_transform(testNum['OverallCond'].astype(str))
testNum['YearBuilt'] = le.fit_transform(testNum['YearBuilt'].astype(str))
testNum['YearRemodAdd'] = le.fit_transform(testNum['YearRemodAdd'].astype(str))
testNum['GarageYrBlt'] = le.fit_transform(testNum['GarageYrBlt'].astype(str))
testNum['YrSold'] = le.fit_transform(testNum['YrSold'].astype(str))
testCatNormalized = testCat1.apply(le.fit_transform)
testFinal = pd.concat([testNum, testCatNormalized], axis=1)
testPredicted = LR.predict(testFinal.drop('Id', axis=1))
np.array(list(zip(testFinal.Id, testPredicted))) | code |
2026131/cell_34 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn import linear_model
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainCat.columns[trainCat.isnull().any()].tolist()
trainCat1 = trainCat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
trainNum['MSSubClass'] = le.fit_transform(trainNum['MSSubClass'].values)
trainNum['OverallQual'] = le.fit_transform(trainNum['OverallQual'].values)
trainNum['OverallCond'] = le.fit_transform(trainNum['OverallCond'].values)
trainNum['YearBuilt'] = le.fit_transform(trainNum['YearBuilt'].values)
trainNum['YearRemodAdd'] = le.fit_transform(trainNum['YearRemodAdd'].values)
trainNum['GarageYrBlt'] = le.fit_transform(trainNum['GarageYrBlt'].values)
trainNum['YrSold'] = le.fit_transform(trainNum['YrSold'].values)
trainCatNormalized = trainCat1.apply(le.fit_transform)
trainFinal = pd.concat([trainNum, trainCatNormalized], axis=1)
from sklearn import linear_model
LR = linear_model.LinearRegression()
X = trainFinal.drop(['Id', 'SalePrice'], axis=1)
y = trainFinal['SalePrice']
LR.fit(X, y)
LR.score(X, y) | code |
2026131/cell_30 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainCat.columns[trainCat.isnull().any()].tolist()
trainCat1 = trainCat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
trainNum['MSSubClass'] = le.fit_transform(trainNum['MSSubClass'].values)
trainNum['OverallQual'] = le.fit_transform(trainNum['OverallQual'].values)
trainNum['OverallCond'] = le.fit_transform(trainNum['OverallCond'].values)
trainNum['YearBuilt'] = le.fit_transform(trainNum['YearBuilt'].values)
trainNum['YearRemodAdd'] = le.fit_transform(trainNum['YearRemodAdd'].values)
trainNum['GarageYrBlt'] = le.fit_transform(trainNum['GarageYrBlt'].values)
trainNum['YrSold'] = le.fit_transform(trainNum['YrSold'].values)
trainCatNormalized = trainCat1.apply(le.fit_transform)
trainFinal = pd.concat([trainNum, trainCatNormalized], axis=1)
trainFinal.head() | code |
2026131/cell_55 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainCat.columns[trainCat.isnull().any()].tolist()
trainCat1 = trainCat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
trainNum['MSSubClass'] = le.fit_transform(trainNum['MSSubClass'].values)
trainNum['OverallQual'] = le.fit_transform(trainNum['OverallQual'].values)
trainNum['OverallCond'] = le.fit_transform(trainNum['OverallCond'].values)
trainNum['YearBuilt'] = le.fit_transform(trainNum['YearBuilt'].values)
trainNum['YearRemodAdd'] = le.fit_transform(trainNum['YearRemodAdd'].values)
trainNum['GarageYrBlt'] = le.fit_transform(trainNum['GarageYrBlt'].values)
trainNum['YrSold'] = le.fit_transform(trainNum['YrSold'].values)
trainCatNormalized = trainCat1.apply(le.fit_transform)
trainFinal = pd.concat([trainNum, trainCatNormalized], axis=1)
test.columns[test.isnull().any()].tolist()
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
testNum.columns[testNum.isnull().any()].tolist()
testCat.columns[testCat.isnull().any()].tolist()
testCat1 = testCat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
testNum['MSSubClass'] = le.fit_transform(testNum['MSSubClass'].astype(str))
testNum['OverallQual'] = le.fit_transform(testNum['OverallQual'].astype(str))
testNum['OverallCond'] = le.fit_transform(testNum['OverallCond'].astype(str))
testNum['YearBuilt'] = le.fit_transform(testNum['YearBuilt'].astype(str))
testNum['YearRemodAdd'] = le.fit_transform(testNum['YearRemodAdd'].astype(str))
testNum['GarageYrBlt'] = le.fit_transform(testNum['GarageYrBlt'].astype(str))
testNum['YrSold'] = le.fit_transform(testNum['YrSold'].astype(str))
testCatNormalized = testCat1.apply(le.fit_transform)
testFinal = pd.concat([testNum, testCatNormalized], axis=1)
testFinal.head() | code |
2026131/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainCat.columns[trainCat.isnull().any()].tolist()
trainCat1 = trainCat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
trainNum['MSSubClass'] = le.fit_transform(trainNum['MSSubClass'].values)
trainNum['OverallQual'] = le.fit_transform(trainNum['OverallQual'].values)
trainNum['OverallCond'] = le.fit_transform(trainNum['OverallCond'].values)
trainNum['YearBuilt'] = le.fit_transform(trainNum['YearBuilt'].values)
trainNum['YearRemodAdd'] = le.fit_transform(trainNum['YearRemodAdd'].values)
trainNum['GarageYrBlt'] = le.fit_transform(trainNum['GarageYrBlt'].values)
trainNum['YrSold'] = le.fit_transform(trainNum['YrSold'].values)
trainCatNormalized = trainCat1.apply(le.fit_transform) | code |
2026131/cell_41 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
test.columns[test.isnull().any()].tolist()
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
testNum.columns[testNum.isnull().any()].tolist() | code |
2026131/cell_19 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainNum['GarageYrBlt'].fillna(trainNum['GarageYrBlt'].value_counts().idxmax(), inplace=True) | code |
2026131/cell_50 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainCat.columns[trainCat.isnull().any()].tolist()
trainCat1 = trainCat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
trainNum['MSSubClass'] = le.fit_transform(trainNum['MSSubClass'].values)
trainNum['OverallQual'] = le.fit_transform(trainNum['OverallQual'].values)
trainNum['OverallCond'] = le.fit_transform(trainNum['OverallCond'].values)
trainNum['YearBuilt'] = le.fit_transform(trainNum['YearBuilt'].values)
trainNum['YearRemodAdd'] = le.fit_transform(trainNum['YearRemodAdd'].values)
trainNum['GarageYrBlt'] = le.fit_transform(trainNum['GarageYrBlt'].values)
trainNum['YrSold'] = le.fit_transform(trainNum['YrSold'].values)
trainCatNormalized = trainCat1.apply(le.fit_transform)
test.columns[test.isnull().any()].tolist()
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
testNum.columns[testNum.isnull().any()].tolist()
testNum['MSSubClass'] = le.fit_transform(testNum['MSSubClass'].astype(str))
testNum['OverallQual'] = le.fit_transform(testNum['OverallQual'].astype(str))
testNum['OverallCond'] = le.fit_transform(testNum['OverallCond'].astype(str))
testNum['YearBuilt'] = le.fit_transform(testNum['YearBuilt'].astype(str))
testNum['YearRemodAdd'] = le.fit_transform(testNum['YearRemodAdd'].astype(str))
testNum['GarageYrBlt'] = le.fit_transform(testNum['GarageYrBlt'].astype(str))
testNum['YrSold'] = le.fit_transform(testNum['YrSold'].astype(str)) | code |
2026131/cell_7 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist() | code |
2026131/cell_45 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
test.columns[test.isnull().any()].tolist()
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
testNum.columns[testNum.isnull().any()].tolist()
testNum['BsmtFinSF1'].fillna(testNum['BsmtFinSF1'].mean(), inplace=True)
testNum['BsmtFinSF2'].fillna(testNum['BsmtFinSF2'].mean(), inplace=True)
testNum['BsmtUnfSF'].fillna(testNum['BsmtUnfSF'].mean(), inplace=True)
testNum['TotalBsmtSF'].fillna(testNum['TotalBsmtSF'].mean(), inplace=True)
testNum['BsmtFullBath'].fillna(testNum['BsmtFullBath'].mean(), inplace=True)
testNum['BsmtHalfBath'].fillna(testNum['BsmtHalfBath'].mean(), inplace=True)
testNum['GarageCars'].fillna(testNum['GarageCars'].mean(), inplace=True)
testNum['GarageArea'].fillna(testNum['GarageArea'].mean(), inplace=True)
testNum['LotFrontage'].fillna(testNum['LotFrontage'].mean(), inplace=True)
testNum['MasVnrArea'].fillna(testNum['MasVnrArea'].mean(), inplace=True)
testNum['GarageYrBlt'].fillna(testNum['GarageYrBlt'].value_counts().idxmax(), inplace=True) | code |
2026131/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainCat.columns[trainCat.isnull().any()].tolist() | code |
2026131/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head() | code |
2026131/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainNum['LotFrontage'].fillna(trainNum['LotFrontage'].mean(), inplace=True)
trainNum['MasVnrArea'].fillna(trainNum['MasVnrArea'].mean(), inplace=True) | code |
2026131/cell_43 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
test.columns[test.isnull().any()].tolist()
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
testCat.columns[testCat.isnull().any()].tolist() | code |
2026131/cell_37 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
train.columns[train.isnull().any()].tolist()
test.columns[test.isnull().any()].tolist()
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
test.columns[test.isnull().any()].tolist() | code |
2026131/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns))) | code |
16111049/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/'
df_train = pd.read_csv(f'{path}train.csv', index_col='PassengerId')
df_test = pd.read_csv(f'{path}test.csv', index_col='PassengerId')
target = df_train['Survived']
target.columns = ['Survived']
df_train = df_train.drop(labels='Survived', axis=1)
df_train['Training_set'] = True
df_test['Training_set'] = False
df_full = pd.concat([df_train, df_test])
df_full = df_full.drop(labels=['Ticket', 'Name', 'Cabin'], axis=1)
df_full
"count_nosurname = 0\nfor i,(name) in enumerate(df_full['Name']):\n name = name.strip()\n ind = name.find(',')\n indw = name.find(' ')\n if(ind!=-1):\n df_full.at[i,'Name'] = name[0:ind]\n else:\n count_nosurname += 1\n df_full.at[i,'Name'] = name[0:indw]\nprint(count_nosurname)\ndf_full.drop(index = 0, axis = 0) "
df_full.isnull().sum()[df_full.isnull().sum() > 0]
df_full.Age = df_full.Age.fillna(df_full.Age.mean())
df_full.Fare = df_full.Fare.fillna(df_full.Fare.mean())
df_full.Embarked = df_full.fillna(df_full.Embarked.mode()[0])
df_full = df_full.interpolate()
df_full = pd.get_dummies(df_full)
df_full
df_train = df_full[df_full['Training_set'] == True]
df_test = df_full[df_full['Training_set'] == False]
df_train.drop(labels='Training_set', inplace=True, axis=1)
df_test.drop(labels='Training_set', inplace=True, axis=1) | code |
16111049/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/'
df_train = pd.read_csv(f'{path}train.csv', index_col='PassengerId')
df_test = pd.read_csv(f'{path}test.csv', index_col='PassengerId')
target = df_train['Survived']
target.columns = ['Survived']
df_train = df_train.drop(labels='Survived', axis=1)
df_train['Training_set'] = True
df_test['Training_set'] = False
df_full = pd.concat([df_train, df_test])
df_full = df_full.drop(labels=['Ticket', 'Name', 'Cabin'], axis=1)
df_full
"count_nosurname = 0\nfor i,(name) in enumerate(df_full['Name']):\n name = name.strip()\n ind = name.find(',')\n indw = name.find(' ')\n if(ind!=-1):\n df_full.at[i,'Name'] = name[0:ind]\n else:\n count_nosurname += 1\n df_full.at[i,'Name'] = name[0:indw]\nprint(count_nosurname)\ndf_full.drop(index = 0, axis = 0) " | code |
16111049/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/'
df_train = pd.read_csv(f'{path}train.csv', index_col='PassengerId')
df_test = pd.read_csv(f'{path}test.csv', index_col='PassengerId')
target = df_train['Survived']
target.columns = ['Survived']
df_train = df_train.drop(labels='Survived', axis=1)
df_train['Training_set'] = True
df_test['Training_set'] = False
df_full = pd.concat([df_train, df_test])
df_full = df_full.drop(labels=['Ticket', 'Name', 'Cabin'], axis=1)
df_full
"count_nosurname = 0\nfor i,(name) in enumerate(df_full['Name']):\n name = name.strip()\n ind = name.find(',')\n indw = name.find(' ')\n if(ind!=-1):\n df_full.at[i,'Name'] = name[0:ind]\n else:\n count_nosurname += 1\n df_full.at[i,'Name'] = name[0:indw]\nprint(count_nosurname)\ndf_full.drop(index = 0, axis = 0) "
df_full.isnull().sum()[df_full.isnull().sum() > 0]
df_full.Age = df_full.Age.fillna(df_full.Age.mean())
df_full.Fare = df_full.Fare.fillna(df_full.Fare.mean())
df_full.Embarked = df_full.fillna(df_full.Embarked.mode()[0])
df_full = df_full.interpolate()
df_full = pd.get_dummies(df_full)
df_full | code |
16111049/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import torch
from torch import nn
import torch.nn.functional as F
from torch import optim
import sklearn
import os
print(os.listdir('../input')) | code |
16111049/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/'
df_train = pd.read_csv(f'{path}train.csv', index_col='PassengerId')
df_test = pd.read_csv(f'{path}test.csv', index_col='PassengerId')
target = df_train['Survived']
target.columns = ['Survived']
df_train = df_train.drop(labels='Survived', axis=1)
df_train['Training_set'] = True
df_test['Training_set'] = False
df_full = pd.concat([df_train, df_test])
df_full = df_full.drop(labels=['Ticket', 'Name', 'Cabin'], axis=1)
df_full
"count_nosurname = 0\nfor i,(name) in enumerate(df_full['Name']):\n name = name.strip()\n ind = name.find(',')\n indw = name.find(' ')\n if(ind!=-1):\n df_full.at[i,'Name'] = name[0:ind]\n else:\n count_nosurname += 1\n df_full.at[i,'Name'] = name[0:indw]\nprint(count_nosurname)\ndf_full.drop(index = 0, axis = 0) "
df_full.isnull().sum()[df_full.isnull().sum() > 0]
df_full.Age = df_full.Age.fillna(df_full.Age.mean())
df_full.Fare = df_full.Fare.fillna(df_full.Fare.mean())
df_full.Embarked = df_full.fillna(df_full.Embarked.mode()[0])
df_full = df_full.interpolate()
df_full = pd.get_dummies(df_full)
df_full
df_full.info() | code |
16111049/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/'
df_train = pd.read_csv(f'{path}train.csv', index_col='PassengerId')
df_test = pd.read_csv(f'{path}test.csv', index_col='PassengerId')
target = df_train['Survived']
target.columns = ['Survived']
df_train = df_train.drop(labels='Survived', axis=1)
df_train['Training_set'] = True
df_test['Training_set'] = False
df_full = pd.concat([df_train, df_test])
df_full = df_full.drop(labels=['Ticket', 'Name', 'Cabin'], axis=1)
df_full
"count_nosurname = 0\nfor i,(name) in enumerate(df_full['Name']):\n name = name.strip()\n ind = name.find(',')\n indw = name.find(' ')\n if(ind!=-1):\n df_full.at[i,'Name'] = name[0:ind]\n else:\n count_nosurname += 1\n df_full.at[i,'Name'] = name[0:indw]\nprint(count_nosurname)\ndf_full.drop(index = 0, axis = 0) "
df_full.isnull().sum()[df_full.isnull().sum() > 0] | code |
105216451/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum()
data.columns
distplot_df=data.loc[:,['Age', 'Flight Distance',
'Departure Delay', 'Arrival Delay', 'Departure and Arrival Time Convenience', 'Ease of Online Booking',
'Check-in Service','Online Boarding','Gate Location','On-board Service']]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="red",fit_kws={"color":"darkgreen"});
distplot_df=data.loc[:,['Gate Location',
'On-board Service', 'Seat Comfort', 'Leg Room Service', 'Cleanliness',
'Food and Drink', 'In-flight Service', 'In-flight Wifi Service',
'In-flight Entertainment', 'Baggage Handling',]]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="blue",fit_kws={"color":"darkred"});
cols = 5
rows = 1
countplot_df=data.loc[:,['Customer Type', 'Type of Travel','Class','Satisfaction']]
fig = plt.figure(figsize= (18,7))
all_cats = data.select_dtypes(include='object')
for i, col in enumerate(countplot_df,1):
ax=fig.add_subplot(rows, cols, i+1)
sns.countplot(x=data[col], data=data,ax=ax,palette = "Set1")
fig.tight_layout()
plt.show()
data.isnull().sum()
sns.stripplot(y=data['Age'], x=data['Satisfaction'], palette='Set2', alpha=0.1) | code |
105216451/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum()
data.columns
distplot_df=data.loc[:,['Age', 'Flight Distance',
'Departure Delay', 'Arrival Delay', 'Departure and Arrival Time Convenience', 'Ease of Online Booking',
'Check-in Service','Online Boarding','Gate Location','On-board Service']]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="red",fit_kws={"color":"darkgreen"});
distplot_df = data.loc[:, ['Gate Location', 'On-board Service', 'Seat Comfort', 'Leg Room Service', 'Cleanliness', 'Food and Drink', 'In-flight Service', 'In-flight Wifi Service', 'In-flight Entertainment', 'Baggage Handling']]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f = plt.subplot(5, 2, i)
sns.distplot(distplot_df[column], color='blue', fit_kws={'color': 'darkred'}) | code |
105216451/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum() | code |
105216451/cell_23 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum()
data.columns
distplot_df=data.loc[:,['Age', 'Flight Distance',
'Departure Delay', 'Arrival Delay', 'Departure and Arrival Time Convenience', 'Ease of Online Booking',
'Check-in Service','Online Boarding','Gate Location','On-board Service']]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="red",fit_kws={"color":"darkgreen"});
distplot_df=data.loc[:,['Gate Location',
'On-board Service', 'Seat Comfort', 'Leg Room Service', 'Cleanliness',
'Food and Drink', 'In-flight Service', 'In-flight Wifi Service',
'In-flight Entertainment', 'Baggage Handling',]]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="blue",fit_kws={"color":"darkred"});
cols = 5
rows = 1
countplot_df=data.loc[:,['Customer Type', 'Type of Travel','Class','Satisfaction']]
fig = plt.figure(figsize= (18,7))
all_cats = data.select_dtypes(include='object')
for i, col in enumerate(countplot_df,1):
ax=fig.add_subplot(rows, cols, i+1)
sns.countplot(x=data[col], data=data,ax=ax,palette = "Set1")
fig.tight_layout()
plt.show()
data.isnull().sum()
data = data.loc[data.Age < 81]
data.shape[0] | code |
105216451/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.info() | code |
105216451/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum()
data.columns | code |
105216451/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum()
data.columns
distplot_df=data.loc[:,['Age', 'Flight Distance',
'Departure Delay', 'Arrival Delay', 'Departure and Arrival Time Convenience', 'Ease of Online Booking',
'Check-in Service','Online Boarding','Gate Location','On-board Service']]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="red",fit_kws={"color":"darkgreen"});
distplot_df=data.loc[:,['Gate Location',
'On-board Service', 'Seat Comfort', 'Leg Room Service', 'Cleanliness',
'Food and Drink', 'In-flight Service', 'In-flight Wifi Service',
'In-flight Entertainment', 'Baggage Handling',]]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="blue",fit_kws={"color":"darkred"});
cols = 5
rows = 1
countplot_df=data.loc[:,['Customer Type', 'Type of Travel','Class','Satisfaction']]
fig = plt.figure(figsize= (18,7))
all_cats = data.select_dtypes(include='object')
for i, col in enumerate(countplot_df,1):
ax=fig.add_subplot(rows, cols, i+1)
sns.countplot(x=data[col], data=data,ax=ax,palette = "Set1")
fig.tight_layout()
plt.show()
data.isnull().sum() | code |
105216451/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
plt.figure(figsize=(14, 10))
sns.heatmap(data.isnull()) | code |
105216451/cell_18 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum()
data.columns
distplot_df=data.loc[:,['Age', 'Flight Distance',
'Departure Delay', 'Arrival Delay', 'Departure and Arrival Time Convenience', 'Ease of Online Booking',
'Check-in Service','Online Boarding','Gate Location','On-board Service']]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="red",fit_kws={"color":"darkgreen"});
distplot_df=data.loc[:,['Gate Location',
'On-board Service', 'Seat Comfort', 'Leg Room Service', 'Cleanliness',
'Food and Drink', 'In-flight Service', 'In-flight Wifi Service',
'In-flight Entertainment', 'Baggage Handling',]]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="blue",fit_kws={"color":"darkred"});
cols = 5
rows = 1
countplot_df=data.loc[:,['Customer Type', 'Type of Travel','Class','Satisfaction']]
fig = plt.figure(figsize= (18,7))
all_cats = data.select_dtypes(include='object')
for i, col in enumerate(countplot_df,1):
ax=fig.add_subplot(rows, cols, i+1)
sns.countplot(x=data[col], data=data,ax=ax,palette = "Set1")
fig.tight_layout()
plt.show()
sns.heatmap(data.isnull()) | code |
105216451/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.describe() | code |
105216451/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum()
data.columns
distplot_df=data.loc[:,['Age', 'Flight Distance',
'Departure Delay', 'Arrival Delay', 'Departure and Arrival Time Convenience', 'Ease of Online Booking',
'Check-in Service','Online Boarding','Gate Location','On-board Service']]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="red",fit_kws={"color":"darkgreen"});
distplot_df=data.loc[:,['Gate Location',
'On-board Service', 'Seat Comfort', 'Leg Room Service', 'Cleanliness',
'Food and Drink', 'In-flight Service', 'In-flight Wifi Service',
'In-flight Entertainment', 'Baggage Handling',]]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="blue",fit_kws={"color":"darkred"});
cols = 5
rows = 1
countplot_df = data.loc[:, ['Customer Type', 'Type of Travel', 'Class', 'Satisfaction']]
fig = plt.figure(figsize=(18, 7))
all_cats = data.select_dtypes(include='object')
for i, col in enumerate(countplot_df, 1):
ax = fig.add_subplot(rows, cols, i + 1)
sns.countplot(x=data[col], data=data, ax=ax, palette='Set1')
fig.tight_layout()
plt.show() | code |
105216451/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum()
data.columns
distplot_df=data.loc[:,['Age', 'Flight Distance',
'Departure Delay', 'Arrival Delay', 'Departure and Arrival Time Convenience', 'Ease of Online Booking',
'Check-in Service','Online Boarding','Gate Location','On-board Service']]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="red",fit_kws={"color":"darkgreen"});
distplot_df=data.loc[:,['Gate Location',
'On-board Service', 'Seat Comfort', 'Leg Room Service', 'Cleanliness',
'Food and Drink', 'In-flight Service', 'In-flight Wifi Service',
'In-flight Entertainment', 'Baggage Handling',]]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="blue",fit_kws={"color":"darkred"});
cols = 5
rows = 1
countplot_df=data.loc[:,['Customer Type', 'Type of Travel','Class','Satisfaction']]
fig = plt.figure(figsize= (18,7))
all_cats = data.select_dtypes(include='object')
for i, col in enumerate(countplot_df,1):
ax=fig.add_subplot(rows, cols, i+1)
sns.countplot(x=data[col], data=data,ax=ax,palette = "Set1")
fig.tight_layout()
plt.show()
data['Arrival Delay'].describe() | code |
105216451/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum()
data.columns
distplot_df=data.loc[:,['Age', 'Flight Distance',
'Departure Delay', 'Arrival Delay', 'Departure and Arrival Time Convenience', 'Ease of Online Booking',
'Check-in Service','Online Boarding','Gate Location','On-board Service']]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="red",fit_kws={"color":"darkgreen"});
distplot_df=data.loc[:,['Gate Location',
'On-board Service', 'Seat Comfort', 'Leg Room Service', 'Cleanliness',
'Food and Drink', 'In-flight Service', 'In-flight Wifi Service',
'In-flight Entertainment', 'Baggage Handling',]]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f=plt.subplot(5,2,i)
sns.distplot(distplot_df[column], color="blue",fit_kws={"color":"darkred"});
data.info() | code |
105216451/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum() | code |
105216451/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.isnull().sum()
data.duplicated().sum()
data.columns
distplot_df = data.loc[:, ['Age', 'Flight Distance', 'Departure Delay', 'Arrival Delay', 'Departure and Arrival Time Convenience', 'Ease of Online Booking', 'Check-in Service', 'Online Boarding', 'Gate Location', 'On-board Service']]
fig = plt.figure(figsize=(15, 20))
for i, column in enumerate(distplot_df.columns, 1):
f = plt.subplot(5, 2, i)
sns.distplot(distplot_df[column], color='red', fit_kws={'color': 'darkgreen'}) | code |
105216451/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data.head(15) | code |
2032126/cell_9 | [
"text_plain_output_1.png"
] | from mlxtend.classifier import StackingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from mlxtend.classifier import StackingClassifier
clf1 = KNeighborsClassifier()
clf2 = RandomForestClassifier()
clf3 = GaussianNB()
clf4 = SVC()
meta_clf = LogisticRegression()
stacking_clf = StackingClassifier(classifiers=[clf1, clf2, clf3, clf4], meta_classifier=meta_clf)
clf1.fit(X_train, y_train)
clf2.fit(X_train, y_train)
clf3.fit(X_train, y_train)
clf4.fit(X_train, y_train)
stacking_clf.fit(X_train, y_train)
print('RNN Score:', clf1.score(X_test, y_test))
print('RF Score:', clf2.score(X_test, y_test))
print('GNB Score:', clf3.score(X_test, y_test))
print('SVC Score:', clf4.score(X_test, y_test))
print('Stacking Score:', stacking_clf.score(X_test, y_test)) | code |
2032126/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('../input/train.csv')
df2 = pd.read_csv('../input/test.csv')
df2.head() | code |
2032126/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df1 = pd.read_csv('../input/train.csv')
df2 = pd.read_csv('../input/test.csv')
train = df1.drop(['Ticket', 'Fare', 'Embarked', 'Cabin', 'Name'], axis=1)
test = df2.drop(['Ticket', 'Fare', 'Embarked', 'Cabin', 'Name'], axis=1)
X = train.drop(['Survived'], axis=1)
y = pd.DataFrame(train['Survived'])
X['Age'] = X['Age'].replace(np.nan, X['Age'].mean())
test['Age'] = test['Age'].replace(np.nan, test['Age'].mean())
test | code |
2032126/cell_7 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df1 = pd.read_csv('../input/train.csv')
df2 = pd.read_csv('../input/test.csv')
train = df1.drop(['Ticket', 'Fare', 'Embarked', 'Cabin', 'Name'], axis=1)
test = df2.drop(['Ticket', 'Fare', 'Embarked', 'Cabin', 'Name'], axis=1)
X = train.drop(['Survived'], axis=1)
y = pd.DataFrame(train['Survived'])
X['Age'] = X['Age'].replace(np.nan, X['Age'].mean())
X.head() | code |
2032126/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('../input/train.csv')
df2 = pd.read_csv('../input/test.csv')
df1.head() | code |
2032126/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('../input/train.csv')
df2 = pd.read_csv('../input/test.csv')
train = df1.drop(['Ticket', 'Fare', 'Embarked', 'Cabin', 'Name'], axis=1)
test = df2.drop(['Ticket', 'Fare', 'Embarked', 'Cabin', 'Name'], axis=1)
PassengerId = test['PassengerId']
type(PassengerId) | code |
2032126/cell_5 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('../input/train.csv')
df2 = pd.read_csv('../input/test.csv')
train = df1.drop(['Ticket', 'Fare', 'Embarked', 'Cabin', 'Name'], axis=1)
test = df2.drop(['Ticket', 'Fare', 'Embarked', 'Cabin', 'Name'], axis=1)
train.head() | code |
32062432/cell_2 | [
"text_plain_output_1.png"
] | import os
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32069396/cell_13 | [
"text_html_output_1.png"
] | from PIL import Image
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
from torchvision import transforms, utils
from tqdm import tqdm
import copy
import cv2
import numbers
import numpy as np
import pandas as pd
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torch import optim
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
import time
import os
import copy
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import transforms, utils
from PIL import Image
from tqdm import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
import cv2
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
test = pd.read_csv('/kaggle/input/lego-dataset/Test.csv')
train = pd.read_csv('/kaggle/input/lego-dataset/Train.csv')
train_img = []
for img_name in tqdm(train['name']):
image_path = '/kaggle/input/lego-dataset/train/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
train_img.append(img)
train_x = np.array(train_img)
train_y = train['category'].values - 1
train_x.shape
from sklearn.model_selection import train_test_split
train_xx, val_x, train_yy, val_y = train_test_split(train_x, train_y, test_size=0.1, random_state=13, stratify=train_y)
((train_xx.shape, train_yy.shape), (val_x.shape, val_y.shape))
class RandomRotation(object):
"""
https://github.com/pytorch/vision/tree/master/torchvision/transforms
Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number, it must be positive.')
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence, it must be of len 2.')
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img):
"""
img (PIL Image): Image to be rotated.
Returns:
PIL Image: Rotated image.
"""
def rotate(img, angle, resample=False, expand=False, center=None):
"""Rotate the image by angle and then (optionally) translate it by (n_columns, n_rows)
Args:
img (PIL Image): PIL Image to be rotated.
angle ({float, int}): In degrees degrees counter clockwise order.
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
return img.rotate(angle, resample, expand, center)
angle = self.get_params(self.degrees)
return rotate(img, angle, self.resample, self.expand, self.center)
class RandomShift(object):
def __init__(self, shift):
self.shift = shift
@staticmethod
def get_params(shift):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
hshift, vshift = np.random.uniform(-shift, shift, size=2)
return (hshift, vshift)
def __call__(self, img):
hshift, vshift = self.get_params(self.shift)
return img.transform(img.size, Image.AFFINE, (1, 0, hshift, 0, 1, vshift), resample=Image.BICUBIC, fill=1)
import numbers
train_transform = transforms.Compose([transforms.ToPILImage(), RandomRotation(20), RandomShift(3), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
class MyDataset(Dataset):
def __init__(self, data, target=None, transform=None):
self.transform = transform
self.target = target
if self.target is not None:
self.data = data
self.target = torch.from_numpy(target).long()
else:
self.data = data
def __getitem__(self, index):
if self.target is not None:
return (self.transform(self.data[index]), self.target[index])
else:
return self.transform(self.data[index])
def __len__(self):
return len(list(self.data))
train_dataset = MyDataset(train_xx, train_yy, train_transform)
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
val_dataset = MyDataset(val_x, val_y, transform)
test_loader = DataLoader(val_dataset, batch_size=128, shuffle=True)
dataloaders = {'train': train_loader, 'val': test_loader}
dataset_sizes = {'train': len(train_xx), 'val': len(val_x)}
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Two-phase (train/val) fine-tuning loop.

    Relies on the module-level globals ``dataloaders``, ``dataset_sizes``,
    ``device`` and ``f1_score``.  Keeps a deep copy of the weights with the
    best validation accuracy and loads them back into ``model`` before
    returning it.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        for phase in ['train', 'val']:
            if phase == 'train':
                # NOTE(review): scheduler.step() before the epoch's optimizer
                # steps is the pre-1.1 PyTorch ordering; newer versions expect
                # it after training each epoch -- confirm against installed torch.
                scheduler.step()
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            running_corrects = 0
            f1_batch = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                # Track gradients only during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # Un-average the batch loss so the epoch mean is exact.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                f1_batch += f1_score(labels.data.cpu(), preds.cpu(), average='weighted')
            epoch_loss = running_loss / dataset_sizes[phase]  # computed but currently unused
            epoch_f1 = f1_batch / len(dataloaders[phase])  # computed but currently unused
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            # Remember the best validation weights seen so far.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
    time_elapsed = time.time() - since  # elapsed seconds; not reported anywhere
    model.load_state_dict(best_model_wts)
    return model
# Randomly-initialised ResNet-18 adapted to single-channel inputs.
model_ft = models.resnet18(pretrained=False)
# Replace the stock 3-channel stem convolution with a 1-channel one
# (the images are loaded as grayscale (200, 200, 1)).
model_ft.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
num_ftrs = model_ft.fc.in_features
# New classification head: 16 output classes (labels are category - 1).
model_ft.fc = nn.Linear(num_ftrs, 16)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.001)
# Decay the learning rate by a factor of 0.15 every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.15)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=20)
32069396/cell_4 | [
"image_output_1.png"
] | from tqdm import tqdm
import cv2
import numpy as np
import pandas as pd
test = pd.read_csv('/kaggle/input/lego-dataset/Test.csv')
train = pd.read_csv('/kaggle/input/lego-dataset/Train.csv')
train_img = []
for img_name in tqdm(train['name']):
image_path = '/kaggle/input/lego-dataset/train/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
train_img.append(img)
train_x = np.array(train_img)
train_y = train['category'].values - 1
train_x.shape | code |
32069396/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
test = pd.read_csv('/kaggle/input/lego-dataset/Test.csv')
train = pd.read_csv('/kaggle/input/lego-dataset/Train.csv')
train_img = []
for img_name in tqdm(train['name']):
image_path = '/kaggle/input/lego-dataset/train/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
train_img.append(img)
train_x = np.array(train_img)
train_y = train['category'].values - 1
train_x.shape
def display_examples(images, labels):
    """
    Display 25 images from the images array with its corresponding labels
    """
    # 5x5 grid; assumes at least 25 entries, each reshapeable to 200x200.
    fig = plt.figure(figsize=(10,10))
    fig.suptitle("Some examples of images of the dataset", fontsize=16)
    for i in range(25):
        plt.subplot(5,5,i+1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        # Samples are stored as (200, 200, 1); flatten the channel for imshow.
        plt.imshow(images[i].reshape(200,200), cmap=plt.cm.binary)
        plt.xlabel(labels[i])
    plt.show()
display_examples(train_x, train_y + 1) | code |
32069396/cell_19 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from PIL import Image
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from torch import optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
from torchvision import transforms, utils
from tqdm import tqdm
import copy
import cv2
import numbers
import numpy as np
import pandas as pd
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torch import optim
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
import time
import os
import copy
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import transforms, utils
from PIL import Image
from tqdm import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
import cv2
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
test = pd.read_csv('/kaggle/input/lego-dataset/Test.csv')
train = pd.read_csv('/kaggle/input/lego-dataset/Train.csv')
train_img = []
for img_name in tqdm(train['name']):
image_path = '/kaggle/input/lego-dataset/train/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
train_img.append(img)
train_x = np.array(train_img)
train_y = train['category'].values - 1
train_x.shape
from sklearn.model_selection import train_test_split
train_xx, val_x, train_yy, val_y = train_test_split(train_x, train_y, test_size=0.1, random_state=13, stratify=train_y)
((train_xx.shape, train_yy.shape), (val_x.shape, val_y.shape))
class RandomRotation(object):
    """Rotate a PIL image by an angle sampled uniformly from ``degrees``.

    (Adapted from https://github.com/pytorch/vision/tree/master/torchvision/transforms)

    Args:
        degrees (sequence or float or int): a single non-negative number ``d``
            is interpreted as the range ``(-d, +d)``; otherwise a
            ``(min, max)`` pair.
        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
            optional resampling filter; defaults to NEAREST for mode "1"/"P".
        expand (bool, optional): if true, enlarge the output to hold the whole
            rotated image; otherwise keep the input size (rotation is assumed
            to be about the center).
        center (2-tuple, optional): optional center of rotation, origin at the
            upper-left corner; defaults to the image center.
    """

    def __init__(self, degrees, resample=False, expand=False, center=None):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError('If degrees is a single number, it must be positive.')
            degrees = (-degrees, degrees)
        elif len(degrees) != 2:
            raise ValueError('If degrees is a sequence, it must be of len 2.')
        self.degrees = degrees
        self.resample = resample
        self.expand = expand
        self.center = center

    @staticmethod
    def get_params(degrees):
        """Sample one rotation angle uniformly from the (min, max) range."""
        low, high = degrees
        return np.random.uniform(low, high)

    def __call__(self, img):
        """Return ``img`` rotated by a freshly sampled angle (PIL Image)."""
        angle = self.get_params(self.degrees)
        # PIL's Image.rotate already implements resample/expand/center.
        return img.rotate(angle, self.resample, self.expand, self.center)
class RandomShift(object):
    """Translate a PIL image by a random (hshift, vshift) offset in pixels."""

    def __init__(self, shift):
        # Maximum absolute translation along each axis, in pixels.
        self.shift = shift

    @staticmethod
    def get_params(shift):
        """Draw the random (hshift, vshift) translation for one call.

        (The original docstring said "rotation"; this transform is a shift.)

        Returns:
            tuple: horizontal and vertical offsets, each in [-shift, shift].
        """
        hshift, vshift = np.random.uniform(-shift, shift, size=2)
        return (hshift, vshift)

    def __call__(self, img):
        hshift, vshift = self.get_params(self.shift)
        # Affine coefficients (1, 0, hshift, 0, 1, vshift) encode a pure translation.
        return img.transform(img.size, Image.AFFINE, (1, 0, hshift, 0, 1, vshift), resample=Image.BICUBIC, fill=1)
import numbers
train_transform = transforms.Compose([transforms.ToPILImage(), RandomRotation(20), RandomShift(3), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
class MyDataset(Dataset):
    """In-memory Dataset over numpy samples with optional labels and transform."""

    def __init__(self, data, target=None, transform=None):
        self.transform = transform
        self.target = target
        if self.target is not None:
            self.data = data
            # Convert labels once to a LongTensor for CrossEntropyLoss.
            self.target = torch.from_numpy(target).long()
        else:
            # Inference mode: no labels available.
            self.data = data

    def __getitem__(self, index):
        if self.target is not None:
            return (self.transform(self.data[index]), self.target[index])
        else:
            return self.transform(self.data[index])

    def __len__(self):
        # NOTE(review): len(list(self.data)) copies the whole dataset on every
        # call; len(self.data) would be equivalent here and O(1).
        return len(list(self.data))
train_dataset = MyDataset(train_xx, train_yy, train_transform)
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
val_dataset = MyDataset(val_x, val_y, transform)
test_loader = DataLoader(val_dataset, batch_size=128, shuffle=True)
dataloaders = {'train': train_loader, 'val': test_loader}
dataset_sizes = {'train': len(train_xx), 'val': len(val_x)}
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train()
else:
model.eval()
running_loss = 0.0
running_corrects = 0
f1_batch = 0
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
if phase == 'train':
loss.backward()
optimizer.step()
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
f1_batch += f1_score(labels.data.cpu(), preds.cpu(), average='weighted')
epoch_loss = running_loss / dataset_sizes[phase]
epoch_f1 = f1_batch / len(dataloaders[phase])
epoch_acc = running_corrects.double() / dataset_sizes[phase]
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
model.load_state_dict(best_model_wts)
return model
model_ft = models.resnet18(pretrained=False)
model_ft.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 16)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.001)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.15)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=20)
test_img = []
for img_name in tqdm(test['name']):
image_path = '/kaggle/input/lego-dataset/test/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
test_img.append(img)
test_x = np.array(test_img)
test_x.shape
val_dataset = MyDataset(data=test_x, transform=transform)
test_loader = DataLoader(val_dataset, batch_size=128, shuffle=False)
def prediciton(data_loader):
    """Collect argmax class predictions for every batch in ``data_loader``.

    Uses the module-level ``model_ft``.  (The misspelled name is kept because
    callers in this file invoke ``prediciton``.)

    Returns:
        torch.LongTensor of shape (num_samples, 1) with predicted class ids.
    """
    model_ft.eval()
    test_pred = torch.LongTensor()
    # torch.no_grad() replaces the deprecated Variable(data, volatile=True):
    # it disables autograd tracking for the whole inference pass.
    with torch.no_grad():
        for data in data_loader:
            if torch.cuda.is_available():
                data = data.cuda()
            output = model_ft(data)
            # max over the class dimension -> predicted class index per sample.
            pred = output.cpu().data.max(1, keepdim=True)[1]
            test_pred = torch.cat((test_pred, pred), dim=0)
    return test_pred
test_pred = prediciton(test_loader)
test['category'] = test_pred.numpy() + 1
test.head() | code |
32069396/cell_7 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from tqdm import tqdm
import cv2
import numpy as np
import pandas as pd
test = pd.read_csv('/kaggle/input/lego-dataset/Test.csv')
train = pd.read_csv('/kaggle/input/lego-dataset/Train.csv')
train_img = []
for img_name in tqdm(train['name']):
image_path = '/kaggle/input/lego-dataset/train/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
train_img.append(img)
train_x = np.array(train_img)
train_y = train['category'].values - 1
train_x.shape
from sklearn.model_selection import train_test_split
train_xx, val_x, train_yy, val_y = train_test_split(train_x, train_y, test_size=0.1, random_state=13, stratify=train_y)
((train_xx.shape, train_yy.shape), (val_x.shape, val_y.shape)) | code |
32069396/cell_18 | [
"text_plain_output_1.png"
] | from PIL import Image
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from torch import optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
from torchvision import transforms, utils
from tqdm import tqdm
import copy
import cv2
import numbers
import numpy as np
import pandas as pd
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torch import optim
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
import time
import os
import copy
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import transforms, utils
from PIL import Image
from tqdm import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
import cv2
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
test = pd.read_csv('/kaggle/input/lego-dataset/Test.csv')
train = pd.read_csv('/kaggle/input/lego-dataset/Train.csv')
train_img = []
for img_name in tqdm(train['name']):
image_path = '/kaggle/input/lego-dataset/train/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
train_img.append(img)
train_x = np.array(train_img)
train_y = train['category'].values - 1
train_x.shape
from sklearn.model_selection import train_test_split
train_xx, val_x, train_yy, val_y = train_test_split(train_x, train_y, test_size=0.1, random_state=13, stratify=train_y)
((train_xx.shape, train_yy.shape), (val_x.shape, val_y.shape))
class RandomRotation(object):
"""
https://github.com/pytorch/vision/tree/master/torchvision/transforms
Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number, it must be positive.')
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence, it must be of len 2.')
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img):
"""
img (PIL Image): Image to be rotated.
Returns:
PIL Image: Rotated image.
"""
def rotate(img, angle, resample=False, expand=False, center=None):
"""Rotate the image by angle and then (optionally) translate it by (n_columns, n_rows)
Args:
img (PIL Image): PIL Image to be rotated.
angle ({float, int}): In degrees degrees counter clockwise order.
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
return img.rotate(angle, resample, expand, center)
angle = self.get_params(self.degrees)
return rotate(img, angle, self.resample, self.expand, self.center)
class RandomShift(object):
def __init__(self, shift):
self.shift = shift
@staticmethod
def get_params(shift):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
hshift, vshift = np.random.uniform(-shift, shift, size=2)
return (hshift, vshift)
def __call__(self, img):
hshift, vshift = self.get_params(self.shift)
return img.transform(img.size, Image.AFFINE, (1, 0, hshift, 0, 1, vshift), resample=Image.BICUBIC, fill=1)
import numbers
train_transform = transforms.Compose([transforms.ToPILImage(), RandomRotation(20), RandomShift(3), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
class MyDataset(Dataset):
def __init__(self, data, target=None, transform=None):
self.transform = transform
self.target = target
if self.target is not None:
self.data = data
self.target = torch.from_numpy(target).long()
else:
self.data = data
def __getitem__(self, index):
if self.target is not None:
return (self.transform(self.data[index]), self.target[index])
else:
return self.transform(self.data[index])
def __len__(self):
return len(list(self.data))
train_dataset = MyDataset(train_xx, train_yy, train_transform)
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
val_dataset = MyDataset(val_x, val_y, transform)
test_loader = DataLoader(val_dataset, batch_size=128, shuffle=True)
dataloaders = {'train': train_loader, 'val': test_loader}
dataset_sizes = {'train': len(train_xx), 'val': len(val_x)}
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train()
else:
model.eval()
running_loss = 0.0
running_corrects = 0
f1_batch = 0
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
if phase == 'train':
loss.backward()
optimizer.step()
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
f1_batch += f1_score(labels.data.cpu(), preds.cpu(), average='weighted')
epoch_loss = running_loss / dataset_sizes[phase]
epoch_f1 = f1_batch / len(dataloaders[phase])
epoch_acc = running_corrects.double() / dataset_sizes[phase]
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
model.load_state_dict(best_model_wts)
return model
model_ft = models.resnet18(pretrained=False)
model_ft.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 16)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.001)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.15)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=20)
test_img = []
for img_name in tqdm(test['name']):
image_path = '/kaggle/input/lego-dataset/test/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
test_img.append(img)
test_x = np.array(test_img)
test_x.shape
val_dataset = MyDataset(data=test_x, transform=transform)
test_loader = DataLoader(val_dataset, batch_size=128, shuffle=False)
def prediciton(data_loader):
    """Collect argmax class predictions for every batch in ``data_loader``.

    Uses the module-level ``model_ft``.  (Misspelled name preserved: callers
    invoke ``prediciton``.)
    """
    model_ft.eval()
    test_pred = torch.LongTensor()
    for i, data in enumerate(data_loader):
        # NOTE(review): Variable(volatile=True) is deprecated; torch.no_grad()
        # is the modern equivalent -- confirm against the installed torch.
        data = Variable(data, volatile=True)
        if torch.cuda.is_available():
            data = data.cuda()
        output = model_ft(data)
        # max over the class dimension -> predicted class index per sample.
        pred = output.cpu().data.max(1, keepdim=True)[1]
        test_pred = torch.cat((test_pred, pred), dim=0)
    return test_pred
return test_pred
test_pred = prediciton(test_loader) | code |
32069396/cell_15 | [
"image_output_1.png"
] | from PIL import Image
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import numbers
import numpy as np
import pandas as pd
test = pd.read_csv('/kaggle/input/lego-dataset/Test.csv')
train = pd.read_csv('/kaggle/input/lego-dataset/Train.csv')
train_img = []
for img_name in tqdm(train['name']):
image_path = '/kaggle/input/lego-dataset/train/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
train_img.append(img)
train_x = np.array(train_img)
train_y = train['category'].values - 1
train_x.shape
def display_examples(images, labels):
"""
Display 25 images from the images array with its corresponding labels
"""
fig = plt.figure(figsize=(10,10))
fig.suptitle("Some examples of images of the dataset", fontsize=16)
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(images[i].reshape(200,200), cmap=plt.cm.binary)
plt.xlabel(labels[i])
plt.show()
class RandomRotation(object):
"""
https://github.com/pytorch/vision/tree/master/torchvision/transforms
Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number, it must be positive.')
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence, it must be of len 2.')
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img):
"""
img (PIL Image): Image to be rotated.
Returns:
PIL Image: Rotated image.
"""
def rotate(img, angle, resample=False, expand=False, center=None):
"""Rotate the image by angle and then (optionally) translate it by (n_columns, n_rows)
Args:
img (PIL Image): PIL Image to be rotated.
angle ({float, int}): In degrees degrees counter clockwise order.
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
return img.rotate(angle, resample, expand, center)
angle = self.get_params(self.degrees)
return rotate(img, angle, self.resample, self.expand, self.center)
class RandomShift(object):
def __init__(self, shift):
self.shift = shift
@staticmethod
def get_params(shift):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
hshift, vshift = np.random.uniform(-shift, shift, size=2)
return (hshift, vshift)
def __call__(self, img):
hshift, vshift = self.get_params(self.shift)
return img.transform(img.size, Image.AFFINE, (1, 0, hshift, 0, 1, vshift), resample=Image.BICUBIC, fill=1)
test_img = []
for img_name in tqdm(test['name']):
image_path = '/kaggle/input/lego-dataset/test/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
test_img.append(img)
test_x = np.array(test_img)
test_x.shape
fig = plt.figure(figsize=(10, 10))
fig.suptitle('Some examples of images of the dataset', fontsize=16)
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(test_x[i].reshape(200, 200), cmap=plt.cm.binary)
plt.show() | code |
32069396/cell_3 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
test = pd.read_csv('/kaggle/input/lego-dataset/Test.csv')
train = pd.read_csv('/kaggle/input/lego-dataset/Train.csv')
train.head() | code |
32069396/cell_14 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from tqdm import tqdm
import cv2
import numbers
import numpy as np
import pandas as pd
test = pd.read_csv('/kaggle/input/lego-dataset/Test.csv')
train = pd.read_csv('/kaggle/input/lego-dataset/Train.csv')
train_img = []
for img_name in tqdm(train['name']):
image_path = '/kaggle/input/lego-dataset/train/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
train_img.append(img)
train_x = np.array(train_img)
train_y = train['category'].values - 1
train_x.shape
class RandomRotation(object):
"""
https://github.com/pytorch/vision/tree/master/torchvision/transforms
Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number, it must be positive.')
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence, it must be of len 2.')
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img):
"""
img (PIL Image): Image to be rotated.
Returns:
PIL Image: Rotated image.
"""
def rotate(img, angle, resample=False, expand=False, center=None):
"""Rotate the image by angle and then (optionally) translate it by (n_columns, n_rows)
Args:
img (PIL Image): PIL Image to be rotated.
angle ({float, int}): In degrees degrees counter clockwise order.
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
return img.rotate(angle, resample, expand, center)
angle = self.get_params(self.degrees)
return rotate(img, angle, self.resample, self.expand, self.center)
class RandomShift(object):
def __init__(self, shift):
self.shift = shift
@staticmethod
def get_params(shift):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
hshift, vshift = np.random.uniform(-shift, shift, size=2)
return (hshift, vshift)
def __call__(self, img):
hshift, vshift = self.get_params(self.shift)
return img.transform(img.size, Image.AFFINE, (1, 0, hshift, 0, 1, vshift), resample=Image.BICUBIC, fill=1)
test_img = []
for img_name in tqdm(test['name']):
image_path = '/kaggle/input/lego-dataset/test/' + str(img_name)
img = cv2.imread(image_path, 0)
img = img / 255.0
img = img.reshape(200, 200, 1)
img = img.astype('float32')
test_img.append(img)
test_x = np.array(test_img)
test_x.shape | code |
106198328/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
stock_data = pd.read_csv('../input/ibovespa-index/ibovespa_indexq.csv')
stock_data['Date'] = pd.to_datetime(stock_data['Date'])
ibov_visu = pd.read_csv('../input/ibovespa-index/ibovespa_info.csv') | code |
106198328/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
stock_data = pd.read_csv('../input/ibovespa-index/ibovespa_indexq.csv')
stock_data.isna().mean() | code |
106198328/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
stock_data = pd.read_csv('../input/ibovespa-index/ibovespa_indexq.csv')
stock_data.isna().mean()
round(stock_data.isna().mean().sum(), 2)
stock_data.isna().sum().sum() | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.