Dataset schema (column statistics):

  path              string        length 13 to 17 characters
  screenshot_names  sequence      1 to 873 items
  code              string        length 0 to 40.4k characters
  cell_type         string class  1 value ("code")
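For readers who want to work with rows shaped like the ones below, here is a minimal loading sketch. It assumes the dump is stored as JSON Lines with one record per notebook cell; the file name notebook_cells.jsonl and the surrounding code are illustrative assumptions, not part of the original dump.

import json

# Assumed layout: one JSON object per line with the four fields listed above.
# 'notebook_cells.jsonl' is a placeholder name, not part of the original dump.
with open('notebook_cells.jsonl', 'r', encoding='utf-8') as f:
    rows = [json.loads(line) for line in f]

# Inspect a few records: path identifies the notebook/cell, screenshot_names
# lists the rendered outputs, code holds the cell source, cell_type is "code".
for row in rows[:3]:
    print(row['path'], row['cell_type'], len(row['screenshot_names']))
    print(row['code'][:200])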
105190732/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
df_train.info()
print('----------------------------------')
df_test.info()
code
105190732/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
label = df_train['Survived']
label.unique()
label.value_counts().plot.pie(autopct='%1.2f%%')
code
105190732/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
print('Amount of missing data in Fare for train:', df_train.Fare.isnull().sum())
print('Amount of missing data in Fare for test:', df_test.Fare.isnull().sum())
print('--------------------------------------------------')
print('Amount of missing data in Embarked for train:', df_train.Embarked.isnull().sum())
print('Amount of missing data in Embarked for test:', df_test.Embarked.isnull().sum())
code
105190732/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
print(df_train.Age.isnull().sum())
print(df_test.Age.isnull().sum())
code
105190732/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
label = df_train['Survived']
label.unique()
code
16111090/cell_13
[ "text_plain_output_1.png" ]
from collections import Counter
from sklearn.metrics import accuracy_score
import numpy as np
import numpy as np  # linear algebra

def predict(x_train, y_train, x_test, k):
    distances = []
    targets = []
    for i in range(len(x_train)):
        distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
        distances.append([distance, i])
    distances = sorted(distances)
    for i in range(k):
        index = distances[i][1]
        targets.append(y_train.values[index])
    return Counter(targets).most_common(1)[0][0]

def train(x_train, y_train):
    return

def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
    train(x_train, y_train)
    for i in range(len(x_test)):
        predictions.append(predict(x_train, y_train, x_test.values[i, :], k))

predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
print('accuracy score is :', accuracy * 100, '%')
code
16111090/cell_11
[ "text_plain_output_1.png" ]
from collections import Counter
from sklearn.metrics import accuracy_score
import numpy as np
import numpy as np  # linear algebra

def predict(x_train, y_train, x_test, k):
    distances = []
    targets = []
    for i in range(len(x_train)):
        distance = np.sqrt(np.sum(np.square(x_test - x_train.values[i, :])))
        distances.append([distance, i])
    distances = sorted(distances)
    for i in range(k):
        index = distances[i][1]
        targets.append(y_train.values[index])
    return Counter(targets).most_common(1)[0][0]

def train(x_train, y_train):
    return

def kNearestNeighbor(x_train, y_train, x_test, predictions, k):
    train(x_train, y_train)
    for i in range(len(x_test)):
        predictions.append(predict(x_train, y_train, x_test.values[i, :], k))

predictions = []
from sklearn.metrics import accuracy_score
kNearestNeighbor(x_train, y_train, x_test, predictions, 9)
predictions = np.asarray(predictions)
accuracy = accuracy_score(y_test, predictions)
for i in range(len(x_test)):
    print('Flower with sepal length', x_test.iloc[i], ':')
    print('belongs to the kingdom', predictions[i])
code
16111090/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
16111090/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
import math
import operator

df = pd.read_csv('../input/Iris.csv')
print(df.head())
df.shape
from collections import Counter
code
16111090/cell_5
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
import math
import operator

df = pd.read_csv('../input/Iris.csv')
df.shape
from collections import Counter
from sklearn.model_selection import train_test_split

x = df[['SepalLengthCm']]
y = df['Species']
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.33)
len(y_train)
code
18137353/cell_4
[ "image_output_1.png" ]
import os   # added: os.listdir is used below but os was not imported in this cell
import cv2  # added: cv2.imread/cv2.resize are used below but cv2 was not imported in this cell
from sklearn.utils import shuffle
import matplotlib.pyplot as plt

data_path = '../input/all-dogs/all-dogs/'
lable_path = '../input/annotation/Annotation/'
all_image_paths = os.listdir(data_path)
IMG_SIZE = 64
BUFFER_SIZE = 20579
BATCH_SIZE = 256

def get_images(directory):
    Images = []
    for image_file in all_image_paths:
        image = cv2.imread(directory + '/' + image_file)
        image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
        Images.append(image)
    return shuffle(Images, random_state=81732)

data_images = get_images(data_path)

plt.figure(figsize=(15, 15))
i = 0
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(data_images[i])
    i += 1
plt.show()
code
18137353/cell_1
[ "text_plain_output_1.png" ]
import tensorflow as tf
import glob, os, imageio, PIL, time, cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from IPython import display
from sklearn.utils import shuffle
print(tf.__version__)
code
106199219/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data
code
106199219/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/segment/Segmentation_dataset.csv')
data.describe()
code
72098912/cell_2
[ "text_html_output_1.png" ]
import glob
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import glob
import pandas as pd

path = '../input/pandastasks/Pandas-Data-Science-Tasks-master/SalesAnalysis/Sales_Data'
filenames = glob.glob(path + '/*.csv')
dfs = []
for filename in filenames:
    dfs.append(pd.read_csv(filename))
big_frame = pd.concat(dfs, ignore_index=True)
big_frame.tail()
code
129020869/cell_9
[ "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv')
df_flight.isnull().sum()
cat_cols = df_flight.columns[df_flight.dtypes == object]
num_cols = df_flight.select_dtypes(include=np.number).columns.tolist()
print('The categorical columns are', cat_cols)
print('The numerical columns are', num_cols)
code
129020869/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv')
df_flight.head()
code
129020869/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv')
df_flight.isnull().sum()
cat_cols = df_flight.columns[df_flight.dtypes == object]
num_cols = df_flight.select_dtypes(include=np.number).columns.tolist()
df_flight.groupby('FY')['HOURS\n'].mean().sort_values(ascending=False).plot.bar(fontsize=12)
plt.title('Average flight hours yearwise')
plt.show()
code
129020869/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv')
df_flight.info()
code
129020869/cell_26
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv') df_flight.isnull().sum() cat_cols = df_flight.columns[df_flight.dtypes == object] num_cols = df_flight.select_dtypes(include=np.number).columns.tolist() df_flight.groupby('Month')['DEPARTURES\n'].mean().sort_values(ascending=False).plot.bar(fontsize=12) plt.title('Average departures monthwise') plt.show()
code
129020869/cell_2
[ "text_plain_output_1.png" ]
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
import warnings
warnings.filterwarnings('ignore')
code
129020869/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv') df_flight.isnull().sum() cat_cols = df_flight.columns[df_flight.dtypes == object] num_cols = df_flight.select_dtypes(include=np.number).columns.tolist() df_flight[df_flight['KILOMETRE\n(TH)'] < 100]
code
129020869/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129020869/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv')
df_flight.isnull().sum()
code
129020869/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv') df_flight.isnull().sum() cat_cols = df_flight.columns[df_flight.dtypes == object] num_cols = df_flight.select_dtypes(include=np.number).columns.tolist() df_flight.groupby('Month')['PASSENGERS CARRIED\n'].mean().sort_values(ascending=False).plot.bar(fontsize=12) plt.title('Average passengers carried monthwise') plt.show()
code
129020869/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv') df_flight.isnull().sum() cat_cols = df_flight.columns[df_flight.dtypes == object] num_cols = df_flight.select_dtypes(include=np.number).columns.tolist() for col in df_flight.select_dtypes(include=np.number).columns.tolist(): print(col) plt.figure(figsize=(15, 4)) plt.subplot(1, 2, 1) sns.distplot(a=df_flight[col], bins=20, color='green', hist_kws={'edgecolor': 'black'}) plt.ylabel('count') plt.subplot(1, 2, 2) sns.boxplot(x=df_flight[col], color='pink') plt.show()
code
129020869/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv') df_flight.isnull().sum() cat_cols = df_flight.columns[df_flight.dtypes == object] num_cols = df_flight.select_dtypes(include=np.number).columns.tolist() df_flight[df_flight['DEPARTURES\n'] < 100]
code
129020869/cell_24
[ "text_plain_output_5.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_6.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv') df_flight.isnull().sum() cat_cols = df_flight.columns[df_flight.dtypes == object] num_cols = df_flight.select_dtypes(include=np.number).columns.tolist() df_flight.groupby('FY')['KILOMETRE\n(TH)'].mean().sort_values(ascending=False).plot.bar(fontsize=12) plt.title('Average distance covered yearwise') plt.show()
code
129020869/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv') df_flight.isnull().sum() cat_cols = df_flight.columns[df_flight.dtypes == object] num_cols = df_flight.select_dtypes(include=np.number).columns.tolist() df_flight.groupby('FY')['DEPARTURES\n'].mean().sort_values(ascending=False).plot.bar(fontsize=12) plt.title('Average departures yearwise') plt.show()
code
129020869/cell_27
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv') df_flight.isnull().sum() cat_cols = df_flight.columns[df_flight.dtypes == object] num_cols = df_flight.select_dtypes(include=np.number).columns.tolist() df_flight.groupby('Month')['HOURS\n'].mean().sort_values(ascending=False).plot.bar(fontsize=12) plt.title('Average flight hours monthwise') plt.show()
code
129020869/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv')
df_flight.isnull().sum()
cat_cols = df_flight.columns[df_flight.dtypes == object]
num_cols = df_flight.select_dtypes(include=np.number).columns.tolist()
df_flight[' PAX.LOAD FACTOR (IN %)'].dtype
code
129020869/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_flight = pd.read_csv('/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (International).csv')
df_flight.tail()
code
73097621/cell_4
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
data.isna().sum()
y = data.target
features_1 = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
features_2 = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
features_3 = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9', 'cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
X1 = data[features_2]
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor(random_state=1)
model.fit(X1, y)
code
73097621/cell_6
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
data.isna().sum()
y = data.target
features_1 = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
features_2 = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
features_3 = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9', 'cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
X1 = data[features_2]
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor(random_state=1)
model.fit(X1, y)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
predictions = model.predict(test[features_2])
output = pd.DataFrame({'Id': test.index, 'target': predictions})
output.to_csv('submission.csv', index=False)
print('complete')
code
73097621/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
data.isna().sum()
code
73097621/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
17097984/cell_21
[ "text_plain_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes) learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') interp = ClassificationInterpretation.from_learner(learn) interp.plot_top_losses(9)
code
17097984/cell_13
[ "text_plain_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch)
code
17097984/cell_9
[ "text_html_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet'
path = untar_data(pets)
path
path.ls()
path_anno = path / 'annotations'
path_img = path / 'images'
fnames = get_image_files(path_img)
fnames[:5]
code
17097984/cell_23
[ "application_vnd.jupyter.stderr_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes) learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') interp = ClassificationInterpretation.from_learner(learn) doc(interp.plot_top_losses) interp.plot_confusion_matrix(figsize=(12, 12), dpi=60)
code
17097984/cell_30
[ "text_plain_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes) learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') learn.unfreeze() learn.fit_one_cycle(1) learn.load('stage-1') learn.lr_find() learn.unfreeze() learn.fit_one_cycle(2, max_lr=slice(1e-06, 0.0001))
code
17097984/cell_6
[ "text_plain_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet'
path = untar_data(pets)
path
code
17097984/cell_29
[ "text_html_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes) learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') learn.unfreeze() learn.fit_one_cycle(1) learn.load('stage-1') learn.lr_find() learn.recorder.plot()
code
17097984/cell_26
[ "image_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes) learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') learn.unfreeze() learn.fit_one_cycle(1)
code
17097984/cell_7
[ "image_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet'
path = untar_data(pets)
path
path.ls()
code
17097984/cell_18
[ "text_plain_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes) learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4)
code
17097984/cell_28
[ "text_plain_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes) learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') learn.unfreeze() learn.fit_one_cycle(1) learn.load('stage-1') learn.lr_find()
code
17097984/cell_15
[ "text_plain_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) print(data.classes)
code
17097984/cell_16
[ "text_plain_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes)
code
17097984/cell_17
[ "image_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes) learn = cnn_learner(data, models.resnet34, metrics=error_rate)
code
17097984/cell_24
[ "text_html_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes) learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') interp = ClassificationInterpretation.from_learner(learn) doc(interp.plot_top_losses) interp.most_confused(min_val=2)
code
17097984/cell_14
[ "text_plain_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) data.show_batch(rows=3)
code
17097984/cell_27
[ "image_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet' path = untar_data(pets) path path.ls() path_anno = path / 'annotations' path_img = path / 'images' fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = '/([^/]+)_\\d+.jpg$' data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224) data.normalize(imagenet_stats) help(data.show_batch) len(data.classes) learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') learn.unfreeze() learn.fit_one_cycle(1) learn.load('stage-1')
code
17097984/cell_12
[ "text_plain_output_1.png" ]
pets = 'https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet'
path = untar_data(pets)
path
path.ls()
path_anno = path / 'annotations'
path_img = path / 'images'
fnames = get_image_files(path_img)
fnames[:5]
np.random.seed(2)
pat = '/([^/]+)_\\d+.jpg$'
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224)
data.normalize(imagenet_stats)
code
121151048/cell_9
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet') testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet') training_set.dtypes training_set.head(5)
code
121151048/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet') testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet') training_set.dtypes training_set.attack_cat.value_counts() training_set.dtypes target = 'label' features = list(training_set.columns.difference([target]).values) training_set.drop(columns=['attack_cat'], inplace=True) testing_set.drop(columns=['attack_cat'], inplace=True) training_set.isna().sum() cat_cols = [col for col in training_set.columns if training_set[col].dtype == 'category'] cat_cols training_set.head(2)
code
121151048/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet') testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet') training_set.dtypes training_set.attack_cat.value_counts() training_set.dtypes target = 'label' features = list(training_set.columns.difference([target]).values) training_set.drop(columns=['attack_cat'], inplace=True) testing_set.drop(columns=['attack_cat'], inplace=True) training_set.isna().sum() cat_cols = [col for col in training_set.columns if training_set[col].dtype == 'category'] cat_cols def train_test_category_union(feature): tcl = len(training_set[feature].cat.categories) tcll = len(testing_set[feature].cat.categories) feature_cat_union = training_set[feature].cat.categories.union(testing_set[feature].cat.categories) training_set[feature] = training_set[feature].cat.set_categories(feature_cat_union) testing_set[feature] = testing_set[feature].cat.set_categories(feature_cat_union) for f in cat_cols: train_test_category_union(f)
code
121151048/cell_33
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.express as px import seaborn as sns training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet') testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet') training_set.dtypes training_set.attack_cat.value_counts() training_set.dtypes target = 'label' features = list(training_set.columns.difference([target]).values) training_set.drop(columns=['attack_cat'], inplace=True) testing_set.drop(columns=['attack_cat'], inplace=True) training_set.isna().sum() cat_cols = [col for col in training_set.columns if training_set[col].dtype == 'category'] cat_cols # fig, axes = plt.subplots(15,4, figsize=(25,70)) # axes=axes.flatten() # for i, col in enumerate(df_train.columns): # sns.histplot(df_train,x=col, stat='percent', hue="Label", bins=100, log_scale=(False, False), ax=axes[i]) fig=plt.figure(figsize=(25,70)) for i, col in enumerate(training_set.columns): plt.subplot(15,4,i+1) sns.histplot(training_set,x=col, stat='percent', hue="label", bins=100, log_scale=(False, False)) fig.tight_layout() plt.show() metadata = ['service'] training_set.drop(columns=metadata, inplace=True) testing_set.drop(columns=metadata, inplace=True) import plotly.express as px px.histogram(training_set, x='ct_state_ttl', color='label', barmode='group')
code
121151048/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet')
testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet')
print(training_set.shape, testing_set.shape)
code
121151048/cell_39
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.express as px import seaborn as sns training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet') testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet') training_set.dtypes training_set.attack_cat.value_counts() training_set.dtypes target = 'label' features = list(training_set.columns.difference([target]).values) training_set.drop(columns=['attack_cat'], inplace=True) testing_set.drop(columns=['attack_cat'], inplace=True) training_set.isna().sum() cat_cols = [col for col in training_set.columns if training_set[col].dtype == 'category'] cat_cols # fig, axes = plt.subplots(15,4, figsize=(25,70)) # axes=axes.flatten() # for i, col in enumerate(df_train.columns): # sns.histplot(df_train,x=col, stat='percent', hue="Label", bins=100, log_scale=(False, False), ax=axes[i]) fig=plt.figure(figsize=(25,70)) for i, col in enumerate(training_set.columns): plt.subplot(15,4,i+1) sns.histplot(training_set,x=col, stat='percent', hue="label", bins=100, log_scale=(False, False)) fig.tight_layout() plt.show() metadata = ['service'] training_set.drop(columns=metadata, inplace=True) testing_set.drop(columns=metadata, inplace=True) import plotly.express as px training_set.dttl = training_set.dttl.astype('uint8') pd.DataFrame(training_set.groupby(['dttl', 'label']).dttl.count())
code
121151048/cell_2
[ "text_plain_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
code
121151048/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet') testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet') training_set.dtypes training_set.attack_cat.value_counts() training_set.dtypes target = 'label' features = list(training_set.columns.difference([target]).values) training_set.drop(columns=['attack_cat'], inplace=True) testing_set.drop(columns=['attack_cat'], inplace=True) training_set.isna().sum() cat_cols = [col for col in training_set.columns if training_set[col].dtype == 'category'] cat_cols good_label_cols = [col for col in cat_cols if set(testing_set[col]).issubset(set(training_set[col]))] print(good_label_cols) bad_label_cols = list(set(cat_cols) - set(good_label_cols)) print(bad_label_cols)
code
121151048/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
121151048/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet')
testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet')
training_set.dtypes
code
121151048/cell_18
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet') testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet') training_set.dtypes training_set.attack_cat.value_counts() training_set.dtypes target = 'label' features = list(training_set.columns.difference([target]).values) training_set.drop(columns=['attack_cat'], inplace=True) testing_set.drop(columns=['attack_cat'], inplace=True) training_set.isna().sum() cat_cols = [col for col in training_set.columns if training_set[col].dtype == 'category'] cat_cols
code
121151048/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet') testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet') training_set.dtypes training_set.attack_cat.value_counts() training_set.dtypes target = 'label' features = list(training_set.columns.difference([target]).values) print(training_set[target].value_counts()) training_set.drop(columns=['attack_cat'], inplace=True) testing_set.drop(columns=['attack_cat'], inplace=True)
code
121151048/cell_3
[ "text_plain_output_1.png" ]
from fastcore.basics import *
from fastcore.parallel import *
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score, roc_curve, precision_score, recall_score, f1_score, accuracy_score
from os import cpu_count
from math import floor
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from sklearn.naive_bayes import GaussianNB
code
121151048/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet')
testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet')
training_set.dtypes
training_set.attack_cat.value_counts()
training_set.dtypes
target = 'label'
features = list(training_set.columns.difference([target]).values)
training_set.drop(columns=['attack_cat'], inplace=True)
testing_set.drop(columns=['attack_cat'], inplace=True)
training_set.isna().sum()
code
121151048/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet') testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet') training_set.dtypes training_set.attack_cat.value_counts() training_set.dtypes target = 'label' features = list(training_set.columns.difference([target]).values) training_set.drop(columns=['attack_cat'], inplace=True) testing_set.drop(columns=['attack_cat'], inplace=True) training_set.isna().sum() cat_cols = [col for col in training_set.columns if training_set[col].dtype == 'category'] cat_cols fig = plt.figure(figsize=(25, 70)) for i, col in enumerate(training_set.columns): plt.subplot(15, 4, i + 1) sns.histplot(training_set, x=col, stat='percent', hue='label', bins=100, log_scale=(False, False)) fig.tight_layout() plt.show()
code
121151048/cell_37
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.express as px import seaborn as sns training_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_training-set.parquet') testing_set = pd.read_parquet('/kaggle/input/unswnb15/UNSW_NB15_testing-set.parquet') training_set.dtypes training_set.attack_cat.value_counts() training_set.dtypes target = 'label' features = list(training_set.columns.difference([target]).values) training_set.drop(columns=['attack_cat'], inplace=True) testing_set.drop(columns=['attack_cat'], inplace=True) training_set.isna().sum() cat_cols = [col for col in training_set.columns if training_set[col].dtype == 'category'] cat_cols # fig, axes = plt.subplots(15,4, figsize=(25,70)) # axes=axes.flatten() # for i, col in enumerate(df_train.columns): # sns.histplot(df_train,x=col, stat='percent', hue="Label", bins=100, log_scale=(False, False), ax=axes[i]) fig=plt.figure(figsize=(25,70)) for i, col in enumerate(training_set.columns): plt.subplot(15,4,i+1) sns.histplot(training_set,x=col, stat='percent', hue="label", bins=100, log_scale=(False, False)) fig.tight_layout() plt.show() metadata = ['service'] training_set.drop(columns=metadata, inplace=True) testing_set.drop(columns=metadata, inplace=True) import plotly.express as px training_set.dttl = training_set.dttl.astype('uint8') px.histogram(training_set, x='dttl', color='label', barmode='group')
code
73090574/cell_13
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt parameters = {'axes.grid': True} plt.rcParams.update(parameters) import optuna from optuna.samplers import TPESampler import xgboost as xgb from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_squared_error df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0 df_test.drop('id', axis=1, inplace=True) df_test.isnull().sum().max() == 0 # Plotting some graphs of random features in train set fig = plt.figure(figsize = (15, 10)) for j in [j for j in range(1, 16)]: i = np.random.randint(0, df_train.columns.size - 1) plt.subplot(3, 5, j) sns.kdeplot(x = df_train[df_train.columns[i]]) plt.title(df_train.columns[i]) fig.tight_layout() print('15 graphs of random features in train set') plt.show() df_train_no_target = df_train.drop('loss', axis=1) scaler = StandardScaler() df_train_no_target_scal = pd.DataFrame(scaler.fit_transform(df_train_no_target), columns=df_train_no_target.columns) df_test = pd.DataFrame(scaler.fit_transform(df_test), columns=df_test.columns) df_train_no_target_scal.head()
code
73090574/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt parameters = {'axes.grid': True} plt.rcParams.update(parameters) import optuna from optuna.samplers import TPESampler import xgboost as xgb from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_squared_error df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0 fig = plt.figure(figsize=(15, 10)) for j in [j for j in range(1, 16)]: i = np.random.randint(0, df_train.columns.size - 1) plt.subplot(3, 5, j) sns.kdeplot(x=df_train[df_train.columns[i]]) plt.title(df_train.columns[i]) fig.tight_layout() print('15 graphs of random features in train set') plt.show()
code
73090574/cell_25
[ "text_plain_output_1.png" ]
from optuna.samplers import TPESampler from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import optuna import pandas as pd import seaborn as sns import xgboost as xgb import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt parameters = {'axes.grid': True} plt.rcParams.update(parameters) import optuna from optuna.samplers import TPESampler import xgboost as xgb from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_squared_error df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0 df_test.drop('id', axis=1, inplace=True) df_test.isnull().sum().max() == 0 # Plotting some graphs of random features in train set fig = plt.figure(figsize = (15, 10)) for j in [j for j in range(1, 16)]: i = np.random.randint(0, df_train.columns.size - 1) plt.subplot(3, 5, j) sns.kdeplot(x = df_train[df_train.columns[i]]) plt.title(df_train.columns[i]) fig.tight_layout() print('15 graphs of random features in train set') plt.show() df_train_no_target = df_train.drop('loss', axis=1) scaler = StandardScaler() df_train_no_target_scal = pd.DataFrame(scaler.fit_transform(df_train_no_target), columns=df_train_no_target.columns) df_test = pd.DataFrame(scaler.fit_transform(df_test), columns=df_test.columns) X = df_train_no_target_scal y = df_train['loss'] model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', max_bin=512, silent=False, random_state=17) model.fit(X_train, y_train) preds_test = model.predict(X_test) preds_train = model.predict(X_train) def check_model(model=model, n_splits=10): scores = [] cv = KFold(n_splits, shuffle=True) for train_idx, test_idx in cv.split(X): X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx]) X_test, y_test = (X.iloc[test_idx], y.iloc[test_idx]) model.fit(X_train, y_train) preds = model.predict(X_test) score = np.sqrt(mean_squared_error(y_test, preds)) scores.append(score) def objective(trial): params = {'tweedie_variance_power': trial.suggest_discrete_uniform('tweedie_variance_power', 1.03, 1.12, 0.0001), 'n_estimators': trial.suggest_int('n_estimators', 3500, 5000), 'max_depth': trial.suggest_int('max_depth', 4, 8), 'eta': trial.suggest_float('eta', 0.005, 0.012), 'subsample': trial.suggest_discrete_uniform('subsample', 0.3, 0.6, 0.01), 'colsample_bytree': trial.suggest_discrete_uniform('colsample_bytree', 0.4, 1.0, 0.01), 'colsample_bylevel': trial.suggest_discrete_uniform('colsample_bylevel', 0.5, 0.9, 0.01), 'colsample_bynode': trial.suggest_discrete_uniform('colsample_bynode', 0.2, 1.0, 0.01), 'min_child_weight': trial.suggest_loguniform('min_child_weight', 0.001, 1), 'reg_alpha': trial.suggest_loguniform('reg_alpha', 1, 100.0), 'reg_lambda': trial.suggest_float('reg_lambda', 1000, 10000), 'max_delta_step': trial.suggest_loguniform('max_delta_step', 0.1, 10000.0), 'gamma': trial.suggest_loguniform('gamma', 0.001, 1), 'base_score': trial.suggest_float('base_score', 0.42, 0.48)} model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', 
sampling_method='gradient_based', n_jobs=-1, max_bin=512, **params, random_state=17) scores = [] cv = KFold(n_splits=10, shuffle=True) for train_idx, test_idx in cv.split(X): X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx]) X_test, y_test = (X.iloc[test_idx], y.iloc[test_idx]) model.fit(X_train, y_train) preds = model.predict(X_test) score = np.sqrt(mean_squared_error(y_test, preds)) scores.append(score / cv.n_splits) return sum(scores) study = optuna.create_study(direction='minimize', sampler=TPESampler()) study.optimize(objective, n_trials=30) params = study.best_params params check_model(model=xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', n_jobs=-1, max_bin=512, random_state=17, silent=False, **params)) model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', n_jobs=-1, max_bin=512, random_state=17, silent=False, **params) model.fit(X, y) preds = model.predict(df_test) preds = preds.astype(int)
code
73090574/cell_23
[ "text_html_output_1.png" ]
from optuna.samplers import TPESampler from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import optuna import pandas as pd import seaborn as sns import xgboost as xgb import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt parameters = {'axes.grid': True} plt.rcParams.update(parameters) import optuna from optuna.samplers import TPESampler import xgboost as xgb from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_squared_error df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0 df_test.drop('id', axis=1, inplace=True) df_test.isnull().sum().max() == 0 # Plotting some graphs of random features in train set fig = plt.figure(figsize = (15, 10)) for j in [j for j in range(1, 16)]: i = np.random.randint(0, df_train.columns.size - 1) plt.subplot(3, 5, j) sns.kdeplot(x = df_train[df_train.columns[i]]) plt.title(df_train.columns[i]) fig.tight_layout() print('15 graphs of random features in train set') plt.show() df_train_no_target = df_train.drop('loss', axis=1) scaler = StandardScaler() df_train_no_target_scal = pd.DataFrame(scaler.fit_transform(df_train_no_target), columns=df_train_no_target.columns) df_test = pd.DataFrame(scaler.fit_transform(df_test), columns=df_test.columns) X = df_train_no_target_scal y = df_train['loss'] model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', max_bin=512, silent=False, random_state=17) model.fit(X_train, y_train) preds_test = model.predict(X_test) preds_train = model.predict(X_train) def check_model(model=model, n_splits=10): scores = [] cv = KFold(n_splits, shuffle=True) for train_idx, test_idx in cv.split(X): X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx]) X_test, y_test = (X.iloc[test_idx], y.iloc[test_idx]) model.fit(X_train, y_train) preds = model.predict(X_test) score = np.sqrt(mean_squared_error(y_test, preds)) scores.append(score) def objective(trial): params = {'tweedie_variance_power': trial.suggest_discrete_uniform('tweedie_variance_power', 1.03, 1.12, 0.0001), 'n_estimators': trial.suggest_int('n_estimators', 3500, 5000), 'max_depth': trial.suggest_int('max_depth', 4, 8), 'eta': trial.suggest_float('eta', 0.005, 0.012), 'subsample': trial.suggest_discrete_uniform('subsample', 0.3, 0.6, 0.01), 'colsample_bytree': trial.suggest_discrete_uniform('colsample_bytree', 0.4, 1.0, 0.01), 'colsample_bylevel': trial.suggest_discrete_uniform('colsample_bylevel', 0.5, 0.9, 0.01), 'colsample_bynode': trial.suggest_discrete_uniform('colsample_bynode', 0.2, 1.0, 0.01), 'min_child_weight': trial.suggest_loguniform('min_child_weight', 0.001, 1), 'reg_alpha': trial.suggest_loguniform('reg_alpha', 1, 100.0), 'reg_lambda': trial.suggest_float('reg_lambda', 1000, 10000), 'max_delta_step': trial.suggest_loguniform('max_delta_step', 0.1, 10000.0), 'gamma': trial.suggest_loguniform('gamma', 0.001, 1), 'base_score': trial.suggest_float('base_score', 0.42, 0.48)} model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', 
sampling_method='gradient_based', n_jobs=-1, max_bin=512, **params, random_state=17) scores = [] cv = KFold(n_splits=10, shuffle=True) for train_idx, test_idx in cv.split(X): X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx]) X_test, y_test = (X.iloc[test_idx], y.iloc[test_idx]) model.fit(X_train, y_train) preds = model.predict(X_test) score = np.sqrt(mean_squared_error(y_test, preds)) scores.append(score / cv.n_splits) return sum(scores) study = optuna.create_study(direction='minimize', sampler=TPESampler()) study.optimize(objective, n_trials=30) params = study.best_params params
code
73090574/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import xgboost as xgb import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt parameters = {'axes.grid': True} plt.rcParams.update(parameters) import optuna from optuna.samplers import TPESampler import xgboost as xgb from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_squared_error df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0 df_test.drop('id', axis=1, inplace=True) df_test.isnull().sum().max() == 0 # Plotting some graphs of random features in train set fig = plt.figure(figsize = (15, 10)) for j in [j for j in range(1, 16)]: i = np.random.randint(0, df_train.columns.size - 1) plt.subplot(3, 5, j) sns.kdeplot(x = df_train[df_train.columns[i]]) plt.title(df_train.columns[i]) fig.tight_layout() print('15 graphs of random features in train set') plt.show() df_train_no_target = df_train.drop('loss', axis=1) scaler = StandardScaler() df_train_no_target_scal = pd.DataFrame(scaler.fit_transform(df_train_no_target), columns=df_train_no_target.columns) df_test = pd.DataFrame(scaler.fit_transform(df_test), columns=df_test.columns) X = df_train_no_target_scal y = df_train['loss'] model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', max_bin=512, silent=False, random_state=17) model.fit(X_train, y_train) preds_test = model.predict(X_test) preds_train = model.predict(X_train) def check_model(model=model, n_splits=10): scores = [] cv = KFold(n_splits, shuffle=True) for train_idx, test_idx in cv.split(X): X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx]) X_test, y_test = (X.iloc[test_idx], y.iloc[test_idx]) model.fit(X_train, y_train) preds = model.predict(X_test) score = np.sqrt(mean_squared_error(y_test, preds)) scores.append(score) check_model(model)
code
73090574/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv')
df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
df_test.head()
code
73090574/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import mean_squared_error import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import xgboost as xgb import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt parameters = {'axes.grid': True} plt.rcParams.update(parameters) import optuna from optuna.samplers import TPESampler import xgboost as xgb from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_squared_error df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0 # Plotting some graphs of random features in train set fig = plt.figure(figsize = (15, 10)) for j in [j for j in range(1, 16)]: i = np.random.randint(0, df_train.columns.size - 1) plt.subplot(3, 5, j) sns.kdeplot(x = df_train[df_train.columns[i]]) plt.title(df_train.columns[i]) fig.tight_layout() print('15 graphs of random features in train set') plt.show() model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', max_bin=512, silent=False, random_state=17) model.fit(X_train, y_train) preds_test = model.predict(X_test) preds_train = model.predict(X_train) print(f' Test RMSE score: {np.sqrt(mean_squared_error(y_test, preds_test))}') print(f' Train RMSE score: {np.sqrt(mean_squared_error(y_train, preds_train))}')
code
73090574/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv')
df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
df_test.drop('id', axis=1, inplace=True)
df_test.isnull().sum().max() == 0
code
73090574/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv')
df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
df_train.head()
code
73090574/cell_17
[ "text_plain_output_1.png" ]
import xgboost as xgb

model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', max_bin=512, silent=False, random_state=17)
model.fit(X_train, y_train)
preds_test = model.predict(X_test)
preds_train = model.predict(X_train)
code
73090574/cell_24
[ "text_plain_output_1.png" ]
from optuna.samplers import TPESampler from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import optuna import pandas as pd import seaborn as sns import xgboost as xgb import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt parameters = {'axes.grid': True} plt.rcParams.update(parameters) import optuna from optuna.samplers import TPESampler import xgboost as xgb from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_squared_error df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0 df_test.drop('id', axis=1, inplace=True) df_test.isnull().sum().max() == 0 # Plotting some graphs of random features in train set fig = plt.figure(figsize = (15, 10)) for j in [j for j in range(1, 16)]: i = np.random.randint(0, df_train.columns.size - 1) plt.subplot(3, 5, j) sns.kdeplot(x = df_train[df_train.columns[i]]) plt.title(df_train.columns[i]) fig.tight_layout() print('15 graphs of random features in train set') plt.show() df_train_no_target = df_train.drop('loss', axis=1) scaler = StandardScaler() df_train_no_target_scal = pd.DataFrame(scaler.fit_transform(df_train_no_target), columns=df_train_no_target.columns) df_test = pd.DataFrame(scaler.fit_transform(df_test), columns=df_test.columns) X = df_train_no_target_scal y = df_train['loss'] model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', max_bin=512, silent=False, random_state=17) model.fit(X_train, y_train) preds_test = model.predict(X_test) preds_train = model.predict(X_train) def check_model(model=model, n_splits=10): scores = [] cv = KFold(n_splits, shuffle=True) for train_idx, test_idx in cv.split(X): X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx]) X_test, y_test = (X.iloc[test_idx], y.iloc[test_idx]) model.fit(X_train, y_train) preds = model.predict(X_test) score = np.sqrt(mean_squared_error(y_test, preds)) scores.append(score) def objective(trial): params = {'tweedie_variance_power': trial.suggest_discrete_uniform('tweedie_variance_power', 1.03, 1.12, 0.0001), 'n_estimators': trial.suggest_int('n_estimators', 3500, 5000), 'max_depth': trial.suggest_int('max_depth', 4, 8), 'eta': trial.suggest_float('eta', 0.005, 0.012), 'subsample': trial.suggest_discrete_uniform('subsample', 0.3, 0.6, 0.01), 'colsample_bytree': trial.suggest_discrete_uniform('colsample_bytree', 0.4, 1.0, 0.01), 'colsample_bylevel': trial.suggest_discrete_uniform('colsample_bylevel', 0.5, 0.9, 0.01), 'colsample_bynode': trial.suggest_discrete_uniform('colsample_bynode', 0.2, 1.0, 0.01), 'min_child_weight': trial.suggest_loguniform('min_child_weight', 0.001, 1), 'reg_alpha': trial.suggest_loguniform('reg_alpha', 1, 100.0), 'reg_lambda': trial.suggest_float('reg_lambda', 1000, 10000), 'max_delta_step': trial.suggest_loguniform('max_delta_step', 0.1, 10000.0), 'gamma': trial.suggest_loguniform('gamma', 0.001, 1), 'base_score': trial.suggest_float('base_score', 0.42, 0.48)} model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', n_jobs=-1, max_bin=512, **params, random_state=17) scores = [] cv = KFold(n_splits=10, shuffle=True) for train_idx, test_idx in cv.split(X): X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx]) X_test, y_test = (X.iloc[test_idx], y.iloc[test_idx]) model.fit(X_train, y_train) preds = model.predict(X_test) score = np.sqrt(mean_squared_error(y_test, preds)) scores.append(score / cv.n_splits) return sum(scores) study = optuna.create_study(direction='minimize', sampler=TPESampler()) study.optimize(objective, n_trials=30) params = study.best_params params check_model(model=xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', n_jobs=-1, max_bin=512, random_state=17, silent=False, **params))
code
73090574/cell_14
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt parameters = {'axes.grid': True} plt.rcParams.update(parameters) import optuna from optuna.samplers import TPESampler import xgboost as xgb from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_squared_error df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0 df_test.drop('id', axis=1, inplace=True) df_test.isnull().sum().max() == 0 # Plotting some graphs of random features in train set fig = plt.figure(figsize = (15, 10)) for j in [j for j in range(1, 16)]: i = np.random.randint(0, df_train.columns.size - 1) plt.subplot(3, 5, j) sns.kdeplot(x = df_train[df_train.columns[i]]) plt.title(df_train.columns[i]) fig.tight_layout() print('15 graphs of random features in train set') plt.show() df_train_no_target = df_train.drop('loss', axis=1) scaler = StandardScaler() df_train_no_target_scal = pd.DataFrame(scaler.fit_transform(df_train_no_target), columns=df_train_no_target.columns) df_test = pd.DataFrame(scaler.fit_transform(df_test), columns=df_test.columns) df_test.head()
code
73090574/cell_22
[ "text_html_output_1.png" ]
from optuna.samplers import TPESampler from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import optuna import pandas as pd import seaborn as sns import xgboost as xgb import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt parameters = {'axes.grid': True} plt.rcParams.update(parameters) import optuna from optuna.samplers import TPESampler import xgboost as xgb from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_squared_error df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0 df_test.drop('id', axis=1, inplace=True) df_test.isnull().sum().max() == 0 # Plotting some graphs of random features in train set fig = plt.figure(figsize = (15, 10)) for j in [j for j in range(1, 16)]: i = np.random.randint(0, df_train.columns.size - 1) plt.subplot(3, 5, j) sns.kdeplot(x = df_train[df_train.columns[i]]) plt.title(df_train.columns[i]) fig.tight_layout() print('15 graphs of random features in train set') plt.show() df_train_no_target = df_train.drop('loss', axis=1) scaler = StandardScaler() df_train_no_target_scal = pd.DataFrame(scaler.fit_transform(df_train_no_target), columns=df_train_no_target.columns) df_test = pd.DataFrame(scaler.fit_transform(df_test), columns=df_test.columns) X = df_train_no_target_scal y = df_train['loss'] model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', max_bin=512, silent=False, random_state=17) model.fit(X_train, y_train) preds_test = model.predict(X_test) preds_train = model.predict(X_train) def check_model(model=model, n_splits=10): scores = [] cv = KFold(n_splits, shuffle=True) for train_idx, test_idx in cv.split(X): X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx]) X_test, y_test = (X.iloc[test_idx], y.iloc[test_idx]) model.fit(X_train, y_train) preds = model.predict(X_test) score = np.sqrt(mean_squared_error(y_test, preds)) scores.append(score) def objective(trial): params = {'tweedie_variance_power': trial.suggest_discrete_uniform('tweedie_variance_power', 1.03, 1.12, 0.0001), 'n_estimators': trial.suggest_int('n_estimators', 3500, 5000), 'max_depth': trial.suggest_int('max_depth', 4, 8), 'eta': trial.suggest_float('eta', 0.005, 0.012), 'subsample': trial.suggest_discrete_uniform('subsample', 0.3, 0.6, 0.01), 'colsample_bytree': trial.suggest_discrete_uniform('colsample_bytree', 0.4, 1.0, 0.01), 'colsample_bylevel': trial.suggest_discrete_uniform('colsample_bylevel', 0.5, 0.9, 0.01), 'colsample_bynode': trial.suggest_discrete_uniform('colsample_bynode', 0.2, 1.0, 0.01), 'min_child_weight': trial.suggest_loguniform('min_child_weight', 0.001, 1), 'reg_alpha': trial.suggest_loguniform('reg_alpha', 1, 100.0), 'reg_lambda': trial.suggest_float('reg_lambda', 1000, 10000), 'max_delta_step': trial.suggest_loguniform('max_delta_step', 0.1, 10000.0), 'gamma': trial.suggest_loguniform('gamma', 0.001, 1), 'base_score': trial.suggest_float('base_score', 0.42, 0.48)} model = xgb.XGBRegressor(objective='reg:tweedie', tree_method='gpu_hist', predictor='gpu_predictor', sampling_method='gradient_based', n_jobs=-1, max_bin=512, **params, random_state=17) scores = [] cv = KFold(n_splits=10, shuffle=True) for train_idx, test_idx in cv.split(X): X_train, y_train = (X.iloc[train_idx], y.iloc[train_idx]) X_test, y_test = (X.iloc[test_idx], y.iloc[test_idx]) model.fit(X_train, y_train) preds = model.predict(X_test) score = np.sqrt(mean_squared_error(y_test, preds)) scores.append(score / cv.n_splits) return sum(scores) study = optuna.create_study(direction='minimize', sampler=TPESampler()) study.optimize(objective, n_trials=30)
code
73090574/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt parameters = {'axes.grid': True} plt.rcParams.update(parameters) import optuna from optuna.samplers import TPESampler import xgboost as xgb from sklearn.model_selection import train_test_split, cross_val_score, KFold from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_squared_error df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0 # Plotting some graphs of random features in train set fig = plt.figure(figsize = (15, 10)) for j in [j for j in range(1, 16)]: i = np.random.randint(0, df_train.columns.size - 1) plt.subplot(3, 5, j) sns.kdeplot(x = df_train[df_train.columns[i]]) plt.title(df_train.columns[i]) fig.tight_layout() print('15 graphs of random features in train set') plt.show() plt.figure(figsize=(15, 5)) sns.histplot(x=df_train['loss'], kde=True) plt.title('Distribution of target (loss)')
code
73090574/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd df_train = pd.read_csv('../input/tabular-playground-series-aug-2021/train.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') df_train.drop('id', axis=1, inplace=True) df_train.isnull().sum().max() == 0
code
128006176/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import os import pandas as pd root_dir = '/kaggle/input/image-matching-challenge-2023' train_labels_file = os.path.join(root_dir, 'train/train_labels.csv') train_labels = pd.read_csv(train_labels_file) train_labels.columns dataset_counts = train_labels['dataset'].value_counts() scene_counts = train_labels['scene'].value_counts() plt.figure(figsize=(10, 5)) plt.bar(scene_counts.index, scene_counts.values) plt.xlabel('Scene') plt.ylabel('Number of Images') plt.title('Distribution of Images per Scene') plt.show()
code
128006176/cell_4
[ "image_output_1.png" ]
!wget https://raw.githubusercontent.com/colmap/colmap/dev/scripts/python/read_write_model.py
code
128006176/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import os import pandas as pd root_dir = '/kaggle/input/image-matching-challenge-2023' train_labels_file = os.path.join(root_dir, 'train/train_labels.csv') train_labels = pd.read_csv(train_labels_file) train_labels.columns dataset_counts = train_labels['dataset'].value_counts() scene_counts = train_labels['scene'].value_counts() plt.figure(figsize=(10, 5)) plt.bar(dataset_counts.index, dataset_counts.values) plt.xlabel('Dataset') plt.ylabel('Number of Images') plt.title('Distribution of Images per Dataset') plt.show()
code
128006176/cell_7
[ "image_output_1.png" ]
import os import pandas as pd root_dir = '/kaggle/input/image-matching-challenge-2023' train_labels_file = os.path.join(root_dir, 'train/train_labels.csv') train_labels = pd.read_csv(train_labels_file) train_labels.head()
code
128006176/cell_18
[ "image_output_1.png" ]
from PIL import Image import matplotlib.pyplot as plt import os import pandas as pd import random import read_write_model root_dir = '/kaggle/input/image-matching-challenge-2023' train_labels_file = os.path.join(root_dir, 'train/train_labels.csv') train_labels = pd.read_csv(train_labels_file) train_labels.columns dataset_counts = train_labels['dataset'].value_counts() scene_counts = train_labels['scene'].value_counts() # Visualize images num_images_to_show = 5 random_image_indices = random.sample(range(len(train_labels)), num_images_to_show) fig, axes = plt.subplots(1, num_images_to_show, figsize=(20, 5)) train_folder = os.path.join(root_dir, 'train') for i, image_index in enumerate(random_image_indices): relative_image_path = train_labels.iloc[image_index]['image_path'] image_path = os.path.join(train_folder, relative_image_path) img = Image.open(image_path) axes[i].imshow(img) axes[i].set_title(f"Dataset: {train_labels.iloc[image_index]['dataset']} | Scene: {train_labels.iloc[image_index]['scene']}") axes[i].axis('off') plt.show() def plot_sfm_3d_reconstruction(reconstruction, num_points=1000, num_cameras=50): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') point3D_ids = list(reconstruction[2].keys()) selected_point3D_ids = random.sample(point3D_ids, min(num_points, len(point3D_ids))) for point3D_id in selected_point3D_ids: point3D = reconstruction[2][point3D_id] ax.scatter(point3D.xyz[0], point3D.xyz[1], point3D.xyz[2], c='b', marker='o') image_ids = list(reconstruction[1].keys()) selected_image_ids = random.sample(image_ids, min(num_cameras, len(image_ids))) for image_id in selected_image_ids: image = reconstruction[1][image_id] camera_center = -image.qvec2rotmat().T @ image.tvec ax.scatter(camera_center[0], camera_center[1], camera_center[2], c='r', marker='^') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.show() sample_sfm_folder = os.path.join(train_folder, 'urban/kyiv-puppet-theater/sfm') cameras, images, points3D = read_write_model.read_model(sample_sfm_folder) reconstruction = (cameras, images, points3D) plot_sfm_3d_reconstruction(reconstruction)
code
128006176/cell_8
[ "image_output_1.png" ]
import os import pandas as pd root_dir = '/kaggle/input/image-matching-challenge-2023' train_labels_file = os.path.join(root_dir, 'train/train_labels.csv') train_labels = pd.read_csv(train_labels_file) train_labels.columns
code
128006176/cell_16
[ "text_plain_output_1.png" ]
from PIL import Image import matplotlib.pyplot as plt import os import pandas as pd import random root_dir = '/kaggle/input/image-matching-challenge-2023' train_labels_file = os.path.join(root_dir, 'train/train_labels.csv') train_labels = pd.read_csv(train_labels_file) train_labels.columns dataset_counts = train_labels['dataset'].value_counts() scene_counts = train_labels['scene'].value_counts() num_images_to_show = 5 random_image_indices = random.sample(range(len(train_labels)), num_images_to_show) fig, axes = plt.subplots(1, num_images_to_show, figsize=(20, 5)) train_folder = os.path.join(root_dir, 'train') for i, image_index in enumerate(random_image_indices): relative_image_path = train_labels.iloc[image_index]['image_path'] image_path = os.path.join(train_folder, relative_image_path) img = Image.open(image_path) axes[i].imshow(img) axes[i].set_title(f"Dataset: {train_labels.iloc[image_index]['dataset']} | Scene: {train_labels.iloc[image_index]['scene']}") axes[i].axis('off') plt.show()
code
128006176/cell_24
[ "image_output_1.png" ]
from PIL import Image import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random import read_write_model root_dir = '/kaggle/input/image-matching-challenge-2023' train_labels_file = os.path.join(root_dir, 'train/train_labels.csv') train_labels = pd.read_csv(train_labels_file) train_labels.columns dataset_counts = train_labels['dataset'].value_counts() scene_counts = train_labels['scene'].value_counts() # Visualize images num_images_to_show = 5 random_image_indices = random.sample(range(len(train_labels)), num_images_to_show) fig, axes = plt.subplots(1, num_images_to_show, figsize=(20, 5)) train_folder = os.path.join(root_dir, 'train') for i, image_index in enumerate(random_image_indices): relative_image_path = train_labels.iloc[image_index]['image_path'] image_path = os.path.join(train_folder, relative_image_path) img = Image.open(image_path) axes[i].imshow(img) axes[i].set_title(f"Dataset: {train_labels.iloc[image_index]['dataset']} | Scene: {train_labels.iloc[image_index]['scene']}") axes[i].axis('off') plt.show() # Explore 3D reconstructions def plot_sfm_3d_reconstruction(reconstruction, num_points=1000, num_cameras=50): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # Plot 3D points point3D_ids = list(reconstruction[2].keys()) selected_point3D_ids = random.sample(point3D_ids, min(num_points, len(point3D_ids))) for point3D_id in selected_point3D_ids: point3D = reconstruction[2][point3D_id] ax.scatter(point3D.xyz[0], point3D.xyz[1], point3D.xyz[2], c='b', marker='o') # Plot camera poses image_ids = list(reconstruction[1].keys()) selected_image_ids = random.sample(image_ids, min(num_cameras, len(image_ids))) for image_id in selected_image_ids: image = reconstruction[1][image_id] camera_center = -image.qvec2rotmat().T @ image.tvec ax.scatter(camera_center[0], camera_center[1], camera_center[2], c='r', marker='^') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.show() # You can replace the following path with any other scene's sfm folder path sample_sfm_folder = os.path.join(train_folder, 'urban/kyiv-puppet-theater/sfm') cameras, images, points3D = read_write_model.read_model(sample_sfm_folder) reconstruction = (cameras, images, points3D) plot_sfm_3d_reconstruction(reconstruction) def string_to_matrix(matrix_string): return np.array(list(map(float, matrix_string.split(';')))).reshape(3, 3) def string_to_vector(vector_string): return np.array(list(map(float, vector_string.split(';')))) rotation_matrices = train_labels['rotation_matrix'].apply(string_to_matrix) translation_vectors = train_labels['translation_vector'].apply(string_to_vector) rotation_angles = [np.rad2deg(np.arccos((np.trace(R) - 1) / 2)) for R in rotation_matrices] translation_magnitudes = [np.linalg.norm(tvec) for tvec in translation_vectors] plt.figure() plt.hist(translation_magnitudes, bins=50) plt.xlabel('Translation Magnitude (meters)') plt.ylabel('Number of Images') plt.title('Distribution of Translation Magnitudes') plt.show()
code
128006176/cell_22
[ "image_output_1.png" ]
from PIL import Image import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random import read_write_model root_dir = '/kaggle/input/image-matching-challenge-2023' train_labels_file = os.path.join(root_dir, 'train/train_labels.csv') train_labels = pd.read_csv(train_labels_file) train_labels.columns dataset_counts = train_labels['dataset'].value_counts() scene_counts = train_labels['scene'].value_counts() # Visualize images num_images_to_show = 5 random_image_indices = random.sample(range(len(train_labels)), num_images_to_show) fig, axes = plt.subplots(1, num_images_to_show, figsize=(20, 5)) train_folder = os.path.join(root_dir, 'train') for i, image_index in enumerate(random_image_indices): relative_image_path = train_labels.iloc[image_index]['image_path'] image_path = os.path.join(train_folder, relative_image_path) img = Image.open(image_path) axes[i].imshow(img) axes[i].set_title(f"Dataset: {train_labels.iloc[image_index]['dataset']} | Scene: {train_labels.iloc[image_index]['scene']}") axes[i].axis('off') plt.show() # Explore 3D reconstructions def plot_sfm_3d_reconstruction(reconstruction, num_points=1000, num_cameras=50): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # Plot 3D points point3D_ids = list(reconstruction[2].keys()) selected_point3D_ids = random.sample(point3D_ids, min(num_points, len(point3D_ids))) for point3D_id in selected_point3D_ids: point3D = reconstruction[2][point3D_id] ax.scatter(point3D.xyz[0], point3D.xyz[1], point3D.xyz[2], c='b', marker='o') # Plot camera poses image_ids = list(reconstruction[1].keys()) selected_image_ids = random.sample(image_ids, min(num_cameras, len(image_ids))) for image_id in selected_image_ids: image = reconstruction[1][image_id] camera_center = -image.qvec2rotmat().T @ image.tvec ax.scatter(camera_center[0], camera_center[1], camera_center[2], c='r', marker='^') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.show() # You can replace the following path with any other scene's sfm folder path sample_sfm_folder = os.path.join(train_folder, 'urban/kyiv-puppet-theater/sfm') cameras, images, points3D = read_write_model.read_model(sample_sfm_folder) reconstruction = (cameras, images, points3D) plot_sfm_3d_reconstruction(reconstruction) def string_to_matrix(matrix_string): return np.array(list(map(float, matrix_string.split(';')))).reshape(3, 3) def string_to_vector(vector_string): return np.array(list(map(float, vector_string.split(';')))) rotation_matrices = train_labels['rotation_matrix'].apply(string_to_matrix) translation_vectors = train_labels['translation_vector'].apply(string_to_vector) rotation_angles = [np.rad2deg(np.arccos((np.trace(R) - 1) / 2)) for R in rotation_matrices] plt.figure() plt.hist(rotation_angles, bins=50) plt.xlabel('Rotation Angle (degrees)') plt.ylabel('Number of Images') plt.title('Distribution of Rotation Angles') plt.show()
code
34118365/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from glob import glob from itertools import chain from keras.applications.resnet_v2 import ResNet50V2 from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, TensorBoard, ReduceLROnPlateau from keras.layers import Conv2D, SeparableConv2D, MaxPool2D, LeakyReLU, Activation from keras.layers import GlobalAveragePooling2D, MaxPooling2D, Reshape from keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization, AveragePooling2D from keras.models import Sequential, Model from keras.models import model_from_json from keras.optimizers import Adam, RMSprop from keras.preprocessing.image import ImageDataGenerator from random import sample import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import sklearn.model_selection as skl all_xray_df = pd.read_csv('/kaggle/input/data/Data_Entry_2017.csv') all_image_paths = {os.path.basename(x): x for x in glob(os.path.join('/kaggle/input/data', 'images*', '*', '*.png'))} all_xray_df['path'] = all_xray_df['Image Index'].map(all_image_paths.get) all_xray_df.sample(3) all_labels = np.unique(list(chain(*all_xray_df['Finding Labels'].map(lambda x: x.split('|')).tolist()))) all_labels = [x for x in all_labels if len(x) > 0] for c_label in all_labels: if len(c_label) > 1: all_xray_df[c_label] = all_xray_df['Finding Labels'].map(lambda finding: 1.0 if c_label in finding else 0) all_xray_df['pneumonia_class'] = all_xray_df['Pneumonia'] all_xray_df.sample(3) def create_splits(df, test_size, column_name): train_df, valid_df = skl.train_test_split(df, test_size=test_size, stratify=df[column_name]) p_inds = train_df[train_df[column_name] == 1].index.tolist() np_inds = train_df[train_df[column_name] == 0].index.tolist() np_sample = sample(np_inds, len(p_inds)) train_df = train_df.loc[p_inds + np_sample] p_inds = valid_df[valid_df[column_name] == 1].index.tolist() np_inds = valid_df[valid_df[column_name] == 0].index.tolist() np_sample = sample(np_inds, 4 * len(p_inds)) valid_df = valid_df.loc[p_inds + np_sample] return (train_df, valid_df) train_df, valid_df = create_splits(all_xray_df, 0.2, 'pneumonia_class') def my_image_augmentation(): my_idg = ImageDataGenerator(rescale=1.0 / 255.0, horizontal_flip=True, vertical_flip=False, height_shift_range=0.1, width_shift_range=0.1, rotation_range=20, shear_range=0.1, zoom_range=0.1) return my_idg def make_train_gen(train_df, img_size, batch_size): idg = my_image_augmentation() train_gen = idg.flow_from_dataframe(dataframe=train_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size) return train_gen def make_val_gen(valid_df, img_size, batch_size): val_idg = ImageDataGenerator(rescale=1.0 / 255.0) val_gen = val_idg.flow_from_dataframe(dataframe=valid_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size) return val_gen batch_size = 64 img_size = (224, 224) train_gen = make_train_gen(train_df, img_size, batch_size) val_gen = make_val_gen(valid_df, img_size, batch_size) def load_pretrained_model(): """ model = VGG16(include_top=True, weights='imagenet') transfer_layer = model.get_layer('block5_pool') vgg_model = Model(inputs = model.input, outputs = transfer_layer.output) for layer in vgg_model.layers[0:17]: layer.trainable = False """ model = ResNet50V2(include_top=False, weights='imagenet') resnet_model = Model(inputs=model.input, outputs=model.output, name='Resnet') return resnet_model def build_my_model(): """ # my_model = Sequential() # ....add your pre-trained model, and then whatever additional layers you think you might # want for fine-tuning (Flatteen, Dense, Dropout, etc.) # if you want to compile your model within this function, consider which layers of your pre-trained model, # you want to freeze before you compile # also make sure you set your optimizer, loss function, and metrics to monitor # Todo my_model = Sequential() vgg_model = load_pretrained_model() # Add the convolutional part of the VGG16 model from above. my_model.add(vgg_model) # Flatten the output of the VGG16 model because it is from a # convolutional layer. my_model.add(Flatten()) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. my_model.add(Dense(1024, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. my_model.add(Dense(512, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. my_model.add(Dense(256, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. my_model.add(Dense(1, activation='sigmoid')) """ resnet_model = load_pretrained_model() my_model = Sequential([resnet_model, BatchNormalization(), Conv2D(1024, 1, activation='relu'), Dropout(0.5), BatchNormalization(), Conv2D(256, 1, activation='relu'), Dropout(0.5), AveragePooling2D((7, 7)), BatchNormalization(), Conv2D(1, 1, activation='sigmoid'), Reshape((-1,))]) return my_model my_model = build_my_model() my_model.summary() weight_path = '{}_my_model.best.hdf5'.format('xray_class') checkpoint = ModelCheckpoint(weight_path, monitor='val_binary_accuracy', verbose=1, save_best_only=True, mode='auto', save_weights_only=True) early = EarlyStopping(monitor='val_binary_accuracy', mode='auto', patience=5) def scheduler(epoch, lr): if epoch < 1: return lr else: return lr * np.exp(-0.1) lr_scheduler = LearningRateScheduler(scheduler) callbacks_list = [checkpoint, early, lr_scheduler] from keras.models import model_from_json model_path = '/kaggle/input/model-and-weights/my_model2.json' weight_path = '/kaggle/input/model-and-weights/xray_class_my_model2.best.hdf5' json_file = open(model_path, 'r') loaded_model_json = json_file.read() json_file.close() my_model = model_from_json(loaded_model_json) my_model.load_weights(weight_path) optimizer = RMSprop(learning_rate=0.0001) loss = 'binary_crossentropy' metrics = ['binary_accuracy'] my_model.compile(optimizer=optimizer, loss=loss, metrics=metrics) history = my_model.fit_generator(train_gen, validation_data=(valX, valY), epochs=10, callbacks=callbacks_list)
code
34118365/cell_2
[ "text_plain_output_1.png" ]
from glob import glob import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) all_xray_df = pd.read_csv('/kaggle/input/data/Data_Entry_2017.csv') all_image_paths = {os.path.basename(x): x for x in glob(os.path.join('/kaggle/input/data', 'images*', '*', '*.png'))} print('Scans found:', len(all_image_paths), ', Total Headers', all_xray_df.shape[0]) all_xray_df['path'] = all_xray_df['Image Index'].map(all_image_paths.get) all_xray_df.sample(3)
code
34118365/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import pandas as pd import os from glob import glob import matplotlib.pyplot as plt from itertools import chain from random import sample import scipy import sklearn.model_selection as skl from sklearn.utils import class_weight import tensorflow as tf from skimage import io from keras.preprocessing.image import ImageDataGenerator from keras.layers import GlobalAveragePooling2D, MaxPooling2D, Reshape from keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization, AveragePooling2D from keras.layers import Conv2D, SeparableConv2D, MaxPool2D, LeakyReLU, Activation from keras.models import Sequential, Model from keras.applications.vgg16 import VGG16 from keras.applications.resnet_v2 import ResNet50V2 from keras.optimizers import Adam, RMSprop from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, TensorBoard, ReduceLROnPlateau from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score, plot_precision_recall_curve, f1_score, confusion_matrix, accuracy_score
code
34118365/cell_7
[ "image_output_1.png" ]
from glob import glob from itertools import chain from keras.preprocessing.image import ImageDataGenerator from random import sample import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import sklearn.model_selection as skl all_xray_df = pd.read_csv('/kaggle/input/data/Data_Entry_2017.csv') all_image_paths = {os.path.basename(x): x for x in glob(os.path.join('/kaggle/input/data', 'images*', '*', '*.png'))} all_xray_df['path'] = all_xray_df['Image Index'].map(all_image_paths.get) all_xray_df.sample(3) all_labels = np.unique(list(chain(*all_xray_df['Finding Labels'].map(lambda x: x.split('|')).tolist()))) all_labels = [x for x in all_labels if len(x) > 0] for c_label in all_labels: if len(c_label) > 1: all_xray_df[c_label] = all_xray_df['Finding Labels'].map(lambda finding: 1.0 if c_label in finding else 0) all_xray_df['pneumonia_class'] = all_xray_df['Pneumonia'] all_xray_df.sample(3) def create_splits(df, test_size, column_name): train_df, valid_df = skl.train_test_split(df, test_size=test_size, stratify=df[column_name]) p_inds = train_df[train_df[column_name] == 1].index.tolist() np_inds = train_df[train_df[column_name] == 0].index.tolist() np_sample = sample(np_inds, len(p_inds)) train_df = train_df.loc[p_inds + np_sample] p_inds = valid_df[valid_df[column_name] == 1].index.tolist() np_inds = valid_df[valid_df[column_name] == 0].index.tolist() np_sample = sample(np_inds, 4 * len(p_inds)) valid_df = valid_df.loc[p_inds + np_sample] return (train_df, valid_df) train_df, valid_df = create_splits(all_xray_df, 0.2, 'pneumonia_class') def my_image_augmentation(): my_idg = ImageDataGenerator(rescale=1.0 / 255.0, horizontal_flip=True, vertical_flip=False, height_shift_range=0.1, width_shift_range=0.1, rotation_range=20, shear_range=0.1, zoom_range=0.1) return my_idg def make_train_gen(train_df, img_size, batch_size): idg = my_image_augmentation() train_gen = idg.flow_from_dataframe(dataframe=train_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size) return train_gen def make_val_gen(valid_df, img_size, batch_size): val_idg = ImageDataGenerator(rescale=1.0 / 255.0) val_gen = val_idg.flow_from_dataframe(dataframe=valid_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size) return val_gen batch_size = 64 img_size = (224, 224) train_gen = make_train_gen(train_df, img_size, batch_size) val_gen = make_val_gen(valid_df, img_size, batch_size) t_x, t_y = next(train_gen) fig, m_axs = plt.subplots(4, 4, figsize=(16, 16)) for c_x, c_y, c_ax in zip(t_x, t_y, m_axs.flatten()): c_ax.imshow(c_x[:, :, 0], cmap='bone') if c_y == 1: c_ax.set_title('Pneumonia') else: c_ax.set_title('No Pneumonia') c_ax.axis('off')
code
34118365/cell_18
[ "text_plain_output_1.png" ]
from glob import glob from itertools import chain from keras.applications.resnet_v2 import ResNet50V2 from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, TensorBoard, ReduceLROnPlateau from keras.layers import Conv2D, SeparableConv2D, MaxPool2D, LeakyReLU, Activation from keras.layers import GlobalAveragePooling2D, MaxPooling2D, Reshape from keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization, AveragePooling2D from keras.models import Sequential, Model from keras.models import model_from_json from keras.optimizers import Adam, RMSprop from keras.preprocessing.image import ImageDataGenerator from random import sample from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score, plot_precision_recall_curve, f1_score, confusion_matrix, accuracy_score import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import sklearn.model_selection as skl all_xray_df = pd.read_csv('/kaggle/input/data/Data_Entry_2017.csv') all_image_paths = {os.path.basename(x): x for x in glob(os.path.join('/kaggle/input/data', 'images*', '*', '*.png'))} all_xray_df['path'] = all_xray_df['Image Index'].map(all_image_paths.get) all_xray_df.sample(3) all_labels = np.unique(list(chain(*all_xray_df['Finding Labels'].map(lambda x: x.split('|')).tolist()))) all_labels = [x for x in all_labels if len(x) > 0] for c_label in all_labels: if len(c_label) > 1: all_xray_df[c_label] = all_xray_df['Finding Labels'].map(lambda finding: 1.0 if c_label in finding else 0) all_xray_df['pneumonia_class'] = all_xray_df['Pneumonia'] all_xray_df.sample(3) def create_splits(df, test_size, column_name): train_df, valid_df = skl.train_test_split(df, test_size=test_size, stratify=df[column_name]) p_inds = train_df[train_df[column_name] == 1].index.tolist() np_inds = train_df[train_df[column_name] == 0].index.tolist() np_sample = sample(np_inds, len(p_inds)) train_df = train_df.loc[p_inds + np_sample] p_inds = valid_df[valid_df[column_name] == 1].index.tolist() np_inds = valid_df[valid_df[column_name] == 0].index.tolist() np_sample = sample(np_inds, 4 * len(p_inds)) valid_df = valid_df.loc[p_inds + np_sample] return (train_df, valid_df) train_df, valid_df = create_splits(all_xray_df, 0.2, 'pneumonia_class') def my_image_augmentation(): my_idg = ImageDataGenerator(rescale=1.0 / 255.0, horizontal_flip=True, vertical_flip=False, height_shift_range=0.1, width_shift_range=0.1, rotation_range=20, shear_range=0.1, zoom_range=0.1) return my_idg def make_train_gen(train_df, img_size, batch_size): idg = my_image_augmentation() train_gen = idg.flow_from_dataframe(dataframe=train_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size) return train_gen def make_val_gen(valid_df, img_size, batch_size): val_idg = ImageDataGenerator(rescale=1.0 / 255.0) val_gen = val_idg.flow_from_dataframe(dataframe=valid_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size) return val_gen batch_size = 64 img_size = (224, 224) train_gen = make_train_gen(train_df, img_size, batch_size) val_gen = make_val_gen(valid_df, img_size, batch_size) ## May want to look at some examples of our augmented training data. 
## This is helpful for understanding the extent to which data is being manipulated prior to training, ## and can be compared with how the raw data look prior to augmentation t_x, t_y = next(train_gen) fig, m_axs = plt.subplots(4, 4, figsize = (16, 16)) for (c_x, c_y, c_ax) in zip(t_x, t_y, m_axs.flatten()): c_ax.imshow(c_x[:,:,0], cmap = 'bone') if c_y == 1: c_ax.set_title('Pneumonia') else: c_ax.set_title('No Pneumonia') c_ax.axis('off') def load_pretrained_model(): """ model = VGG16(include_top=True, weights='imagenet') transfer_layer = model.get_layer('block5_pool') vgg_model = Model(inputs = model.input, outputs = transfer_layer.output) for layer in vgg_model.layers[0:17]: layer.trainable = False """ model = ResNet50V2(include_top=False, weights='imagenet') resnet_model = Model(inputs=model.input, outputs=model.output, name='Resnet') return resnet_model def build_my_model(): """ # my_model = Sequential() # ....add your pre-trained model, and then whatever additional layers you think you might # want for fine-tuning (Flatteen, Dense, Dropout, etc.) # if you want to compile your model within this function, consider which layers of your pre-trained model, # you want to freeze before you compile # also make sure you set your optimizer, loss function, and metrics to monitor # Todo my_model = Sequential() vgg_model = load_pretrained_model() # Add the convolutional part of the VGG16 model from above. my_model.add(vgg_model) # Flatten the output of the VGG16 model because it is from a # convolutional layer. my_model.add(Flatten()) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. my_model.add(Dense(1024, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. my_model.add(Dense(512, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. my_model.add(Dense(256, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. 
my_model.add(Dense(1, activation='sigmoid')) """ resnet_model = load_pretrained_model() my_model = Sequential([resnet_model, BatchNormalization(), Conv2D(1024, 1, activation='relu'), Dropout(0.5), BatchNormalization(), Conv2D(256, 1, activation='relu'), Dropout(0.5), AveragePooling2D((7, 7)), BatchNormalization(), Conv2D(1, 1, activation='sigmoid'), Reshape((-1,))]) return my_model my_model = build_my_model() my_model.summary() weight_path = '{}_my_model.best.hdf5'.format('xray_class') checkpoint = ModelCheckpoint(weight_path, monitor='val_binary_accuracy', verbose=1, save_best_only=True, mode='auto', save_weights_only=True) early = EarlyStopping(monitor='val_binary_accuracy', mode='auto', patience=5) def scheduler(epoch, lr): if epoch < 1: return lr else: return lr * np.exp(-0.1) lr_scheduler = LearningRateScheduler(scheduler) callbacks_list = [checkpoint, early, lr_scheduler] from keras.models import model_from_json model_path = '/kaggle/input/model-and-weights/my_model2.json' weight_path = '/kaggle/input/model-and-weights/xray_class_my_model2.best.hdf5' json_file = open(model_path, 'r') loaded_model_json = json_file.read() json_file.close() my_model = model_from_json(loaded_model_json) my_model.load_weights(weight_path) optimizer = RMSprop(learning_rate=0.0001) loss = 'binary_crossentropy' metrics = ['binary_accuracy'] my_model.compile(optimizer=optimizer, loss=loss, metrics=metrics) history = my_model.fit_generator(train_gen, validation_data=(valX, valY), epochs=10, callbacks=callbacks_list) weight_path = 'xray_class_my_model.best.hdf5' my_model.load_weights(weight_path) pred_Y = my_model.predict(valX, batch_size=100, verbose=True) def plot_auc(t_y, p_y): fpr, tpr, threshold = roc_curve(valY, pred_Y) roc_auc = auc(fpr, tpr) plt.xlim([0, 1]) plt.ylim([0, 1]) return def plot_prec_rec(val_Y, pred_Y): prec, rec, threshold = precision_recall_curve(val_Y, pred_Y) plt.xlim([0, 1]) plt.ylim([0, 1]) def plot_history(history): n = len(history.history['loss']) return def optimize_accuracy(t_y, p_y): best_threshold = None best_accuracy = 0.0 for t in np.arange(0.5, 1, 0.1): pred = (p_y.reshape(-1) > t) * 1.0 accuracy = np.mean(pred == t_y) if accuracy > best_accuracy: best_threshold = t best_accuracy = accuracy return (best_threshold, best_accuracy) best_threshold, best_accuracy = optimize_accuracy(valY, pred_Y) pred_Y_class = pred_Y > best_threshold f1_score(valY, pred_Y_class) YOUR_THRESHOLD = best_threshold fig, m_axs = plt.subplots(8, 8, figsize=(16, 16)) i = 0 for c_x, c_y, c_ax in zip(valX[0:64], valY[0:64], m_axs.flatten()): c_ax.imshow(c_x[:, :, 0], cmap='bone') if c_y == 1: if pred_Y[i] > YOUR_THRESHOLD: c_ax.set_title('1, 1') else: c_ax.set_title('1, 0') elif pred_Y[i] > YOUR_THRESHOLD: c_ax.set_title('0, 1') else: c_ax.set_title('0, 0') c_ax.axis('off') i = i + 1
code
34118365/cell_16
[ "image_output_1.png" ]
from glob import glob from itertools import chain from keras.applications.resnet_v2 import ResNet50V2 from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, TensorBoard, ReduceLROnPlateau from keras.layers import Conv2D, SeparableConv2D, MaxPool2D, LeakyReLU, Activation from keras.layers import GlobalAveragePooling2D, MaxPooling2D, Reshape from keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization, AveragePooling2D from keras.models import Sequential, Model from keras.models import model_from_json from keras.optimizers import Adam, RMSprop from keras.preprocessing.image import ImageDataGenerator from random import sample from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score, plot_precision_recall_curve, f1_score, confusion_matrix, accuracy_score import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import sklearn.model_selection as skl all_xray_df = pd.read_csv('/kaggle/input/data/Data_Entry_2017.csv') all_image_paths = {os.path.basename(x): x for x in glob(os.path.join('/kaggle/input/data', 'images*', '*', '*.png'))} all_xray_df['path'] = all_xray_df['Image Index'].map(all_image_paths.get) all_xray_df.sample(3) all_labels = np.unique(list(chain(*all_xray_df['Finding Labels'].map(lambda x: x.split('|')).tolist()))) all_labels = [x for x in all_labels if len(x) > 0] for c_label in all_labels: if len(c_label) > 1: all_xray_df[c_label] = all_xray_df['Finding Labels'].map(lambda finding: 1.0 if c_label in finding else 0) all_xray_df['pneumonia_class'] = all_xray_df['Pneumonia'] all_xray_df.sample(3) def create_splits(df, test_size, column_name): train_df, valid_df = skl.train_test_split(df, test_size=test_size, stratify=df[column_name]) p_inds = train_df[train_df[column_name] == 1].index.tolist() np_inds = train_df[train_df[column_name] == 0].index.tolist() np_sample = sample(np_inds, len(p_inds)) train_df = train_df.loc[p_inds + np_sample] p_inds = valid_df[valid_df[column_name] == 1].index.tolist() np_inds = valid_df[valid_df[column_name] == 0].index.tolist() np_sample = sample(np_inds, 4 * len(p_inds)) valid_df = valid_df.loc[p_inds + np_sample] return (train_df, valid_df) train_df, valid_df = create_splits(all_xray_df, 0.2, 'pneumonia_class') def my_image_augmentation(): my_idg = ImageDataGenerator(rescale=1.0 / 255.0, horizontal_flip=True, vertical_flip=False, height_shift_range=0.1, width_shift_range=0.1, rotation_range=20, shear_range=0.1, zoom_range=0.1) return my_idg def make_train_gen(train_df, img_size, batch_size): idg = my_image_augmentation() train_gen = idg.flow_from_dataframe(dataframe=train_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size) return train_gen def make_val_gen(valid_df, img_size, batch_size): val_idg = ImageDataGenerator(rescale=1.0 / 255.0) val_gen = val_idg.flow_from_dataframe(dataframe=valid_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size) return val_gen batch_size = 64 img_size = (224, 224) train_gen = make_train_gen(train_df, img_size, batch_size) val_gen = make_val_gen(valid_df, img_size, batch_size) ## May want to look at some examples of our augmented training data. 
## This is helpful for understanding the extent to which data is being manipulated prior to training, ## and can be compared with how the raw data look prior to augmentation t_x, t_y = next(train_gen) fig, m_axs = plt.subplots(4, 4, figsize = (16, 16)) for (c_x, c_y, c_ax) in zip(t_x, t_y, m_axs.flatten()): c_ax.imshow(c_x[:,:,0], cmap = 'bone') if c_y == 1: c_ax.set_title('Pneumonia') else: c_ax.set_title('No Pneumonia') c_ax.axis('off') def load_pretrained_model(): """ model = VGG16(include_top=True, weights='imagenet') transfer_layer = model.get_layer('block5_pool') vgg_model = Model(inputs = model.input, outputs = transfer_layer.output) for layer in vgg_model.layers[0:17]: layer.trainable = False """ model = ResNet50V2(include_top=False, weights='imagenet') resnet_model = Model(inputs=model.input, outputs=model.output, name='Resnet') return resnet_model def build_my_model(): """ # my_model = Sequential() # ....add your pre-trained model, and then whatever additional layers you think you might # want for fine-tuning (Flatteen, Dense, Dropout, etc.) # if you want to compile your model within this function, consider which layers of your pre-trained model, # you want to freeze before you compile # also make sure you set your optimizer, loss function, and metrics to monitor # Todo my_model = Sequential() vgg_model = load_pretrained_model() # Add the convolutional part of the VGG16 model from above. my_model.add(vgg_model) # Flatten the output of the VGG16 model because it is from a # convolutional layer. my_model.add(Flatten()) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. my_model.add(Dense(1024, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. my_model.add(Dense(512, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. my_model.add(Dense(256, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. my_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. 
my_model.add(Dense(1, activation='sigmoid')) """ resnet_model = load_pretrained_model() my_model = Sequential([resnet_model, BatchNormalization(), Conv2D(1024, 1, activation='relu'), Dropout(0.5), BatchNormalization(), Conv2D(256, 1, activation='relu'), Dropout(0.5), AveragePooling2D((7, 7)), BatchNormalization(), Conv2D(1, 1, activation='sigmoid'), Reshape((-1,))]) return my_model my_model = build_my_model() my_model.summary() weight_path = '{}_my_model.best.hdf5'.format('xray_class') checkpoint = ModelCheckpoint(weight_path, monitor='val_binary_accuracy', verbose=1, save_best_only=True, mode='auto', save_weights_only=True) early = EarlyStopping(monitor='val_binary_accuracy', mode='auto', patience=5) def scheduler(epoch, lr): if epoch < 1: return lr else: return lr * np.exp(-0.1) lr_scheduler = LearningRateScheduler(scheduler) callbacks_list = [checkpoint, early, lr_scheduler] from keras.models import model_from_json model_path = '/kaggle/input/model-and-weights/my_model2.json' weight_path = '/kaggle/input/model-and-weights/xray_class_my_model2.best.hdf5' json_file = open(model_path, 'r') loaded_model_json = json_file.read() json_file.close() my_model = model_from_json(loaded_model_json) my_model.load_weights(weight_path) optimizer = RMSprop(learning_rate=0.0001) loss = 'binary_crossentropy' metrics = ['binary_accuracy'] my_model.compile(optimizer=optimizer, loss=loss, metrics=metrics) history = my_model.fit_generator(train_gen, validation_data=(valX, valY), epochs=10, callbacks=callbacks_list) weight_path = 'xray_class_my_model.best.hdf5' my_model.load_weights(weight_path) pred_Y = my_model.predict(valX, batch_size=100, verbose=True) def plot_auc(t_y, p_y): fpr, tpr, threshold = roc_curve(valY, pred_Y) roc_auc = auc(fpr, tpr) plt.xlim([0, 1]) plt.ylim([0, 1]) return def plot_prec_rec(val_Y, pred_Y): prec, rec, threshold = precision_recall_curve(val_Y, pred_Y) plt.xlim([0, 1]) plt.ylim([0, 1]) def plot_history(history): n = len(history.history['loss']) return plot_auc(valY, pred_Y) plot_prec_rec(valY, pred_Y) plot_history(history)
code