Dataset schema (one row per notebook cell):
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string class (1 value)
18111545/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)

pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paid after due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
pd.crosstab(cr['terms'], cr['loan_status'], rownames=['terms'], colnames=['loan status'])
pd.crosstab(cr['Principal'], cr['loan_status'], rownames=['principal'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['loan_status'], rownames=['education'], colnames=['loan status'])
code
18111545/cell_28
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)

pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paid after due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
pd.crosstab(cr['terms'], cr['loan_status'], rownames=['terms'], colnames=['loan status'])
pd.crosstab(cr['Principal'], cr['loan_status'], rownames=['principal'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['loan_status'], rownames=['education'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['Gender'], rownames=['education'], colnames=['gender'])
pd.crosstab(cr['Gender'], cr['age'], rownames=['gender'], colnames=['age'])

cr['loan_status'].replace('PAIDOFF', 0, inplace=True)
cr['loan_status'].replace('COLLECTION_PAIDOFF', 1, inplace=True)
cr['loan_status'].replace('COLLECTION', 2, inplace=True)
cr.sample(20)

education_dummies = pd.get_dummies(cr.education, prefix='education')
education_dummies.sample(4)
education_dummies.drop(education_dummies.columns[0], axis=1, inplace=True)
cr = pd.concat([cr, education_dummies], axis=1)
cr.drop(cr.columns[9], axis=1, inplace=True)
cr.sample(15)

gender_dummies = pd.get_dummies(cr['Gender'], prefix='gender')
gender_dummies.drop(gender_dummies.columns[0], axis=1, inplace=True)
gender_dummies.head()
code
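The dummy-variable cells above call `pd.get_dummies` and then drop the first column by hand; `drop_first=True` does the same in one step. A minimal sketch on hypothetical data (the column and category values here are illustrative, not taken from the dataset):

import pandas as pd

df = pd.DataFrame({'education': ['college', 'High School', 'Bechalor', 'college']})

# drop_first=True removes one reference level, like the manual .drop(columns[0]) above
dummies = pd.get_dummies(df['education'], prefix='education', drop_first=True)
df = pd.concat([df.drop(columns='education'), dummies], axis=1)
print(df)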
18111545/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)
code
18111545/cell_15
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)

pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paid after due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
code
18111545/cell_16
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)

pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paid after due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
pd.crosstab(cr['terms'], cr['loan_status'], rownames=['terms'], colnames=['loan status'])
code
18111545/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.head()
code
18111545/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)

pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paid after due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
pd.crosstab(cr['terms'], cr['loan_status'], rownames=['terms'], colnames=['loan status'])
pd.crosstab(cr['Principal'], cr['loan_status'], rownames=['principal'], colnames=['loan status'])
code
18111545/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)

pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paid after due date'])
code
18111545/cell_22
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)

cr['loan_status'].replace('PAIDOFF', 0, inplace=True)
cr['loan_status'].replace('COLLECTION_PAIDOFF', 1, inplace=True)
cr['loan_status'].replace('COLLECTION', 2, inplace=True)
cr.sample(20)
code
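The three chained `replace` calls above work, but a single `map` keeps the label encoding in one place and surfaces unexpected labels as NaN. A small sketch, assuming the same three `loan_status` values:

import pandas as pd

status_codes = {'PAIDOFF': 0, 'COLLECTION_PAIDOFF': 1, 'COLLECTION': 2}
s = pd.Series(['PAIDOFF', 'COLLECTION', 'COLLECTION_PAIDOFF'])
print(s.map(status_codes))  # one pass instead of three in-place replaces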
18111545/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)

plt.figure(figsize=(10, 7))
sns.boxplot(data=cr, x='loan_status', y='age', hue='Gender', linewidth=2,
            order=['PAIDOFF', 'COLLECTION_PAIDOFF', 'COLLECTION'])
plt.show()
code
18111545/cell_27
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)

pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paid after due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
pd.crosstab(cr['terms'], cr['loan_status'], rownames=['terms'], colnames=['loan status'])
pd.crosstab(cr['Principal'], cr['loan_status'], rownames=['principal'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['loan_status'], rownames=['education'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['Gender'], rownames=['education'], colnames=['gender'])
pd.crosstab(cr['Gender'], cr['age'], rownames=['gender'], colnames=['age'])

cr['loan_status'].replace('PAIDOFF', 0, inplace=True)
cr['loan_status'].replace('COLLECTION_PAIDOFF', 1, inplace=True)
cr['loan_status'].replace('COLLECTION', 2, inplace=True)
cr.sample(20)

education_dummies = pd.get_dummies(cr.education, prefix='education')
education_dummies.sample(4)
education_dummies.drop(education_dummies.columns[0], axis=1, inplace=True)
cr = pd.concat([cr, education_dummies], axis=1)
cr.drop(cr.columns[9], axis=1, inplace=True)
cr.sample(15)
code
18111545/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)

cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)

plt.figure(figsize=(60, 20))
sns.factorplot(data=cr, x='loan_status', y='age', hue='education', col='Gender', kind='box',
               order=['PAIDOFF', 'COLLECTION_PAIDOFF', 'COLLECTION'], aspect=1.5)
plt.show()
code
128013563/cell_13
[ "image_output_1.png" ]
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np

blob = plt.imread('../input/opencv-samples-images/blobs.jpg')
blob_gray = cv.cvtColor(blob, cv.COLOR_RGB2GRAY)

detector = cv.SimpleBlobDetector_create()
keypoints = detector.detect(blob_gray)
keypoints

im_with_keypoints = cv.drawKeypoints(
    blob_gray, keypoints, np.array([]), (255, 0, 0),
    cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
)

coun = plt.imread('../input/opencv-samples-images/data/blox.jpg')
_, coun_thresh = cv.threshold(cv.cvtColor(coun, cv.COLOR_RGB2GRAY), 180, 255, cv.THRESH_BINARY)
plt.imshow(coun_thresh)
plt.show()
code
128013563/cell_9
[ "image_output_1.png" ]
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np

blob = plt.imread('../input/opencv-samples-images/blobs.jpg')
blob_gray = cv.cvtColor(blob, cv.COLOR_RGB2GRAY)

detector = cv.SimpleBlobDetector_create()
keypoints = detector.detect(blob_gray)
keypoints

im_with_keypoints = cv.drawKeypoints(
    blob_gray, keypoints, np.array([]), (255, 0, 0),
    cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
)
plt.imshow(im_with_keypoints)
plt.show()
code
128013563/cell_7
[ "image_output_1.png" ]
import cv2 as cv
import matplotlib.pyplot as plt

blob = plt.imread('../input/opencv-samples-images/blobs.jpg')
blob_gray = cv.cvtColor(blob, cv.COLOR_RGB2GRAY)

detector = cv.SimpleBlobDetector_create()
keypoints = detector.detect(blob_gray)
keypoints
code
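`SimpleBlobDetector_create()` above runs with default parameters, which can miss faint or small blobs. The detector also accepts a params object; a hedged sketch with illustrative threshold values (the numbers are assumptions, not tuned for blobs.jpg):

import cv2 as cv

params = cv.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = 100          # illustrative: ignore blobs smaller than 100 px
params.filterByCircularity = False

detector = cv.SimpleBlobDetector_create(params)
# keypoints = detector.detect(blob_gray)  # as in the cell above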
128013563/cell_15
[ "image_output_1.png" ]
import cv2
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np

blob = plt.imread('../input/opencv-samples-images/blobs.jpg')
blob_gray = cv.cvtColor(blob, cv.COLOR_RGB2GRAY)

detector = cv.SimpleBlobDetector_create()
keypoints = detector.detect(blob_gray)
keypoints

im_with_keypoints = cv.drawKeypoints(
    blob_gray, keypoints, np.array([]), (255, 0, 0),
    cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
)

coun = plt.imread('../input/opencv-samples-images/data/blox.jpg')
_, coun_thresh = cv.threshold(cv.cvtColor(coun, cv.COLOR_RGB2GRAY), 180, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(image=coun_thresh, mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_NONE)
image_copy = coun.copy()

img = cv2.imread('/kaggle/input/new-data-image/iron image.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
plt.imshow(thresh, cmap='gray')
plt.show()
code
128013563/cell_16
[ "image_output_1.png" ]
import cv2
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np

blob = plt.imread('../input/opencv-samples-images/blobs.jpg')
blob_gray = cv.cvtColor(blob, cv.COLOR_RGB2GRAY)

detector = cv.SimpleBlobDetector_create()
keypoints = detector.detect(blob_gray)
keypoints

im_with_keypoints = cv.drawKeypoints(
    blob_gray, keypoints, np.array([]), (255, 0, 0),
    cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
)

coun = plt.imread('../input/opencv-samples-images/data/blox.jpg')
_, coun_thresh = cv.threshold(cv.cvtColor(coun, cv.COLOR_RGB2GRAY), 180, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(image=coun_thresh, mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_NONE)
image_copy = coun.copy()

img = cv2.imread('/kaggle/input/new-data-image/iron image.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
img_copy = img.copy()
cv2.drawContours(img_copy, contours, -1, (255, 0, 0), 10, cv2.LINE_AA)
plt.imshow(cv2.cvtColor(img_copy, cv2.COLOR_BGR2RGB))
plt.show()
code
128013563/cell_14
[ "text_plain_output_1.png" ]
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np

blob = plt.imread('../input/opencv-samples-images/blobs.jpg')
blob_gray = cv.cvtColor(blob, cv.COLOR_RGB2GRAY)

detector = cv.SimpleBlobDetector_create()
keypoints = detector.detect(blob_gray)
keypoints

im_with_keypoints = cv.drawKeypoints(
    blob_gray, keypoints, np.array([]), (255, 0, 0),
    cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
)

coun = plt.imread('../input/opencv-samples-images/data/blox.jpg')
_, coun_thresh = cv.threshold(cv.cvtColor(coun, cv.COLOR_RGB2GRAY), 180, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(image=coun_thresh, mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_NONE)
image_copy = coun.copy()
cv.drawContours(image=image_copy, contours=contours, contourIdx=-1, color=(255, 0, 0),
                thickness=2, lineType=cv.LINE_AA)
plt.imshow(image_copy)
plt.show()
code
128013563/cell_5
[ "image_output_1.png" ]
import cv2 as cv
import matplotlib.pyplot as plt

blob = plt.imread('../input/opencv-samples-images/blobs.jpg')
blob_gray = cv.cvtColor(blob, cv.COLOR_RGB2GRAY)
plt.imshow(blob_gray)
plt.show()
code
88098930/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
!pip install python-pptx
code
88098930/cell_17
[ "text_plain_output_1.png" ]
import pptx

prs = pptx.Presentation()
title_slide_layout = prs.slide_layouts[0]
slide = prs.slides.add_slide(title_slide_layout)
slide.shapes.title.text = "This is 'slide.shapes.title.text'."
slide.placeholders[1].text = "This is 'slide.placeholders[1].text'."
prs.save('title.pptx')

# was 'Presentation(...)', a NameError with this import style
prs = pptx.Presentation('title.pptx')
for slide in prs.slides:
    for shape in slide.shapes:
        print(shape.text)
code
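For reference, a slightly more defensive read-back than the cell above might skip shapes that carry no text frame (pictures, connectors, and so on); this sketch assumes the same `title.pptx` written earlier:

import pptx

prs = pptx.Presentation('title.pptx')
text = []
for slide in prs.slides:
    for shape in slide.shapes:
        if shape.has_text_frame:  # not every shape has text
            text.append(shape.text)
print(text)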
122258415/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split  # assumed: the split cell is not part of this extract
import matplotlib.pyplot as plt
import pandas as pd

df_2 = pd.read_csv('/kaggle/input/usa-housing/USA_Housing.csv')
X = df_2[['Avg. Area Income']]
y = df_2['Price']
df_2.head()

# X_train/X_test/y_train/y_test were referenced but never defined in the original cell; a standard split is assumed
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

model = LinearRegression()  # renamed from df_2, which shadowed the DataFrame
model.fit(X_train, y_train)
predictions = model.predict(X_test)

plt.scatter(X_train, y_train, color='g')
plt.plot(X_train, model.predict(X_train), color='k')
code
122258415/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

df_2 = pd.read_csv('/kaggle/input/usa-housing/USA_Housing.csv')
X = df_2[['Avg. Area Income']]
y = df_2['Price']
df_2.head()  # the original 'df_2.head' was missing the call parentheses
code
122258415/cell_5
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split  # assumed: the split cell is not part of this extract
import pandas as pd

df_2 = pd.read_csv('/kaggle/input/usa-housing/USA_Housing.csv')
X = df_2[['Avg. Area Income']]
y = df_2['Price']
df_2.head()

# X_train/X_test/y_train/y_test were referenced but never defined in the original cell; a standard split is assumed
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

model = LinearRegression()  # renamed from df_2, which shadowed the DataFrame
model.fit(X_train, y_train)
predictions = model.predict(X_test)

print('coefficient of determination:', model.score(X_train, y_train))
print('intercept:', model.intercept_)
print('slope:', model.coef_)
code
34136040/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/aus-real-estate-sales-march-2019-to-april-2020/aus-property-sales-sep2018-april2020.csv')
data.dtypes

data_city_price = data[['city_name', 'price', 'property_type']]
per = data_city_price.isnull().sum() / data.shape[0] * 100
per.plot.barh()

data_cp = data_city_price.dropna()
data_cp.shape

fig_dims = (14, 10)
fig, ax = plt.subplots(figsize=fig_dims)
sns.barplot(x='city_name', y='price', hue='property_type', ax=ax, data=data_cp)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
ax.set_xlabel('City')
ax.set_ylabel('Prices')
plt.show()
code
34136040/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/aus-real-estate-sales-march-2019-to-april-2020/aus-property-sales-sep2018-april2020.csv')
data.dtypes

data_city_price = data[['city_name', 'price', 'property_type']]
per = data_city_price.isnull().sum() / data.shape[0] * 100
per.plot.barh()
plt.title('Missing Values')
plt.xlabel('Percentage')
plt.show()
code
34136040/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/aus-real-estate-sales-march-2019-to-april-2020/aus-property-sales-sep2018-april2020.csv')
data.dtypes
code
34136040/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34136040/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/aus-real-estate-sales-march-2019-to-april-2020/aus-property-sales-sep2018-april2020.csv')
data.dtypes

data_city_price = data[['city_name', 'price', 'property_type']]
per = data_city_price.isnull().sum() / data.shape[0] * 100
per.plot.barh()

data_cp = data_city_price.dropna()
data_cp.shape

fig_dims = (14, 10)
fig, ax = plt.subplots(figsize=fig_dims)
sns.barplot(x='city_name', y='price', hue='property_type', ax=ax, data=data_cp)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
ax.set_xlabel('City')
ax.set_ylabel('Prices')
plt.show()

sydney = data[data['city_name'] == 'Sydney'][['date_sold', 'price', 'property_type']]
sydney.shape
sydney = sydney.dropna()
sydney.shape
sydney.sort_values('date_sold')

Writer = animation.writers['ffmpeg']
writer = Writer(fps=20, metadata=dict(artist='akshita'), bitrate=1800)

fig = plt.figure(figsize=(10, 6))
plt.xlim(2017, 2021)
plt.ylim(np.min(sydney['price']), np.max(sydney['price']))
plt.xlabel('Year', fontsize=20)
plt.ylabel('Prices', fontsize=20)
plt.title('Prices per Year', fontsize=20)

def animate(i):
    d = sydney.iloc[:int(i + 1)]
    # plot the growing slice itself; the original passed the full frame, drawing every frame identically
    p = sns.lineplot(x=d.index, y=d['price'], data=d)
    p.tick_params(labelsize=17)
    plt.setp(p.lines, linewidth=7)

ani = animation.FuncAnimation(fig, animate, frames=60, repeat=True)
plt.show()
code
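The ffmpeg `writer` built in the cell above is created but never used; presumably it was meant for saving the animation, since `plt.show()` alone does not render a `FuncAnimation` in a static notebook. A minimal sketch, reusing `ani` and `writer` from that cell and assuming ffmpeg is installed ('prices.mp4' is an illustrative file name):

# persist the animation to disk with the writer configured above
ani.save('prices.mp4', writer=writer)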
34136040/cell_15
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/aus-real-estate-sales-march-2019-to-april-2020/aus-property-sales-sep2018-april2020.csv')
data.dtypes

data_city_price = data[['city_name', 'price', 'property_type']]
per = data_city_price.isnull().sum() / data.shape[0] * 100
per.plot.barh()

sydney = data[data['city_name'] == 'Sydney'][['date_sold', 'price', 'property_type']]
sydney.shape
code
34136040/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/aus-real-estate-sales-march-2019-to-april-2020/aus-property-sales-sep2018-april2020.csv')
data.dtypes

data_city_price = data[['city_name', 'price', 'property_type']]
per = data_city_price.isnull().sum() / data.shape[0] * 100
per.plot.barh()

sydney = data[data['city_name'] == 'Sydney'][['date_sold', 'price', 'property_type']]
sydney.shape
sydney = sydney.dropna()
sydney.shape
code
34136040/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/aus-real-estate-sales-march-2019-to-april-2020/aus-property-sales-sep2018-april2020.csv')
data.head()
code
34136040/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/aus-real-estate-sales-march-2019-to-april-2020/aus-property-sales-sep2018-april2020.csv')
data.dtypes

data_city_price = data[['city_name', 'price', 'property_type']]
per = data_city_price.isnull().sum() / data.shape[0] * 100
per.plot.barh()

sydney = data[data['city_name'] == 'Sydney'][['date_sold', 'price', 'property_type']]
sydney.shape
sydney = sydney.dropna()
sydney.shape
sydney.sort_values('date_sold')
code
34136040/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/aus-real-estate-sales-march-2019-to-april-2020/aus-property-sales-sep2018-april2020.csv')
data.dtypes

data_city_price = data[['city_name', 'price', 'property_type']]
per = data_city_price.isnull().sum() / data.shape[0] * 100
per.plot.barh()

data_cp = data_city_price.dropna()
data_cp.shape
code
90117868/cell_42
[ "text_plain_output_1.png" ]
from imblearn.under_sampling import RandomUnderSampler
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

# x_train, y_train, x_test, y_test are assumed to come from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

shape = 50 * 50 * 3
x_train = x_train.reshape(x_train.shape[0], shape)
x_test = x_test.reshape(x_test.shape[0], shape)

underampler = RandomUnderSampler(sampling_strategy='auto')
x_train_fit, y_train_fit = underampler.fit_resample(x_train, y_train)
X_test_fit, Y_test_fit = underampler.fit_resample(x_test, y_test)

x_train_ = x_train_fit.reshape(x_train_fit.shape[0], 50, 50, 3)
x_test_ = X_test_fit.reshape(X_test_fit.shape[0], 50, 50, 3)

batch_size = 32
image_gen = ImageDataGenerator(shear_range=0.2, zoom_range=0.3, height_shift_range=0.2,
                               width_shift_range=0.2, fill_mode='nearest',
                               horizontal_flip=True, rotation_range=20)
test_data_gen = ImageDataGenerator(shear_range=0.2, zoom_range=0.3, height_shift_range=0.2,
                                   width_shift_range=0.2, fill_mode='nearest',
                                   horizontal_flip=True, rotation_range=20)
train = image_gen.flow(x_train_, y_train_fit, shuffle=True, batch_size=batch_size)
test = test_data_gen.flow(x_test_, Y_test_fit, shuffle=True, batch_size=batch_size)

for train_img, train_label in train:
    print('image shape ', train_img.shape)
    print('label shape ', train_label.shape)
    break
code
90117868/cell_13
[ "text_plain_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import os

labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

# assumed from the loading cell: the original referenced these names without defining them here
images, all_labels, pathes = load_images_from_directory('../input/breast-histopathology-images/')

images = images[0:150000]
all_labels = all_labels[0:150000]
pathes = pathes[0:150000]

print('Total Images :', len(images))
print('Total Labels :', len(all_labels))
print('Total Pathes', len(pathes))
code
90117868/cell_25
[ "text_plain_output_1.png" ]
import numpy as np

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

# x_train, y_train, x_test, y_test are assumed to come from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

print('Train Images shape is : ', x_train.shape)
print('Train Labels shape is : ', y_train.shape)
code
90117868/cell_33
[ "text_plain_output_1.png" ]
import numpy as np

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

# x_train, y_train, x_test, y_test are assumed to come from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

shape = 50 * 50 * 3
x_train = x_train.reshape(x_train.shape[0], shape)
x_test = x_test.reshape(x_test.shape[0], shape)

print('shape of new train data is :', x_train.shape)
print('shape of new Test data is :', x_test.shape)
code
90117868/cell_44
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from keras.models import Sequential, Model
from tensorflow.keras import Sequential
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.layers import Flatten, Dense, BatchNormalization, Dropout, Input

cnn_model = Sequential()
cnn_model.add(layers.Conv2D(64, (3, 3), padding='Same', activation='relu', input_shape=(50, 50, 3)))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.add(layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.summary()
code
90117868/cell_26
[ "text_plain_output_1.png" ]
import numpy as np

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

# x_train, y_train, x_test, y_test are assumed to come from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

print('Test Images shape is : ', x_test.shape)
print('Test Labels shape is : ', y_test.shape)
code
90117868/cell_11
[ "text_plain_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import os

image_path = '../input/breast-histopathology-images/'
labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

images, all_labels, pathes = load_images_from_directory(image_path)
code
90117868/cell_19
[ "image_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import matplotlib.pyplot as plt
import os
import seaborn as sns

labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

# assumed from the loading cell: the original referenced these names without defining them here
images, all_labels, pathes = load_images_from_directory('../input/breast-histopathology-images/')

images = images[0:150000]
all_labels = all_labels[0:150000]
pathes = pathes[0:150000]

for i in range(30):
    plt.xticks([])
    plt.yticks([])

plt.title(' Labels Visualization')
sns.countplot(x=all_labels, palette='flare')
plt.show()
code
90117868/cell_45
[ "text_plain_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from keras.models import Sequential, Model
from tensorflow.keras import Sequential
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.layers import Flatten, Dense, BatchNormalization, Dropout, Input

cnn_model = Sequential()
cnn_model.add(layers.Conv2D(64, (3, 3), padding='Same', activation='relu', input_shape=(50, 50, 3)))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.add(layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.summary()

cnn_model.add(layers.Flatten())
cnn_model.add(layers.Dense(1024, activation='relu'))
cnn_model.add(BatchNormalization())
cnn_model.add(Dropout(0.3))
cnn_model.add(layers.Dense(1024, activation='relu'))
cnn_model.add(BatchNormalization())
cnn_model.add(Dropout(0.3))
cnn_model.add(layers.Dense(1, activation='sigmoid'))
cnn_model.summary()
code
90117868/cell_49
[ "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from imblearn.under_sampling import RandomUnderSampler
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from keras.models import Sequential, Model
from tensorflow.keras import Sequential
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.layers import Flatten, Dense, BatchNormalization, Dropout, Input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

# x_train, y_train, x_test, y_test are assumed to come from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

shape = 50 * 50 * 3
x_train = x_train.reshape(x_train.shape[0], shape)
x_test = x_test.reshape(x_test.shape[0], shape)

underampler = RandomUnderSampler(sampling_strategy='auto')
x_train_fit, y_train_fit = underampler.fit_resample(x_train, y_train)
X_test_fit, Y_test_fit = underampler.fit_resample(x_test, y_test)

x_train_ = x_train_fit.reshape(x_train_fit.shape[0], 50, 50, 3)
x_test_ = X_test_fit.reshape(X_test_fit.shape[0], 50, 50, 3)

batch_size = 32
image_gen = ImageDataGenerator(shear_range=0.2, zoom_range=0.3, height_shift_range=0.2,
                               width_shift_range=0.2, fill_mode='nearest',
                               horizontal_flip=True, rotation_range=20)
test_data_gen = ImageDataGenerator(shear_range=0.2, zoom_range=0.3, height_shift_range=0.2,
                                   width_shift_range=0.2, fill_mode='nearest',
                                   horizontal_flip=True, rotation_range=20)
train = image_gen.flow(x_train_, y_train_fit, shuffle=True, batch_size=batch_size)
test = test_data_gen.flow(x_test_, Y_test_fit, shuffle=True, batch_size=batch_size)

cnn_model = Sequential()
cnn_model.add(layers.Conv2D(64, (3, 3), padding='Same', activation='relu', input_shape=(50, 50, 3)))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.add(layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
cnn_model.add(layers.MaxPooling2D(2, 2))
cnn_model.add(BatchNormalization())
cnn_model.summary()

cnn_model.add(layers.Flatten())
cnn_model.add(layers.Dense(1024, activation='relu'))
cnn_model.add(BatchNormalization())
cnn_model.add(Dropout(0.3))
cnn_model.add(layers.Dense(1024, activation='relu'))
cnn_model.add(BatchNormalization())
cnn_model.add(Dropout(0.3))
cnn_model.add(layers.Dense(1, activation='sigmoid'))
cnn_model.summary()

cnn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

early = EarlyStopping(monitor='loss', mode='min', min_delta=0, patience=10, verbose=1, restore_best_weights=True)
learning_rate_reduction = ReduceLROnPlateau(monitor='loss', patience=2, verbose=1, factor=0.3, min_lr=1e-06)
callbacks_list = [early, learning_rate_reduction]

n_training_samples = len(train)
n_validation_samples = len(test)
history = cnn_model.fit(train, epochs=150, validation_data=test,
                        validation_steps=n_validation_samples // batch_size,
                        shuffle=True, callbacks=callbacks_list)
code
90117868/cell_18
[ "image_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import matplotlib.pyplot as plt
import os

labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

# assumed from the loading cell: the original referenced these names without defining them here
images, all_labels, pathes = load_images_from_directory('../input/breast-histopathology-images/')

images = images[0:150000]
all_labels = all_labels[0:150000]
pathes = pathes[0:150000]

plt.figure(figsize=(15, 10))
plt.suptitle(' Images', fontsize=20)
for i in range(30):
    plt.subplot(5, 6, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.xlabel(get_Label(all_labels[i]))
    plt.imshow(images[i], cmap=plt.cm.binary)
code
90117868/cell_28
[ "image_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

# assumed from the loading cell: the original referenced these names without defining them here
images, all_labels, pathes = load_images_from_directory('../input/breast-histopathology-images/')

images = images[0:150000]
all_labels = all_labels[0:150000]
pathes = pathes[0:150000]

for i in range(30):
    plt.xticks([])
    plt.yticks([])

# x_train, y_train, x_test, y_test are assumed from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

plt.title('Train Labels Visualization')
sns.countplot(x=y_train, palette='flare')
plt.show()
code
90117868/cell_15
[ "text_plain_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import os
import pandas as pd

labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

# assumed from the loading cell: the original referenced these names without defining them here
images, all_labels, pathes = load_images_from_directory('../input/breast-histopathology-images/')

images = images[0:150000]
all_labels = all_labels[0:150000]
pathes = pathes[0:150000]

df = pd.DataFrame({'image_path': pathes, 'label': all_labels})
df.shape
code
90117868/cell_16
[ "text_plain_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import os
import pandas as pd

labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

# assumed from the loading cell: the original referenced these names without defining them here
images, all_labels, pathes = load_images_from_directory('../input/breast-histopathology-images/')

images = images[0:150000]
all_labels = all_labels[0:150000]
pathes = pathes[0:150000]

df = pd.DataFrame({'image_path': pathes, 'label': all_labels})
df.shape
df['label'].value_counts()
code
90117868/cell_38
[ "text_plain_output_1.png" ]
from imblearn.under_sampling import RandomUnderSampler
import numpy as np

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

# x_train, y_train, x_test, y_test are assumed to come from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

shape = 50 * 50 * 3
x_train = x_train.reshape(x_train.shape[0], shape)
x_test = x_test.reshape(x_test.shape[0], shape)

underampler = RandomUnderSampler(sampling_strategy='auto')
x_train_fit, y_train_fit = underampler.fit_resample(x_train, y_train)
X_test_fit, Y_test_fit = underampler.fit_resample(x_test, y_test)

x_train_ = x_train_fit.reshape(x_train_fit.shape[0], 50, 50, 3)
x_test_ = X_test_fit.reshape(X_test_fit.shape[0], 50, 50, 3)

print('Train data shape =', x_train_.shape)
print(' Test data shape =', x_test_.shape)
code
90117868/cell_43
[ "text_plain_output_1.png" ]
from imblearn.under_sampling import RandomUnderSampler
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

# x_train, y_train, x_test, y_test are assumed to come from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

shape = 50 * 50 * 3
x_train = x_train.reshape(x_train.shape[0], shape)
x_test = x_test.reshape(x_test.shape[0], shape)

underampler = RandomUnderSampler(sampling_strategy='auto')
x_train_fit, y_train_fit = underampler.fit_resample(x_train, y_train)
X_test_fit, Y_test_fit = underampler.fit_resample(x_test, y_test)

x_train_ = x_train_fit.reshape(x_train_fit.shape[0], 50, 50, 3)
x_test_ = X_test_fit.reshape(X_test_fit.shape[0], 50, 50, 3)

batch_size = 32
image_gen = ImageDataGenerator(shear_range=0.2, zoom_range=0.3, height_shift_range=0.2,
                               width_shift_range=0.2, fill_mode='nearest',
                               horizontal_flip=True, rotation_range=20)
test_data_gen = ImageDataGenerator(shear_range=0.2, zoom_range=0.3, height_shift_range=0.2,
                                   width_shift_range=0.2, fill_mode='nearest',
                                   horizontal_flip=True, rotation_range=20)
train = image_gen.flow(x_train_, y_train_fit, shuffle=True, batch_size=batch_size)
test = test_data_gen.flow(x_test_, Y_test_fit, shuffle=True, batch_size=batch_size)

for t_img, t_label in test:
    print('image shape ', t_img.shape)
    print('label shape ', t_label.shape)
    break
code
90117868/cell_14
[ "text_html_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import os
import pandas as pd

labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

# assumed from the loading cell: the original referenced these names without defining them here
images, all_labels, pathes = load_images_from_directory('../input/breast-histopathology-images/')

images = images[0:150000]
all_labels = all_labels[0:150000]
pathes = pathes[0:150000]

df = pd.DataFrame({'image_path': pathes, 'label': all_labels})
df.head(5)
code
90117868/cell_27
[ "image_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

# assumed from the loading cell: the original referenced these names without defining them here
images, all_labels, pathes = load_images_from_directory('../input/breast-histopathology-images/')

images = images[0:150000]
all_labels = all_labels[0:150000]
pathes = pathes[0:150000]

for i in range(30):
    plt.xticks([])
    plt.yticks([])

# x_train, y_train, x_test, y_test are assumed from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

plt.title('Test Labels Visualization')
sns.countplot(x=y_test, palette='flare')
plt.show()
code
90117868/cell_37
[ "image_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

# assumed from the loading cell: the original referenced these names without defining them here
images, all_labels, pathes = load_images_from_directory('../input/breast-histopathology-images/')

images = images[0:150000]
all_labels = all_labels[0:150000]
pathes = pathes[0:150000]

for i in range(30):
    plt.xticks([])
    plt.yticks([])

# x_train, y_train, x_test, y_test are assumed from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

plt.title('Test Labels Visualization')
# Y_test_fit is assumed from the undersampling cell (not repeated here)
sns.countplot(x=Y_test_fit, palette='flare')
plt.show()
code
90117868/cell_36
[ "image_output_1.png" ]
from sklearn.utils import shuffle
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns

seed = 42
np.random.seed(seed)  # the original 'np.random.seed = seed' rebinds the function instead of seeding

labels = ['0', '1']

def load_images_from_directory(main_directory):
    total_labels = []
    images = []
    pathes = []
    total_normal = 0
    total_infected = 0
    folders = os.listdir(main_directory)
    for i, file in enumerate(folders):
        if file == 'IDC_regular_ps50_idx5':
            continue
        for lab in labels:
            full_path = main_directory + os.path.join(file, lab)
            for image in os.listdir(full_path):
                img = cv2.imread(full_path + '/' + image)
                img = cv2.resize(img, (50, 50))
                images.append(img)
                pathes.append(full_path + '/' + image)
                if lab == '0':
                    label = 0
                    total_normal += 1
                elif lab == '1':
                    label = 1
                    total_infected += 1
                total_labels.append(label)
    return shuffle(images, total_labels, pathes, random_state=756349782)

def get_Label(number):
    labels = {0: 'Uninfected', 1: 'Infected'}
    return labels[number]

# assumed from the loading cell: the original referenced these names without defining them here
images, all_labels, pathes = load_images_from_directory('../input/breast-histopathology-images/')

images = images[0:150000]
all_labels = all_labels[0:150000]
pathes = pathes[0:150000]

for i in range(30):
    plt.xticks([])
    plt.yticks([])

# x_train, y_train, x_test, y_test are assumed from an earlier train/test split cell not included in this extract
x_train = np.asarray(x_train, np.float32) / 255
x_test = np.asarray(x_test, np.float32) / 255
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

plt.title('Train Labels Visualization')
# y_train_fit is assumed from the undersampling cell (not repeated here)
sns.countplot(x=y_train_fit, palette='flare')
plt.show()
code
74050322/cell_6
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

sns.set()  # the original 'sns.set' was missing the call parentheses

data = pd.read_csv('../input/boston-housing-dataset/HousingData.csv')
X = data.drop('MEDV', axis=1).values
Y = data['MEDV'].values
Room_number = X[:, 5]
Room_number = Room_number.reshape(-1, 1)
Y = Y.reshape(-1, 1)

regression = LinearRegression()
regression.fit(Room_number, Y)

regression_line = np.linspace(min(Room_number), max(Room_number))
plt.scatter(Room_number, Y, color='green')
plt.xlabel('Average Room number')
plt.ylabel('Average price (x1000 $)')
plt.title('The relationship between the number of rooms and the price of the house')
plt.plot(regression_line, regression.predict(regression_line), color='black', linewidth=3)
plt.show()
code
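As a quick sanity check on the fitted line, one might also report the coefficient of determination and a point prediction; a minimal sketch, reusing `regression`, `Room_number` and `Y` from the cell above:

import numpy as np

# R^2 of the one-feature fit on the data it was trained on
print('R^2:', regression.score(Room_number, Y))

# predicted price (in $1000s) for a hypothetical 6-room home
print('MEDV prediction for RM=6:', regression.predict(np.array([[6.0]])))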
74050322/cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()  # the original 'sns.set' was missing the call parentheses

data = pd.read_csv('../input/boston-housing-dataset/HousingData.csv')
data.head()
code
74050322/cell_5
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()  # the original 'sns.set' was missing the call parentheses

data = pd.read_csv('../input/boston-housing-dataset/HousingData.csv')
X = data.drop('MEDV', axis=1).values
Y = data['MEDV'].values
Room_number = X[:, 5]
Room_number = Room_number.reshape(-1, 1)
Y = Y.reshape(-1, 1)

plt.scatter(Room_number, Y)
plt.xlabel('Average Room number')
plt.ylabel('Average price (x1000 $)')
plt.title('The relationship between the number of rooms and the price of the house')
plt.show()
code
18127820/cell_23
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from keras.layers import Input
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.merge import concatenate
from keras.layers.pooling import MaxPooling2D
from keras.models import Model, load_model
from keras.optimizers import Adam
from keras import backend as K  # needed by dice_coef below; imported in the notebook's first cell
from skimage.io import imread
from skimage.transform import resize
from tqdm import tqdm
import numpy as np
import os
import random

TRAIN_PATH = '../input/train/'
TEST_PATH = '../input/test/'

seed = 42
random.seed(seed)     # the original 'random.seed = seed' rebinds the function instead of seeding
np.random.seed(seed)

tot_num = 5635
IMG_HEIGHT = 128
IMG_WIDTH = 128

files = os.listdir(TRAIN_PATH)
masks_list = []
imgs_list = []
for f in files:
    if 'mask' in f:
        masks_list.append(f)
    else:
        imgs_list.append(f)
masks_list = sorted(masks_list)
imgs_list = sorted(imgs_list)

X_train = np.zeros((tot_num, IMG_HEIGHT, IMG_WIDTH), dtype=np.float32)
Y_train = np.zeros((tot_num, IMG_HEIGHT, IMG_WIDTH), dtype=np.float32)

Y_train_one = []
X_train_one = []
Y_train_zero = []
X_train_zero = []
for i, file in tqdm(enumerate(imgs_list), total=len(imgs_list)):
    img_path = file
    mask_path = img_path[:-4] + '_mask.tif'
    mask = imread(TRAIN_PATH + mask_path)
    mask = resize(mask, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    img = imread(TRAIN_PATH + img_path)
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    if not mask.any():
        Y_train_zero.append(mask)
        X_train_zero.append(img)
    else:
        Y_train_one.append(mask)
        X_train_one.append(img)

X_train_one = np.array(X_train_one)
Y_train_one = np.array(Y_train_one)
X_train_zero = np.array(X_train_zero)
Y_train_zero = np.array(Y_train_zero)

X_train = []
Y_train = []

def augmentation(imgs, masks):
    for img, mask in zip(imgs, masks):
        img_lr = np.fliplr(img)
        mask_lr = np.fliplr(mask)
        img_up = np.flipud(img)
        mask_up = np.flipud(mask)
        X_train.append(img)
        Y_train.append(mask)
        X_train.append(img_lr)
        Y_train.append(mask_lr)
        X_train.append(img_up)
        Y_train.append(mask_up)

augmentation(X_train_one, Y_train_one)
X_train = np.array(X_train)
Y_train = np.array(Y_train)
X_train_ax = X_train[:, :, :, np.newaxis] / 255.0
Y_train_ax = Y_train[:, :, :, np.newaxis] / 255.0

def to_one(x):
    if x == 0:
        return 0
    else:
        return 1

to_one = np.vectorize(to_one)
Y_train_ax = to_one(Y_train_ax)

smooth = 1.0

def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)

inputs = Input((IMG_HEIGHT, IMG_WIDTH, 1))
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)
up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)
up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)
up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

model = Model(inputs=[inputs], outputs=[conv10])
model.compile(optimizer=Adam(lr=1e-05), loss=dice_coef_loss, metrics=[dice_coef])

results = model.fit(X_train_ax, Y_train_ax, validation_split=0.1, batch_size=8, epochs=18)
code
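The `dice_coef` in the cell above is the usual smoothed Dice coefficient; written out, with $y$ the flattened ground-truth mask, $\hat{y}$ the flattened prediction, and smoothing constant $s = 1$:

\mathrm{Dice}(y, \hat{y}) = \frac{2 \sum_i y_i \hat{y}_i + s}{\sum_i y_i + \sum_i \hat{y}_i + s},
\qquad
\mathcal{L}_{\mathrm{Dice}} = -\,\mathrm{Dice}(y, \hat{y})

The smoothing term keeps the loss finite on empty masks and gives a nonzero gradient even when prediction and target do not overlap.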
18127820/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
!pip3 install git+https://github.com/qubvel/segmentation_models
from segmentation_models import Unet
# model = Unet('densenet121', encoder_weights='imagenet', encoder_freeze=True)
code
18127820/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import sys
import random
import warnings

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from tqdm import tqdm

from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Dropout, Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
from keras.optimizers import Adam

import tensorflow as tf

from skimage.io import imread
from skimage.transform import resize
code
18127820/cell_5
[ "text_plain_output_1.png" ]
from skimage.io import imread
from skimage.transform import resize
from tqdm import tqdm
import numpy as np
import os
import random

TRAIN_PATH = '../input/train/'
TEST_PATH = '../input/test/'

seed = 42
random.seed(seed)     # the original 'random.seed = seed' rebinds the function instead of seeding
np.random.seed(seed)

tot_num = 5635
IMG_HEIGHT = 128
IMG_WIDTH = 128

files = os.listdir(TRAIN_PATH)
masks_list = []
imgs_list = []
for f in files:
    if 'mask' in f:
        masks_list.append(f)
    else:
        imgs_list.append(f)
masks_list = sorted(masks_list)
imgs_list = sorted(imgs_list)

Y_train_one = []
X_train_one = []
Y_train_zero = []
X_train_zero = []
for i, file in tqdm(enumerate(imgs_list), total=len(imgs_list)):
    img_path = file
    mask_path = img_path[:-4] + '_mask.tif'
    mask = imread(TRAIN_PATH + mask_path)
    mask = resize(mask, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    img = imread(TRAIN_PATH + img_path)
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    if not mask.any():
        Y_train_zero.append(mask)
        X_train_zero.append(img)
    else:
        Y_train_one.append(mask)
        X_train_one.append(img)
code
2014936/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')

print(oil.head())
print(oil.shape)
print(oil.describe())
code
2014936/cell_9
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')

print(holiday_events.head())
print(holiday_events.shape)
print(holiday_events.describe())
code
2014936/cell_25
[ "text_plain_output_1.png" ]
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')

train_sample['date'] = train_sample['date'].apply(datetime.datetime.strptime, args=('%Y-%m-%d',))
train_sample.sort_values(by='date', inplace=True)

daily_sale = pd.DataFrame()
daily_sale['count'] = train_sample['date'].value_counts()
daily_sale['date'] = train_sample['date'].value_counts().index
daily_sale = daily_sale.sort_values(by='date')

unit_sale = pd.DataFrame()
# filter on train_sample itself; the original masked with the full 'train' frame
unit_sale['count'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts()
unit_sale['positive_unit_sales'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts().index
unit_sale = unit_sale.sort_values(by='positive_unit_sales')

promotion_count = pd.DataFrame()
promotion_count['count'] = train_sample['onpromotion'].value_counts()

fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
sns.set_style('darkgrid')
ax1.plot(daily_sale['date'], daily_sale['count'])
ax1.set_xlabel('date', fontsize=15)
ax1.set_ylabel('count', fontsize=15)
ax1.tick_params(labelsize=15)

sns.barplot(x=promotion_count.index, y=promotion_count['count'], ax=ax2)
ax2.set_ylabel('count', fontsize=15)
ax2.set_xlabel('onpromotion', fontsize=15)
ax2.tick_params(labelsize=15)
ax2.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')

plt.subplot(1, 3, 3)
plt.loglog(unit_sale['positive_unit_sales'], unit_sale['count'])
plt.ylabel('count', fontsize=15)
plt.xlabel('positive_unit_sales', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()

store_count = pd.DataFrame()
store_count['count'] = train_sample['store_nbr'].value_counts().sort_index()
fig, ax = plt.subplots(figsize=(18, 3))
sns.barplot(x=store_count.index, y=store_count['count'], ax=ax)
ax.set_ylabel('count', fontsize=15)
ax.set_xlabel('store_nbr', fontsize=15)
ax.tick_params(labelsize=15)

item_count = pd.DataFrame()
item_count['count'] = train_sample['item_nbr'].value_counts().sort_index()
plt.plot(item_count.index)
code
2014936/cell_4
[ "text_html_output_1.png" ]
from subprocess import check_output
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2014936/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
import datetime
train_sample['date'] = train_sample['date'].apply(datetime.datetime.strptime, args=('%Y-%m-%d',))
train_sample.sort_values(by='date', inplace=True)
daily_sale = pd.DataFrame()
daily_sale['count'] = train_sample['date'].value_counts()
daily_sale['date'] = train_sample['date'].value_counts().index
daily_sale = daily_sale.sort_values(by='date')
unit_sale = pd.DataFrame()
unit_sale['count'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts()
unit_sale['positive_unit_sales'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts().index
unit_sale = unit_sale.sort_values(by='positive_unit_sales')
promotion_count = pd.DataFrame()
promotion_count['count'] = train_sample['onpromotion'].value_counts()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
sns.set_style('darkgrid')
ax1.plot(daily_sale['date'], daily_sale['count'])
ax1.set_xlabel('date', fontsize=15)
ax1.set_ylabel('count', fontsize=15)
ax1.tick_params(labelsize=15)
sns.barplot(x=promotion_count.index, y=promotion_count['count'], ax=ax2)
ax2.set_ylabel('count', fontsize=15)
ax2.set_xlabel('onpromotion', fontsize=15)
ax2.tick_params(labelsize=15)
ax2.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
plt.subplot(1, 3, 3)
plt.loglog(unit_sale['positive_unit_sales'], unit_sale['count'])
plt.ylabel('count', fontsize=15)
plt.xlabel('positive_unit_sales', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
store_count = pd.DataFrame()
store_count['count'] = train_sample['store_nbr'].value_counts().sort_index()
fig, ax = plt.subplots(figsize=(18, 3))
sns.barplot(x=store_count.index, y=store_count['count'], ax=ax)
ax.set_ylabel('count', fontsize=15)
ax.set_xlabel('store_nbr', fontsize=15)
ax.tick_params(labelsize=15)
item_count = pd.DataFrame()
item_count['count'] = train_sample['item_nbr'].value_counts().sort_index()
fig, ax = plt.subplots(figsize=(18, 3))
sns.barplot(x=item_count.index, y=item_count['count'], ax=ax)
ax.set_ylabel('count', fontsize=15)
ax.set_xlabel('item_nbr', fontsize=15)
ax.tick_params(axis='x', which='both', top='off', bottom='off', labelbottom='off')
neg_unit_sale = pd.DataFrame()
neg_unit_sale['unit_sales'] = -train_sample[train_sample['unit_sales'] < 0]['unit_sales']
fig, ax = plt.subplots(figsize=(18, 5))
#ax.set_xscale('log')
np.log(neg_unit_sale['unit_sales']).plot.hist(ax=ax, log=True, edgecolor='white', bins=50)
ax.set_xlabel('neg_unit_sales (log10 scale)', fontsize=15)
ax.set_ylabel('count', fontsize=15)
ax.tick_params(labelsize=15)
city_count = pd.DataFrame()
city_count['count'] = stores['city'].value_counts().sort_index()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 4))
g = sns.barplot(x=city_count.index, y=city_count['count'], ax=ax1)
ax1.set_ylabel('count', fontsize=15)
ax1.set_xlabel('city', fontsize=15)
ax1.tick_params(labelsize=15)
g.set_xticklabels(city_count.index, rotation=45)
state_count = pd.DataFrame()
state_count['count'] = stores['state'].value_counts().sort_index()
g2 = sns.barplot(x=state_count.index, y=state_count['count'], ax=ax2)
ax2.set_ylabel('count', fontsize=15)
ax2.set_xlabel('state', fontsize=15)
ax2.tick_params(labelsize=15)
g2.set_xticklabels(state_count.index, rotation=45)
code
2014936/cell_20
[ "text_plain_output_1.png" ]
import datetime
import pandas as pd
train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
import datetime
train_sample['date'] = train_sample['date'].apply(datetime.datetime.strptime, args=('%Y-%m-%d',))
train_sample.sort_values(by='date', inplace=True)
daily_sale = pd.DataFrame()
daily_sale['count'] = train_sample['date'].value_counts()
daily_sale['date'] = train_sample['date'].value_counts().index
daily_sale = daily_sale.sort_values(by='date')
print(daily_sale.head(3))
unit_sale = pd.DataFrame()
unit_sale['count'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts()
unit_sale['positive_unit_sales'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts().index
unit_sale = unit_sale.sort_values(by='positive_unit_sales')
code
2014936/cell_6
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
code
2014936/cell_26
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
import datetime
train_sample['date'] = train_sample['date'].apply(datetime.datetime.strptime, args=('%Y-%m-%d',))
train_sample.sort_values(by='date', inplace=True)
daily_sale = pd.DataFrame()
daily_sale['count'] = train_sample['date'].value_counts()
daily_sale['date'] = train_sample['date'].value_counts().index
daily_sale = daily_sale.sort_values(by='date')
unit_sale = pd.DataFrame()
unit_sale['count'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts()
unit_sale['positive_unit_sales'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts().index
unit_sale = unit_sale.sort_values(by='positive_unit_sales')
promotion_count = pd.DataFrame()
promotion_count['count'] = train_sample['onpromotion'].value_counts()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
sns.set_style('darkgrid')
ax1.plot(daily_sale['date'], daily_sale['count'])
ax1.set_xlabel('date', fontsize=15)
ax1.set_ylabel('count', fontsize=15)
ax1.tick_params(labelsize=15)
sns.barplot(x=promotion_count.index, y=promotion_count['count'], ax=ax2)
ax2.set_ylabel('count', fontsize=15)
ax2.set_xlabel('onpromotion', fontsize=15)
ax2.tick_params(labelsize=15)
ax2.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
plt.subplot(1, 3, 3)
plt.loglog(unit_sale['positive_unit_sales'], unit_sale['count'])
plt.ylabel('count', fontsize=15)
plt.xlabel('positive_unit_sales', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
store_count = pd.DataFrame()
store_count['count'] = train_sample['store_nbr'].value_counts().sort_index()
fig, ax = plt.subplots(figsize=(18, 3))
sns.barplot(x=store_count.index, y=store_count['count'], ax=ax)
ax.set_ylabel('count', fontsize=15)
ax.set_xlabel('store_nbr', fontsize=15)
ax.tick_params(labelsize=15)
item_count = pd.DataFrame()
item_count['count'] = train_sample['item_nbr'].value_counts().sort_index()
fig, ax = plt.subplots(figsize=(18, 3))
sns.barplot(x=item_count.index, y=item_count['count'], ax=ax)
ax.set_ylabel('count', fontsize=15)
ax.set_xlabel('item_nbr', fontsize=15)
ax.tick_params(axis='x', which='both', top='off', bottom='off', labelbottom='off')
code
2014936/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
print(len(items['family'].value_counts().index))
print(len(items['class'].value_counts().index))
print(items['perishable'].value_counts())
code
2014936/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
train_sample.head()
code
2014936/cell_28
[ "image_output_1.png" ]
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
import datetime
train_sample['date'] = train_sample['date'].apply(datetime.datetime.strptime, args=('%Y-%m-%d',))
train_sample.sort_values(by='date', inplace=True)
daily_sale = pd.DataFrame()
daily_sale['count'] = train_sample['date'].value_counts()
daily_sale['date'] = train_sample['date'].value_counts().index
daily_sale = daily_sale.sort_values(by='date')
unit_sale = pd.DataFrame()
unit_sale['count'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts()
unit_sale['positive_unit_sales'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts().index
unit_sale = unit_sale.sort_values(by='positive_unit_sales')
promotion_count = pd.DataFrame()
promotion_count['count'] = train_sample['onpromotion'].value_counts()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
sns.set_style('darkgrid')
ax1.plot(daily_sale['date'], daily_sale['count'])
ax1.set_xlabel('date', fontsize=15)
ax1.set_ylabel('count', fontsize=15)
ax1.tick_params(labelsize=15)
sns.barplot(x=promotion_count.index, y=promotion_count['count'], ax=ax2)
ax2.set_ylabel('count', fontsize=15)
ax2.set_xlabel('onpromotion', fontsize=15)
ax2.tick_params(labelsize=15)
ax2.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
plt.subplot(1, 3, 3)
plt.loglog(unit_sale['positive_unit_sales'], unit_sale['count'])
plt.ylabel('count', fontsize=15)
plt.xlabel('positive_unit_sales', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
store_count = pd.DataFrame()
store_count['count'] = train_sample['store_nbr'].value_counts().sort_index()
fig, ax = plt.subplots(figsize=(18, 3))
sns.barplot(x=store_count.index, y=store_count['count'], ax=ax)
ax.set_ylabel('count', fontsize=15)
ax.set_xlabel('store_nbr', fontsize=15)
ax.tick_params(labelsize=15)
item_count = pd.DataFrame()
item_count['count'] = train_sample['item_nbr'].value_counts().sort_index()
fig, ax = plt.subplots(figsize=(18, 3))
sns.barplot(x=item_count.index, y=item_count['count'], ax=ax)
ax.set_ylabel('count', fontsize=15)
ax.set_xlabel('item_nbr', fontsize=15)
ax.tick_params(axis='x', which='both', top='off', bottom='off', labelbottom='off')
neg_unit_sale = pd.DataFrame()
neg_unit_sale['unit_sales'] = -train_sample[train_sample['unit_sales'] < 0]['unit_sales']
fig, ax = plt.subplots(figsize=(18, 5))
np.log(neg_unit_sale['unit_sales']).plot.hist(ax=ax, log=True, edgecolor='white', bins=50)
ax.set_xlabel('neg_unit_sales (log10 scale)', fontsize=15)
ax.set_ylabel('count', fontsize=15)
ax.tick_params(labelsize=15)
code
2014936/cell_15
[ "text_plain_output_1.png" ]
import datetime
import pandas as pd
train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
import datetime
train_sample['date'] = train_sample['date'].apply(datetime.datetime.strptime, args=('%Y-%m-%d',))
train_sample.sort_values(by='date', inplace=True)
train_sample['onpromotion'].fillna(value='missing', inplace=True)
print(train_sample['onpromotion'].value_counts())
code
2014936/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
print(test.head())
print(test.shape)
print(test.describe())
print(test.isnull().sum())
code
2014936/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
print(test['onpromotion'].value_counts())
code
2014936/cell_24
[ "text_plain_output_1.png" ]
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
import datetime
train_sample['date'] = train_sample['date'].apply(datetime.datetime.strptime, args=('%Y-%m-%d',))
train_sample.sort_values(by='date', inplace=True)
daily_sale = pd.DataFrame()
daily_sale['count'] = train_sample['date'].value_counts()
daily_sale['date'] = train_sample['date'].value_counts().index
daily_sale = daily_sale.sort_values(by='date')
unit_sale = pd.DataFrame()
unit_sale['count'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts()
unit_sale['positive_unit_sales'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts().index
unit_sale = unit_sale.sort_values(by='positive_unit_sales')
promotion_count = pd.DataFrame()
promotion_count['count'] = train_sample['onpromotion'].value_counts()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
sns.set_style('darkgrid')
ax1.plot(daily_sale['date'], daily_sale['count'])
ax1.set_xlabel('date', fontsize=15)
ax1.set_ylabel('count', fontsize=15)
ax1.tick_params(labelsize=15)
sns.barplot(x=promotion_count.index, y=promotion_count['count'], ax=ax2)
ax2.set_ylabel('count', fontsize=15)
ax2.set_xlabel('onpromotion', fontsize=15)
ax2.tick_params(labelsize=15)
ax2.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
plt.subplot(1, 3, 3)
plt.loglog(unit_sale['positive_unit_sales'], unit_sale['count'])
plt.ylabel('count', fontsize=15)
plt.xlabel('positive_unit_sales', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
store_count = pd.DataFrame()
store_count['count'] = train_sample['store_nbr'].value_counts().sort_index()
fig, ax = plt.subplots(figsize=(18, 3))
sns.barplot(x=store_count.index, y=store_count['count'], ax=ax)
ax.set_ylabel('count', fontsize=15)
ax.set_xlabel('store_nbr', fontsize=15)
ax.tick_params(labelsize=15)
code
2014936/cell_14
[ "text_plain_output_1.png" ]
import datetime
import pandas as pd
train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
import datetime
train_sample['date'] = train_sample['date'].apply(datetime.datetime.strptime, args=('%Y-%m-%d',))
train_sample.sort_values(by='date', inplace=True)
print(train_sample.head())
print(train_sample.shape)
print(train_sample.describe())
print(train_sample.isnull().sum())
code
2014936/cell_22
[ "text_plain_output_1.png" ]
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
import datetime
train_sample['date'] = train_sample['date'].apply(datetime.datetime.strptime, args=('%Y-%m-%d',))
train_sample.sort_values(by='date', inplace=True)
daily_sale = pd.DataFrame()
daily_sale['count'] = train_sample['date'].value_counts()
daily_sale['date'] = train_sample['date'].value_counts().index
daily_sale = daily_sale.sort_values(by='date')
unit_sale = pd.DataFrame()
unit_sale['count'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts()
unit_sale['positive_unit_sales'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts().index
unit_sale = unit_sale.sort_values(by='positive_unit_sales')
promotion_count = pd.DataFrame()
promotion_count['count'] = train_sample['onpromotion'].value_counts()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
sns.set_style('darkgrid')
ax1.plot(daily_sale['date'], daily_sale['count'])
ax1.set_xlabel('date', fontsize=15)
ax1.set_ylabel('count', fontsize=15)
ax1.tick_params(labelsize=15)
sns.barplot(x=promotion_count.index, y=promotion_count['count'], ax=ax2)
ax2.set_ylabel('count', fontsize=15)
ax2.set_xlabel('onpromotion', fontsize=15)
ax2.tick_params(labelsize=15)
ax2.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
plt.subplot(1, 3, 3)
plt.loglog(unit_sale['positive_unit_sales'], unit_sale['count'])
plt.ylabel('count', fontsize=15)
plt.xlabel('positive_unit_sales', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
code
2014936/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
print(items.head())
print(items.shape)
print(items.describe())
code
2014936/cell_27
[ "image_output_1.png" ]
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
train_sample = train.sample(frac=0.05)
holiday_events = pd.read_csv('../input/holidays_events.csv')
items = pd.read_csv('../input/items.csv')
oil = pd.read_csv('../input/oil.csv')
stores = pd.read_csv('../input/stores.csv')
test = pd.read_csv('../input/test.csv')
transactions = pd.read_csv('../input/transactions.csv')
import datetime
train_sample['date'] = train_sample['date'].apply(datetime.datetime.strptime, args=('%Y-%m-%d',))
train_sample.sort_values(by='date', inplace=True)
daily_sale = pd.DataFrame()
daily_sale['count'] = train_sample['date'].value_counts()
daily_sale['date'] = train_sample['date'].value_counts().index
daily_sale = daily_sale.sort_values(by='date')
unit_sale = pd.DataFrame()
unit_sale['count'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts()
unit_sale['positive_unit_sales'] = train_sample[train_sample['unit_sales'] > 0]['unit_sales'].value_counts().index
unit_sale = unit_sale.sort_values(by='positive_unit_sales')
promotion_count = pd.DataFrame()
promotion_count['count'] = train_sample['onpromotion'].value_counts()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
sns.set_style('darkgrid')
ax1.plot(daily_sale['date'], daily_sale['count'])
ax1.set_xlabel('date', fontsize=15)
ax1.set_ylabel('count', fontsize=15)
ax1.tick_params(labelsize=15)
sns.barplot(x=promotion_count.index, y=promotion_count['count'], ax=ax2)
ax2.set_ylabel('count', fontsize=15)
ax2.set_xlabel('onpromotion', fontsize=15)
ax2.tick_params(labelsize=15)
ax2.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
plt.subplot(1, 3, 3)
plt.loglog(unit_sale['positive_unit_sales'], unit_sale['count'])
plt.ylabel('count', fontsize=15)
plt.xlabel('positive_unit_sales', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
store_count = pd.DataFrame()
store_count['count'] = train_sample['store_nbr'].value_counts().sort_index()
fig, ax = plt.subplots(figsize=(18, 3))
sns.barplot(x=store_count.index, y=store_count['count'], ax=ax)
ax.set_ylabel('count', fontsize=15)
ax.set_xlabel('store_nbr', fontsize=15)
ax.tick_params(labelsize=15)
item_count = pd.DataFrame()
item_count['count'] = train_sample['item_nbr'].value_counts().sort_index()
neg_unit_sale = pd.DataFrame()
neg_unit_sale['unit_sales'] = -train_sample[train_sample['unit_sales'] < 0]['unit_sales']
neg_unit_sale.head()
code
129012165/cell_21
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix, classification_report
import itertools
import keras
import matplotlib.image as img
import matplotlib.pyplot as plt
import numpy as np
import pathlib
path = pathlib.Path('/kaggle/input/5-flower-types-classification-dataset/flower_images')
lilly = list(path.glob('Lilly/*'))[:1000]
lotus = list(path.glob('Lotus/*'))[:1000]
orchid = list(path.glob('Orchid/*'))[:1000]
sunflower = list(path.glob('Sunflower/*'))[:1000]
tulip = list(path.glob('Tulip/*'))[:1000]
data = {'lilly': lilly, 'lotus': lotus, 'orchid': orchid, 'sunflower': sunflower, 'tulip': tulip}
flower_labels = {k: v for k, v in enumerate(data.keys())}
fig, ax = plt.subplots(5, 5, figsize=(30, 25))
fig.suptitle('Flower Category', color='magenta', fontsize=40)
for i in range(5):
    for j in range(5):
        image = img.imread(data.get(flower_labels.get(i))[j])
        ax[i, j].imshow(image)
        ax[i, j].set_title(flower_labels.get(i))
train_gen = ImageDataGenerator(rotation_range=10, rescale=1.0 / 255, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False, zoom_range=0.1, shear_range=0.1, brightness_range=[0.8, 1.2], fill_mode='nearest', validation_split=0.2)
train_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=32, class_mode='categorical', shuffle=True, subset='training')
test_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=1, shuffle=False, subset='validation')

def plot(c):
    pass

cnn = keras.models.Sequential()
cnn.add(keras.layers.Conv2D(filters=32, kernel_size=3, padding='valid', activation='relu', input_shape=(224, 224, 3)))
cnn.add(keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(keras.layers.Flatten())
cnn.add(keras.layers.Dense(45, activation='relu'))
cnn.add(keras.layers.Dense(15, activation='relu'))
cnn.add(keras.layers.Dropout(rate=0.1, seed=100))
cnn.add(keras.layers.Dense(units=5, activation='sigmoid'))
cnn.summary()
cnn.compile(optimizer='adam', metrics=['accuracy'], loss='categorical_crossentropy')
cnn.fit(train_data, epochs=20, validation_data=test_data, shuffle=True, callbacks=keras.callbacks.EarlyStopping(monitor='val_loss', patience=10))
y_pred = cnn.predict(test_data)
main_y_pred = np.argmax(y_pred, axis=1)
test_steps_per_epoch = np.math.ceil(test_data.samples / test_data.batch_size)
predictions = cnn.predict_generator(test_data, steps=test_steps_per_epoch)
predicted_classes = np.argmax(predictions, axis=1)
true_classes = test_data.classes
class_labels = list(test_data.class_indices.keys())
report = classification_report(true_classes, predicted_classes, target_names=class_labels)
cm = confusion_matrix(test_data.classes, predicted_classes)
d1 = test_data.class_indices
classes = list(d1.keys())
cmap = plt.cm.YlGnBu
plt.figure(figsize=(6, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title('Confusion Matrix')
plt.colorbar(shrink=True)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
    plt.text(j, i, cm[i, j], horizontalalignment='center', color='aqua' if cm[i, j] > thresh else 'red')
plt.tight_layout()
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
cm
code
129012165/cell_20
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix, classification_report
import keras
import numpy as np
train_gen = ImageDataGenerator(rotation_range=10, rescale=1.0 / 255, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False, zoom_range=0.1, shear_range=0.1, brightness_range=[0.8, 1.2], fill_mode='nearest', validation_split=0.2)
train_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=32, class_mode='categorical', shuffle=True, subset='training')
test_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=1, shuffle=False, subset='validation')
cnn = keras.models.Sequential()
cnn.add(keras.layers.Conv2D(filters=32, kernel_size=3, padding='valid', activation='relu', input_shape=(224, 224, 3)))
cnn.add(keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(keras.layers.Flatten())
cnn.add(keras.layers.Dense(45, activation='relu'))
cnn.add(keras.layers.Dense(15, activation='relu'))
cnn.add(keras.layers.Dropout(rate=0.1, seed=100))
cnn.add(keras.layers.Dense(units=5, activation='sigmoid'))
cnn.summary()
cnn.compile(optimizer='adam', metrics=['accuracy'], loss='categorical_crossentropy')
cnn.fit(train_data, epochs=20, validation_data=test_data, shuffle=True, callbacks=keras.callbacks.EarlyStopping(monitor='val_loss', patience=10))
y_pred = cnn.predict(test_data)
main_y_pred = np.argmax(y_pred, axis=1)
test_steps_per_epoch = np.math.ceil(test_data.samples / test_data.batch_size)
predictions = cnn.predict_generator(test_data, steps=test_steps_per_epoch)
predicted_classes = np.argmax(predictions, axis=1)
true_classes = test_data.classes
class_labels = list(test_data.class_indices.keys())
report = classification_report(true_classes, predicted_classes, target_names=class_labels)
print(report)
code
129012165/cell_18
[ "image_output_2.png", "image_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
import keras
import numpy as np
train_gen = ImageDataGenerator(rotation_range=10, rescale=1.0 / 255, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False, zoom_range=0.1, shear_range=0.1, brightness_range=[0.8, 1.2], fill_mode='nearest', validation_split=0.2)
train_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=32, class_mode='categorical', shuffle=True, subset='training')
test_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=1, shuffle=False, subset='validation')
cnn = keras.models.Sequential()
cnn.add(keras.layers.Conv2D(filters=32, kernel_size=3, padding='valid', activation='relu', input_shape=(224, 224, 3)))
cnn.add(keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(keras.layers.Flatten())
cnn.add(keras.layers.Dense(45, activation='relu'))
cnn.add(keras.layers.Dense(15, activation='relu'))
cnn.add(keras.layers.Dropout(rate=0.1, seed=100))
cnn.add(keras.layers.Dense(units=5, activation='sigmoid'))
cnn.summary()
cnn.compile(optimizer='adam', metrics=['accuracy'], loss='categorical_crossentropy')
cnn.fit(train_data, epochs=20, validation_data=test_data, shuffle=True, callbacks=keras.callbacks.EarlyStopping(monitor='val_loss', patience=10))
y_pred = cnn.predict(test_data)
main_y_pred = np.argmax(y_pred, axis=1)
code
129012165/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.image as img
import matplotlib.pyplot as plt
import pathlib
path = pathlib.Path('/kaggle/input/5-flower-types-classification-dataset/flower_images')
lilly = list(path.glob('Lilly/*'))[:1000]
lotus = list(path.glob('Lotus/*'))[:1000]
orchid = list(path.glob('Orchid/*'))[:1000]
sunflower = list(path.glob('Sunflower/*'))[:1000]
tulip = list(path.glob('Tulip/*'))[:1000]
data = {'lilly': lilly, 'lotus': lotus, 'orchid': orchid, 'sunflower': sunflower, 'tulip': tulip}
flower_labels = {k: v for k, v in enumerate(data.keys())}
fig, ax = plt.subplots(5, 5, figsize=(30, 25))
fig.suptitle('Flower Category', color='magenta', fontsize=40)
for i in range(5):
    for j in range(5):
        image = img.imread(data.get(flower_labels.get(i))[j])
        ax[i, j].imshow(image)
        ax[i, j].set_title(flower_labels.get(i))
code
129012165/cell_16
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
import keras
train_gen = ImageDataGenerator(rotation_range=10, rescale=1.0 / 255, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False, zoom_range=0.1, shear_range=0.1, brightness_range=[0.8, 1.2], fill_mode='nearest', validation_split=0.2)
train_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=32, class_mode='categorical', shuffle=True, subset='training')
test_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=1, shuffle=False, subset='validation')
cnn = keras.models.Sequential()
cnn.add(keras.layers.Conv2D(filters=32, kernel_size=3, padding='valid', activation='relu', input_shape=(224, 224, 3)))
cnn.add(keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(keras.layers.Flatten())
cnn.add(keras.layers.Dense(45, activation='relu'))
cnn.add(keras.layers.Dense(15, activation='relu'))
cnn.add(keras.layers.Dropout(rate=0.1, seed=100))
cnn.add(keras.layers.Dense(units=5, activation='sigmoid'))
cnn.summary()
cnn.compile(optimizer='adam', metrics=['accuracy'], loss='categorical_crossentropy')
cnn.fit(train_data, epochs=20, validation_data=test_data, shuffle=True, callbacks=keras.callbacks.EarlyStopping(monitor='val_loss', patience=10))
code
129012165/cell_17
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
import keras
import matplotlib.image as img
import matplotlib.pyplot as plt
import pathlib
path = pathlib.Path('/kaggle/input/5-flower-types-classification-dataset/flower_images')
lilly = list(path.glob('Lilly/*'))[:1000]
lotus = list(path.glob('Lotus/*'))[:1000]
orchid = list(path.glob('Orchid/*'))[:1000]
sunflower = list(path.glob('Sunflower/*'))[:1000]
tulip = list(path.glob('Tulip/*'))[:1000]
data = {'lilly': lilly, 'lotus': lotus, 'orchid': orchid, 'sunflower': sunflower, 'tulip': tulip}
flower_labels = {k: v for k, v in enumerate(data.keys())}
fig, ax = plt.subplots(5, 5, figsize=(30, 25))
fig.suptitle('Flower Category', color='magenta', fontsize=40)
for i in range(5):
    for j in range(5):
        image = img.imread(data.get(flower_labels.get(i))[j])
        ax[i, j].imshow(image)
        ax[i, j].set_title(flower_labels.get(i))
train_gen = ImageDataGenerator(rotation_range=10, rescale=1.0 / 255, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False, zoom_range=0.1, shear_range=0.1, brightness_range=[0.8, 1.2], fill_mode='nearest', validation_split=0.2)
train_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=32, class_mode='categorical', shuffle=True, subset='training')
test_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=1, shuffle=False, subset='validation')

def plot(c):
    pass

cnn = keras.models.Sequential()
cnn.add(keras.layers.Conv2D(filters=32, kernel_size=3, padding='valid', activation='relu', input_shape=(224, 224, 3)))
cnn.add(keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(keras.layers.Flatten())
cnn.add(keras.layers.Dense(45, activation='relu'))
cnn.add(keras.layers.Dense(15, activation='relu'))
cnn.add(keras.layers.Dropout(rate=0.1, seed=100))
cnn.add(keras.layers.Dense(units=5, activation='sigmoid'))
cnn.summary()
cnn.compile(optimizer='adam', metrics=['accuracy'], loss='categorical_crossentropy')
cnn.fit(train_data, epochs=20, validation_data=test_data, shuffle=True, callbacks=keras.callbacks.EarlyStopping(monitor='val_loss', patience=10))
plot(cnn)
code
129012165/cell_14
[ "text_plain_output_1.png" ]
import keras
cnn = keras.models.Sequential()
cnn.add(keras.layers.Conv2D(filters=32, kernel_size=3, padding='valid', activation='relu', input_shape=(224, 224, 3)))
cnn.add(keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(keras.layers.Flatten())
cnn.add(keras.layers.Dense(45, activation='relu'))
cnn.add(keras.layers.Dense(15, activation='relu'))
cnn.add(keras.layers.Dropout(rate=0.1, seed=100))
cnn.add(keras.layers.Dense(units=5, activation='sigmoid'))
cnn.summary()
code
129012165/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix, classification_report
import itertools
import keras
import matplotlib.image as img
import matplotlib.pyplot as plt
import numpy as np
import pathlib
path = pathlib.Path('/kaggle/input/5-flower-types-classification-dataset/flower_images')
lilly = list(path.glob('Lilly/*'))[:1000]
lotus = list(path.glob('Lotus/*'))[:1000]
orchid = list(path.glob('Orchid/*'))[:1000]
sunflower = list(path.glob('Sunflower/*'))[:1000]
tulip = list(path.glob('Tulip/*'))[:1000]
data = {'lilly': lilly, 'lotus': lotus, 'orchid': orchid, 'sunflower': sunflower, 'tulip': tulip}
flower_labels = {k: v for k, v in enumerate(data.keys())}
fig, ax = plt.subplots(5, 5, figsize=(30, 25))
fig.suptitle('Flower Category', color='magenta', fontsize=40)
for i in range(5):
    for j in range(5):
        image = img.imread(data.get(flower_labels.get(i))[j])
        ax[i, j].imshow(image)
        ax[i, j].set_title(flower_labels.get(i))
train_gen = ImageDataGenerator(rotation_range=10, rescale=1.0 / 255, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False, zoom_range=0.1, shear_range=0.1, brightness_range=[0.8, 1.2], fill_mode='nearest', validation_split=0.2)
train_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=32, class_mode='categorical', shuffle=True, subset='training')
test_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=1, shuffle=False, subset='validation')

def plot(c):
    pass

cnn = keras.models.Sequential()
cnn.add(keras.layers.Conv2D(filters=32, kernel_size=3, padding='valid', activation='relu', input_shape=(224, 224, 3)))
cnn.add(keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(keras.layers.Flatten())
cnn.add(keras.layers.Dense(45, activation='relu'))
cnn.add(keras.layers.Dense(15, activation='relu'))
cnn.add(keras.layers.Dropout(rate=0.1, seed=100))
cnn.add(keras.layers.Dense(units=5, activation='sigmoid'))
cnn.summary()
cnn.compile(optimizer='adam', metrics=['accuracy'], loss='categorical_crossentropy')
cnn.fit(train_data, epochs=20, validation_data=test_data, shuffle=True, callbacks=keras.callbacks.EarlyStopping(monitor='val_loss', patience=10))
y_pred = cnn.predict(test_data)
main_y_pred = np.argmax(y_pred, axis=1)
test_steps_per_epoch = np.math.ceil(test_data.samples / test_data.batch_size)
predictions = cnn.predict_generator(test_data, steps=test_steps_per_epoch)
predicted_classes = np.argmax(predictions, axis=1)
true_classes = test_data.classes
class_labels = list(test_data.class_indices.keys())
report = classification_report(true_classes, predicted_classes, target_names=class_labels)
cm = confusion_matrix(test_data.classes, predicted_classes)
d1 = test_data.class_indices
classes = list(d1.keys())
cmap = plt.cm.YlGnBu
# render the matrix image first so the colorbar has a mappable (as in cell_21)
plt.figure(figsize=(6, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar(shrink=True)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
    plt.text(j, i, cm[i, j], horizontalalignment='center', color='aqua' if cm[i, j] > thresh else 'red')
plt.tight_layout()
cm
# cnn2 is referenced below but its definition is not part of this cell extract
train_score = cnn2.evaluate(train_data, verbose=1)
test_score = cnn2.evaluate(test_data, verbose=1)
print('Train Loss: ', train_score[0])
print('Train Accuracy: ', train_score[1])
print('*****************************')
print('Test Loss: ', test_score[0])
print('Test Accuracy: ', test_score[1])
code
129012165/cell_10
[ "image_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
train_gen = ImageDataGenerator(rotation_range=10, rescale=1.0 / 255, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False, zoom_range=0.1, shear_range=0.1, brightness_range=[0.8, 1.2], fill_mode='nearest', validation_split=0.2)
train_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=32, class_mode='categorical', shuffle=True, subset='training')
test_data = train_gen.flow_from_directory('/kaggle/input/5-flower-types-classification-dataset/flower_images', target_size=(224, 224), batch_size=1, shuffle=False, subset='validation')
code
32068475/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, mean_squared_log_error
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

class CovidModel:

    def __init__(self):
        pass

    def predict_first_day(self, date):
        return None

    def predict_next_day(self, yesterday_pred_df):
        return None

class CovidModelGIBA(CovidModel):

    def __init__(self, lag=1, seed=1):
        self.lag = lag
        self.seed = seed
        train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
        train['Date'] = pd.to_datetime(train['Date'])
        self.maxdate = str(train['Date'].max())[:10]
        self.testdate = str(train['Date'].max() + pd.Timedelta(days=1))[:10]
        train['Province_State'].fillna('', inplace=True)
        train['day'] = train.Date.dt.dayofyear
        self.day_min = train['day'].min()
        train['day'] -= self.day_min
        train['geo'] = ['_'.join(x) for x in zip(train['Country_Region'], train['Province_State'])]
        test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
        test['Date'] = pd.to_datetime(test['Date'])
        test['Province_State'].fillna('', inplace=True)
        test['day'] = test.Date.dt.dayofyear
        test['day'] -= self.day_min
        test['geo'] = ['_'.join(x) for x in zip(test['Country_Region'], test['Province_State'])]
        test['Id'] = -1
        test['ConfirmedCases'] = 0
        test['Fatalities'] = 0
        self.trainmaxday = train['day'].max()
        self.testday1 = train['day'].max() + 1
        self.testdayN = test['day'].max()
        publictest = test.loc[test.Date > train.Date.max()].copy()
        train = pd.concat((train, publictest), sort=False)
        train.sort_values(['Country_Region', 'Province_State', 'Date'], inplace=True)
        train = train.reset_index(drop=True)
        train['ForecastId'] = pd.merge(train, test, on=['Country_Region', 'Province_State', 'Date'], how='left')['ForecastId_y'].values
        train['cid'] = train['Country_Region'] + '_' + train['Province_State']
        train['log0'] = np.log1p(train['ConfirmedCases'])
        train['log1'] = np.log1p(train['Fatalities'])
        train = train.loc[(train.log0 > 0) | train.ForecastId.notnull() | (train.Date >= '2020-03-17')].copy()
        train = train.reset_index(drop=True)
        train['days_since_1case'] = train.groupby('cid')['Id'].cumcount()
        dt = pd.read_csv('../input/covid19-lockdown-dates-by-country/countryLockdowndates.csv')
        dt.columns = ['Country_Region', 'Province_State', 'Date', 'Type', 'Reference']
        dt = dt.loc[dt.Date == dt.Date]  # keep rows with a non-null Date (NaN != NaN)
        dt['Province_State'] = dt['Province_State'].fillna('')
        dt['Date'] = pd.to_datetime(dt['Date'])
        dt['Date'] = dt['Date'] + pd.Timedelta(days=8)
        dt['Type'] = pd.factorize(dt['Type'])[0]
        dt['cid'] = dt['Country_Region'] + '_' + dt['Province_State']
        del dt['Reference'], dt['Country_Region'], dt['Province_State']
        train = pd.merge(train, dt, on=['cid', 'Date'], how='left')
        train['Type'] = train.groupby('cid')['Type'].fillna(method='ffill')
        train['target0'] = np.log1p(train['ConfirmedCases'])
        train['target1'] = np.log1p(train['Fatalities'])
        self.train = train.copy()

    def create_features(self, df, valid_day):
        df = df.loc[df.day >= valid_day - 50].copy()
        # lagged log1p targets plus per-region rolling means over several windows
        df['lag0_1'] = df.groupby('cid')['target0'].shift(self.lag)
        df['lag0_1'] = df.groupby('cid')['lag0_1'].fillna(method='bfill')
        df['lag0_8'] = df.groupby('cid')['target0'].shift(8)
        df['lag0_8'] = df.groupby('cid')['lag0_8'].fillna(method='bfill')
        df['lag1_1'] = df.groupby('cid')['target1'].shift(self.lag)
        df['lag1_1'] = df.groupby('cid')['lag1_1'].fillna(method='bfill')
        df['m0'] = df.groupby('cid')['lag0_1'].rolling(2).mean().values
        df['m1'] = df.groupby('cid')['lag0_1'].rolling(3).mean().values
        df['m2'] = df.groupby('cid')['lag0_1'].rolling(4).mean().values
        df['m3'] = df.groupby('cid')['lag0_1'].rolling(5).mean().values
        df['m4'] = df.groupby('cid')['lag0_1'].rolling(7).mean().values
        df['m5'] = df.groupby('cid')['lag0_1'].rolling(10).mean().values
        df['m6'] = df.groupby('cid')['lag0_1'].rolling(12).mean().values
        df['m7'] = df.groupby('cid')['lag0_1'].rolling(16).mean().values
        df['m8'] = df.groupby('cid')['lag0_1'].rolling(20).mean().values
        df['m9'] = df.groupby('cid')['lag0_1'].rolling(25).mean().values
        df['n0'] = df.groupby('cid')['lag1_1'].rolling(2).mean().values
        df['n1'] = df.groupby('cid')['lag1_1'].rolling(3).mean().values
        df['n2'] = df.groupby('cid')['lag1_1'].rolling(4).mean().values
        df['n3'] = df.groupby('cid')['lag1_1'].rolling(5).mean().values
        df['n4'] = df.groupby('cid')['lag1_1'].rolling(7).mean().values
        df['n5'] = df.groupby('cid')['lag1_1'].rolling(10).mean().values
        df['n6'] = df.groupby('cid')['lag1_1'].rolling(12).mean().values
        df['n7'] = df.groupby('cid')['lag1_1'].rolling(16).mean().values
        df['n8'] = df.groupby('cid')['lag1_1'].rolling(20).mean().values
        df['m0'] = df.groupby('cid')['m0'].fillna(method='bfill')
        df['m1'] = df.groupby('cid')['m1'].fillna(method='bfill')
        df['m2'] = df.groupby('cid')['m2'].fillna(method='bfill')
        df['m3'] = df.groupby('cid')['m3'].fillna(method='bfill')
        df['m4'] = df.groupby('cid')['m4'].fillna(method='bfill')
        df['m5'] = df.groupby('cid')['m5'].fillna(method='bfill')
        df['m6'] = df.groupby('cid')['m6'].fillna(method='bfill')
        df['m7'] = df.groupby('cid')['m7'].fillna(method='bfill')
        df['m8'] = df.groupby('cid')['m8'].fillna(method='bfill')
        df['m9'] = df.groupby('cid')['m9'].fillna(method='bfill')
        df['n0'] = df.groupby('cid')['n0'].fillna(method='bfill')
        df['n1'] = df.groupby('cid')['n1'].fillna(method='bfill')
        df['n2'] = df.groupby('cid')['n2'].fillna(method='bfill')
        df['n3'] = df.groupby('cid')['n3'].fillna(method='bfill')
        df['n4'] = df.groupby('cid')['n4'].fillna(method='bfill')
        df['n5'] = df.groupby('cid')['n5'].fillna(method='bfill')
        df['n6'] = df.groupby('cid')['n6'].fillna(method='bfill')
        df['n7'] = df.groupby('cid')['n7'].fillna(method='bfill')
        df['n8'] = df.groupby('cid')['n8'].fillna(method='bfill')
        df['flag_China'] = 1 * (df['Country_Region'] == 'China')
        df['flag_US'] = 1 * (df['Country_Region'] == 'US')
        df['flag_Kosovo_'] = 1 * (df['cid'] == 'Kosovo_')
        df['flag_Korea'] = 1 * (df['cid'] == 'Korea, South_')
        df['flag_Nepal_'] = 1 * (df['cid'] == 'Nepal_')
        df['flag_Holy See_'] = 1 * (df['cid'] == 'Holy See_')
        df['flag_Suriname_'] = 1 * (df['cid'] == 'Suriname_')
        df['flag_Ghana_'] = 1 * (df['cid'] == 'Ghana_')
        df['flag_Togo_'] = 1 * (df['cid'] == 'Togo_')
        df['flag_Malaysia_'] = 1 * (df['cid'] == 'Malaysia_')
        df['flag_US_Rhode'] = 1 * (df['cid'] == 'US_Rhode Island')
        df['flag_Bolivia_'] = 1 * (df['cid'] == 'Bolivia_')
        df['flag_China_Tib'] = 1 * (df['cid'] == 'China_Tibet')
        df['flag_Bahrain_'] = 1 * (df['cid'] == 'Bahrain_')
        df['flag_Honduras_'] = 1 * (df['cid'] == 'Honduras_')
        df['flag_Bangladesh'] = 1 * (df['cid'] == 'Bangladesh_')
        df['flag_Paraguay_'] = 1 * (df['cid'] == 'Paraguay_')
        tr = df.loc[df.day < valid_day].copy()
        vl = df.loc[df.day == valid_day].copy()
        tr = tr.loc[tr.lag0_1 > 0].copy()
        maptarget0 = tr.groupby('cid')['target0'].agg(log0_max='max').reset_index()
        maptarget1 = tr.groupby('cid')['target1'].agg(log1_max='max').reset_index()
        vl['log0_max'] = pd.merge(vl, maptarget0, on='cid', how='left')['log0_max'].values
        vl['log1_max'] = pd.merge(vl, maptarget1, on='cid', how='left')['log1_max'].values
        vl['log0_max'] = vl['log0_max'].fillna(0)
        vl['log1_max'] = vl['log1_max'].fillna(0)
        return (tr, vl)

    def train_models(self, valid_day=10):
        train = self.train.copy()
        # manual corrections for regions with known data-entry glitches
        train.loc[(train.cid == 'China_Guizhou') & (train.Date == '2020-03-17'), 'target0'] = np.log1p(146)
        train.loc[(train.cid == 'Guyana_') & (train.Date >= '2020-03-22') & (train.Date <= '2020-03-30'), 'target0'] = np.log1p(12)
        train.loc[(train.cid == 'US_Virgin Islands') & (train.Date >= '2020-03-29') & (train.Date <= '2020-03-29'), 'target0'] = np.log1p(24)
        train.loc[(train.cid == 'US_Virgin Islands') & (train.Date >= '2020-03-30') & (train.Date <= '2020-03-30'), 'target0'] = np.log1p(27)
        train.loc[(train.cid == 'Iceland_') & (train.Date >= '2020-03-15') & (train.Date <= '2020-03-15'), 'target1'] = np.log1p(0)
        train.loc[(train.cid == 'Kazakhstan_') & (train.Date >= '2020-03-20') & (train.Date <= '2020-03-20'), 'target1'] = np.log1p(0)
        train.loc[(train.cid == 'Serbia_') & (train.Date >= '2020-03-26') & (train.Date <= '2020-03-26'), 'target1'] = np.log1p(5)
        train.loc[(train.cid == 'Serbia_') & (train.Date >= '2020-03-27') & (train.Date <= '2020-03-27'), 'target1'] = np.log1p(6)
        train.loc[(train.cid == 'Slovakia_') & (train.Date >= '2020-03-22') & (train.Date <= '2020-03-31'), 'target1'] = np.log1p(1)
        train.loc[(train.cid == 'US_Hawaii') & (train.Date >= '2020-03-25') & (train.Date <= '2020-03-31'), 'target1'] = np.log1p(1)
        param = {'subsample': 1.0, 'colsample_bytree': 0.85, 'max_depth': 5, 'gamma': 0.0, 'learning_rate': 0.01, 'min_child_weight': 6.0, 'reg_alpha': 0.0, 'reg_lambda': 0.4, 'silent': 1, 'objective': 'reg:squarederror', 'nthread': 12, 'seed': self.seed}
        tr, vl = self.create_features(train.copy(), valid_day)
        features = [f for f in tr.columns if f not in [
            'lag0_8', 'Id', 'ConfirmedCases', 'Fatalities', 'log0', 'log1', 'target0', 'target1',
            'ypred0', 'ypred1', 'Province_State', 'Country_Region', 'Date', 'ForecastId', 'cid',
            'geo', 'day', 'GDP_region', 'TRUE POPULATION', 'pct_in_largest_city', ' TFR ',
            ' Avg_age ', 'latitude', 'longitude', 'abs_latitude', 'temperature', 'humidity',
            'Personality_pdi', 'Personality_idv', 'Personality_mas', 'Personality_uai',
            'Personality_ltowvs', 'Personality_assertive', 'personality_perform',
            'personality_agreeableness', 'murder', 'High_rises', 'max_high_rises', 'AIR_CITIES',
            'AIR_AVG', 'continent_gdp_pc', 'continent_happiness', 'continent_generosity',
            'continent_corruption', 'continent_Life_expectancy']]
        self.features0 = features
        features = [f for f in tr.columns if f not in [
            'm0', 'm1', 'm2', 'm3', 'Id', 'ConfirmedCases', 'Fatalities', 'log0', 'log1',
            'target0', 'target1', 'ypred0', 'ypred1', 'Province_State', 'Country_Region', 'Date',
            'ForecastId', 'cid', 'geo', 'day', 'GDP_region', 'TRUE POPULATION',
            'pct_in_largest_city', ' TFR ', ' Avg_age ', 'latitude', 'longitude', 'abs_latitude',
            'temperature', 'humidity', 'Personality_pdi', 'Personality_idv', 'Personality_mas',
            'Personality_uai', 'Personality_ltowvs', 'Personality_assertive',
            'personality_perform', 'personality_agreeableness', 'murder', 'High_rises',
            'max_high_rises', 'AIR_CITIES', 'AIR_AVG', 'continent_gdp_pc', 'continent_happiness',
            'continent_generosity', 'continent_corruption', 'continent_Life_expectancy']]
        self.features1 = features
        nrounds0 = 680
        nrounds1 = 630
        dtrain = xgb.DMatrix(tr[self.features0], tr['target0'])
        param['seed'] = self.seed
        self.model0 = xgb.train(param, dtrain, nrounds0, verbose_eval=0)
        param['seed'] = self.seed + 1
        self.model1 = xgb.train(param, dtrain, nrounds0, verbose_eval=0)
        dtrain = xgb.DMatrix(tr[self.features1], tr['target1'])
        param['seed'] = self.seed
        self.model2 = xgb.train(param, dtrain, nrounds1, verbose_eval=0)
        param['seed'] = self.seed + 1
        self.model3 = xgb.train(param, dtrain, nrounds1, verbose_eval=0)
        self.vl = vl
        return 1

    def predict_first_day(self, day):
        self.day = day
        self.train_models(day)
        dvalid = xgb.DMatrix(self.vl[self.features0])
        ypred0 = (self.model0.predict(dvalid) + self.model1.predict(dvalid)) / 2
        dvalid = xgb.DMatrix(self.vl[self.features1])
        ypred1 = (self.model2.predict(dvalid) + self.model3.predict(dvalid)) / 2
        self.vl['ypred0'] = ypred0
        self.vl['ypred1'] = ypred1
        # cumulative counts cannot decrease: clamp to each region's running max
        self.vl.loc[self.vl.ypred0 < self.vl.log0_max, 'ypred0'] = self.vl.loc[self.vl.ypred0 < self.vl.log0_max, 'log0_max']
        self.vl.loc[self.vl.ypred1 < self.vl.log1_max, 'ypred1'] = self.vl.loc[self.vl.ypred1 < self.vl.log1_max, 'log1_max']
        VALID = self.vl[['geo', 'day', 'ypred0', 'ypred1']].copy()
        VALID.columns = ['geo', 'day', 'ConfirmedCases', 'Fatalities']
        return VALID.reset_index(drop=True)

    def predict_next_day(self, yesterday):
        self.day += 1
        feats = ['geo', 'day']
        # feed yesterday's (blended) predictions back in as today's targets
        self.train['ypred0'] = pd.merge(self.train[feats], yesterday[feats + ['ConfirmedCases']], on=feats, how='left')['ConfirmedCases'].values
        self.train.loc[self.train.ypred0.notnull(), 'target0'] = self.train.loc[self.train.ypred0.notnull(), 'ypred0']
        self.train['ypred1'] = pd.merge(self.train[feats], yesterday[feats + ['Fatalities']], on=feats, how='left')['Fatalities'].values
        self.train.loc[self.train.ypred1.notnull(), 'target1'] = self.train.loc[self.train.ypred1.notnull(), 'ypred1']
        del self.train['ypred0'], self.train['ypred1']
        tr, vl = self.create_features(self.train.copy(), self.day)
        dvalid = xgb.DMatrix(vl[self.features0])
        ypred0 = (self.model0.predict(dvalid) + self.model1.predict(dvalid)) / 2
        dvalid = xgb.DMatrix(vl[self.features1])
        ypred1 = (self.model2.predict(dvalid) + self.model3.predict(dvalid)) / 2
        vl['ypred0'] = ypred0
        vl['ypred1'] = ypred1
        vl.loc[vl.ypred0 < vl.log0_max, 'ypred0'] = vl.loc[vl.ypred0 < vl.log0_max, 'log0_max']
        vl.loc[vl.ypred1 < vl.log1_max, 'ypred1'] = vl.loc[vl.ypred1 < vl.log1_max, 'log1_max']
        self.vl = vl
        VALID = vl[['geo', 'day', 'ypred0', 'ypred1']].copy()
        VALID.columns = ['geo', 'day', 'ConfirmedCases', 'Fatalities']
        return VALID.reset_index(drop=True)

TARGETS = ['ConfirmedCases', 'Fatalities']

def rmse(y_true, y_pred):
    return np.sqrt(mean_squared_error(y_true, y_pred))

df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df[TARGETS] = np.log1p(df[TARGETS].values)
sub_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')

def preprocess(df):
    for col in ['Country_Region', 'Province_State']:
        df[col].fillna('', inplace=True)
    df['Date'] = pd.to_datetime(df['Date'])
    df['day'] = df.Date.dt.dayofyear
    df['geo'] = ['_'.join(x) for x in zip(df['Country_Region'], df['Province_State'])]
    return df

df = preprocess(df)
sub_df = preprocess(sub_df)
sub_df['day'] -= df['day'].min()
df['day'] -= df['day'].min()
TEST_FIRST = sub_df[sub_df['Date'] > df['Date'].max()]['Date'].min()
TEST_DAYS = (sub_df['Date'].max() - TEST_FIRST).days + 1
TEST_FIRST = (TEST_FIRST - df['Date'].min()).days

def get_blend(pred_dfs, weights, verbose=True):
    blend_df = pred_dfs['giba1'].copy()
    blend_df[TARGETS] = 0
    for name, pred_df in pred_dfs.items():
        blend_df[TARGETS] += weights[name] * pred_df[TARGETS].values
    return blend_df

cov_models = {'ahmet': CovidModelAhmet(), 'giba1': CovidModelGIBA(lag=1), 'giba2': CovidModelGIBA(lag=2)}
weights = {'ahmet': 0.45, 'giba1': 0.275, 'giba2': 0.275}
pred_dfs = {name: cm.predict_first_day(TEST_FIRST).sort_values('geo') for name, cm in cov_models.items()}
blend_df = get_blend(pred_dfs, weights)
eval_df = blend_df.copy()
for d in range(1, TEST_DAYS):
    pred_dfs = {name: cm.predict_next_day(blend_df).sort_values('geo') for name, cm in cov_models.items()}
    blend_df = get_blend(pred_dfs, weights)
    eval_df = eval_df.append(blend_df)
sub_df = sub_df.merge(df.append(eval_df, sort=False), on=['geo', 'day'], how='left')
sub_df.head()
code
32068475/cell_6
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, mean_squared_log_error
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

class CovidModel:

    def __init__(self):
        pass

    def predict_first_day(self, date):
        return None

    def predict_next_day(self, yesterday_pred_df):
        return None

class CovidModelGIBA(CovidModel):

    def __init__(self, lag=1, seed=1):
        self.lag = lag
        self.seed = seed
        train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
        train['Date'] = pd.to_datetime(train['Date'])
        self.maxdate = str(train['Date'].max())[:10]
        self.testdate = str(train['Date'].max() + pd.Timedelta(days=1))[:10]
        train['Province_State'].fillna('', inplace=True)
        train['day'] = train.Date.dt.dayofyear
        self.day_min = train['day'].min()
        train['day'] -= self.day_min
        train['geo'] = ['_'.join(x) for x in zip(train['Country_Region'], train['Province_State'])]
        test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
        test['Date'] = pd.to_datetime(test['Date'])
        test['Province_State'].fillna('', inplace=True)
        test['day'] = test.Date.dt.dayofyear
        test['day'] -= self.day_min
        test['geo'] = ['_'.join(x) for x in zip(test['Country_Region'], test['Province_State'])]
        test['Id'] = -1
        test['ConfirmedCases'] = 0
        test['Fatalities'] = 0
        self.trainmaxday = train['day'].max()
        self.testday1 = train['day'].max() + 1
        self.testdayN = test['day'].max()
        publictest = test.loc[test.Date > train.Date.max()].copy()
        train = pd.concat((train, publictest), sort=False)
        train.sort_values(['Country_Region', 'Province_State', 'Date'], inplace=True)
        train = train.reset_index(drop=True)
        train['ForecastId'] = pd.merge(train, test, on=['Country_Region', 'Province_State', 'Date'], how='left')['ForecastId_y'].values
        train['cid'] = train['Country_Region'] + '_' + train['Province_State']
        train['log0'] = np.log1p(train['ConfirmedCases'])
        train['log1'] = np.log1p(train['Fatalities'])
        train = train.loc[(train.log0 > 0) | train.ForecastId.notnull() | (train.Date >= '2020-03-17')].copy()
        train = train.reset_index(drop=True)
        train['days_since_1case'] = train.groupby('cid')['Id'].cumcount()
        dt = pd.read_csv('../input/covid19-lockdown-dates-by-country/countryLockdowndates.csv')
        dt.columns = ['Country_Region', 'Province_State', 'Date', 'Type', 'Reference']
        dt = dt.loc[dt.Date == dt.Date]
        dt['Province_State'] = dt['Province_State'].fillna('')
        dt['Date'] = pd.to_datetime(dt['Date'])
        dt['Date'] = dt['Date'] + pd.Timedelta(days=8)
        dt['Type'] = pd.factorize(dt['Type'])[0]
        dt['cid'] = dt['Country_Region'] + '_' + dt['Province_State']
        del dt['Reference'], dt['Country_Region'], dt['Province_State']
        train = pd.merge(train, dt, on=['cid', 'Date'], how='left')
        train['Type'] = train.groupby('cid')['Type'].fillna(method='ffill')
        train['target0'] = np.log1p(train['ConfirmedCases'])
        train['target1'] = np.log1p(train['Fatalities'])
        self.train = train.copy()

    def create_features(self, df, valid_day):
        df = df.loc[df.day >= valid_day - 50].copy()
        df['lag0_1'] = df.groupby('cid')['target0'].shift(self.lag)
        df['lag0_1'] = df.groupby('cid')['lag0_1'].fillna(method='bfill')
        df['lag0_8'] = df.groupby('cid')['target0'].shift(8)
        df['lag0_8'] = df.groupby('cid')['lag0_8'].fillna(method='bfill')
        df['lag1_1'] = df.groupby('cid')['target1'].shift(self.lag)
        df['lag1_1'] = df.groupby('cid')['lag1_1'].fillna(method='bfill')
        df['m0'] = df.groupby('cid')['lag0_1'].rolling(2).mean().values
        df['m1'] = df.groupby('cid')['lag0_1'].rolling(3).mean().values
        df['m2'] = df.groupby('cid')['lag0_1'].rolling(4).mean().values
        df['m3'] = df.groupby('cid')['lag0_1'].rolling(5).mean().values
        df['m4'] = df.groupby('cid')['lag0_1'].rolling(7).mean().values
        df['m5'] = df.groupby('cid')['lag0_1'].rolling(10).mean().values
        df['m6'] = df.groupby('cid')['lag0_1'].rolling(12).mean().values
        df['m7'] = df.groupby('cid')['lag0_1'].rolling(16).mean().values
        df['m8'] = df.groupby('cid')['lag0_1'].rolling(20).mean().values
        df['m9'] = df.groupby('cid')['lag0_1'].rolling(25).mean().values
        df['n0'] = df.groupby('cid')['lag1_1'].rolling(2).mean().values
        df['n1'] = df.groupby('cid')['lag1_1'].rolling(3).mean().values
        df['n2'] = df.groupby('cid')['lag1_1'].rolling(4).mean().values
        df['n3'] = df.groupby('cid')['lag1_1'].rolling(5).mean().values
        df['n4'] = df.groupby('cid')['lag1_1'].rolling(7).mean().values
        df['n5'] = df.groupby('cid')['lag1_1'].rolling(10).mean().values
        df['n6'] = df.groupby('cid')['lag1_1'].rolling(12).mean().values
        df['n7'] = df.groupby('cid')['lag1_1'].rolling(16).mean().values
        df['n8'] = df.groupby('cid')['lag1_1'].rolling(20).mean().values
        df['m0'] = df.groupby('cid')['m0'].fillna(method='bfill')
        df['m1'] = df.groupby('cid')['m1'].fillna(method='bfill')
        df['m2'] = df.groupby('cid')['m2'].fillna(method='bfill')
        df['m3'] = df.groupby('cid')['m3'].fillna(method='bfill')
        df['m4'] = df.groupby('cid')['m4'].fillna(method='bfill')
        df['m5'] = df.groupby('cid')['m5'].fillna(method='bfill')
        df['m6'] = df.groupby('cid')['m6'].fillna(method='bfill')
        df['m7'] = df.groupby('cid')['m7'].fillna(method='bfill')
        df['m8'] = df.groupby('cid')['m8'].fillna(method='bfill')
        df['m9'] = df.groupby('cid')['m9'].fillna(method='bfill')
        df['n0'] = df.groupby('cid')['n0'].fillna(method='bfill')
        df['n1'] = df.groupby('cid')['n1'].fillna(method='bfill')
        df['n2'] = df.groupby('cid')['n2'].fillna(method='bfill')
        df['n3'] = df.groupby('cid')['n3'].fillna(method='bfill')
        df['n4'] = df.groupby('cid')['n4'].fillna(method='bfill')
        df['n5'] = df.groupby('cid')['n5'].fillna(method='bfill')
        df['n6'] = df.groupby('cid')['n6'].fillna(method='bfill')
        df['n7'] = df.groupby('cid')['n7'].fillna(method='bfill')
        df['n8'] = df.groupby('cid')['n8'].fillna(method='bfill')
        df['flag_China'] = 1 * (df['Country_Region'] == 'China')
        df['flag_US'] = 1 * (df['Country_Region'] == 'US')
        df['flag_Kosovo_'] = 1 * (df['cid'] == 'Kosovo_')
        df['flag_Korea'] = 1 * (df['cid'] == 'Korea, South_')
        df['flag_Nepal_'] = 1 * (df['cid'] == 'Nepal_')
        df['flag_Holy See_'] = 1 * (df['cid'] == 'Holy See_')
        df['flag_Suriname_'] = 1 * (df['cid'] == 'Suriname_')
        df['flag_Ghana_'] = 1 * (df['cid'] == 'Ghana_')
        df['flag_Togo_'] = 1 * (df['cid'] == 'Togo_')
        df['flag_Malaysia_'] = 1 * (df['cid'] == 'Malaysia_')
        df['flag_US_Rhode'] = 1 * (df['cid'] == 'US_Rhode Island')
        df['flag_Bolivia_'] = 1 * (df['cid'] == 'Bolivia_')
        df['flag_China_Tib'] = 1 * (df['cid'] == 'China_Tibet')
        df['flag_Bahrain_'] = 1 * (df['cid'] == 'Bahrain_')
        df['flag_Honduras_'] = 1 * (df['cid'] == 'Honduras_')
        df['flag_Bangladesh'] = 1 * (df['cid'] == 'Bangladesh_')
        df['flag_Paraguay_'] = 1 * (df['cid'] == 'Paraguay_')
        tr = df.loc[df.day < valid_day].copy()
        vl = df.loc[df.day == valid_day].copy()
        tr = tr.loc[tr.lag0_1 > 0].copy()
        maptarget0 = tr.groupby('cid')['target0'].agg(log0_max='max').reset_index()
        maptarget1 = tr.groupby('cid')['target1'].agg(log1_max='max').reset_index()
        vl['log0_max'] = pd.merge(vl, maptarget0, on='cid', how='left')['log0_max'].values
        vl['log1_max'] = pd.merge(vl, maptarget1, on='cid', how='left')['log1_max'].values
        vl['log0_max'] = vl['log0_max'].fillna(0)
        vl['log1_max'] = vl['log1_max'].fillna(0)
        return (tr, vl)

    def train_models(self, valid_day=10):
        train = self.train.copy()
        train.loc[(train.cid == 'China_Guizhou') & (train.Date == '2020-03-17'), 'target0'] = np.log1p(146)
        train.loc[(train.cid == 'Guyana_') & (train.Date >= '2020-03-22') & (train.Date <= '2020-03-30'), 'target0'] = np.log1p(12)
        train.loc[(train.cid == 'US_Virgin Islands') & (train.Date >= '2020-03-29') & (train.Date <= '2020-03-29'), 'target0'] = np.log1p(24)
        train.loc[(train.cid == 'US_Virgin Islands') & (train.Date >= '2020-03-30') & (train.Date <= '2020-03-30'), 'target0'] = np.log1p(27)
        train.loc[(train.cid == 'Iceland_') & (train.Date >= '2020-03-15') & (train.Date <= '2020-03-15'), 'target1'] = np.log1p(0)
        train.loc[(train.cid == 'Kazakhstan_') & (train.Date >= '2020-03-20') & (train.Date <= '2020-03-20'), 'target1'] = np.log1p(0)
        train.loc[(train.cid == 'Serbia_') & (train.Date >= '2020-03-26') & (train.Date <= '2020-03-26'), 'target1'] = np.log1p(5)
        train.loc[(train.cid == 'Serbia_') & (train.Date >= '2020-03-27') & (train.Date <= '2020-03-27'), 'target1'] = np.log1p(6)
        train.loc[(train.cid == 'Slovakia_') & (train.Date >= '2020-03-22') & (train.Date <= '2020-03-31'), 'target1'] = np.log1p(1)
        train.loc[(train.cid == 'US_Hawaii') & (train.Date >= '2020-03-25') & (train.Date <= '2020-03-31'), 'target1'] = np.log1p(1)
        param = {'subsample': 1.0, 'colsample_bytree': 0.85, 'max_depth': 5, 'gamma': 0.0, 'learning_rate': 0.01, 'min_child_weight': 6.0, 'reg_alpha': 0.0, 'reg_lambda': 0.4, 'silent': 1, 'objective': 'reg:squarederror', 'nthread': 12, 'seed': self.seed}
        tr, vl = self.create_features(train.copy(), valid_day)
        features = [f for f in tr.columns if f not in [
            'lag0_8', 'Id', 'ConfirmedCases', 'Fatalities', 'log0', 'log1', 'target0', 'target1',
            'ypred0', 'ypred1', 'Province_State', 'Country_Region', 'Date', 'ForecastId', 'cid',
            'geo', 'day', 'GDP_region', 'TRUE POPULATION', 'pct_in_largest_city', ' TFR ',
            ' Avg_age ', 'latitude', 'longitude', 'abs_latitude', 'temperature', 'humidity',
            'Personality_pdi', 'Personality_idv', 'Personality_mas', 'Personality_uai',
            'Personality_ltowvs', 'Personality_assertive', 'personality_perform',
            'personality_agreeableness', 'murder', 'High_rises', 'max_high_rises', 'AIR_CITIES',
            'AIR_AVG', 'continent_gdp_pc', 'continent_happiness', 'continent_generosity',
            'continent_corruption', 'continent_Life_expectancy']]
        self.features0 = features
        features = [f for f in tr.columns if f not in [
            'm0', 'm1', 'm2', 'm3', 'Id', 'ConfirmedCases', 'Fatalities', 'log0', 'log1',
            'target0', 'target1', 'ypred0', 'ypred1', 'Province_State', 'Country_Region', 'Date',
            'ForecastId', 'cid', 'geo', 'day', 'GDP_region', 'TRUE POPULATION',
            'pct_in_largest_city', ' TFR ', ' Avg_age ', 'latitude', 'longitude', 'abs_latitude',
            'temperature', 'humidity', 'Personality_pdi', 'Personality_idv', 'Personality_mas',
            'Personality_uai', 'Personality_ltowvs', 'Personality_assertive',
            'personality_perform', 'personality_agreeableness', 'murder', 'High_rises',
            'max_high_rises', 'AIR_CITIES', 'AIR_AVG', 'continent_gdp_pc', 'continent_happiness',
            'continent_generosity', 'continent_corruption', 'continent_Life_expectancy']]
        self.features1 = features
        nrounds0 = 680
        nrounds1 = 630
        dtrain = xgb.DMatrix(tr[self.features0], tr['target0'])
        param['seed'] = self.seed
        self.model0 = xgb.train(param, dtrain, nrounds0, verbose_eval=0)
        param['seed'] = self.seed + 1
        self.model1 = xgb.train(param, dtrain, nrounds0, verbose_eval=0)
        dtrain = xgb.DMatrix(tr[self.features1], tr['target1'])
        param['seed'] = self.seed
        self.model2 = xgb.train(param, dtrain, nrounds1, verbose_eval=0)
        param['seed'] = self.seed + 1
        self.model3 = xgb.train(param, dtrain, nrounds1, verbose_eval=0)
        self.vl = vl
        return 1

    def predict_first_day(self, day):
        self.day = day
        self.train_models(day)
        dvalid = xgb.DMatrix(self.vl[self.features0])
        ypred0 = (self.model0.predict(dvalid) + self.model1.predict(dvalid)) / 2
        dvalid = xgb.DMatrix(self.vl[self.features1])
        ypred1 = (self.model2.predict(dvalid) + self.model3.predict(dvalid)) / 2
        self.vl['ypred0'] = ypred0
        self.vl['ypred1'] = ypred1
        self.vl.loc[self.vl.ypred0 < self.vl.log0_max, 'ypred0'] = self.vl.loc[self.vl.ypred0 < self.vl.log0_max, 'log0_max']
        self.vl.loc[self.vl.ypred1 < self.vl.log1_max, 'ypred1'] = self.vl.loc[self.vl.ypred1 < self.vl.log1_max, 'log1_max']
        VALID = self.vl[['geo', 'day', 'ypred0', 'ypred1']].copy()
        VALID.columns = ['geo', 'day', 'ConfirmedCases', 'Fatalities']
        return VALID.reset_index(drop=True)

    def predict_next_day(self, yesterday):
        self.day += 1
        feats = ['geo', 'day']
        self.train['ypred0'] = pd.merge(self.train[feats], yesterday[feats + ['ConfirmedCases']], on=feats, how='left')['ConfirmedCases'].values
        self.train.loc[self.train.ypred0.notnull(), 'target0'] = self.train.loc[self.train.ypred0.notnull(), 'ypred0']
        self.train['ypred1'] = pd.merge(self.train[feats], yesterday[feats + ['Fatalities']], on=feats, how='left')['Fatalities'].values
        self.train.loc[self.train.ypred1.notnull(), 'target1'] = self.train.loc[self.train.ypred1.notnull(), 'ypred1']
        del self.train['ypred0'], self.train['ypred1']
        tr, vl = self.create_features(self.train.copy(), self.day)
        dvalid = xgb.DMatrix(vl[self.features0])
        ypred0 = (self.model0.predict(dvalid) + self.model1.predict(dvalid)) / 2
        dvalid = xgb.DMatrix(vl[self.features1])
        ypred1 = (self.model2.predict(dvalid) + self.model3.predict(dvalid)) / 2
        vl['ypred0'] = ypred0
        vl['ypred1'] = ypred1
        vl.loc[vl.ypred0 < vl.log0_max, 'ypred0'] = vl.loc[vl.ypred0 < vl.log0_max, 'log0_max']
        vl.loc[vl.ypred1 < vl.log1_max, 'ypred1'] = vl.loc[vl.ypred1 < vl.log1_max, 'log1_max']
        self.vl = vl
        VALID = vl[['geo', 'day', 'ypred0', 'ypred1']].copy()
        VALID.columns = ['geo', 'day', 'ConfirmedCases', 'Fatalities']
        return VALID.reset_index(drop=True)

TARGETS = ['ConfirmedCases', 'Fatalities']

def rmse(y_true, y_pred):
    return np.sqrt(mean_squared_error(y_true, y_pred))

df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df[TARGETS] = np.log1p(df[TARGETS].values)
sub_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')

def preprocess(df):
    for col in ['Country_Region', 'Province_State']:
        df[col].fillna('', inplace=True)
    df['Date'] = pd.to_datetime(df['Date'])
    df['day'] = df.Date.dt.dayofyear
    df['geo'] = ['_'.join(x) for x in zip(df['Country_Region'], df['Province_State'])]
    return df

df = preprocess(df)
sub_df = preprocess(sub_df)
sub_df['day'] -= df['day'].min()
df['day'] -= df['day'].min()
TEST_FIRST = sub_df[sub_df['Date'] > df['Date'].max()]['Date'].min()
print(TEST_FIRST)
TEST_DAYS = (sub_df['Date'].max() - TEST_FIRST).days + 1
TEST_FIRST = (TEST_FIRST - df['Date'].min()).days
print(TEST_FIRST, TEST_DAYS)

def get_blend(pred_dfs, weights, verbose=True):
    if verbose:
        for n1, n2 in [('giba1', 'ahmet'), ('giba2', 'ahmet')]:
            print(n1, n2, np.round(rmse(pred_dfs[n1][TARGETS[0]], pred_dfs[n2][TARGETS[0]]), 4), np.round(rmse(pred_dfs[n1][TARGETS[1]], pred_dfs[n2][TARGETS[1]]), 4))
    blend_df = pred_dfs['giba1'].copy()
blend_df[TARGETS] = 0 for name, pred_df in pred_dfs.items(): blend_df[TARGETS] += weights[name] * pred_df[TARGETS].values return blend_df cov_models = {'ahmet': CovidModelAhmet(), 'giba1': CovidModelGIBA(lag=1), 'giba2': CovidModelGIBA(lag=2)} weights = {'ahmet': 0.45, 'giba1': 0.275, 'giba2': 0.275} pred_dfs = {name: cm.predict_first_day(TEST_FIRST).sort_values('geo') for name, cm in cov_models.items()} blend_df = get_blend(pred_dfs, weights) eval_df = blend_df.copy() for d in range(1, TEST_DAYS): pred_dfs = {name: cm.predict_next_day(blend_df).sort_values('geo') for name, cm in cov_models.items()} blend_df = get_blend(pred_dfs, weights) eval_df = eval_df.append(blend_df) print(d, eval_df.shape, flush=True)
code
32068475/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068475/cell_7
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, mean_squared_log_error
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

class CovidModel:

    def __init__(self):
        pass

    def predict_first_day(self, date):
        return None

    def predict_next_day(self, yesterday_pred_df):
        return None

class CovidModelGIBA(CovidModel):

    def __init__(self, lag=1, seed=1):
        self.lag = lag
        self.seed = seed
        train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
        train['Date'] = pd.to_datetime(train['Date'])
        self.maxdate = str(train['Date'].max())[:10]
        self.testdate = str(train['Date'].max() + pd.Timedelta(days=1))[:10]
        train['Province_State'].fillna('', inplace=True)
        train['day'] = train.Date.dt.dayofyear
        self.day_min = train['day'].min()
        train['day'] -= self.day_min
        train['geo'] = ['_'.join(x) for x in zip(train['Country_Region'], train['Province_State'])]
        test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
        test['Date'] = pd.to_datetime(test['Date'])
        test['Province_State'].fillna('', inplace=True)
        test['day'] = test.Date.dt.dayofyear
        test['day'] -= self.day_min
        test['geo'] = ['_'.join(x) for x in zip(test['Country_Region'], test['Province_State'])]
        test['Id'] = -1
        test['ConfirmedCases'] = 0
        test['Fatalities'] = 0
        self.trainmaxday = train['day'].max()
        self.testday1 = train['day'].max() + 1
        self.testdayN = test['day'].max()
        publictest = test.loc[test.Date > train.Date.max()].copy()
        train = pd.concat((train, publictest), sort=False)
        train.sort_values(['Country_Region', 'Province_State', 'Date'], inplace=True)
        train = train.reset_index(drop=True)
        train['ForecastId'] = pd.merge(train, test, on=['Country_Region', 'Province_State', 'Date'], how='left')['ForecastId_y'].values
        train['cid'] = train['Country_Region'] + '_' + train['Province_State']
        train['log0'] = np.log1p(train['ConfirmedCases'])
        train['log1'] = np.log1p(train['Fatalities'])
        train = train.loc[(train.log0 > 0) | train.ForecastId.notnull() | (train.Date >= '2020-03-17')].copy()
        train = train.reset_index(drop=True)
        train['days_since_1case'] = train.groupby('cid')['Id'].cumcount()
        dt = pd.read_csv('../input/covid19-lockdown-dates-by-country/countryLockdowndates.csv')
        dt.columns = ['Country_Region', 'Province_State', 'Date', 'Type', 'Reference']
        dt = dt.loc[dt.Date == dt.Date]
        dt['Province_State'] = dt['Province_State'].fillna('')
        dt['Date'] = pd.to_datetime(dt['Date'])
        dt['Date'] = dt['Date'] + pd.Timedelta(days=8)
        dt['Type'] = pd.factorize(dt['Type'])[0]
        dt['cid'] = dt['Country_Region'] + '_' + dt['Province_State']
        del dt['Reference'], dt['Country_Region'], dt['Province_State']
        train = pd.merge(train, dt, on=['cid', 'Date'], how='left')
        train['Type'] = train.groupby('cid')['Type'].fillna(method='ffill')
        train['target0'] = np.log1p(train['ConfirmedCases'])
        train['target1'] = np.log1p(train['Fatalities'])
        self.train = train.copy()

    def create_features(self, df, valid_day):
        df = df.loc[df.day >= valid_day - 50].copy()
        df['lag0_1'] = df.groupby('cid')['target0'].shift(self.lag)
        df['lag0_1'] = df.groupby('cid')['lag0_1'].fillna(method='bfill')
        df['lag0_8'] = df.groupby('cid')['target0'].shift(8)
        df['lag0_8'] = df.groupby('cid')['lag0_8'].fillna(method='bfill')
        df['lag1_1'] = df.groupby('cid')['target1'].shift(self.lag)
        df['lag1_1'] = df.groupby('cid')['lag1_1'].fillna(method='bfill')
        df['m0'] = df.groupby('cid')['lag0_1'].rolling(2).mean().values
        df['m1'] = df.groupby('cid')['lag0_1'].rolling(3).mean().values
        df['m2'] = df.groupby('cid')['lag0_1'].rolling(4).mean().values
        df['m3'] = df.groupby('cid')['lag0_1'].rolling(5).mean().values
        df['m4'] = df.groupby('cid')['lag0_1'].rolling(7).mean().values
        df['m5'] = df.groupby('cid')['lag0_1'].rolling(10).mean().values
        df['m6'] = df.groupby('cid')['lag0_1'].rolling(12).mean().values
        df['m7'] = df.groupby('cid')['lag0_1'].rolling(16).mean().values
        df['m8'] = df.groupby('cid')['lag0_1'].rolling(20).mean().values
        df['m9'] = df.groupby('cid')['lag0_1'].rolling(25).mean().values
        df['n0'] = df.groupby('cid')['lag1_1'].rolling(2).mean().values
        df['n1'] = df.groupby('cid')['lag1_1'].rolling(3).mean().values
        df['n2'] = df.groupby('cid')['lag1_1'].rolling(4).mean().values
        df['n3'] = df.groupby('cid')['lag1_1'].rolling(5).mean().values
        df['n4'] = df.groupby('cid')['lag1_1'].rolling(7).mean().values
        df['n5'] = df.groupby('cid')['lag1_1'].rolling(10).mean().values
        df['n6'] = df.groupby('cid')['lag1_1'].rolling(12).mean().values
        df['n7'] = df.groupby('cid')['lag1_1'].rolling(16).mean().values
        df['n8'] = df.groupby('cid')['lag1_1'].rolling(20).mean().values
        df['m0'] = df.groupby('cid')['m0'].fillna(method='bfill')
        df['m1'] = df.groupby('cid')['m1'].fillna(method='bfill')
        df['m2'] = df.groupby('cid')['m2'].fillna(method='bfill')
        df['m3'] = df.groupby('cid')['m3'].fillna(method='bfill')
        df['m4'] = df.groupby('cid')['m4'].fillna(method='bfill')
        df['m5'] = df.groupby('cid')['m5'].fillna(method='bfill')
        df['m6'] = df.groupby('cid')['m6'].fillna(method='bfill')
        df['m7'] = df.groupby('cid')['m7'].fillna(method='bfill')
        df['m8'] = df.groupby('cid')['m8'].fillna(method='bfill')
        df['m9'] = df.groupby('cid')['m9'].fillna(method='bfill')
        df['n0'] = df.groupby('cid')['n0'].fillna(method='bfill')
        df['n1'] = df.groupby('cid')['n1'].fillna(method='bfill')
        df['n2'] = df.groupby('cid')['n2'].fillna(method='bfill')
        df['n3'] = df.groupby('cid')['n3'].fillna(method='bfill')
        df['n4'] = df.groupby('cid')['n4'].fillna(method='bfill')
        df['n5'] = df.groupby('cid')['n5'].fillna(method='bfill')
        df['n6'] = df.groupby('cid')['n6'].fillna(method='bfill')
        df['n7'] = df.groupby('cid')['n7'].fillna(method='bfill')
        df['n8'] = df.groupby('cid')['n8'].fillna(method='bfill')
        df['flag_China'] = 1 * (df['Country_Region'] == 'China')
        df['flag_US'] = 1 * (df['Country_Region'] == 'US')
        df['flag_Kosovo_'] = 1 * (df['cid'] == 'Kosovo_')
        df['flag_Korea'] = 1 * (df['cid'] == 'Korea, South_')
        df['flag_Nepal_'] = 1 * (df['cid'] == 'Nepal_')
        df['flag_Holy See_'] = 1 * (df['cid'] == 'Holy See_')
        df['flag_Suriname_'] = 1 * (df['cid'] == 'Suriname_')
        df['flag_Ghana_'] = 1 * (df['cid'] == 'Ghana_')
        df['flag_Togo_'] = 1 * (df['cid'] == 'Togo_')
        df['flag_Malaysia_'] = 1 * (df['cid'] == 'Malaysia_')
        df['flag_US_Rhode'] = 1 * (df['cid'] == 'US_Rhode Island')
        df['flag_Bolivia_'] = 1 * (df['cid'] == 'Bolivia_')
        df['flag_China_Tib'] = 1 * (df['cid'] == 'China_Tibet')
        df['flag_Bahrain_'] = 1 * (df['cid'] == 'Bahrain_')
        df['flag_Honduras_'] = 1 * (df['cid'] == 'Honduras_')
        df['flag_Bangladesh'] = 1 * (df['cid'] == 'Bangladesh_')
        df['flag_Paraguay_'] = 1 * (df['cid'] == 'Paraguay_')
        tr = df.loc[df.day < valid_day].copy()
        vl = df.loc[df.day == valid_day].copy()
        tr = tr.loc[tr.lag0_1 > 0].copy()
        maptarget0 = tr.groupby('cid')['target0'].agg(log0_max='max').reset_index()
        maptarget1 = tr.groupby('cid')['target1'].agg(log1_max='max').reset_index()
        vl['log0_max'] = pd.merge(vl, maptarget0, on='cid', how='left')['log0_max'].values
        vl['log1_max'] = pd.merge(vl, maptarget1, on='cid', how='left')['log1_max'].values
        vl['log0_max'] = vl['log0_max'].fillna(0)
        vl['log1_max'] = vl['log1_max'].fillna(0)
        return (tr, vl)

    def train_models(self, valid_day=10):
        train = self.train.copy()
        train.loc[(train.cid == 'China_Guizhou') & (train.Date == '2020-03-17'), 'target0'] = np.log1p(146)
        train.loc[(train.cid == 'Guyana_') & (train.Date >= '2020-03-22') & (train.Date <= '2020-03-30'), 'target0'] = np.log1p(12)
        train.loc[(train.cid == 'US_Virgin Islands') & (train.Date >= '2020-03-29') & (train.Date <= '2020-03-29'), 'target0'] = np.log1p(24)
        train.loc[(train.cid == 'US_Virgin Islands') & (train.Date >= '2020-03-30') & (train.Date <= '2020-03-30'), 'target0'] = np.log1p(27)
        train.loc[(train.cid == 'Iceland_') & (train.Date >= '2020-03-15') & (train.Date <= '2020-03-15'), 'target1'] = np.log1p(0)
        train.loc[(train.cid == 'Kazakhstan_') & (train.Date >= '2020-03-20') & (train.Date <= '2020-03-20'), 'target1'] = np.log1p(0)
        train.loc[(train.cid == 'Serbia_') & (train.Date >= '2020-03-26') & (train.Date <= '2020-03-26'), 'target1'] = np.log1p(5)
        train.loc[(train.cid == 'Serbia_') & (train.Date >= '2020-03-27') & (train.Date <= '2020-03-27'), 'target1'] = np.log1p(6)
        train.loc[(train.cid == 'Slovakia_') & (train.Date >= '2020-03-22') & (train.Date <= '2020-03-31'), 'target1'] = np.log1p(1)
        train.loc[(train.cid == 'US_Hawaii') & (train.Date >= '2020-03-25') & (train.Date <= '2020-03-31'), 'target1'] = np.log1p(1)
        param = {'subsample': 1.0, 'colsample_bytree': 0.85, 'max_depth': 5, 'gamma': 0.0, 'learning_rate': 0.01, 'min_child_weight': 6.0, 'reg_alpha': 0.0, 'reg_lambda': 0.4, 'silent': 1, 'objective': 'reg:squarederror', 'nthread': 12, 'seed': self.seed}
        tr, vl = self.create_features(train.copy(), valid_day)
        features = [f for f in tr.columns if f not in ['lag0_8', 'Id', 'ConfirmedCases', 'Fatalities', 'log0', 'log1', 'target0', 'target1', 'ypred0', 'ypred1', 'Province_State', 'Country_Region', 'Date', 'ForecastId', 'cid', 'geo', 'day', 'GDP_region', 'TRUE POPULATION', 'pct_in_largest_city', ' TFR ', ' Avg_age ', 'latitude', 'longitude', 'abs_latitude', 'temperature', 'humidity', 'Personality_pdi', 'Personality_idv', 'Personality_mas', 'Personality_uai', 'Personality_ltowvs', 'Personality_assertive', 'personality_perform', 'personality_agreeableness', 'murder', 'High_rises', 'max_high_rises', 'AIR_CITIES', 'AIR_AVG', 'continent_gdp_pc', 'continent_happiness', 'continent_generosity', 'continent_corruption', 'continent_Life_expectancy']]
        self.features0 = features
        features = [f for f in tr.columns if f not in ['m0', 'm1', 'm2', 'm3', 'Id', 'ConfirmedCases', 'Fatalities', 'log0', 'log1', 'target0', 'target1', 'ypred0', 'ypred1', 'Province_State', 'Country_Region', 'Date', 'ForecastId', 'cid', 'geo', 'day', 'GDP_region', 'TRUE POPULATION', 'pct_in_largest_city', ' TFR ', ' Avg_age ', 'latitude', 'longitude', 'abs_latitude', 'temperature', 'humidity', 'Personality_pdi', 'Personality_idv', 'Personality_mas', 'Personality_uai', 'Personality_ltowvs', 'Personality_assertive', 'personality_perform', 'personality_agreeableness', 'murder', 'High_rises', 'max_high_rises', 'AIR_CITIES', 'AIR_AVG', 'continent_gdp_pc', 'continent_happiness', 'continent_generosity', 'continent_corruption', 'continent_Life_expectancy']]
        self.features1 = features
        nrounds0 = 680
        nrounds1 = 630
        dtrain = xgb.DMatrix(tr[self.features0], tr['target0'])
        param['seed'] = self.seed
        self.model0 = xgb.train(param, dtrain, nrounds0, verbose_eval=0)
        param['seed'] = self.seed + 1
        self.model1 = xgb.train(param, dtrain, nrounds0, verbose_eval=0)
        dtrain = xgb.DMatrix(tr[self.features1], tr['target1'])
        param['seed'] = self.seed
        self.model2 = xgb.train(param, dtrain, nrounds1, verbose_eval=0)
        param['seed'] = self.seed + 1
        self.model3 = xgb.train(param, dtrain, nrounds1, verbose_eval=0)
        self.vl = vl
        return 1

    def predict_first_day(self, day):
        self.day = day
        self.train_models(day)
        dvalid = xgb.DMatrix(self.vl[self.features0])
        ypred0 = (self.model0.predict(dvalid) + self.model1.predict(dvalid)) / 2
        dvalid = xgb.DMatrix(self.vl[self.features1])
        ypred1 = (self.model2.predict(dvalid) + self.model3.predict(dvalid)) / 2
        self.vl['ypred0'] = ypred0
        self.vl['ypred1'] = ypred1
        self.vl.loc[self.vl.ypred0 < self.vl.log0_max, 'ypred0'] = self.vl.loc[self.vl.ypred0 < self.vl.log0_max, 'log0_max']
        self.vl.loc[self.vl.ypred1 < self.vl.log1_max, 'ypred1'] = self.vl.loc[self.vl.ypred1 < self.vl.log1_max, 'log1_max']
        VALID = self.vl[['geo', 'day', 'ypred0', 'ypred1']].copy()
        VALID.columns = ['geo', 'day', 'ConfirmedCases', 'Fatalities']
        return VALID.reset_index(drop=True)

    def predict_next_day(self, yesterday):
        self.day += 1
        feats = ['geo', 'day']
        self.train['ypred0'] = pd.merge(self.train[feats], yesterday[feats + ['ConfirmedCases']], on=feats, how='left')['ConfirmedCases'].values
        self.train.loc[self.train.ypred0.notnull(), 'target0'] = self.train.loc[self.train.ypred0.notnull(), 'ypred0']
        self.train['ypred1'] = pd.merge(self.train[feats], yesterday[feats + ['Fatalities']], on=feats, how='left')['Fatalities'].values
        self.train.loc[self.train.ypred1.notnull(), 'target1'] = self.train.loc[self.train.ypred1.notnull(), 'ypred1']
        del self.train['ypred0'], self.train['ypred1']
        tr, vl = self.create_features(self.train.copy(), self.day)
        dvalid = xgb.DMatrix(vl[self.features0])
        ypred0 = (self.model0.predict(dvalid) + self.model1.predict(dvalid)) / 2
        dvalid = xgb.DMatrix(vl[self.features1])
        ypred1 = (self.model2.predict(dvalid) + self.model3.predict(dvalid)) / 2
        vl['ypred0'] = ypred0
        vl['ypred1'] = ypred1
        vl.loc[vl.ypred0 < vl.log0_max, 'ypred0'] = vl.loc[vl.ypred0 < vl.log0_max, 'log0_max']
        vl.loc[vl.ypred1 < vl.log1_max, 'ypred1'] = vl.loc[vl.ypred1 < vl.log1_max, 'log1_max']
        self.vl = vl
        VALID = vl[['geo', 'day', 'ypred0', 'ypred1']].copy()
        VALID.columns = ['geo', 'day', 'ConfirmedCases', 'Fatalities']
        return VALID.reset_index(drop=True)

TARGETS = ['ConfirmedCases', 'Fatalities']

def rmse(y_true, y_pred):
    return np.sqrt(mean_squared_error(y_true, y_pred))

df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df[TARGETS] = np.log1p(df[TARGETS].values)
sub_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')

def preprocess(df):
    for col in ['Country_Region', 'Province_State']:
        df[col].fillna('', inplace=True)
    df['Date'] = pd.to_datetime(df['Date'])
    df['day'] = df.Date.dt.dayofyear
    df['geo'] = ['_'.join(x) for x in zip(df['Country_Region'], df['Province_State'])]
    return df

df = preprocess(df)
sub_df = preprocess(sub_df)
sub_df['day'] -= df['day'].min()
df['day'] -= df['day'].min()
TEST_FIRST = sub_df[sub_df['Date'] > df['Date'].max()]['Date'].min()
TEST_DAYS = (sub_df['Date'].max() - TEST_FIRST).days + 1
TEST_FIRST = (TEST_FIRST - df['Date'].min()).days

def get_blend(pred_dfs, weights, verbose=True):
    blend_df = pred_dfs['giba1'].copy()
    blend_df[TARGETS] = 0
    for name, pred_df in pred_dfs.items():
        blend_df[TARGETS] += weights[name] * pred_df[TARGETS].values
    return blend_df

cov_models = {'ahmet': CovidModelAhmet(), 'giba1': CovidModelGIBA(lag=1), 'giba2': CovidModelGIBA(lag=2)}
weights = {'ahmet': 0.45, 'giba1': 0.275, 'giba2': 0.275}
pred_dfs = {name: cm.predict_first_day(TEST_FIRST).sort_values('geo') for name, cm in cov_models.items()}
blend_df = get_blend(pred_dfs, weights)
eval_df = blend_df.copy()
for d in range(1, TEST_DAYS):
    pred_dfs = {name: cm.predict_next_day(blend_df).sort_values('geo') for name, cm in cov_models.items()}
    blend_df = get_blend(pred_dfs, weights)
    eval_df = eval_df.append(blend_df)
eval_df.head()
code
32068475/cell_8
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, mean_squared_log_error
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

class CovidModel:

    def __init__(self):
        pass

    def predict_first_day(self, date):
        return None

    def predict_next_day(self, yesterday_pred_df):
        return None

class CovidModelGIBA(CovidModel):

    def __init__(self, lag=1, seed=1):
        self.lag = lag
        self.seed = seed
        train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
        train['Date'] = pd.to_datetime(train['Date'])
        self.maxdate = str(train['Date'].max())[:10]
        self.testdate = str(train['Date'].max() + pd.Timedelta(days=1))[:10]
        train['Province_State'].fillna('', inplace=True)
        train['day'] = train.Date.dt.dayofyear
        self.day_min = train['day'].min()
        train['day'] -= self.day_min
        train['geo'] = ['_'.join(x) for x in zip(train['Country_Region'], train['Province_State'])]
        test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
        test['Date'] = pd.to_datetime(test['Date'])
        test['Province_State'].fillna('', inplace=True)
        test['day'] = test.Date.dt.dayofyear
        test['day'] -= self.day_min
        test['geo'] = ['_'.join(x) for x in zip(test['Country_Region'], test['Province_State'])]
        test['Id'] = -1
        test['ConfirmedCases'] = 0
        test['Fatalities'] = 0
        self.trainmaxday = train['day'].max()
        self.testday1 = train['day'].max() + 1
        self.testdayN = test['day'].max()
        publictest = test.loc[test.Date > train.Date.max()].copy()
        train = pd.concat((train, publictest), sort=False)
        train.sort_values(['Country_Region', 'Province_State', 'Date'], inplace=True)
        train = train.reset_index(drop=True)
        train['ForecastId'] = pd.merge(train, test, on=['Country_Region', 'Province_State', 'Date'], how='left')['ForecastId_y'].values
        train['cid'] = train['Country_Region'] + '_' + train['Province_State']
        train['log0'] = np.log1p(train['ConfirmedCases'])
        train['log1'] = np.log1p(train['Fatalities'])
        train = train.loc[(train.log0 > 0) | train.ForecastId.notnull() | (train.Date >= '2020-03-17')].copy()
        train = train.reset_index(drop=True)
        train['days_since_1case'] = train.groupby('cid')['Id'].cumcount()
        dt = pd.read_csv('../input/covid19-lockdown-dates-by-country/countryLockdowndates.csv')
        dt.columns = ['Country_Region', 'Province_State', 'Date', 'Type', 'Reference']
        dt = dt.loc[dt.Date == dt.Date]
        dt['Province_State'] = dt['Province_State'].fillna('')
        dt['Date'] = pd.to_datetime(dt['Date'])
        dt['Date'] = dt['Date'] + pd.Timedelta(days=8)
        dt['Type'] = pd.factorize(dt['Type'])[0]
        dt['cid'] = dt['Country_Region'] + '_' + dt['Province_State']
        del dt['Reference'], dt['Country_Region'], dt['Province_State']
        train = pd.merge(train, dt, on=['cid', 'Date'], how='left')
        train['Type'] = train.groupby('cid')['Type'].fillna(method='ffill')
        train['target0'] = np.log1p(train['ConfirmedCases'])
        train['target1'] = np.log1p(train['Fatalities'])
        self.train = train.copy()

    def create_features(self, df, valid_day):
        df = df.loc[df.day >= valid_day - 50].copy()
        df['lag0_1'] = df.groupby('cid')['target0'].shift(self.lag)
        df['lag0_1'] = df.groupby('cid')['lag0_1'].fillna(method='bfill')
        df['lag0_8'] = df.groupby('cid')['target0'].shift(8)
        df['lag0_8'] = df.groupby('cid')['lag0_8'].fillna(method='bfill')
        df['lag1_1'] = df.groupby('cid')['target1'].shift(self.lag)
        df['lag1_1'] = df.groupby('cid')['lag1_1'].fillna(method='bfill')
        df['m0'] = df.groupby('cid')['lag0_1'].rolling(2).mean().values
        df['m1'] = df.groupby('cid')['lag0_1'].rolling(3).mean().values
        df['m2'] = df.groupby('cid')['lag0_1'].rolling(4).mean().values
        df['m3'] = df.groupby('cid')['lag0_1'].rolling(5).mean().values
        df['m4'] = df.groupby('cid')['lag0_1'].rolling(7).mean().values
        df['m5'] = df.groupby('cid')['lag0_1'].rolling(10).mean().values
        df['m6'] = df.groupby('cid')['lag0_1'].rolling(12).mean().values
        df['m7'] = df.groupby('cid')['lag0_1'].rolling(16).mean().values
        df['m8'] = df.groupby('cid')['lag0_1'].rolling(20).mean().values
        df['m9'] = df.groupby('cid')['lag0_1'].rolling(25).mean().values
        df['n0'] = df.groupby('cid')['lag1_1'].rolling(2).mean().values
        df['n1'] = df.groupby('cid')['lag1_1'].rolling(3).mean().values
        df['n2'] = df.groupby('cid')['lag1_1'].rolling(4).mean().values
        df['n3'] = df.groupby('cid')['lag1_1'].rolling(5).mean().values
        df['n4'] = df.groupby('cid')['lag1_1'].rolling(7).mean().values
        df['n5'] = df.groupby('cid')['lag1_1'].rolling(10).mean().values
        df['n6'] = df.groupby('cid')['lag1_1'].rolling(12).mean().values
        df['n7'] = df.groupby('cid')['lag1_1'].rolling(16).mean().values
        df['n8'] = df.groupby('cid')['lag1_1'].rolling(20).mean().values
        df['m0'] = df.groupby('cid')['m0'].fillna(method='bfill')
        df['m1'] = df.groupby('cid')['m1'].fillna(method='bfill')
        df['m2'] = df.groupby('cid')['m2'].fillna(method='bfill')
        df['m3'] = df.groupby('cid')['m3'].fillna(method='bfill')
        df['m4'] = df.groupby('cid')['m4'].fillna(method='bfill')
        df['m5'] = df.groupby('cid')['m5'].fillna(method='bfill')
        df['m6'] = df.groupby('cid')['m6'].fillna(method='bfill')
        df['m7'] = df.groupby('cid')['m7'].fillna(method='bfill')
        df['m8'] = df.groupby('cid')['m8'].fillna(method='bfill')
        df['m9'] = df.groupby('cid')['m9'].fillna(method='bfill')
        df['n0'] = df.groupby('cid')['n0'].fillna(method='bfill')
        df['n1'] = df.groupby('cid')['n1'].fillna(method='bfill')
        df['n2'] = df.groupby('cid')['n2'].fillna(method='bfill')
        df['n3'] = df.groupby('cid')['n3'].fillna(method='bfill')
        df['n4'] = df.groupby('cid')['n4'].fillna(method='bfill')
        df['n5'] = df.groupby('cid')['n5'].fillna(method='bfill')
        df['n6'] = df.groupby('cid')['n6'].fillna(method='bfill')
        df['n7'] = df.groupby('cid')['n7'].fillna(method='bfill')
        df['n8'] = df.groupby('cid')['n8'].fillna(method='bfill')
        df['flag_China'] = 1 * (df['Country_Region'] == 'China')
        df['flag_US'] = 1 * (df['Country_Region'] == 'US')
        df['flag_Kosovo_'] = 1 * (df['cid'] == 'Kosovo_')
        df['flag_Korea'] = 1 * (df['cid'] == 'Korea, South_')
        df['flag_Nepal_'] = 1 * (df['cid'] == 'Nepal_')
        df['flag_Holy See_'] = 1 * (df['cid'] == 'Holy See_')
        df['flag_Suriname_'] = 1 * (df['cid'] == 'Suriname_')
        df['flag_Ghana_'] = 1 * (df['cid'] == 'Ghana_')
        df['flag_Togo_'] = 1 * (df['cid'] == 'Togo_')
        df['flag_Malaysia_'] = 1 * (df['cid'] == 'Malaysia_')
        df['flag_US_Rhode'] = 1 * (df['cid'] == 'US_Rhode Island')
        df['flag_Bolivia_'] = 1 * (df['cid'] == 'Bolivia_')
        df['flag_China_Tib'] = 1 * (df['cid'] == 'China_Tibet')
        df['flag_Bahrain_'] = 1 * (df['cid'] == 'Bahrain_')
        df['flag_Honduras_'] = 1 * (df['cid'] == 'Honduras_')
        df['flag_Bangladesh'] = 1 * (df['cid'] == 'Bangladesh_')
        df['flag_Paraguay_'] = 1 * (df['cid'] == 'Paraguay_')
        tr = df.loc[df.day < valid_day].copy()
        vl = df.loc[df.day == valid_day].copy()
        tr = tr.loc[tr.lag0_1 > 0].copy()
        maptarget0 = tr.groupby('cid')['target0'].agg(log0_max='max').reset_index()
        maptarget1 = tr.groupby('cid')['target1'].agg(log1_max='max').reset_index()
        vl['log0_max'] = pd.merge(vl, maptarget0, on='cid', how='left')['log0_max'].values
        vl['log1_max'] = pd.merge(vl, maptarget1, on='cid', how='left')['log1_max'].values
        vl['log0_max'] = vl['log0_max'].fillna(0)
        vl['log1_max'] = vl['log1_max'].fillna(0)
        return (tr, vl)

    def train_models(self, valid_day=10):
        train = self.train.copy()
        train.loc[(train.cid == 'China_Guizhou') & (train.Date == '2020-03-17'), 'target0'] = np.log1p(146)
        train.loc[(train.cid == 'Guyana_') & (train.Date >= '2020-03-22') & (train.Date <= '2020-03-30'), 'target0'] = np.log1p(12)
        train.loc[(train.cid == 'US_Virgin Islands') & (train.Date >= '2020-03-29') & (train.Date <= '2020-03-29'), 'target0'] = np.log1p(24)
        train.loc[(train.cid == 'US_Virgin Islands') & (train.Date >= '2020-03-30') & (train.Date <= '2020-03-30'), 'target0'] = np.log1p(27)
        train.loc[(train.cid == 'Iceland_') & (train.Date >= '2020-03-15') & (train.Date <= '2020-03-15'), 'target1'] = np.log1p(0)
        train.loc[(train.cid == 'Kazakhstan_') & (train.Date >= '2020-03-20') & (train.Date <= '2020-03-20'), 'target1'] = np.log1p(0)
        train.loc[(train.cid == 'Serbia_') & (train.Date >= '2020-03-26') & (train.Date <= '2020-03-26'), 'target1'] = np.log1p(5)
        train.loc[(train.cid == 'Serbia_') & (train.Date >= '2020-03-27') & (train.Date <= '2020-03-27'), 'target1'] = np.log1p(6)
        train.loc[(train.cid == 'Slovakia_') & (train.Date >= '2020-03-22') & (train.Date <= '2020-03-31'), 'target1'] = np.log1p(1)
        train.loc[(train.cid == 'US_Hawaii') & (train.Date >= '2020-03-25') & (train.Date <= '2020-03-31'), 'target1'] = np.log1p(1)
        param = {'subsample': 1.0, 'colsample_bytree': 0.85, 'max_depth': 5, 'gamma': 0.0, 'learning_rate': 0.01, 'min_child_weight': 6.0, 'reg_alpha': 0.0, 'reg_lambda': 0.4, 'silent': 1, 'objective': 'reg:squarederror', 'nthread': 12, 'seed': self.seed}
        tr, vl = self.create_features(train.copy(), valid_day)
        features = [f for f in tr.columns if f not in ['lag0_8', 'Id', 'ConfirmedCases', 'Fatalities', 'log0', 'log1', 'target0', 'target1', 'ypred0', 'ypred1', 'Province_State', 'Country_Region', 'Date', 'ForecastId', 'cid', 'geo', 'day', 'GDP_region', 'TRUE POPULATION', 'pct_in_largest_city', ' TFR ', ' Avg_age ', 'latitude', 'longitude', 'abs_latitude', 'temperature', 'humidity', 'Personality_pdi', 'Personality_idv', 'Personality_mas', 'Personality_uai', 'Personality_ltowvs', 'Personality_assertive', 'personality_perform', 'personality_agreeableness', 'murder', 'High_rises', 'max_high_rises', 'AIR_CITIES', 'AIR_AVG', 'continent_gdp_pc', 'continent_happiness', 'continent_generosity', 'continent_corruption', 'continent_Life_expectancy']]
        self.features0 = features
        features = [f for f in tr.columns if f not in ['m0', 'm1', 'm2', 'm3', 'Id', 'ConfirmedCases', 'Fatalities', 'log0', 'log1', 'target0', 'target1', 'ypred0', 'ypred1', 'Province_State', 'Country_Region', 'Date', 'ForecastId', 'cid', 'geo', 'day', 'GDP_region', 'TRUE POPULATION', 'pct_in_largest_city', ' TFR ', ' Avg_age ', 'latitude', 'longitude', 'abs_latitude', 'temperature', 'humidity', 'Personality_pdi', 'Personality_idv', 'Personality_mas', 'Personality_uai', 'Personality_ltowvs', 'Personality_assertive', 'personality_perform', 'personality_agreeableness', 'murder', 'High_rises', 'max_high_rises', 'AIR_CITIES', 'AIR_AVG', 'continent_gdp_pc', 'continent_happiness', 'continent_generosity', 'continent_corruption', 'continent_Life_expectancy']]
        self.features1 = features
        nrounds0 = 680
        nrounds1 = 630
        dtrain = xgb.DMatrix(tr[self.features0], tr['target0'])
        param['seed'] = self.seed
        self.model0 = xgb.train(param, dtrain, nrounds0, verbose_eval=0)
        param['seed'] = self.seed + 1
        self.model1 = xgb.train(param, dtrain, nrounds0, verbose_eval=0)
        dtrain = xgb.DMatrix(tr[self.features1], tr['target1'])
        param['seed'] = self.seed
        self.model2 = xgb.train(param, dtrain, nrounds1, verbose_eval=0)
        param['seed'] = self.seed + 1
        self.model3 = xgb.train(param, dtrain, nrounds1, verbose_eval=0)
        self.vl = vl
        return 1

    def predict_first_day(self, day):
        self.day = day
        self.train_models(day)
        dvalid = xgb.DMatrix(self.vl[self.features0])
        ypred0 = (self.model0.predict(dvalid) + self.model1.predict(dvalid)) / 2
        dvalid = xgb.DMatrix(self.vl[self.features1])
        ypred1 = (self.model2.predict(dvalid) + self.model3.predict(dvalid)) / 2
        self.vl['ypred0'] = ypred0
        self.vl['ypred1'] = ypred1
        self.vl.loc[self.vl.ypred0 < self.vl.log0_max, 'ypred0'] = self.vl.loc[self.vl.ypred0 < self.vl.log0_max, 'log0_max']
        self.vl.loc[self.vl.ypred1 < self.vl.log1_max, 'ypred1'] = self.vl.loc[self.vl.ypred1 < self.vl.log1_max, 'log1_max']
        VALID = self.vl[['geo', 'day', 'ypred0', 'ypred1']].copy()
        VALID.columns = ['geo', 'day', 'ConfirmedCases', 'Fatalities']
        return VALID.reset_index(drop=True)

    def predict_next_day(self, yesterday):
        self.day += 1
        feats = ['geo', 'day']
        self.train['ypred0'] = pd.merge(self.train[feats], yesterday[feats + ['ConfirmedCases']], on=feats, how='left')['ConfirmedCases'].values
        self.train.loc[self.train.ypred0.notnull(), 'target0'] = self.train.loc[self.train.ypred0.notnull(), 'ypred0']
        self.train['ypred1'] = pd.merge(self.train[feats], yesterday[feats + ['Fatalities']], on=feats, how='left')['Fatalities'].values
        self.train.loc[self.train.ypred1.notnull(), 'target1'] = self.train.loc[self.train.ypred1.notnull(), 'ypred1']
        del self.train['ypred0'], self.train['ypred1']
        tr, vl = self.create_features(self.train.copy(), self.day)
        dvalid = xgb.DMatrix(vl[self.features0])
        ypred0 = (self.model0.predict(dvalid) + self.model1.predict(dvalid)) / 2
        dvalid = xgb.DMatrix(vl[self.features1])
        ypred1 = (self.model2.predict(dvalid) + self.model3.predict(dvalid)) / 2
        vl['ypred0'] = ypred0
        vl['ypred1'] = ypred1
        vl.loc[vl.ypred0 < vl.log0_max, 'ypred0'] = vl.loc[vl.ypred0 < vl.log0_max, 'log0_max']
        vl.loc[vl.ypred1 < vl.log1_max, 'ypred1'] = vl.loc[vl.ypred1 < vl.log1_max, 'log1_max']
        self.vl = vl
        VALID = vl[['geo', 'day', 'ypred0', 'ypred1']].copy()
        VALID.columns = ['geo', 'day', 'ConfirmedCases', 'Fatalities']
        return VALID.reset_index(drop=True)

TARGETS = ['ConfirmedCases', 'Fatalities']

def rmse(y_true, y_pred):
    return np.sqrt(mean_squared_error(y_true, y_pred))

df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df[TARGETS] = np.log1p(df[TARGETS].values)
sub_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')

def preprocess(df):
    for col in ['Country_Region', 'Province_State']:
        df[col].fillna('', inplace=True)
    df['Date'] = pd.to_datetime(df['Date'])
    df['day'] = df.Date.dt.dayofyear
    df['geo'] = ['_'.join(x) for x in zip(df['Country_Region'], df['Province_State'])]
    return df

df = preprocess(df)
sub_df = preprocess(sub_df)
sub_df['day'] -= df['day'].min()
df['day'] -= df['day'].min()
TEST_FIRST = sub_df[sub_df['Date'] > df['Date'].max()]['Date'].min()
TEST_DAYS = (sub_df['Date'].max() - TEST_FIRST).days + 1
TEST_FIRST = (TEST_FIRST - df['Date'].min()).days

def get_blend(pred_dfs, weights, verbose=True):
    blend_df = pred_dfs['giba1'].copy()
    blend_df[TARGETS] = 0
    for name, pred_df in pred_dfs.items():
        blend_df[TARGETS] += weights[name] * pred_df[TARGETS].values
    return blend_df

cov_models = {'ahmet': CovidModelAhmet(), 'giba1': CovidModelGIBA(lag=1), 'giba2': CovidModelGIBA(lag=2)}
weights = {'ahmet': 0.45, 'giba1': 0.275, 'giba2': 0.275}
pred_dfs = {name: cm.predict_first_day(TEST_FIRST).sort_values('geo') for name, cm in cov_models.items()}
blend_df = get_blend(pred_dfs, weights)
eval_df = blend_df.copy()
for d in range(1, TEST_DAYS):
    pred_dfs = {name: cm.predict_next_day(blend_df).sort_values('geo') for name, cm in cov_models.items()}
    blend_df = get_blend(pred_dfs, weights)
    eval_df = eval_df.append(blend_df)
print(sub_df.shape)
sub_df = sub_df.merge(df.append(eval_df, sort=False), on=['geo', 'day'], how='left')
print(sub_df.shape)
print(sub_df[TARGETS].isnull().mean())
code
122260046/cell_9
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
(train.shape, test.shape)
code
122260046/cell_25
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, BatchNormalization
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
(train.shape, test.shape)
X = train.drop('label', axis=1).values / 255.0
X = X.reshape(-1, 28, 28, 1)
X.shape
Y = train['label'].values
Y.shape
from tensorflow.keras.utils import to_categorical
Y = to_categorical(Y, num_classes=10)
Y.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, BatchNormalization
model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=5, batch_size=200)
code
122260046/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
train.info()
code
122260046/cell_29
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
(train.shape, test.shape)
A = test.values / 255.0
A = A.reshape(-1, 28, 28, 1)
A.shape
ids = [i + 1 for i in test.index]
len(ids)
code
122260046/cell_26
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, BatchNormalization
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
(train.shape, test.shape)
X = train.drop('label', axis=1).values / 255.0
X = X.reshape(-1, 28, 28, 1)
X.shape
Y = train['label'].values
Y.shape
from tensorflow.keras.utils import to_categorical
Y = to_categorical(Y, num_classes=10)
Y.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, BatchNormalization
model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=5, batch_size=200)
pd.DataFrame(history.history)
code
122260046/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
(train.shape, test.shape)

def select_pic(i):
    arr = np.array(test[i:i + 1])
    pix = arr.reshape(arr.shape[0], 28, 28)
    img = pix[0]

for i in range(0, 1):
    r = np.random.randint(i, 10000)
    select_pic(r)
code