Schema (one record per notebook cell):
path: string, length 13 to 17
screenshot_names: list of strings, 1 to 873 items
code: string, length 0 to 40.4k
cell_type: string, 1 distinct value ("code")
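Each record below gives a cell's path (notebook id/cell id), its list of output screenshots, the cell's source code, and its cell_type. A minimal sketch of iterating over a dump with this schema via the Hugging Face datasets library; the dataset identifier below is a hypothetical placeholder, not the real one:

# Minimal sketch: load and inspect records with this schema.
# 'user/kaggle-notebook-cells' is a hypothetical identifier, not the real dataset name.
from datasets import load_dataset

ds = load_dataset('user/kaggle-notebook-cells', split='train')
for row in ds.select(range(3)):
    print(row['path'], row['cell_type'], len(row['screenshot_names']))
    print(row['code'][:200])  # first 200 characters of the cell's source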
17109112/cell_10
[ "text_plain_output_1.png" ]
from torchvision import models, transforms, datasets
import os

data_dir = '../input/dogscats/dogscats/dogscats/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
dsets['train'].class_to_idx
code
17109112/cell_12
[ "text_plain_output_1.png" ]
from torchvision import models, transforms, datasets
import os

data_dir = '../input/dogscats/dogscats/dogscats/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), normalize])
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format) for x in ['train', 'valid']}
dset_classes = dsets['valid'].classes
dset_classes
code
17109112/cell_5
[ "text_plain_output_1.png" ]
import torch

torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Using gpu : %s ' % torch.cuda.is_available())
code
17109112/cell_36
[ "text_plain_output_1.png" ]
from torchvision import models, transforms, datasets
import torch

torch.__version__
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
inputs_try.shape  # inputs_try and labels_try come from an earlier cell of this notebook
model_vgg = models.vgg16(pretrained=True)
inputs_try, labels_try = (inputs_try.to(device), labels_try.to(device))
model_vgg = model_vgg.to(device)
print(model_vgg)
code
74040532/cell_9
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from astropy.io import fits
from skimage import data, io, filters
import matplotlib.pyplot as plt
import skimage
import skimage.feature  # corner_foerstner is in skimage.feature, which needs its own import

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
WIDE_SCALE_HST = HST_SCI[700:4000, 700:4000]
ZOOMED_SCALE_HST = HST_SCI[1800:3400, 1700:3200]
ZOOMED_X2_SCALE_HST = HST_SCI[2100:3100, 2000:3000]
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
R_ZOOMED_X3_SCALE_HST = filters.roberts_neg_diag(ZOOMED_X3_SCALE_HST)
R_NUCLEUS_SCALE_HST = filters.roberts_neg_diag(NUCLEUS_SCALE_HST)
FCF_ZOOMED_X3_SCALE_HST = skimage.feature.corner_foerstner(ZOOMED_X3_SCALE_HST)
FCF_NUCLEUS_SCALE_HST = skimage.feature.corner_foerstner(NUCLEUS_SCALE_HST)
SPECTRAL_LIST = ['gray', 'jet', 'hot', 'prism', 'nipy_spectral', 'gist_ncar', 'gist_earth', 'gist_stern', 'flag', 'gnuplot2', 'terrain']
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(SATO_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(SATO_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(M_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(M_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(BM_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(BM_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(R_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(R_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
code
74040532/cell_6
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from astropy.io import fits
from skimage import data, io, filters
import matplotlib.pyplot as plt
import skimage
import skimage.feature  # corner_foerstner is in skimage.feature, which needs its own import

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
WIDE_SCALE_HST = HST_SCI[700:4000, 700:4000]
ZOOMED_SCALE_HST = HST_SCI[1800:3400, 1700:3200]
ZOOMED_X2_SCALE_HST = HST_SCI[2100:3100, 2000:3000]
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
R_ZOOMED_X3_SCALE_HST = filters.roberts_neg_diag(ZOOMED_X3_SCALE_HST)
R_NUCLEUS_SCALE_HST = filters.roberts_neg_diag(NUCLEUS_SCALE_HST)
FCF_ZOOMED_X3_SCALE_HST = skimage.feature.corner_foerstner(ZOOMED_X3_SCALE_HST)
FCF_NUCLEUS_SCALE_HST = skimage.feature.corner_foerstner(NUCLEUS_SCALE_HST)
SPECTRAL_LIST = ['gray', 'jet', 'hot', 'prism', 'nipy_spectral', 'gist_ncar', 'gist_earth', 'gist_stern', 'flag', 'gnuplot2', 'terrain']
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(SATO_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(SATO_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
code
74040532/cell_1
[ "text_plain_output_1.png" ]
!pip install astropy
!pip install specutils
code
74040532/cell_7
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from astropy.io import fits
from skimage import data, io, filters
import matplotlib.pyplot as plt
import skimage
import skimage.feature  # corner_foerstner is in skimage.feature, which needs its own import

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
WIDE_SCALE_HST = HST_SCI[700:4000, 700:4000]
ZOOMED_SCALE_HST = HST_SCI[1800:3400, 1700:3200]
ZOOMED_X2_SCALE_HST = HST_SCI[2100:3100, 2000:3000]
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
R_ZOOMED_X3_SCALE_HST = filters.roberts_neg_diag(ZOOMED_X3_SCALE_HST)
R_NUCLEUS_SCALE_HST = filters.roberts_neg_diag(NUCLEUS_SCALE_HST)
FCF_ZOOMED_X3_SCALE_HST = skimage.feature.corner_foerstner(ZOOMED_X3_SCALE_HST)
FCF_NUCLEUS_SCALE_HST = skimage.feature.corner_foerstner(NUCLEUS_SCALE_HST)
SPECTRAL_LIST = ['gray', 'jet', 'hot', 'prism', 'nipy_spectral', 'gist_ncar', 'gist_earth', 'gist_stern', 'flag', 'gnuplot2', 'terrain']
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(SATO_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(SATO_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(M_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(M_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
code
74040532/cell_8
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from astropy.io import fits
from skimage import data, io, filters
import matplotlib.pyplot as plt
import skimage
import skimage.feature  # corner_foerstner is in skimage.feature, which needs its own import

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
WIDE_SCALE_HST = HST_SCI[700:4000, 700:4000]
ZOOMED_SCALE_HST = HST_SCI[1800:3400, 1700:3200]
ZOOMED_X2_SCALE_HST = HST_SCI[2100:3100, 2000:3000]
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
R_ZOOMED_X3_SCALE_HST = filters.roberts_neg_diag(ZOOMED_X3_SCALE_HST)
R_NUCLEUS_SCALE_HST = filters.roberts_neg_diag(NUCLEUS_SCALE_HST)
FCF_ZOOMED_X3_SCALE_HST = skimage.feature.corner_foerstner(ZOOMED_X3_SCALE_HST)
FCF_NUCLEUS_SCALE_HST = skimage.feature.corner_foerstner(NUCLEUS_SCALE_HST)
SPECTRAL_LIST = ['gray', 'jet', 'hot', 'prism', 'nipy_spectral', 'gist_ncar', 'gist_earth', 'gist_stern', 'flag', 'gnuplot2', 'terrain']
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(SATO_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(SATO_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(M_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(M_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(BM_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(BM_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
code
74040532/cell_10
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from astropy.io import fits
from skimage import data, io, filters
import matplotlib.pyplot as plt
import skimage
import skimage.feature  # corner_foerstner is in skimage.feature, which needs its own import

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
WIDE_SCALE_HST = HST_SCI[700:4000, 700:4000]
ZOOMED_SCALE_HST = HST_SCI[1800:3400, 1700:3200]
ZOOMED_X2_SCALE_HST = HST_SCI[2100:3100, 2000:3000]
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
R_ZOOMED_X3_SCALE_HST = filters.roberts_neg_diag(ZOOMED_X3_SCALE_HST)
R_NUCLEUS_SCALE_HST = filters.roberts_neg_diag(NUCLEUS_SCALE_HST)
FCF_ZOOMED_X3_SCALE_HST = skimage.feature.corner_foerstner(ZOOMED_X3_SCALE_HST)
FCF_NUCLEUS_SCALE_HST = skimage.feature.corner_foerstner(NUCLEUS_SCALE_HST)
SPECTRAL_LIST = ['gray', 'jet', 'hot', 'prism', 'nipy_spectral', 'gist_ncar', 'gist_earth', 'gist_stern', 'flag', 'gnuplot2', 'terrain']
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(SATO_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(SATO_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(M_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(M_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(BM_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(BM_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(R_ZOOMED_X3_SCALE_HST, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(R_NUCLEUS_SCALE_HST, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(FCF_ZOOMED_X3_SCALE_HST[0], cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(FCF_NUCLEUS_SCALE_HST[0], cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
code
2004239/cell_6
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df_train = pd.read_csv('../input/train.csv', usecols=[1, 2, 3, 4, 5], dtype={'onpromotion': bool}, converters={'unit_sales': lambda u: np.log1p(float(u)) if float(u) > 0 else 0}, parse_dates=['date'], skiprows=range(1, 66458909))
df_test = pd.read_csv('../input/test.csv', usecols=[0, 1, 2, 3, 4], dtype={'onpromotion': bool}, parse_dates=['date']).set_index(['store_nbr', 'item_nbr', 'date'])
items = pd.read_csv('../input/items.csv').set_index('item_nbr')
df_2017 = df_train.loc[df_train.date >= pd.Timestamp(2017, 1, 1)]
del df_train
promo_2017_train = df_2017.set_index(['store_nbr', 'item_nbr', 'date'])[['onpromotion']].unstack(level=-1).fillna(False)
promo_2017_train.columns = promo_2017_train.columns.get_level_values(1)
promo_2017_test = df_test[['onpromotion']].unstack(level=-1).fillna(False)
promo_2017_test.columns = promo_2017_test.columns.get_level_values(1)
promo_2017_train = promo_2017_train.reindex(promo_2017_test.index).fillna(False)
promo_2017 = pd.concat([promo_2017_train, promo_2017_test], axis=1)
del promo_2017_test, promo_2017_train
df_2017 = df_2017.set_index(['store_nbr', 'item_nbr', 'date'])[['unit_sales']].unstack(level=-1).fillna(0)
df_2017.columns = df_2017.columns.get_level_values(1)
items = items.reindex(df_2017.index.get_level_values(1))
print(df_2017.shape)
df_2017.head()
code
2004239/cell_7
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df_train = pd.read_csv('../input/train.csv', usecols=[1, 2, 3, 4, 5], dtype={'onpromotion': bool}, converters={'unit_sales': lambda u: np.log1p(float(u)) if float(u) > 0 else 0}, parse_dates=['date'], skiprows=range(1, 66458909))
df_test = pd.read_csv('../input/test.csv', usecols=[0, 1, 2, 3, 4], dtype={'onpromotion': bool}, parse_dates=['date']).set_index(['store_nbr', 'item_nbr', 'date'])
items = pd.read_csv('../input/items.csv').set_index('item_nbr')
df_2017 = df_train.loc[df_train.date >= pd.Timestamp(2017, 1, 1)]
del df_train
promo_2017_train = df_2017.set_index(['store_nbr', 'item_nbr', 'date'])[['onpromotion']].unstack(level=-1).fillna(False)
promo_2017_train.columns = promo_2017_train.columns.get_level_values(1)
promo_2017_test = df_test[['onpromotion']].unstack(level=-1).fillna(False)
promo_2017_test.columns = promo_2017_test.columns.get_level_values(1)
promo_2017_train = promo_2017_train.reindex(promo_2017_test.index).fillna(False)
promo_2017 = pd.concat([promo_2017_train, promo_2017_test], axis=1)
del promo_2017_test, promo_2017_train
test_all = df_test[[]].reset_index()
store_by_item = test_all[test_all.date == pd.to_datetime('2017-08-16')].drop(['date'], axis=1)
del test_all
print(store_by_item.shape)
store_by_item.head()
code
2004239/cell_8
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df_train = pd.read_csv('../input/train.csv', usecols=[1, 2, 3, 4, 5], dtype={'onpromotion': bool}, converters={'unit_sales': lambda u: np.log1p(float(u)) if float(u) > 0 else 0}, parse_dates=['date'], skiprows=range(1, 66458909))
df_test = pd.read_csv('../input/test.csv', usecols=[0, 1, 2, 3, 4], dtype={'onpromotion': bool}, parse_dates=['date']).set_index(['store_nbr', 'item_nbr', 'date'])
items = pd.read_csv('../input/items.csv').set_index('item_nbr')
df_2017 = df_train.loc[df_train.date >= pd.Timestamp(2017, 1, 1)]
del df_train
promo_2017_train = df_2017.set_index(['store_nbr', 'item_nbr', 'date'])[['onpromotion']].unstack(level=-1).fillna(False)
promo_2017_train.columns = promo_2017_train.columns.get_level_values(1)
promo_2017_test = df_test[['onpromotion']].unstack(level=-1).fillna(False)
promo_2017_test.columns = promo_2017_test.columns.get_level_values(1)
promo_2017_train = promo_2017_train.reindex(promo_2017_test.index).fillna(False)
promo_2017 = pd.concat([promo_2017_train, promo_2017_test], axis=1)
del promo_2017_test, promo_2017_train
df_2017 = df_2017.set_index(['store_nbr', 'item_nbr', 'date'])[['unit_sales']].unstack(level=-1).fillna(0)
df_2017.columns = df_2017.columns.get_level_values(1)
items = items.reindex(df_2017.index.get_level_values(1))
test_all = df_test[[]].reset_index()
store_by_item = test_all[test_all.date == pd.to_datetime('2017-08-16')].drop(['date'], axis=1)
del test_all
df_2017 = store_by_item.join(df_2017, on=['store_nbr', 'item_nbr']).set_index(df_2017.index.names).fillna(0)
print(df_2017.shape)
df_2017.head()
code
18110494/cell_21
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.models import Sequential

model = Sequential()
model.add(Conv2D(32, (3, 3), padding='Same', activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(64, (3, 3), padding='Same', activation='relu'))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(Conv2D(24, (5, 5), padding='Same', activation='relu'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
code
18110494/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
x_train = train.drop(labels=['label'], axis=1)
x_train = x_train / 255.0
test = test / 255.0
x_train = x_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
print('x_train shape:', x_train.shape)
code
18110494/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
y_train = train['label']
y_train.value_counts()
code
18110494/cell_23
[ "text_html_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.models import Sequential
from keras.optimizers import RMSprop
import keras
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
y_train = train['label']
x_train = train.drop(labels=['label'], axis=1)
y_train.value_counts()
x_train = x_train / 255.0
test = test / 255.0
x_train = x_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
y_train = keras.utils.to_categorical(y_train, num_classes=10)
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='Same', activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(64, (3, 3), padding='Same', activation='relu'))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(Conv2D(24, (5, 5), padding='Same', activation='relu'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=1)
code
18110494/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
test.head()
code
18110494/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
x_train = train.drop(labels=['label'], axis=1)
x_train = x_train / 255.0
test = test / 255.0
x_train = x_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
a = plt.imshow(x_train[1][:, :, 0])
code
18110494/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input'))
import keras
code
18110494/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
y_train = train['label']
y_train.head()
code
18110494/cell_24
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.models import Sequential
from keras.optimizers import RMSprop
import keras
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
y_train = train['label']
x_train = train.drop(labels=['label'], axis=1)
y_train.value_counts()
x_train = x_train / 255.0
test = test / 255.0
x_train = x_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
y_train = keras.utils.to_categorical(y_train, num_classes=10)
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='Same', activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(64, (3, 3), padding='Same', activation='relu'))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(Conv2D(24, (5, 5), padding='Same', activation='relu'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=1)
score = model.evaluate(x_val, y_val, verbose=1)  # x_val / y_val (a validation split) are not defined in this cell
print('Test loss:', score[0])
print('Test accuracy:', score[1])
code
18110494/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
x_train = train.drop(labels=['label'], axis=1)
x_train.isnull().describe()
code
18110494/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
train.head()
code
90135546/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
train_df['Sentiment'].value_counts()
code
90135546/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.shape
code
90135546/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
code
90135546/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so .to_pandas() below works on GPU DataFrames
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
train = train_df.to_pandas()
sns.countplot(x='Sentiment', data=train)
code
90135546/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.head()
code
90135546/cell_48
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so .to_pandas() below works on GPU DataFrames
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
vectorizer = CountVectorizer(analyzer='word', tokenizer=None, preprocessor=None, stop_words=None, max_features=5000)
train_data = vectorizer.fit_transform(train['Phrase'])
test_data = vectorizer.transform(test['Phrase'])  # transform only, so the vocabulary fitted on train is reused
test_data.shape
code
90135546/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.info()
code
90135546/cell_52
[ "text_plain_output_1.png" ]
from cuml.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so .to_pandas() below works on GPU DataFrames
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
vectorizer = CountVectorizer(analyzer='word', tokenizer=None, preprocessor=None, stop_words=None, max_features=5000)
train_data = vectorizer.fit_transform(train['Phrase'])
test_data = vectorizer.transform(test['Phrase'])  # transform only, so the vocabulary fitted on train is reused
log_reg = LogisticRegression()
log_reg.fit(train_data, train_df['Sentiment'])
y_pred = log_reg.predict(test_data)
len(y_pred)
code
90135546/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90135546/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.info()
code
90135546/cell_49
[ "text_plain_output_1.png" ]
from cuml.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so .to_pandas() below works on GPU DataFrames
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
train = train_df.to_pandas()
vectorizer = CountVectorizer(analyzer='word', tokenizer=None, preprocessor=None, stop_words=None, max_features=5000)
train_data = vectorizer.fit_transform(train['Phrase'])
train_data.shape
log_reg = LogisticRegression()
log_reg.fit(train_data, train_df['Sentiment'])
code
90135546/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.shape
test_df.isnull().sum()
test_df.isnull().any().any()
code
90135546/cell_51
[ "text_plain_output_1.png" ]
from cuml.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so .to_pandas() below works on GPU DataFrames
import cupy as cp
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
vectorizer = CountVectorizer(analyzer='word', tokenizer=None, preprocessor=None, stop_words=None, max_features=5000)
train_data = vectorizer.fit_transform(train['Phrase'])
train_data.shape
test_data = vectorizer.transform(test['Phrase'])  # transform only, so the vocabulary fitted on train is reused
test_data.shape
log_reg = LogisticRegression()
log_reg.fit(train_data, train_df['Sentiment'])
y_pred = log_reg.predict(test_data)
cp.unique(y_pred)
code
90135546/cell_28
[ "text_plain_output_1.png" ]
import string
import re

string.punctuation
code
90135546/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.describe()
code
90135546/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
code
90135546/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.shape
test_df.isnull().sum()
code
90135546/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
code
90135546/cell_46
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so .to_pandas() below works on GPU DataFrames
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
train = train_df.to_pandas()
vectorizer = CountVectorizer(analyzer='word', tokenizer=None, preprocessor=None, stop_words=None, max_features=5000)
train_data = vectorizer.fit_transform(train['Phrase'])
train_data.shape
code
90135546/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so .to_pandas() below works on GPU DataFrames
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
train = train_df.to_pandas()
train_df['Phrase'][0]
code
90135546/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.head()
code
90135546/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf shadows pandas here, so the DataFrames below are GPU-backed

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.describe()
code
90135546/cell_36
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords

stopwords.words('english')
code
34138455/cell_9
[ "image_output_1.png" ]
from radtorch import pipeline, core, utils

train_dir = '/train_data/train/'
test_dir = '/test_data/test1/'
table = utils.datatable_from_filepath(train_dir, classes=['dog', 'cat'])
clf = pipeline.Image_Classification(data_directory=train_dir, is_dicom=False, table=table, type='nn_classifier', model_arch='vgg16', epochs=10, batch_size=100, sampling=0.15)
clf.run()
clf.classifier.confusion_matrix()
code
34138455/cell_6
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from radtorch import pipeline, core, utils

train_dir = '/train_data/train/'
test_dir = '/test_data/test1/'
table = utils.datatable_from_filepath(train_dir, classes=['dog', 'cat'])
clf = pipeline.Image_Classification(data_directory=train_dir, is_dicom=False, table=table, type='nn_classifier', model_arch='vgg16', epochs=10, batch_size=100, sampling=0.15)
code
34138455/cell_11
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from radtorch import pipeline, core, utils

train_dir = '/train_data/train/'
test_dir = '/test_data/test1/'
table = utils.datatable_from_filepath(train_dir, classes=['dog', 'cat'])
clf = pipeline.Image_Classification(data_directory=train_dir, is_dicom=False, table=table, type='nn_classifier', model_arch='vgg16', epochs=10, batch_size=100, sampling=0.15)
clf.run()
clf.classifier.confusion_matrix()
clf.classifier.summary()
target_image = '/test_data/test1/10041.jpg'
target_layer = clf.classifier.trained_model.features[30]
clf.cam(target_image_path=target_image, target_layer=target_layer, cmap='plasma', type='scorecam')
code
34138455/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from radtorch import pipeline, core, utils

train_dir = '/train_data/train/'
test_dir = '/test_data/test1/'
table = utils.datatable_from_filepath(train_dir, classes=['dog', 'cat'])
clf = pipeline.Image_Classification(data_directory=train_dir, is_dicom=False, table=table, type='nn_classifier', model_arch='vgg16', epochs=10, batch_size=100, sampling=0.15)
clf.data_processor.dataset_info(plot=False)
code
34138455/cell_8
[ "text_plain_output_1.png" ]
from radtorch import pipeline, core, utils

train_dir = '/train_data/train/'
test_dir = '/test_data/test1/'
table = utils.datatable_from_filepath(train_dir, classes=['dog', 'cat'])
clf = pipeline.Image_Classification(data_directory=train_dir, is_dicom=False, table=table, type='nn_classifier', model_arch='vgg16', epochs=10, batch_size=100, sampling=0.15)
clf.run()
code
34138455/cell_10
[ "text_html_output_1.png" ]
from radtorch import pipeline, core, utils

train_dir = '/train_data/train/'
test_dir = '/test_data/test1/'
table = utils.datatable_from_filepath(train_dir, classes=['dog', 'cat'])
clf = pipeline.Image_Classification(data_directory=train_dir, is_dicom=False, table=table, type='nn_classifier', model_arch='vgg16', epochs=10, batch_size=100, sampling=0.15)
clf.run()
clf.classifier.confusion_matrix()
clf.classifier.summary()
code
34138455/cell_5
[ "text_html_output_4.png", "text_plain_output_4.png", "text_html_output_2.png", "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_html_output_3.png" ]
from radtorch import pipeline, core, utils

train_dir = '/train_data/train/'
test_dir = '/test_data/test1/'
table = utils.datatable_from_filepath(train_dir, classes=['dog', 'cat'])
table.head()
code
50224544/cell_7
[ "text_html_output_1.png" ]
from time import time
import cv2
import numpy as np
import pandas as pd

def breaker():
    pass

def head(x, no_of_ele=5):
    pass

def getImages(file_path=None, file_names=None, size=None):
    images = []
    for name in file_names:
        try:
            image = cv2.imread(file_path + name + '.jpg', cv2.IMREAD_GRAYSCALE).astype('float64')
        except AttributeError:
            # cv2.imread returns None for unreadable files; skip them
            continue
        if size:
            image = cv2.resize(image, dsize=(size, size), interpolation=cv2.INTER_LANCZOS4)
        cv2.normalize(src=image, dst=image, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)
        images.append(image.reshape(1, size, size))
    return np.array(images)

start_time = time()
ss = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv')
ts_img_names = ss['StudyInstanceUID'].values
ts_images = getImages('../input/ranzcr-clip-catheter-line-classification/test/', ts_img_names, size=144)
breaker()
print('Time Taken to read data : {:.2f} minutes'.format((time() - start_time) / 60))
breaker()
code
50224544/cell_18
[ "text_plain_output_1.png" ]
from time import time
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torch.utils.data import Dataset
from torch.utils.data import DataLoader as DL
from torch.nn.utils import weight_norm as WN
import torch.nn.functional as F
import gc
import os

torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed = 42

def breaker():
    pass

def head(x, no_of_ele=5):
    pass

def getImages(file_path=None, file_names=None, size=None):
    images = []
    for name in file_names:
        try:
            image = cv2.imread(file_path + name + '.jpg', cv2.IMREAD_GRAYSCALE).astype('float64')
        except AttributeError:
            # cv2.imread returns None for unreadable files; skip them
            continue
        if size:
            image = cv2.resize(image, dsize=(size, size), interpolation=cv2.INTER_LANCZOS4)
        cv2.normalize(src=image, dst=image, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)
        images.append(image.reshape(1, size, size))
    return np.array(images)

start_time = time()
ss = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv')
ts_img_names = ss['StudyInstanceUID'].values
ts_images = getImages('../input/ranzcr-clip-catheter-line-classification/test/', ts_img_names, size=144)
breaker()
breaker()

class Dataset(Dataset):

    def __init__(this, X=None, y=None, mode='train'):
        this.mode = mode
        this.X = X
        if mode == 'train':
            this.y = y

    def __len__(this):
        return this.X.shape[0]

    def __getitem__(this, idx):
        if this.mode == 'train':
            return (torch.FloatTensor(this.X[idx]), torch.FloatTensor(this.y[idx]))
        else:
            return torch.FloatTensor(this.X[idx])

class CFG:
    tr_batch_size = 128
    ts_batch_size = 128
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    in_channels = 1
    OL = 11

    def __init__(this, filter_sizes=[64, 128, 256, 512], HL=[4096, 4096], epochs=50, n_folds=5):
        this.filter_sizes = filter_sizes
        this.HL = HL
        this.epochs = epochs
        this.n_folds = n_folds

def predict_(model=None, dataloader=None, device=None, path=None):
    if path:
        model.load_state_dict(torch.load(path))
    else:
        pass
    model.to(device)
    model.eval()
    y_pred = torch.zeros(1, 11).to(device)
    for X in dataloader:
        X = X.to(device)
        with torch.no_grad():
            Pred = torch.sigmoid(model(X))
        y_pred = torch.cat((y_pred, Pred), dim=0)
    return y_pred[1:].detach().cpu().numpy()

cfg = CFG(filter_sizes=[64, 128, 256, 512], HL=[4096, 4096], epochs=50, n_folds=5)
ts_data_setup = Dataset(ts_images, None, 'test')
ts_data = DL(ts_data_setup, batch_size=cfg.ts_batch_size, shuffle=False)
model = CNN(in_channels=cfg.in_channels, filter_sizes=cfg.filter_sizes, HL=cfg.HL, OL=cfg.OL)  # CNN is defined in an earlier cell of this notebook
y_pred_e19 = predict_(model=model, dataloader=ts_data, device=cfg.device, path='../input/rccl-1x144-f-train/Epoch_19.pt')
y_pred_e20 = predict_(model=model, dataloader=ts_data, device=cfg.device, path='../input/rccl-1x144-f-train/Epoch_20.pt')
y_pred_e23 = predict_(model=model, dataloader=ts_data, device=cfg.device, path='../input/rccl-1x144-f-train/Epoch_23.pt')
y_pred_e25 = predict_(model=model, dataloader=ts_data, device=cfg.device, path='../input/rccl-1x144-f-train/Epoch_25.pt')
y_pred = (y_pred_e19 + y_pred_e20 + y_pred_e23 + y_pred_e25) / 4
y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)
ss.iloc[:, 1:] = y_pred
ss.to_csv('./submission.csv', index=False)
ss.head(5)
code
17117664/cell_4
[ "text_plain_output_1.png" ]
import json
import pandas as pd
import numpy as np

with open('../input/ds397__ammcomunale_bilancio_rendiconto_previsioni_triennali_2015-2019.json') as json_file:
    parsed_file = json.load(json_file)
type(parsed_file)
code
17117664/cell_20
[ "text_plain_output_1.png" ]
import json
import pandas as pd
import numpy as np

with open('../input/ds397__ammcomunale_bilancio_rendiconto_previsioni_triennali_2015-2019.json') as json_file:
    parsed_file = json.load(json_file)
df = pd.DataFrame(parsed_file)
df.Cdc.unique()
df['Cdc'] = df['Cdc'].astype('str')
df = df.replace({'None': '0'})
df['Cdc'] = df['Cdc'].astype('int')
numerical_columns = ['ARTICOLO', 'CAPITOLO', 'Centro di responsabilità', 'NUMERO', 'PDC-Livello1', 'PDC-Livello2', 'PDC-Livello3', 'PDC-Livello4', 'PDC-Missione', 'PDC-Programma']
stuff_with_commas = ['RENDICONTO 2015', 'RENDICONTO 2016', 'STANZIAMENTO 2017', 'STANZIAMENTO 2018', 'STANZIAMENTO 2019', 'STANZIAMENTO DI CASSA 2017']
for col in stuff_with_commas:
    # strip the thousands separator and turn the decimal comma into a dot
    df[col] = df[col].str.replace('.', '', regex=False).str.replace(',', '.', regex=False)
numerical_columns = numerical_columns + stuff_with_commas
for col in numerical_columns:
    df[col] = pd.to_numeric(df[col])
df.dtypes
for col in df.columns:
    print('Column: {}, unique values: {}'.format(col, df[col].unique().shape[0]))
code
17117664/cell_6
[ "text_plain_output_1.png" ]
import json
import pandas as pd
import numpy as np

with open('../input/ds397__ammcomunale_bilancio_rendiconto_previsioni_triennali_2015-2019.json') as json_file:
    parsed_file = json.load(json_file)
type(parsed_file[5])
code
17117664/cell_19
[ "text_plain_output_1.png" ]
import json
import pandas as pd
import numpy as np

with open('../input/ds397__ammcomunale_bilancio_rendiconto_previsioni_triennali_2015-2019.json') as json_file:
    parsed_file = json.load(json_file)
df = pd.DataFrame(parsed_file)
df.Cdc.unique()
df['Cdc'] = df['Cdc'].astype('str')
df = df.replace({'None': '0'})
df['Cdc'] = df['Cdc'].astype('int')
numerical_columns = ['ARTICOLO', 'CAPITOLO', 'Centro di responsabilità', 'NUMERO', 'PDC-Livello1', 'PDC-Livello2', 'PDC-Livello3', 'PDC-Livello4', 'PDC-Missione', 'PDC-Programma']
stuff_with_commas = ['RENDICONTO 2015', 'RENDICONTO 2016', 'STANZIAMENTO 2017', 'STANZIAMENTO 2018', 'STANZIAMENTO 2019', 'STANZIAMENTO DI CASSA 2017']
for col in stuff_with_commas:
    # strip the thousands separator and turn the decimal comma into a dot
    df[col] = df[col].str.replace('.', '', regex=False).str.replace(',', '.', regex=False)
numerical_columns = numerical_columns + stuff_with_commas
for col in numerical_columns:
    df[col] = pd.to_numeric(df[col])
df.dtypes
code
17117664/cell_8
[ "text_plain_output_1.png" ]
import json
import pandas as pd
import numpy as np

with open('../input/ds397__ammcomunale_bilancio_rendiconto_previsioni_triennali_2015-2019.json') as json_file:
    parsed_file = json.load(json_file)
df = pd.DataFrame(parsed_file)
df.head()
code
17117664/cell_16
[ "text_html_output_1.png" ]
import json
import pandas as pd
import numpy as np

with open('../input/ds397__ammcomunale_bilancio_rendiconto_previsioni_triennali_2015-2019.json') as json_file:
    parsed_file = json.load(json_file)
df = pd.DataFrame(parsed_file)
df.Cdc.unique()
df['Cdc'] = df['Cdc'].astype('str')
df = df.replace({'None': '0'})
df['Cdc'] = df['Cdc'].astype('int')
numerical_columns = ['ARTICOLO', 'CAPITOLO', 'Centro di responsabilità', 'NUMERO', 'PDC-Livello1', 'PDC-Livello2', 'PDC-Livello3', 'PDC-Livello4', 'PDC-Missione', 'PDC-Programma']
stuff_with_commas = ['RENDICONTO 2015', 'RENDICONTO 2016', 'STANZIAMENTO 2017', 'STANZIAMENTO 2018', 'STANZIAMENTO 2019', 'STANZIAMENTO DI CASSA 2017']
for col in stuff_with_commas:
    # strip the thousands separator and turn the decimal comma into a dot
    df[col] = df[col].str.replace('.', '', regex=False).str.replace(',', '.', regex=False)
numerical_columns = numerical_columns + stuff_with_commas
for col in numerical_columns:
    df[col] = pd.to_numeric(df[col])
columns_with_text = ['DIR', 'Descrizione Centro di Responsabilità', 'Descrizione Direzione', 'Descrizione capitolo PEG', 'Descrizione centro di costo', 'TIPO']
for col in columns_with_text:
    print('Column: {}, unique values: {}'.format(col, df[col].unique().shape[0]))
code
17117664/cell_10
[ "text_plain_output_1.png" ]
import json
import pandas as pd
import numpy as np

with open('../input/ds397__ammcomunale_bilancio_rendiconto_previsioni_triennali_2015-2019.json') as json_file:
    parsed_file = json.load(json_file)
df = pd.DataFrame(parsed_file)
for val, col in zip(df.iloc[0], df.columns):
    print('Column: {}, value: {}'.format(col, val))
code
17117664/cell_12
[ "text_plain_output_1.png" ]
import json
import pandas as pd
import numpy as np

with open('../input/ds397__ammcomunale_bilancio_rendiconto_previsioni_triennali_2015-2019.json') as json_file:
    parsed_file = json.load(json_file)
df = pd.DataFrame(parsed_file)
df.Cdc.unique()
code
17096461/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np  # linear algebra
from sklearn.metrics import make_scorer
from mlbox.preprocessing import *  # provides Reader and Drift_thresholder
from mlbox.optimisation import *  # provides Optimiser
from mlbox.prediction import *  # provides Predictor

paths = ['../input/train.csv', '../input/test.csv']
target_name = 'SalePrice'
rd = Reader(sep=',')
df = rd.train_test_split(paths, target_name)
dft = Drift_thresholder()
df = dft.fit_transform(df)
rmse = make_scorer(lambda y_true, y_pred: np.sqrt(np.sum((y_true - y_pred) ** 2) / len(y_true)), greater_is_better=False, needs_proba=False)
opt = Optimiser(scoring=rmse, n_folds=3)
space = {'est__strategy': {'search': 'choice', 'space': ['LightGBM']},
         'est__n_estimators': {'search': 'choice', 'space': [150]},
         'est__colsample_bytree': {'search': 'uniform', 'space': [0.8, 0.95]},
         'est__subsample': {'search': 'uniform', 'space': [0.8, 0.95]},
         'est__max_depth': {'search': 'choice', 'space': [5, 6, 7, 8, 9]},
         'est__learning_rate': {'search': 'choice', 'space': [0.07]}}
params = opt.optimise(space, df, 15)
prd = Predictor()
prd.fit_predict(params, df)
code
17096461/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from mlbox.preprocessing import *  # provides Reader and Drift_thresholder

paths = ['../input/train.csv', '../input/test.csv']
target_name = 'SalePrice'
rd = Reader(sep=',')
df = rd.train_test_split(paths, target_name)
dft = Drift_thresholder()
df = dft.fit_transform(df)
code
17096461/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
!pip install mlbox
code
17096461/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
17096461/cell_7
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
from sklearn.metrics import make_scorer
from mlbox.optimisation import *  # provides Optimiser

rmse = make_scorer(lambda y_true, y_pred: np.sqrt(np.sum((y_true - y_pred) ** 2) / len(y_true)), greater_is_better=False, needs_proba=False)
opt = Optimiser(scoring=rmse, n_folds=3)
code
17096461/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_1.png" ]
import numpy as np  # linear algebra
from sklearn.metrics import make_scorer
from mlbox.preprocessing import *  # provides Reader and Drift_thresholder
from mlbox.optimisation import *  # provides Optimiser

paths = ['../input/train.csv', '../input/test.csv']
target_name = 'SalePrice'
rd = Reader(sep=',')
df = rd.train_test_split(paths, target_name)
dft = Drift_thresholder()
df = dft.fit_transform(df)
rmse = make_scorer(lambda y_true, y_pred: np.sqrt(np.sum((y_true - y_pred) ** 2) / len(y_true)), greater_is_better=False, needs_proba=False)
opt = Optimiser(scoring=rmse, n_folds=3)
space = {'est__strategy': {'search': 'choice', 'space': ['LightGBM']},
         'est__n_estimators': {'search': 'choice', 'space': [150]},
         'est__colsample_bytree': {'search': 'uniform', 'space': [0.8, 0.95]},
         'est__subsample': {'search': 'uniform', 'space': [0.8, 0.95]},
         'est__max_depth': {'search': 'choice', 'space': [5, 6, 7, 8, 9]},
         'est__learning_rate': {'search': 'choice', 'space': [0.07]}}
params = opt.optimise(space, df, 15)
code
17096461/cell_3
[ "text_plain_output_1.png" ]
from mlbox.preprocessing import *
from mlbox.optimisation import *
from mlbox.prediction import *
code
17096461/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

paths = ['../input/train.csv', '../input/test.csv']
target_name = 'SalePrice'
submit = pd.read_csv('../input/sample_submission.csv', sep=',')
preds = pd.read_csv('save/' + target_name + '_predictions.csv')
submit[target_name] = preds[target_name + '_predicted'].values
submit.to_csv('mlbox.csv', index=False)
code
17096461/cell_5
[ "text_plain_output_1.png" ]
from mlbox.preprocessing import *  # provides Reader (the notebook's setup cell)

paths = ['../input/train.csv', '../input/test.csv']
target_name = 'SalePrice'
rd = Reader(sep=',')
df = rd.train_test_split(paths, target_name)
code
32062473/cell_21
[ "text_plain_output_1.png" ]
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

verbose = False
loop_logic = True
scale_data = True
use_base_model = True
one_hot_encode = False
estimators = 5000
public_leaderboard_end_date = None

def transform_dates(df):
    dates = pd.to_datetime(df['Date'])
    min_dates = dates.min()
    df['Date_Year'] = dates.dt.year
    df['Date_Month'] = dates.dt.month
    df['Date_Day'] = dates.dt.day
    df.drop(['Date'], axis=1, inplace=True)

def setup_df_encode_and_dates(df, encode_flag, dummy_cols, target_cols=[]):
    enc_df = df.copy()
    enc_df = enc_df[[enc_df.columns[0], enc_df.columns[2], enc_df.columns[1], enc_df.columns[3]]]
    if encode_flag == True:
        enc_df = pd.get_dummies(enc_df, columns=dummy_cols)
    else:
        le = LabelEncoder()
        for dum_col in dummy_cols:
            enc_df[dum_col] = le.fit_transform(enc_df[dum_col])
    transform_dates(enc_df)
    for col in target_cols:
        enc_df[col] = df[col]
    return enc_df

def prepare_train_set(df_train):
    train_x, train_target1, train_target2 = (df_train.iloc[:, :-2], df_train.iloc[:, -2], df_train.iloc[:, -1])
    return (train_x, train_target1, train_target2)

def prepare_submission(preds):
    preds['ForecastId'] = preds['ForecastId'].fillna(0.0).astype('int32')
    preds['Fatalities'] = preds['Fatalities'].fillna(0.0).astype('int32')
    preds['ConfirmedCases'] = preds['ConfirmedCases'].fillna(0.0).astype('int32')
    preds.clip(lower=0, inplace=True)
    preds.to_csv('submission.csv', index=False)

def model_and_predict(model, X, y, test, estimators=5000):
    if model is not None:
        run_model = model
    else:
        run_model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=estimators)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12345)
    run_model.fit(X_train, y_train)
    y_train_pred = run_model.predict(X_train)
    y_test_pred = run_model.predict(X_test)
    y_pred = run_model.predict(test)
    y_pred[y_pred < 0] = 0
    r2 = r2_score(y_train, y_train_pred, multioutput='variance_weighted')  # y_true first, then predictions
    return (y_pred, r2)

def show_results(model):
    # Code based on "Selecting Optimal Parameters for XGBoost Model Training" by Andrej Baranovskij (Medium)
    results = model.evals_result()
    epochs = len(results['validation_0']['error'])
    x_axis = range(0, epochs)
    # plot log loss
    fig, ax = plt.subplots()
    ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
    ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
    ax.legend()
    plt.ylabel('Log Loss')
    plt.title('XGBoost Log Loss')
    plt.show()
    # plot classification error
    fig, ax = plt.subplots()  # was pyplot.subplots(), a NameError with this import style
    ax.plot(x_axis, results['validation_0']['error'], label='Train')
    ax.plot(x_axis, results['validation_1']['error'], label='Test')
    ax.legend()
    plt.ylabel('Classification Error')
    plt.title('XGBoost Classification Error')
    plt.show()

def fit_models_and_train(country, state, model, train, test):
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    forecast_IDs = test.iloc[:, 0]
    test_no_id = test.iloc[:, 1:]
    if scale_data == True:
        scaler = MinMaxScaler()
        X = scaler.fit_transform(X.values)
        test_no_id = scaler.transform(test_no_id.values)
    y_cases_pred, cases_r2 = model_and_predict(model, X, y_cases, test_no_id)
    y_fatal_pred, fatal_r2 = model_and_predict(model, X, y_fatal, test_no_id)
    preds = pd.DataFrame(forecast_IDs)
    preds['ConfirmedCases'] = y_cases_pred
    preds['Fatalities'] = y_fatal_pred
    return preds

df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_train.shape
code
32062473/cell_34
[ "text_plain_output_1.png" ]
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

verbose = False
loop_logic = True
scale_data = True
use_base_model = True
one_hot_encode = False
estimators = 5000
public_leaderboard_end_date = None

def get_test_train_for_country_state(one_hot_encode_flag, df_train, df_test, country, state):
    if one_hot_encode_flag == True:
        cs_train = df_train[(df_train['Country_Region_' + country] == 1) & (df_train['Province_State_' + state] == 1)]
        cs_test = df_test[(df_test['Country_Region_' + country] == 1) & (df_test['Province_State_' + state] == 1)]
    else:
        cs_train = df_train[(df_train['Country_Region'] == country) & (df_train['Province_State'] == state)]
        cs_test = df_test[(df_test['Country_Region'] == country) & (df_test['Province_State'] == state)]
    return (cs_train, cs_test)

def transform_dates(df):
    dates = pd.to_datetime(df['Date'])
    min_dates = dates.min()
    df['Date_Year'] = dates.dt.year
    df['Date_Month'] = dates.dt.month
    df['Date_Day'] = dates.dt.day
    df.drop(['Date'], axis=1, inplace=True)

def setup_df_encode_and_dates(df, encode_flag, dummy_cols, target_cols=[]):
    enc_df = df.copy()
    enc_df = enc_df[[enc_df.columns[0], enc_df.columns[2], enc_df.columns[1], enc_df.columns[3]]]
    if encode_flag == True:
        enc_df = pd.get_dummies(enc_df, columns=dummy_cols)
    else:
        le = LabelEncoder()
        for dum_col in dummy_cols:
            enc_df[dum_col] = le.fit_transform(enc_df[dum_col])
    transform_dates(enc_df)
    for col in target_cols:
        enc_df[col] = df[col]
    return enc_df

def prepare_train_set(df_train):
    train_x, train_target1, train_target2 = (df_train.iloc[:, :-2], df_train.iloc[:, -2], df_train.iloc[:, -1])
    return (train_x, train_target1, train_target2)

def prepare_submission(preds):
    preds['ForecastId'] = preds['ForecastId'].fillna(0.0).astype('int32')
    preds['Fatalities'] = preds['Fatalities'].fillna(0.0).astype('int32')
    preds['ConfirmedCases'] = preds['ConfirmedCases'].fillna(0.0).astype('int32')
    preds.clip(lower=0, inplace=True)
    preds.to_csv('submission.csv', index=False)

def model_and_predict(model, X, y, test, estimators=5000):
    if model is not None:
        run_model = model
    else:
        run_model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=estimators)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12345)
    run_model.fit(X_train, y_train)
    y_train_pred = run_model.predict(X_train)
    y_test_pred = run_model.predict(X_test)
    y_pred = run_model.predict(test)
    y_pred[y_pred < 0] = 0
    r2 = r2_score(y_train, y_train_pred, multioutput='variance_weighted')  # y_true first, then predictions
    return (y_pred, r2)

def show_results(model):
    # Code based on "Selecting Optimal Parameters for XGBoost Model Training" by Andrej Baranovskij (Medium)
    results = model.evals_result()
    epochs = len(results['validation_0']['error'])
    x_axis = range(0, epochs)
    # plot log loss
    fig, ax = plt.subplots()
    ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
    ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
    ax.legend()
    plt.ylabel('Log Loss')
    plt.title('XGBoost Log Loss')
    plt.show()
    # plot classification error
    fig, ax = plt.subplots()  # was pyplot.subplots(), a NameError with this import style
    ax.plot(x_axis, results['validation_0']['error'], label='Train')
    ax.plot(x_axis, results['validation_1']['error'], label='Test')
    ax.legend()
    plt.ylabel('Classification Error')
    plt.title('XGBoost Classification Error')
    plt.show()

def fit_models_and_train(country, state, model, train, test):
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    forecast_IDs = test.iloc[:, 0]
    test_no_id = test.iloc[:, 1:]
    if scale_data == True:
        scaler = MinMaxScaler()
        X = scaler.fit_transform(X.values)
        test_no_id = scaler.transform(test_no_id.values)
    y_cases_pred, cases_r2 = model_and_predict(model, X, y_cases, test_no_id)
    y_fatal_pred, fatal_r2 = model_and_predict(model, X, y_fatal, test_no_id)
    preds = pd.DataFrame(forecast_IDs)
    preds['ConfirmedCases'] = y_cases_pred
    preds['Fatalities'] = y_fatal_pred
    return preds

def cv_model(country, state, train, test):
    # never called in this notebook; expects a 'parms' dict to be defined elsewhere
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    X_test = test.iloc[:, 1:]
    data_train_cases_matrix = xgb.DMatrix(data=X, label=y_cases)
    data_train_fatal_matrix = xgb.DMatrix(data=X, label=y_fatal)
    cv_results_cases = xgb.cv(dtrain=data_train_cases_matrix, params=parms, nfold=3, num_boost_round=50, early_stopping_rounds=50, metrics='rmse', as_pandas=True, seed=12345)
    cv_results_fatal = xgb.cv(dtrain=data_train_fatal_matrix, params=parms, nfold=3, num_boost_round=50, early_stopping_rounds=50, metrics='rmse', as_pandas=True, seed=12345)

df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_train.shape
df_test.shape
df_train_original = df_train.copy()
df_test_original = df_test.copy()
df_train_original['Datetime'] = pd.to_datetime(df_train_original['Date'])
df_test_original['Datetime'] = pd.to_datetime(df_test_original['Date'])
date_filter = df_train[df_train.Date > '2020-04-01'].index  # dates in the CSV are YYYY-MM-DD strings
df_train.drop(date_filter, inplace=True)
df_train[df_train.Date > '2020-04-01']
base_model = xgb.XGBRegressor(n_estimators=estimators, random_state=12345, max_depth=15)
if one_hot_encode == True:
    country_groups = df_train_original.groupby(['Country_Region', 'Province_State']).groups
    df_country_list = pd.DataFrame.from_dict(list(country_groups))
    train_country_list = df_country_list[0].unique()
df_train_dd = setup_df_encode_and_dates(df_train, one_hot_encode, ['Country_Region', 'Province_State'], ['ConfirmedCases', 'Fatalities'])
df_test_dd = setup_df_encode_and_dates(df_test, one_hot_encode, ['Country_Region', 'Province_State'])
if one_hot_encode == False:
    country_groups = df_train_dd.groupby(['Country_Region', 'Province_State']).groups
    df_country_list = pd.DataFrame.from_dict(list(country_groups))
    train_country_list = df_country_list[0].unique()
df_preds = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
if loop_logic == True:
    for country in train_country_list:
        country_states = df_country_list[df_country_list[0] == country][1].values
        for state in country_states:
            curr_cs_train, curr_cs_test = get_test_train_for_country_state(one_hot_encode, df_train_dd, df_test_dd, country, state)
            preds = fit_models_and_train(country, state, base_model if use_base_model == True else None, curr_cs_train, curr_cs_test)
            preds = preds.round(5)
            df_preds = pd.concat([df_preds, preds], axis=0)
else:
    preds = fit_models_and_train('All', 'All', base_model if use_base_model == True else None, df_train_dd, df_test_dd)
    df_preds = pd.concat([df_preds, preds], axis=0)
if public_leaderboard_end_date is not None:  # the original '~(x == None)' test was always truthy
    df_preds.loc[df_test_original.Datetime > pd.to_datetime(public_leaderboard_end_date), 'ConfirmedCases'] = 1
    df_preds.loc[df_test_original.Datetime > pd.to_datetime(public_leaderboard_end_date), 'Fatalities'] = 1
df_test_dd.shape
code
32062473/cell_33
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

verbose = False
loop_logic = True
scale_data = True
use_base_model = True
one_hot_encode = False
estimators = 5000
public_leaderboard_end_date = None

def get_test_train_for_country_state(one_hot_encode_flag, df_train, df_test, country, state):
    if one_hot_encode_flag == True:
        cs_train = df_train[(df_train['Country_Region_' + country] == 1) & (df_train['Province_State_' + state] == 1)]
        cs_test = df_test[(df_test['Country_Region_' + country] == 1) & (df_test['Province_State_' + state] == 1)]
    else:
        cs_train = df_train[(df_train['Country_Region'] == country) & (df_train['Province_State'] == state)]
        cs_test = df_test[(df_test['Country_Region'] == country) & (df_test['Province_State'] == state)]
    return (cs_train, cs_test)

def transform_dates(df):
    dates = pd.to_datetime(df['Date'])
    min_dates = dates.min()
    df['Date_Year'] = dates.dt.year
    df['Date_Month'] = dates.dt.month
    df['Date_Day'] = dates.dt.day
    df.drop(['Date'], axis=1, inplace=True)

def setup_df_encode_and_dates(df, encode_flag, dummy_cols, target_cols=[]):
    enc_df = df.copy()
    enc_df = enc_df[[enc_df.columns[0], enc_df.columns[2], enc_df.columns[1], enc_df.columns[3]]]
    if encode_flag == True:
        enc_df = pd.get_dummies(enc_df, columns=dummy_cols)
    else:
        le = LabelEncoder()
        for dum_col in dummy_cols:
            enc_df[dum_col] = le.fit_transform(enc_df[dum_col])
    transform_dates(enc_df)
    for col in target_cols:
        enc_df[col] = df[col]
    return enc_df

def prepare_train_set(df_train):
    train_x, train_target1, train_target2 = (df_train.iloc[:, :-2], df_train.iloc[:, -2], df_train.iloc[:, -1])
    return (train_x, train_target1, train_target2)

def prepare_submission(preds):
    preds['ForecastId'] = preds['ForecastId'].fillna(0.0).astype('int32')
    preds['Fatalities'] = preds['Fatalities'].fillna(0.0).astype('int32')
    preds['ConfirmedCases'] = preds['ConfirmedCases'].fillna(0.0).astype('int32')
    preds.clip(lower=0, inplace=True)
    preds.to_csv('submission.csv', index=False)

def model_and_predict(model, X, y, test, estimators=5000):
    if model is not None:
        run_model = model
    else:
        run_model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=estimators)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12345)
    run_model.fit(X_train, y_train)
    y_train_pred = run_model.predict(X_train)
    y_test_pred = run_model.predict(X_test)
    y_pred = run_model.predict(test)
    y_pred[y_pred < 0] = 0
    r2 = r2_score(y_train, y_train_pred, multioutput='variance_weighted')  # y_true first, then predictions
    return (y_pred, r2)

def show_results(model):
    # Code based on "Selecting Optimal Parameters for XGBoost Model Training" by Andrej Baranovskij (Medium)
    results = model.evals_result()
    epochs = len(results['validation_0']['error'])
    x_axis = range(0, epochs)
    # plot log loss
    fig, ax = plt.subplots()
    ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
    ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
    ax.legend()
    plt.ylabel('Log Loss')
    plt.title('XGBoost Log Loss')
    plt.show()
    # plot classification error
    fig, ax = plt.subplots()  # was pyplot.subplots(), a NameError with this import style
    ax.plot(x_axis, results['validation_0']['error'], label='Train')
    ax.plot(x_axis, results['validation_1']['error'], label='Test')
    ax.legend()
    plt.ylabel('Classification Error')
    plt.title('XGBoost Classification Error')
    plt.show()

def fit_models_and_train(country, state, model, train, test):
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    forecast_IDs = test.iloc[:, 0]
    test_no_id = test.iloc[:, 1:]
    if scale_data == True:
        scaler = MinMaxScaler()
        X = scaler.fit_transform(X.values)
        test_no_id = scaler.transform(test_no_id.values)
    y_cases_pred, cases_r2 = model_and_predict(model, X, y_cases, test_no_id)
    y_fatal_pred, fatal_r2 = model_and_predict(model, X, y_fatal, test_no_id)
    preds = pd.DataFrame(forecast_IDs)
    preds['ConfirmedCases'] = y_cases_pred
    preds['Fatalities'] = y_fatal_pred
    return preds

def cv_model(country, state, train, test):
    # never called in this notebook; expects a 'parms' dict to be defined elsewhere
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    X_test = test.iloc[:, 1:]
    data_train_cases_matrix = xgb.DMatrix(data=X, label=y_cases)
    data_train_fatal_matrix = xgb.DMatrix(data=X, label=y_fatal)
    cv_results_cases = xgb.cv(dtrain=data_train_cases_matrix, params=parms, nfold=3, num_boost_round=50, early_stopping_rounds=50, metrics='rmse', as_pandas=True, seed=12345)
    cv_results_fatal = xgb.cv(dtrain=data_train_fatal_matrix, params=parms, nfold=3, num_boost_round=50, early_stopping_rounds=50, metrics='rmse', as_pandas=True, seed=12345)

df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_train.shape
df_test.shape
df_train_original = df_train.copy()
df_test_original = df_test.copy()
df_train_original['Datetime'] = pd.to_datetime(df_train_original['Date'])
df_test_original['Datetime'] = pd.to_datetime(df_test_original['Date'])
date_filter = df_train[df_train.Date > '2020-04-01'].index  # dates in the CSV are YYYY-MM-DD strings
df_train.drop(date_filter, inplace=True)
df_train[df_train.Date > '2020-04-01']
base_model = xgb.XGBRegressor(n_estimators=estimators, random_state=12345, max_depth=15)
if one_hot_encode == True:
    country_groups = df_train_original.groupby(['Country_Region', 'Province_State']).groups
    df_country_list = pd.DataFrame.from_dict(list(country_groups))
    train_country_list = df_country_list[0].unique()
df_train_dd = setup_df_encode_and_dates(df_train, one_hot_encode, ['Country_Region', 'Province_State'], ['ConfirmedCases', 'Fatalities'])
df_test_dd = setup_df_encode_and_dates(df_test, one_hot_encode, ['Country_Region', 'Province_State'])
if one_hot_encode == False:
    country_groups = df_train_dd.groupby(['Country_Region', 'Province_State']).groups
    df_country_list = pd.DataFrame.from_dict(list(country_groups))
    train_country_list = df_country_list[0].unique()
df_preds = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
if loop_logic == True:
    print('Starting forecasting for {0} countries.'.format(len(train_country_list)))
    for country in train_country_list:
        print('Starting country {0}.'.format(country))
        country_states = df_country_list[df_country_list[0] == country][1].values
        for state in country_states:
            curr_cs_train, curr_cs_test = get_test_train_for_country_state(one_hot_encode, df_train_dd, df_test_dd, country, state)
            preds = fit_models_and_train(country, state, base_model if use_base_model == True else None, curr_cs_train, curr_cs_test)
            preds = preds.round(5)
            df_preds = pd.concat([df_preds, preds], axis=0)
        print('Country {0} complete.'.format(country))
else:
    print('Starting forecasting for all {0} countries.'.format(len(train_country_list)))
    preds = fit_models_and_train('All', 'All', base_model if use_base_model == True else None, df_train_dd, df_test_dd)
    df_preds = pd.concat([df_preds, preds], axis=0)
    print('All countries complete.')
if public_leaderboard_end_date is not None:  # the original '~(x == None)' test was always truthy
    df_preds.loc[df_test_original.Datetime > pd.to_datetime(public_leaderboard_end_date), 'ConfirmedCases'] = 1
    df_preds.loc[df_test_original.Datetime > pd.to_datetime(public_leaderboard_end_date), 'Fatalities'] = 1
df_preds[df_test_original.Datetime > pd.to_datetime(public_leaderboard_end_date)].head()
code
32062473/cell_26
[ "text_html_output_1.png" ]
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

verbose = False
loop_logic = True
scale_data = True
use_base_model = True
one_hot_encode = False
estimators = 5000
public_leaderboard_end_date = None

def transform_dates(df):
    dates = pd.to_datetime(df['Date'])
    min_dates = dates.min()
    df['Date_Year'] = dates.dt.year
    df['Date_Month'] = dates.dt.month
    df['Date_Day'] = dates.dt.day
    df.drop(['Date'], axis=1, inplace=True)

def setup_df_encode_and_dates(df, encode_flag, dummy_cols, target_cols=[]):
    enc_df = df.copy()
    enc_df = enc_df[[enc_df.columns[0], enc_df.columns[2], enc_df.columns[1], enc_df.columns[3]]]
    if encode_flag == True:
        enc_df = pd.get_dummies(enc_df, columns=dummy_cols)
    else:
        le = LabelEncoder()
        for dum_col in dummy_cols:
            enc_df[dum_col] = le.fit_transform(enc_df[dum_col])
    transform_dates(enc_df)
    for col in target_cols:
        enc_df[col] = df[col]
    return enc_df

def prepare_train_set(df_train):
    train_x, train_target1, train_target2 = (df_train.iloc[:, :-2], df_train.iloc[:, -2], df_train.iloc[:, -1])
    return (train_x, train_target1, train_target2)

def prepare_submission(preds):
    preds['ForecastId'] = preds['ForecastId'].fillna(0.0).astype('int32')
    preds['Fatalities'] = preds['Fatalities'].fillna(0.0).astype('int32')
    preds['ConfirmedCases'] = preds['ConfirmedCases'].fillna(0.0).astype('int32')
    preds.clip(lower=0, inplace=True)
    preds.to_csv('submission.csv', index=False)

def model_and_predict(model, X, y, test, estimators=5000):
    if model is not None:
        run_model = model
    else:
        run_model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=estimators)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12345)
    run_model.fit(X_train, y_train)
    y_train_pred = run_model.predict(X_train)
    y_test_pred = run_model.predict(X_test)
    y_pred = run_model.predict(test)
    y_pred[y_pred < 0] = 0
    r2 = r2_score(y_train, y_train_pred, multioutput='variance_weighted')  # y_true first, then predictions
    return (y_pred, r2)

def show_results(model):
    # Code based on "Selecting Optimal Parameters for XGBoost Model Training" by Andrej Baranovskij (Medium)
    results = model.evals_result()
    epochs = len(results['validation_0']['error'])
    x_axis = range(0, epochs)
    # plot log loss
    fig, ax = plt.subplots()
    ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
    ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
    ax.legend()
    plt.ylabel('Log Loss')
    plt.title('XGBoost Log Loss')
    plt.show()
    # plot classification error
    fig, ax = plt.subplots()  # was pyplot.subplots(), a NameError with this import style
    ax.plot(x_axis, results['validation_0']['error'], label='Train')
    ax.plot(x_axis, results['validation_1']['error'], label='Test')
    ax.legend()
    plt.ylabel('Classification Error')
    plt.title('XGBoost Classification Error')
    plt.show()

def fit_models_and_train(country, state, model, train, test):
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    forecast_IDs = test.iloc[:, 0]
    test_no_id = test.iloc[:, 1:]
    if scale_data == True:
        scaler = MinMaxScaler()
        X = scaler.fit_transform(X.values)
        test_no_id = scaler.transform(test_no_id.values)
    y_cases_pred, cases_r2 = model_and_predict(model, X, y_cases, test_no_id)
    y_fatal_pred, fatal_r2 = model_and_predict(model, X, y_fatal, test_no_id)
    preds = pd.DataFrame(forecast_IDs)
    preds['ConfirmedCases'] = y_cases_pred
    preds['Fatalities'] = y_fatal_pred
    return preds

df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_train.shape
df_test.shape
df_train_original = df_train.copy()
df_test_original = df_test.copy()
df_train_original['Datetime'] = pd.to_datetime(df_train_original['Date'])
df_test_original['Datetime'] = pd.to_datetime(df_test_original['Date'])
date_filter = df_train[df_train.Date > '2020-04-01'].index  # dates in the CSV are YYYY-MM-DD strings
df_train.drop(date_filter, inplace=True)
df_train[df_train.Date > '2020-04-01']  # should now be empty
code
32062473/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32062473/cell_35
[ "text_html_output_1.png" ]
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

verbose = False
loop_logic = True
scale_data = True
use_base_model = True
one_hot_encode = False
estimators = 5000
public_leaderboard_end_date = None

def get_test_train_for_country_state(one_hot_encode_flag, df_train, df_test, country, state):
    if one_hot_encode_flag == True:
        cs_train = df_train[(df_train['Country_Region_' + country] == 1) & (df_train['Province_State_' + state] == 1)]
        cs_test = df_test[(df_test['Country_Region_' + country] == 1) & (df_test['Province_State_' + state] == 1)]
    else:
        cs_train = df_train[(df_train['Country_Region'] == country) & (df_train['Province_State'] == state)]
        cs_test = df_test[(df_test['Country_Region'] == country) & (df_test['Province_State'] == state)]
    return (cs_train, cs_test)

def transform_dates(df):
    dates = pd.to_datetime(df['Date'])
    min_dates = dates.min()
    df['Date_Year'] = dates.dt.year
    df['Date_Month'] = dates.dt.month
    df['Date_Day'] = dates.dt.day
    df.drop(['Date'], axis=1, inplace=True)

def setup_df_encode_and_dates(df, encode_flag, dummy_cols, target_cols=[]):
    enc_df = df.copy()
    enc_df = enc_df[[enc_df.columns[0], enc_df.columns[2], enc_df.columns[1], enc_df.columns[3]]]
    if encode_flag == True:
        enc_df = pd.get_dummies(enc_df, columns=dummy_cols)
    else:
        le = LabelEncoder()
        for dum_col in dummy_cols:
            enc_df[dum_col] = le.fit_transform(enc_df[dum_col])
    transform_dates(enc_df)
    for col in target_cols:
        enc_df[col] = df[col]
    return enc_df

def prepare_train_set(df_train):
    train_x, train_target1, train_target2 = (df_train.iloc[:, :-2], df_train.iloc[:, -2], df_train.iloc[:, -1])
    return (train_x, train_target1, train_target2)

def prepare_submission(preds):
    preds['ForecastId'] = preds['ForecastId'].fillna(0.0).astype('int32')
    preds['Fatalities'] = preds['Fatalities'].fillna(0.0).astype('int32')
    preds['ConfirmedCases'] = preds['ConfirmedCases'].fillna(0.0).astype('int32')
    preds.clip(lower=0, inplace=True)
    preds.to_csv('submission.csv', index=False)

def model_and_predict(model, X, y, test, estimators=5000):
    if model is not None:
        run_model = model
    else:
        run_model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=estimators)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12345)
    run_model.fit(X_train, y_train)
    y_train_pred = run_model.predict(X_train)
    y_test_pred = run_model.predict(X_test)
    y_pred = run_model.predict(test)
    y_pred[y_pred < 0] = 0
    r2 = r2_score(y_train, y_train_pred, multioutput='variance_weighted')  # y_true first, then predictions
    return (y_pred, r2)

def show_results(model):
    # Code based on "Selecting Optimal Parameters for XGBoost Model Training" by Andrej Baranovskij (Medium)
    results = model.evals_result()
    epochs = len(results['validation_0']['error'])
    x_axis = range(0, epochs)
    # plot log loss
    fig, ax = plt.subplots()
    ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
    ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
    ax.legend()
    plt.ylabel('Log Loss')
    plt.title('XGBoost Log Loss')
    plt.show()
    # plot classification error
    fig, ax = plt.subplots()  # was pyplot.subplots(), a NameError with this import style
    ax.plot(x_axis, results['validation_0']['error'], label='Train')
    ax.plot(x_axis, results['validation_1']['error'], label='Test')
    ax.legend()
    plt.ylabel('Classification Error')
    plt.title('XGBoost Classification Error')
    plt.show()

def fit_models_and_train(country, state, model, train, test):
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    forecast_IDs = test.iloc[:, 0]
    test_no_id = test.iloc[:, 1:]
    if scale_data == True:
        scaler = MinMaxScaler()
        X = scaler.fit_transform(X.values)
        test_no_id = scaler.transform(test_no_id.values)
    y_cases_pred, cases_r2 = model_and_predict(model, X, y_cases, test_no_id)
    y_fatal_pred, fatal_r2 = model_and_predict(model, X, y_fatal, test_no_id)
    preds = pd.DataFrame(forecast_IDs)
    preds['ConfirmedCases'] = y_cases_pred
    preds['Fatalities'] = y_fatal_pred
    return preds

def cv_model(country, state, train, test):
    # never called in this notebook; expects a 'parms' dict to be defined elsewhere
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    X_test = test.iloc[:, 1:]
    data_train_cases_matrix = xgb.DMatrix(data=X, label=y_cases)
    data_train_fatal_matrix = xgb.DMatrix(data=X, label=y_fatal)
    cv_results_cases = xgb.cv(dtrain=data_train_cases_matrix, params=parms, nfold=3, num_boost_round=50, early_stopping_rounds=50, metrics='rmse', as_pandas=True, seed=12345)
    cv_results_fatal = xgb.cv(dtrain=data_train_fatal_matrix, params=parms, nfold=3, num_boost_round=50, early_stopping_rounds=50, metrics='rmse', as_pandas=True, seed=12345)

df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_train.shape
df_test.shape
df_train_original = df_train.copy()
df_test_original = df_test.copy()
df_train_original['Datetime'] = pd.to_datetime(df_train_original['Date'])
df_test_original['Datetime'] = pd.to_datetime(df_test_original['Date'])
date_filter = df_train[df_train.Date > '2020-04-01'].index  # dates in the CSV are YYYY-MM-DD strings
df_train.drop(date_filter, inplace=True)
df_train[df_train.Date > '2020-04-01']
base_model = xgb.XGBRegressor(n_estimators=estimators, random_state=12345, max_depth=15)
if one_hot_encode == True:
    country_groups = df_train_original.groupby(['Country_Region', 'Province_State']).groups
    df_country_list = pd.DataFrame.from_dict(list(country_groups))
    train_country_list = df_country_list[0].unique()
df_train_dd = setup_df_encode_and_dates(df_train, one_hot_encode, ['Country_Region', 'Province_State'], ['ConfirmedCases', 'Fatalities'])
df_test_dd = setup_df_encode_and_dates(df_test, one_hot_encode, ['Country_Region', 'Province_State'])
if one_hot_encode == False:
    country_groups = df_train_dd.groupby(['Country_Region', 'Province_State']).groups
    df_country_list = pd.DataFrame.from_dict(list(country_groups))
    train_country_list = df_country_list[0].unique()
df_preds = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
if loop_logic == True:
    for country in train_country_list:
        country_states = df_country_list[df_country_list[0] == country][1].values
        for state in country_states:
            curr_cs_train, curr_cs_test = get_test_train_for_country_state(one_hot_encode, df_train_dd, df_test_dd, country, state)
            preds = fit_models_and_train(country, state, base_model if use_base_model == True else None, curr_cs_train, curr_cs_test)
            preds = preds.round(5)
            df_preds = pd.concat([df_preds, preds], axis=0)
else:
    preds = fit_models_and_train('All', 'All', base_model if use_base_model == True else None, df_train_dd, df_test_dd)
    df_preds = pd.concat([df_preds, preds], axis=0)
if public_leaderboard_end_date is not None:  # the original '~(x == None)' test was always truthy
    df_preds.loc[df_test_original.Datetime > pd.to_datetime(public_leaderboard_end_date), 'ConfirmedCases'] = 1
    df_preds.loc[df_test_original.Datetime > pd.to_datetime(public_leaderboard_end_date), 'Fatalities'] = 1
df_preds
code
32062473/cell_31
[ "text_plain_output_1.png" ]
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

verbose = False
loop_logic = True
scale_data = True
use_base_model = True
one_hot_encode = False
estimators = 5000
public_leaderboard_end_date = None

def transform_dates(df):
    dates = pd.to_datetime(df['Date'])
    min_dates = dates.min()
    df['Date_Year'] = dates.dt.year
    df['Date_Month'] = dates.dt.month
    df['Date_Day'] = dates.dt.day
    df.drop(['Date'], axis=1, inplace=True)

def setup_df_encode_and_dates(df, encode_flag, dummy_cols, target_cols=[]):
    enc_df = df.copy()
    enc_df = enc_df[[enc_df.columns[0], enc_df.columns[2], enc_df.columns[1], enc_df.columns[3]]]
    if encode_flag == True:
        enc_df = pd.get_dummies(enc_df, columns=dummy_cols)
    else:
        le = LabelEncoder()
        for dum_col in dummy_cols:
            enc_df[dum_col] = le.fit_transform(enc_df[dum_col])
    transform_dates(enc_df)
    for col in target_cols:
        enc_df[col] = df[col]
    return enc_df

def prepare_train_set(df_train):
    train_x, train_target1, train_target2 = (df_train.iloc[:, :-2], df_train.iloc[:, -2], df_train.iloc[:, -1])
    return (train_x, train_target1, train_target2)

def prepare_submission(preds):
    preds['ForecastId'] = preds['ForecastId'].fillna(0.0).astype('int32')
    preds['Fatalities'] = preds['Fatalities'].fillna(0.0).astype('int32')
    preds['ConfirmedCases'] = preds['ConfirmedCases'].fillna(0.0).astype('int32')
    preds.clip(lower=0, inplace=True)
    preds.to_csv('submission.csv', index=False)

def model_and_predict(model, X, y, test, estimators=5000):
    if model is not None:
        run_model = model
    else:
        run_model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=estimators)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12345)
    run_model.fit(X_train, y_train)
    y_train_pred = run_model.predict(X_train)
    y_test_pred = run_model.predict(X_test)
    y_pred = run_model.predict(test)
    y_pred[y_pred < 0] = 0
    r2 = r2_score(y_train, y_train_pred, multioutput='variance_weighted')  # y_true first, then predictions
    return (y_pred, r2)

def show_results(model):
    # Code based on "Selecting Optimal Parameters for XGBoost Model Training" by Andrej Baranovskij (Medium)
    results = model.evals_result()
    epochs = len(results['validation_0']['error'])
    x_axis = range(0, epochs)
    # plot log loss
    fig, ax = plt.subplots()
    ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
    ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
    ax.legend()
    plt.ylabel('Log Loss')
    plt.title('XGBoost Log Loss')
    plt.show()
    # plot classification error
    fig, ax = plt.subplots()  # was pyplot.subplots(), a NameError with this import style
    ax.plot(x_axis, results['validation_0']['error'], label='Train')
    ax.plot(x_axis, results['validation_1']['error'], label='Test')
    ax.legend()
    plt.ylabel('Classification Error')
    plt.title('XGBoost Classification Error')
    plt.show()

def fit_models_and_train(country, state, model, train, test):
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    forecast_IDs = test.iloc[:, 0]
    test_no_id = test.iloc[:, 1:]
    if scale_data == True:
        scaler = MinMaxScaler()
        X = scaler.fit_transform(X.values)
        test_no_id = scaler.transform(test_no_id.values)
    y_cases_pred, cases_r2 = model_and_predict(model, X, y_cases, test_no_id)
    y_fatal_pred, fatal_r2 = model_and_predict(model, X, y_fatal, test_no_id)
    preds = pd.DataFrame(forecast_IDs)
    preds['ConfirmedCases'] = y_cases_pred
    preds['Fatalities'] = y_fatal_pred
    return preds

def cv_model(country, state, train, test):
    # never called in this notebook; expects a 'parms' dict to be defined elsewhere
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    X_test = test.iloc[:, 1:]
    data_train_cases_matrix = xgb.DMatrix(data=X, label=y_cases)
    data_train_fatal_matrix = xgb.DMatrix(data=X, label=y_fatal)
    cv_results_cases = xgb.cv(dtrain=data_train_cases_matrix, params=parms, nfold=3, num_boost_round=50, early_stopping_rounds=50, metrics='rmse', as_pandas=True, seed=12345)
    cv_results_fatal = xgb.cv(dtrain=data_train_fatal_matrix, params=parms, nfold=3, num_boost_round=50, early_stopping_rounds=50, metrics='rmse', as_pandas=True, seed=12345)

base_model = xgb.XGBRegressor(n_estimators=estimators, random_state=12345, max_depth=15)
print('Model ID: {0}'.format(id(base_model)))
code
32062473/cell_22
[ "text_plain_output_1.png" ]
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

verbose = False
loop_logic = True
scale_data = True
use_base_model = True
one_hot_encode = False
estimators = 5000
public_leaderboard_end_date = None

def transform_dates(df):
    dates = pd.to_datetime(df['Date'])
    min_dates = dates.min()
    df['Date_Year'] = dates.dt.year
    df['Date_Month'] = dates.dt.month
    df['Date_Day'] = dates.dt.day
    df.drop(['Date'], axis=1, inplace=True)

def setup_df_encode_and_dates(df, encode_flag, dummy_cols, target_cols=[]):
    enc_df = df.copy()
    enc_df = enc_df[[enc_df.columns[0], enc_df.columns[2], enc_df.columns[1], enc_df.columns[3]]]
    if encode_flag == True:
        enc_df = pd.get_dummies(enc_df, columns=dummy_cols)
    else:
        le = LabelEncoder()
        for dum_col in dummy_cols:
            enc_df[dum_col] = le.fit_transform(enc_df[dum_col])
    transform_dates(enc_df)
    for col in target_cols:
        enc_df[col] = df[col]
    return enc_df

def prepare_train_set(df_train):
    train_x, train_target1, train_target2 = (df_train.iloc[:, :-2], df_train.iloc[:, -2], df_train.iloc[:, -1])
    return (train_x, train_target1, train_target2)

def prepare_submission(preds):
    preds['ForecastId'] = preds['ForecastId'].fillna(0.0).astype('int32')
    preds['Fatalities'] = preds['Fatalities'].fillna(0.0).astype('int32')
    preds['ConfirmedCases'] = preds['ConfirmedCases'].fillna(0.0).astype('int32')
    preds.clip(lower=0, inplace=True)
    preds.to_csv('submission.csv', index=False)

def model_and_predict(model, X, y, test, estimators=5000):
    if model is not None:
        run_model = model
    else:
        run_model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=estimators)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12345)
    run_model.fit(X_train, y_train)
    y_train_pred = run_model.predict(X_train)
    y_test_pred = run_model.predict(X_test)
    y_pred = run_model.predict(test)
    y_pred[y_pred < 0] = 0
    r2 = r2_score(y_train, y_train_pred, multioutput='variance_weighted')  # y_true first, then predictions
    return (y_pred, r2)

def show_results(model):
    # Code based on "Selecting Optimal Parameters for XGBoost Model Training" by Andrej Baranovskij (Medium)
    results = model.evals_result()
    epochs = len(results['validation_0']['error'])
    x_axis = range(0, epochs)
    # plot log loss
    fig, ax = plt.subplots()
    ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
    ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
    ax.legend()
    plt.ylabel('Log Loss')
    plt.title('XGBoost Log Loss')
    plt.show()
    # plot classification error
    fig, ax = plt.subplots()  # was pyplot.subplots(), a NameError with this import style
    ax.plot(x_axis, results['validation_0']['error'], label='Train')
    ax.plot(x_axis, results['validation_1']['error'], label='Test')
    ax.legend()
    plt.ylabel('Classification Error')
    plt.title('XGBoost Classification Error')
    plt.show()

def fit_models_and_train(country, state, model, train, test):
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    forecast_IDs = test.iloc[:, 0]
    test_no_id = test.iloc[:, 1:]
    if scale_data == True:
        scaler = MinMaxScaler()
        X = scaler.fit_transform(X.values)
        test_no_id = scaler.transform(test_no_id.values)
    y_cases_pred, cases_r2 = model_and_predict(model, X, y_cases, test_no_id)
    y_fatal_pred, fatal_r2 = model_and_predict(model, X, y_fatal, test_no_id)
    preds = pd.DataFrame(forecast_IDs)
    preds['ConfirmedCases'] = y_cases_pred
    preds['Fatalities'] = y_fatal_pred
    return preds

df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_test.shape
code
32062473/cell_27
[ "text_html_output_1.png" ]
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

verbose = False
loop_logic = True
scale_data = True
use_base_model = True
one_hot_encode = False
estimators = 5000
public_leaderboard_end_date = None

def transform_dates(df):
    dates = pd.to_datetime(df['Date'])
    min_dates = dates.min()
    df['Date_Year'] = dates.dt.year
    df['Date_Month'] = dates.dt.month
    df['Date_Day'] = dates.dt.day
    df.drop(['Date'], axis=1, inplace=True)

def setup_df_encode_and_dates(df, encode_flag, dummy_cols, target_cols=[]):
    enc_df = df.copy()
    enc_df = enc_df[[enc_df.columns[0], enc_df.columns[2], enc_df.columns[1], enc_df.columns[3]]]
    if encode_flag == True:
        enc_df = pd.get_dummies(enc_df, columns=dummy_cols)
    else:
        le = LabelEncoder()
        for dum_col in dummy_cols:
            enc_df[dum_col] = le.fit_transform(enc_df[dum_col])
    transform_dates(enc_df)
    for col in target_cols:
        enc_df[col] = df[col]
    return enc_df

def prepare_train_set(df_train):
    train_x, train_target1, train_target2 = (df_train.iloc[:, :-2], df_train.iloc[:, -2], df_train.iloc[:, -1])
    return (train_x, train_target1, train_target2)

def prepare_submission(preds):
    preds['ForecastId'] = preds['ForecastId'].fillna(0.0).astype('int32')
    preds['Fatalities'] = preds['Fatalities'].fillna(0.0).astype('int32')
    preds['ConfirmedCases'] = preds['ConfirmedCases'].fillna(0.0).astype('int32')
    preds.clip(lower=0, inplace=True)
    preds.to_csv('submission.csv', index=False)

def model_and_predict(model, X, y, test, estimators=5000):
    if model is not None:
        run_model = model
    else:
        run_model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=estimators)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12345)
    run_model.fit(X_train, y_train)
    y_train_pred = run_model.predict(X_train)
    y_test_pred = run_model.predict(X_test)
    y_pred = run_model.predict(test)
    y_pred[y_pred < 0] = 0
    r2 = r2_score(y_train, y_train_pred, multioutput='variance_weighted')  # y_true first, then predictions
    return (y_pred, r2)

def show_results(model):
    # Code based on "Selecting Optimal Parameters for XGBoost Model Training" by Andrej Baranovskij (Medium)
    results = model.evals_result()
    epochs = len(results['validation_0']['error'])
    x_axis = range(0, epochs)
    # plot log loss
    fig, ax = plt.subplots()
    ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
    ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
    ax.legend()
    plt.ylabel('Log Loss')
    plt.title('XGBoost Log Loss')
    plt.show()
    # plot classification error
    fig, ax = plt.subplots()  # was pyplot.subplots(), a NameError with this import style
    ax.plot(x_axis, results['validation_0']['error'], label='Train')
    ax.plot(x_axis, results['validation_1']['error'], label='Test')
    ax.legend()
    plt.ylabel('Classification Error')
    plt.title('XGBoost Classification Error')
    plt.show()

def fit_models_and_train(country, state, model, train, test):
    X, y_cases, y_fatal = prepare_train_set(train)
    X = X.drop(['Id'], axis=1)
    forecast_IDs = test.iloc[:, 0]
    test_no_id = test.iloc[:, 1:]
    if scale_data == True:
        scaler = MinMaxScaler()
        X = scaler.fit_transform(X.values)
        test_no_id = scaler.transform(test_no_id.values)
    y_cases_pred, cases_r2 = model_and_predict(model, X, y_cases, test_no_id)
    y_fatal_pred, fatal_r2 = model_and_predict(model, X, y_fatal, test_no_id)
    preds = pd.DataFrame(forecast_IDs)
    preds['ConfirmedCases'] = y_cases_pred
    preds['Fatalities'] = y_fatal_pred
    return preds

df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_train.shape
df_test.shape
df_train_original = df_train.copy()
df_test_original = df_test.copy()
df_train_original['Datetime'] = pd.to_datetime(df_train_original['Date'])
df_test_original['Datetime'] = pd.to_datetime(df_test_original['Date'])
df_train_original.head()
code
2044577/cell_21
[ "text_html_output_1.png" ]
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/train.csv')
actual_data = dataset
actual_data
dataset.dtypes

def clean_data(dataset):
    # drop rows with any missing value, then identifier-like columns
    data = dataset.dropna()
    data = data.drop('PassengerId', axis=1)
    data = data.drop('Name', axis=1)
    data = data.drop('Ticket', axis=1)
    return data

def encode_categorical_features(dataset):  # original name had a typo ('feautures')
    # label-encode every column (including the target)
    for column in dataset.columns:
        enc = LabelEncoder()
        dataset[column] = enc.fit_transform(dataset[column])
    return dataset

dataset = clean_data(dataset)
dataset = encode_categorical_features(dataset)
train_y = dataset['Survived']
train_x = dataset.drop('Survived', axis=1)
clf = GaussianNB()
clf.fit(train_x, train_y)
scores = cross_val_score(clf, train_x, train_y, cv=5)
clf = tree.DecisionTreeClassifier()
clf.fit(train_x, train_y)
scores = cross_val_score(clf, train_x, train_y, cv=5)
clf = RandomForestClassifier(n_estimators=10)
clf.fit(train_x, train_y)
scores = cross_val_score(clf, train_x, train_y, cv=5)
print('Random Forest Classifier :\n')
print('Accuracy: %0.2f (+/- %0.2f)' % (scores.mean(), scores.std() * 2))
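# Hedged aside (not in the original notebook): with the random forest fitted above,
# per-feature importances give a quick read on what the model relies on.
importances = pd.Series(clf.feature_importances_, index=train_x.columns).sort_values(ascending=False)
print(importances)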
code
2044577/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/train.csv')
actual_data = dataset
actual_data
dataset.dtypes
code
2044577/cell_11
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

dataset = pd.read_csv('../input/train.csv')
actual_data = dataset
actual_data
dataset.dtypes

def clean_data(dataset):
    data = dataset.dropna()
    data = data.drop('PassengerId', axis=1)
    data = data.drop('Name', axis=1)
    data = data.drop('Ticket', axis=1)
    return data

def encode_categorical_features(dataset):  # original name had a typo ('feautures')
    for column in dataset.columns:
        enc = LabelEncoder()
        dataset[column] = enc.fit_transform(dataset[column])
    return dataset

dataset = clean_data(dataset)
dataset = encode_categorical_features(dataset)
survived = dataset[dataset['Survived'] == 1]
survived_males = survived[survived['Sex'] == 0]
survived_females = survived[survived['Sex'] == 1]
sns.set_style('whitegrid')
classes = actual_data[actual_data['Survived'] == 0]
classes = classes.groupby(['Pclass'])['Pclass'].count()
sns.barplot(x=[1, 2, 3], y=classes)
code
2044577/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import tree
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/train.csv')
actual_data = dataset
actual_data
dataset.dtypes

def clean_data(dataset):
    data = dataset.dropna()
    data = data.drop('PassengerId', axis=1)
    data = data.drop('Name', axis=1)
    data = data.drop('Ticket', axis=1)
    return data

def encode_categorical_features(dataset):  # original name had a typo ('feautures')
    for column in dataset.columns:
        enc = LabelEncoder()
        dataset[column] = enc.fit_transform(dataset[column])
    return dataset

dataset = clean_data(dataset)
dataset = encode_categorical_features(dataset)
train_y = dataset['Survived']
train_x = dataset.drop('Survived', axis=1)
clf = GaussianNB()
clf.fit(train_x, train_y)
scores = cross_val_score(clf, train_x, train_y, cv=5)
clf = tree.DecisionTreeClassifier()
clf.fit(train_x, train_y)
scores = cross_val_score(clf, train_x, train_y, cv=5)
print('Decision Tree Classifier :\n')
print('Accuracy: %0.2f (+/- %0.2f)' % (scores.mean(), scores.std() * 2))
code
2044577/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import classification_report
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))

from sklearn.model_selection import cross_val_score
import seaborn as sns
code
2044577/cell_8
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

dataset = pd.read_csv('../input/train.csv')
actual_data = dataset
actual_data
dataset.dtypes

def clean_data(dataset):
    data = dataset.dropna()
    data = data.drop('PassengerId', axis=1)
    data = data.drop('Name', axis=1)
    data = data.drop('Ticket', axis=1)
    return data

def encode_categorical_features(dataset):  # original name had a typo ('feautures')
    for column in dataset.columns:
        enc = LabelEncoder()
        dataset[column] = enc.fit_transform(dataset[column])
    return dataset

dataset = clean_data(dataset)
dataset = encode_categorical_features(dataset)
survived = dataset[dataset['Survived'] == 1]
survived_males = survived[survived['Sex'] == 0]
survived_females = survived[survived['Sex'] == 1]
print('Out of Total Passengers ', len(dataset))
print('Males Survived :', len(survived_males))
print('Females Survived :', len(survived_females))
sns.set_style('whitegrid')
sns.barplot(x=['Males', 'Females'], y=[len(survived_males), len(survived_females)])
code
2044577/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/train.csv')
dataset.head()
actual_data = dataset
actual_data
code
2044577/cell_17
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/train.csv')
actual_data = dataset
actual_data
dataset.dtypes

def clean_data(dataset):
    data = dataset.dropna()
    data = data.drop('PassengerId', axis=1)
    data = data.drop('Name', axis=1)
    data = data.drop('Ticket', axis=1)
    return data

def encode_categorical_features(dataset):  # original name had a typo ('feautures')
    for column in dataset.columns:
        enc = LabelEncoder()
        dataset[column] = enc.fit_transform(dataset[column])
    return dataset

dataset = clean_data(dataset)
dataset = encode_categorical_features(dataset)
train_y = dataset['Survived']
train_x = dataset.drop('Survived', axis=1)
clf = GaussianNB()
clf.fit(train_x, train_y)
scores = cross_val_score(clf, train_x, train_y, cv=5)
print('Naive Bayes Classifier :\n')
print('Accuracy: %0.2f (+/- %0.2f)' % (scores.mean(), scores.std() * 2))
code
2044577/cell_12
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/train.csv')
actual_data = dataset
actual_data
dataset.dtypes

def clean_data(dataset):
    data = dataset.dropna()
    data = data.drop('PassengerId', axis=1)
    data = data.drop('Name', axis=1)
    data = data.drop('Ticket', axis=1)
    return data

def encode_categorical_features(dataset):  # original name had a typo ('feautures')
    for column in dataset.columns:
        enc = LabelEncoder()
        dataset[column] = enc.fit_transform(dataset[column])
    return dataset

dataset = clean_data(dataset)
dataset = encode_categorical_features(dataset)
dataset
code
129035264/cell_13
[ "text_plain_output_1.png" ]
import missingno as msno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.shape
train.duplicated().sum()
women = train.loc[train.Sex == 'female']['Survived']
women_sur_rate = sum(women) / len(women)
men = train.loc[train.Sex == 'male']['Survived']
men_sur_rate = sum(men) / len(men)
msno.matrix(train)
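# Hedged aside (not in the original notebook): missingno's bar chart is a handy
# complement to the nullity matrix above, showing non-null counts per column.
msno.bar(train)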
code
129035264/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.shape
test.info()
code
129035264/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.head()
code
129035264/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.shape
code
129035264/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.shape
test.duplicated().sum()
code
129035264/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129035264/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.shape
code
129035264/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.shape
train.info()
code
129035264/cell_15
[ "text_plain_output_1.png" ]
import missingno as msno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.shape
train.duplicated().sum()
women = train.loc[train.Sex == 'female']['Survived']
women_sur_rate = sum(women) / len(women)
men = train.loc[train.Sex == 'male']['Survived']
men_sur_rate = sum(men) / len(men)
msno.matrix(train)
train.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1, inplace=True)
msno.matrix(train)
train['Age'] = train['Age'].fillna(train['Age'].mean())  # avoids chained inplace fillna on a column view
msno.matrix(train)
code