path: string (lengths 13–17)
screenshot_names: sequence (lengths 1–873)
code: string (lengths 0–40.4k)
cell_type: string (1 class)
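Each record that follows pairs a notebook cell path with the names of its rendered output screenshots and its source code. A minimal sketch for loading and inspecting such a dump, assuming the records are exported as JSON Lines (the file name cells.jsonl is hypothetical):

import json

# Iterate over the dump; each record carries the four columns described above.
with open('cells.jsonl') as f:  # hypothetical export of this table
    for line in f:
        row = json.loads(line)
        print(row['path'], row['cell_type'], len(row['code']), len(row['screenshot_names']))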
88092182/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/train.csv', index_col=0)
test = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/test.csv', index_col=0)
submission = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2022/sample_submission.csv', index_col=0)
cols_with_missing_train = [col for col in train.columns if train[col].isnull().any()]
cols_with_missing_test = [col for col in test.columns if test[col].isnull().any()]
print('train data missing values are:', cols_with_missing_train)
print('test data missing values are:', cols_with_missing_test)
print('train data shape is:', train.shape)
print('test data shape is:', test.shape)
code
74042868/cell_4
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from astropy.io import fits
from skimage import data, io, filters

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
HST_OPEN[0].header['RA_TARG']
code
74042868/cell_8
[ "image_output_2.png", "image_output_1.png" ]
from astropy.io import fits
from scipy.ndimage import gaussian_filter
from skimage import data, io, filters
import matplotlib.pyplot as plt

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
SPECTRAL_LIST = ['gray', 'jet', 'hot', 'prism', 'nipy_spectral', 'gist_ncar', 'gist_earth', 'gist_stern', 'flag', 'gnuplot2', 'terrain']
GAUSS_ZOOMED_X2 = gaussian_filter(ZOOMED_X3_SCALE_HST, sigma=7)
GAUSS_NUCLEUS = gaussian_filter(NUCLEUS_SCALE_HST, sigma=7)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
code
74042868/cell_15
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from astropy.io import fits
from scipy.ndimage import gaussian_filter
from skimage import data, io, filters
import cv2
import matplotlib.pyplot as plt
import numpy as np

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
SPECTRAL_LIST = ['gray', 'jet', 'hot', 'prism', 'nipy_spectral', 'gist_ncar', 'gist_earth', 'gist_stern', 'flag', 'gnuplot2', 'terrain']
GAUSS_ZOOMED_X2 = gaussian_filter(ZOOMED_X3_SCALE_HST, sigma=7)
GAUSS_NUCLEUS = gaussian_filter(NUCLEUS_SCALE_HST, sigma=7)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
M_GAUSS_ZOOMED_X2 = gaussian_filter(M_ZOOMED_X3_SCALE_HST, sigma=7)
M_GAUSS_NUCLEUS = gaussian_filter(M_NUCLEUS_SCALE_HST, sigma=7)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(M_GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(M_GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
BM_GAUSS_ZOOMED_X2 = gaussian_filter(BM_ZOOMED_X3_SCALE_HST, sigma=5)
BM_GAUSS_NUCLEUS = gaussian_filter(BM_NUCLEUS_SCALE_HST, sigma=5)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(BM_GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(BM_GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
Clahe_Func = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(3, 3))
CLAHE_ZOOMED_X2 = Clahe_Func.apply(ZOOMED_X3_SCALE_HST.astype(np.uint8))
CLAHE_NUCLEUS_CLAHE = Clahe_Func.apply(NUCLEUS_SCALE_HST.astype(np.uint8))
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(CLAHE_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(CLAHE_NUCLEUS_CLAHE, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
code
74042868/cell_16
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from astropy.io import fits
from scipy.ndimage import gaussian_filter
from skimage import data, io, filters
import cv2
import matplotlib.pyplot as plt
import numpy as np

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
SPECTRAL_LIST = ['gray', 'jet', 'hot', 'prism', 'nipy_spectral', 'gist_ncar', 'gist_earth', 'gist_stern', 'flag', 'gnuplot2', 'terrain']
GAUSS_ZOOMED_X2 = gaussian_filter(ZOOMED_X3_SCALE_HST, sigma=7)
GAUSS_NUCLEUS = gaussian_filter(NUCLEUS_SCALE_HST, sigma=7)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
M_GAUSS_ZOOMED_X2 = gaussian_filter(M_ZOOMED_X3_SCALE_HST, sigma=7)
M_GAUSS_NUCLEUS = gaussian_filter(M_NUCLEUS_SCALE_HST, sigma=7)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(M_GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(M_GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
BM_GAUSS_ZOOMED_X2 = gaussian_filter(BM_ZOOMED_X3_SCALE_HST, sigma=5)
BM_GAUSS_NUCLEUS = gaussian_filter(BM_NUCLEUS_SCALE_HST, sigma=5)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(BM_GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(BM_GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
Clahe_Func = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(3, 3))
CLAHE_ZOOMED_X2 = Clahe_Func.apply(ZOOMED_X3_SCALE_HST.astype(np.uint8))
CLAHE_NUCLEUS_CLAHE = Clahe_Func.apply(NUCLEUS_SCALE_HST.astype(np.uint8))
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(CLAHE_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(CLAHE_NUCLEUS_CLAHE, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
figure = plt.figure(figsize=(20, 5))
powerSpectrum_1, frequenciesFound_1, time_1, imageAxis_1 = plt.specgram(ZOOMED_X3_SCALE_HST.flatten())
plt.axis('off')
plt.title('ZOOMED')
plt.tight_layout()
plt.show()
figure = plt.figure(figsize=(20, 5))
powerSpectrum_1, frequenciesFound_1, time_1, imageAxis_1 = plt.specgram(NUCLEUS_SCALE_HST.flatten())
plt.axis('off')
plt.title('NUCLEUS')
plt.tight_layout()
plt.show()
code
74042868/cell_10
[ "text_plain_output_1.png" ]
from astropy.io import fits
from scipy.ndimage import gaussian_filter
from skimage import data, io, filters
import matplotlib.pyplot as plt

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
SPECTRAL_LIST = ['gray', 'jet', 'hot', 'prism', 'nipy_spectral', 'gist_ncar', 'gist_earth', 'gist_stern', 'flag', 'gnuplot2', 'terrain']
GAUSS_ZOOMED_X2 = gaussian_filter(ZOOMED_X3_SCALE_HST, sigma=7)
GAUSS_NUCLEUS = gaussian_filter(NUCLEUS_SCALE_HST, sigma=7)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
M_GAUSS_ZOOMED_X2 = gaussian_filter(M_ZOOMED_X3_SCALE_HST, sigma=7)
M_GAUSS_NUCLEUS = gaussian_filter(M_NUCLEUS_SCALE_HST, sigma=7)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(M_GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(M_GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
code
74042868/cell_12
[ "text_plain_output_1.png" ]
from astropy.io import fits
from scipy.ndimage import gaussian_filter
from skimage import data, io, filters
import matplotlib.pyplot as plt

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
SPECTRAL_LIST = ['gray', 'jet', 'hot', 'prism', 'nipy_spectral', 'gist_ncar', 'gist_earth', 'gist_stern', 'flag', 'gnuplot2', 'terrain']
GAUSS_ZOOMED_X2 = gaussian_filter(ZOOMED_X3_SCALE_HST, sigma=7)
GAUSS_NUCLEUS = gaussian_filter(NUCLEUS_SCALE_HST, sigma=7)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
M_GAUSS_ZOOMED_X2 = gaussian_filter(M_ZOOMED_X3_SCALE_HST, sigma=7)
M_GAUSS_NUCLEUS = gaussian_filter(M_NUCLEUS_SCALE_HST, sigma=7)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(M_GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(M_GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
BM_GAUSS_ZOOMED_X2 = gaussian_filter(BM_ZOOMED_X3_SCALE_HST, sigma=5)
BM_GAUSS_NUCLEUS = gaussian_filter(BM_NUCLEUS_SCALE_HST, sigma=5)
for x_spec in SPECTRAL_LIST:
    figure, axis = plt.subplots(1, 2, figsize=(20, 20))
    axis[0].imshow(BM_GAUSS_ZOOMED_X2, cmap=x_spec)
    axis[0].set_title('ZOOMED' + ' / ' + x_spec)
    axis[0].axis('off')
    DENSITY_FUNC = axis[1].imshow(BM_GAUSS_NUCLEUS, cmap=x_spec)
    axis[1].set_title('NUCLEUS' + ' / ' + x_spec)
    axis[1].axis('off')
    figure.colorbar(DENSITY_FUNC, shrink=0.3, label='DENSITY', location='right', extend='max')
    plt.tight_layout()
    plt.show()
code
74042868/cell_5
[ "image_output_2.png", "image_output_1.png" ]
from astropy.io import fits
from skimage import data, io, filters

NEAR_INFRARED_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/NEAR_INFRARED/n4k48nbsq_cal.fits'
HST_OPTICAL_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/OPTICAL/HST/idk404050/idk404050_drc.fits'
XMM_NEWTON_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_NEWTON_Soft_Xray/P0200670301EPX0003COLIM8000.FTZ'
XMM_OM_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/XMM_OM_Optical/P0200670301OMX000LSIMAGB000.FTZ'
ISO_PATH = '../input/center-of-all-observable-galaxiesfits-allesa/GALAXIES_CENTER/NGC6946/ISO/csp3390040401.fits'
NI_OPEN = fits.open(NEAR_INFRARED_PATH)
HST_OPEN = fits.open(HST_OPTICAL_PATH)
XMM_NEWTON_OPEN = fits.open(XMM_NEWTON_PATH)
XMM_OM_OPEN = fits.open(XMM_OM_PATH)
ISO_OPEN = fits.open(ISO_PATH)
HST_SCI = HST_OPEN[1].data
ZOOMED_X3_SCALE_HST = HST_SCI[2450:2850, 2300:2700]
NUCLEUS_SCALE_HST = HST_SCI[2640:2850, 2330:2650]
SATO_ZOOMED_X3_SCALE_HST = filters.sato(ZOOMED_X3_SCALE_HST)
SATO_NUCLEUS_SCALE_HST = filters.sato(NUCLEUS_SCALE_HST)
M_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST)
M_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST)
BM_ZOOMED_X3_SCALE_HST = filters.meijering(ZOOMED_X3_SCALE_HST, black_ridges=False)
BM_NUCLEUS_SCALE_HST = filters.meijering(NUCLEUS_SCALE_HST, black_ridges=False)
HST_OPEN[0].header['DEC_TARG']
code
18159197/cell_21
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.svm import SVC

model_linear = SVC(kernel='linear')
model_linear.fit(X_train, y_train)
y_pred = model_linear.predict(X_test)
print(classification_report(y_test, y_pred))
print('Precision Score :: ', metrics.precision_score(y_test, y_pred, pos_label='positive', average='micro'), '\n')
print('Recall Score :: ', metrics.recall_score(y_test, y_pred, pos_label='positive', average='micro'), '\n')
code
18159197/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('train.csv')
df.shape
df.dtypes
round(df.isnull().sum() / len(df.index))
df.describe(percentiles=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.96, 0.97, 0.98, 0.99, 1])
code
18159197/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('train.csv')
df.shape
code
18159197/cell_30
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.svm import SVC
import pandas as pd

df = pd.read_csv('train.csv')
folds = KFold(n_splits=5, shuffle=True, random_state=101)
hyper_params = [{'gamma': [0.01, 0.001, 0.0001], 'C': [1, 10, 100, 1000]}]
model = SVC(kernel='rbf')
model_cv = GridSearchCV(estimator=model, param_grid=hyper_params, scoring='accuracy', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
best_score = model_cv.best_score_
best_hyperparams = model_cv.best_params_
print('The best test score is {0} corresponding to hyperparameters {1}'.format(best_score, best_hyperparams))
code
18159197/cell_20
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.svm import SVC

model_linear = SVC(kernel='linear')
model_linear.fit(X_train, y_train)
y_pred = model_linear.predict(X_test)
print('accuracy:', metrics.accuracy_score(y_true=y_test, y_pred=y_pred), '\n')
print(metrics.confusion_matrix(y_true=y_test, y_pred=y_pred))
code
18159197/cell_26
[ "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.svm import SVC

folds = KFold(n_splits=5, shuffle=True, random_state=101)
hyper_params = [{'gamma': [0.01, 0.001, 0.0001], 'C': [1, 10, 100, 1000]}]
model = SVC(kernel='rbf')
model_cv = GridSearchCV(estimator=model, param_grid=hyper_params, scoring='accuracy', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train, y_train)
code
18159197/cell_2
[ "text_plain_output_1.png" ]
import os

os.getcwd()
code
18159197/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('train.csv')
df.shape
df.dtypes
code
18159197/cell_28
[ "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('train.csv')
folds = KFold(n_splits=5, shuffle=True, random_state=101)
hyper_params = [{'gamma': [0.01, 0.001, 0.0001], 'C': [1, 10, 100, 1000]}]
model = SVC(kernel='rbf')
model_cv = GridSearchCV(estimator=model, param_grid=hyper_params, scoring='accuracy', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
cv_results['param_C'] = cv_results['param_C'].astype('int')
plt.figure(figsize=(16, 6))
plt.subplot(131)
gamma_01 = cv_results[cv_results['param_gamma'] == 0.01]
plt.plot(gamma_01['param_C'], gamma_01['mean_test_score'])
plt.plot(gamma_01['param_C'], gamma_01['mean_train_score'])
plt.xlabel('C')
plt.ylabel('Accuracy')
plt.title('Gamma=0.01')
plt.ylim([0.6, 1])
plt.legend(['test accuracy', 'train accuracy'], loc='upper left')
plt.xscale('log')
plt.subplot(132)
gamma_001 = cv_results[cv_results['param_gamma'] == 0.001]
plt.plot(gamma_001['param_C'], gamma_001['mean_test_score'])
plt.plot(gamma_001['param_C'], gamma_001['mean_train_score'])
plt.xlabel('C')
plt.ylabel('Accuracy')
plt.title('Gamma=0.001')
plt.ylim([0.6, 1])
plt.legend(['test accuracy', 'train accuracy'], loc='upper left')
plt.xscale('log')
plt.subplot(133)
gamma_0001 = cv_results[cv_results['param_gamma'] == 0.0001]
plt.plot(gamma_0001['param_C'], gamma_0001['mean_test_score'])
plt.plot(gamma_0001['param_C'], gamma_0001['mean_train_score'])
plt.xlabel('C')
plt.ylabel('Accuracy')
plt.title('Gamma=0.0001')
plt.ylim([0.6, 1])
plt.legend(['test accuracy', 'train accuracy'], loc='upper left')
plt.xscale('log')
code
18159197/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('train.csv')
df.head()
code
18159197/cell_31
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.svm import SVC

model_linear = SVC(kernel='linear')
model_linear.fit(X_train, y_train)
y_pred = model_linear.predict(X_test)
non_linear_model = SVC(kernel='rbf')
non_linear_model.fit(X_train, y_train)
y_pred = non_linear_model.predict(X_test)
folds = KFold(n_splits=5, shuffle=True, random_state=101)
hyper_params = [{'gamma': [0.01, 0.001, 0.0001], 'C': [1, 10, 100, 1000]}]
model = SVC(kernel='rbf')
model_cv = GridSearchCV(estimator=model, param_grid=hyper_params, scoring='accuracy', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train, y_train)
model = SVC(C=10, gamma=0.001, kernel='rbf')
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('accuracy', metrics.accuracy_score(y_test, y_pred), '\n')
print('Precision Score :: ', metrics.precision_score(y_test, y_pred, pos_label='positive', average='micro'))
print('Recall Score :: ', metrics.recall_score(y_test, y_pred, pos_label='positive', average='micro'))
print(classification_report(y_test, y_pred))
print(metrics.confusion_matrix(y_test, y_pred), '\n')
code
18159197/cell_24
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.svm import SVC

model_linear = SVC(kernel='linear')
model_linear.fit(X_train, y_train)
y_pred = model_linear.predict(X_test)
non_linear_model = SVC(kernel='rbf')
non_linear_model.fit(X_train, y_train)
y_pred = non_linear_model.predict(X_test)
print('accuracy:', metrics.accuracy_score(y_true=y_test, y_pred=y_pred), '\n')
print(metrics.confusion_matrix(y_true=y_test, y_pred=y_pred))
print('Precision Score :: ', metrics.precision_score(y_test, y_pred, pos_label='positive', average='micro'), '\n')
print('Recall Score :: ', metrics.recall_score(y_test, y_pred, pos_label='positive', average='micro'), '\n')
print(classification_report(y_test, y_pred))
code
18159197/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('train.csv')
df.shape
df.info()
code
18159197/cell_27
[ "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.svm import SVC
import pandas as pd

df = pd.read_csv('train.csv')
folds = KFold(n_splits=5, shuffle=True, random_state=101)
hyper_params = [{'gamma': [0.01, 0.001, 0.0001], 'C': [1, 10, 100, 1000]}]
model = SVC(kernel='rbf')
model_cv = GridSearchCV(estimator=model, param_grid=hyper_params, scoring='accuracy', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
code
18159197/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('train.csv')
df.shape
df.dtypes
round(df.isnull().sum() / len(df.index))
code
18159197/cell_5
[ "image_output_1.png" ]
import os

os.getcwd()
os.chdir('/kaggle')
os.chdir('input')
os.listdir()
code
121153678/cell_25
[ "text_html_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_update = music_dataset.set_index('id')
music_update['artists'] = music_update['artists'].str.strip("[]'")
music_update.isna().sum()
code
121153678/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_update = music_dataset.set_index('id')
music_update['artists'] = music_update['artists'].str.strip("[]'")
music_update.isna().sum()
music_update.duplicated().sum()
music_update = music_update.drop_duplicates()
music_update.duplicated().value_counts()
nameColumn = music_update.pop('name')
artistColumn = music_update.pop('artists')
music_update.insert(0, 'name', nameColumn)
music_update.insert(1, 'artist', artistColumn)
music_update.drop('release_date', axis=1)
code
121153678/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_update = music_dataset.set_index('id')
music_update['artists'] = music_update['artists'].str.strip("[]'")
music_update.head(1)
code
121153678/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_update = music_dataset.set_index('id')
music_update['artists'] = music_update['artists'].str.strip("[]'")
music_update.isna().sum()
music_update.duplicated().sum()
music_update = music_update.drop_duplicates()
music_update.duplicated().value_counts()
code
121153678/cell_26
[ "text_html_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_update = music_dataset.set_index('id')
music_update['artists'] = music_update['artists'].str.strip("[]'")
music_update.isna().sum()
music_update.duplicated().sum()
code
121153678/cell_41
[ "text_plain_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_update = music_dataset.set_index('id')
music_update['artists'] = music_update['artists'].str.strip("[]'")
music_update.isna().sum()
music_update.duplicated().sum()
music_update = music_update.drop_duplicates()
music_update.duplicated().value_counts()
nameColumn = music_update.pop('name')
artistColumn = music_update.pop('artists')
music_update.insert(0, 'name', nameColumn)
music_update.insert(1, 'artist', artistColumn)
music_update.drop('release_date', axis=1)
music_updated = music_update
Trends = music_updated[['acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness', 'valence', 'year']]
sound_features = ['acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness', 'valence']

def get_decade(year):
    period = int(year / 10) * 10
    decade = '{}s'.format(period)
    return decade

Trends['decade'] = Trends['year'].apply(get_decade)
Trends_updated = Trends.groupby('decade')[sound_features].mean()
Trends_updated
code
121153678/cell_11
[ "text_plain_output_1.png" ]
from deepface import DeepFace
import cv2
import matplotlib.pyplot as plt

index = 0

def emotions(image):
    img = cv2.imread(image)
    plt.imshow(img[:, :, ::-1])
    demography = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False, detector_backend='retinaface')
    return demography

emotion = emotions(image='/kaggle/input/newpic/2Q__ (2)_face.png')
dominant_emotion = emotion[index]['dominant_emotion']
print(emotion)
print('Dominant_emotion:', dominant_emotion)
code
121153678/cell_50
[ "text_plain_output_1.png", "image_output_1.png" ]
from deepface import DeepFace
import cv2
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

index = 0

def emotions(image):
    img = cv2.imread(image)
    demography = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False, detector_backend='retinaface')
    return demography

emotion = emotions(image='/kaggle/input/newpic/2Q__ (2)_face.png')
dominant_emotion = emotion[index]['dominant_emotion']
music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_update = music_dataset.set_index('id')
music_update['artists'] = music_update['artists'].str.strip("[]'")
music_update.isna().sum()
music_update.duplicated().sum()
music_update = music_update.drop_duplicates()
music_update.duplicated().value_counts()
nameColumn = music_update.pop('name')
artistColumn = music_update.pop('artists')
music_update.insert(0, 'name', nameColumn)
music_update.insert(1, 'artist', artistColumn)
music_update.drop('release_date', axis=1)
music_updated = music_update
Trends = music_updated[['acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness', 'valence', 'year']]
sound_features = ['acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness', 'valence']

def get_decade(year):
    period = int(year / 10) * 10
    decade = '{}s'.format(period)
    return decade

Trends['decade'] = Trends['year'].apply(get_decade)
Trends_updated = Trends.groupby('decade')[sound_features].mean()
Trends_updated
plt.figure(figsize=(12, 8))
sns.heatmap(Trends_updated.corr(), annot=True, square=True)
code
121153678/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
!pip install Deepface
code
121153678/cell_18
[ "text_html_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_update = music_dataset.set_index('id')
music_update.head(1)
code
121153678/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from deepface import DeepFace
import cv2
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from mlxtend.preprocessing import minmax_scaling
from sklearn.model_selection import train_test_split
from lightgbm import LGBMClassifier
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial.distance import cosine, euclidean, hamming
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
import lightgbm
import random
code
121153678/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.head(2)
code
121153678/cell_16
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_dataset.info()
code
121153678/cell_47
[ "text_html_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_update = music_dataset.set_index('id')
music_update['artists'] = music_update['artists'].str.strip("[]'")
music_update.isna().sum()
music_update.duplicated().sum()
music_update = music_update.drop_duplicates()
music_update.duplicated().value_counts()
nameColumn = music_update.pop('name')
artistColumn = music_update.pop('artists')
music_update.insert(0, 'name', nameColumn)
music_update.insert(1, 'artist', artistColumn)
music_update.drop('release_date', axis=1)
music_updated = music_update
Trends = music_updated[['acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness', 'valence', 'year']]
sound_features = ['acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness', 'valence']

def get_decade(year):
    period = int(year / 10) * 10
    decade = '{}s'.format(period)
    return decade

Trends['decade'] = Trends['year'].apply(get_decade)
Trends_updated = Trends.groupby('decade')[sound_features].mean()
Trends_updated
Trend_ms = music_updated[['year', 'duration_ms']]
Trend_ms['decade'] = Trend_ms['year'].apply(get_decade)
Trend_ms = Trend_ms.groupby('decade')['duration_ms'].mean()
Trend_ms.plot(legend=True, figsize=(15, 7))
code
121153678/cell_43
[ "text_html_output_1.png" ]
import pandas as pd

music_dataset = pd.read_csv('/kaggle/input/kaggledataupdated/KaggleData_updated.csv')
music_dataset.shape
music_update = music_dataset.set_index('id')
music_update['artists'] = music_update['artists'].str.strip("[]'")
music_update.isna().sum()
music_update.duplicated().sum()
music_update = music_update.drop_duplicates()
music_update.duplicated().value_counts()
nameColumn = music_update.pop('name')
artistColumn = music_update.pop('artists')
music_update.insert(0, 'name', nameColumn)
music_update.insert(1, 'artist', artistColumn)
music_update.drop('release_date', axis=1)
music_updated = music_update
Trends = music_updated[['acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness', 'valence', 'year']]
sound_features = ['acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness', 'valence']

def get_decade(year):
    period = int(year / 10) * 10
    decade = '{}s'.format(period)
    return decade

Trends['decade'] = Trends['year'].apply(get_decade)
Trends_updated = Trends.groupby('decade')[sound_features].mean()
Trends_updated
Trends_updated.plot(legend=True, figsize=(15, 7))
code
49116852/cell_10
[ "text_html_output_1.png" ]
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model
from sklearn.linear_model import LinearRegression, Ridge
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import metrics
from tensorflow.keras import regularizers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow_addons.layers import WeightNormalization
import glob
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle
import tensorflow as tf
import os

Train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
Train = Train.set_index('Id')
Test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Test = Test.set_index('Id')
preprocessing_path = '../input/house-price-eda'
train_scale = pd.read_csv(f'{preprocessing_path}/train_scaled.csv')
train_scale = train_scale.set_index('Id')
test_scale = pd.read_csv(f'{preprocessing_path}/test_scaled.csv')
try:
    test_scale = test_scale.rename(columns={'Unnamed: 0': 'Id'})
except:
    pass
test_scale = test_scale.set_index('Id')
Train = Train.loc[train_scale.index]
listing = glob.glob(f'{preprocessing_path}/*_one_hot_pickle')
for model_file in listing:
    col = model_file.split('_one_')[0]
    col = col.split('/')[-1]
    enc = pickle.load(open(model_file, 'rb'))
    new_cols = pd.DataFrame(enc.transform(Train[[col]]).toarray(), columns=f'{col}_' + enc.categories_[0])
    new_cols.index = Train.index
    Train = pd.concat([Train, new_cols], axis=1)
    Train = Train.drop(col, axis=1)
for log_col in train_scale.columns[train_scale.columns.str.endswith('log')]:
    col = log_col.split('_log')[0]
    Train[log_col] = np.log(Train[col])
    Train = Train.drop(col, axis=1)
Train = Train[train_scale.columns]
for model_file in listing:
    col = model_file.split('_one_')[0]
    col = col.split('/')[-1]
    enc = pickle.load(open(model_file, 'rb'))
    new_cols = pd.DataFrame(enc.transform(Test[[col]]).toarray(), columns=f'{col}_' + enc.categories_[0])
    new_cols.index = Test.index
    Test = pd.concat([Test, new_cols], axis=1)
    Test = Test.drop(col, axis=1)
for log_col in test_scale.columns[test_scale.columns.str.endswith('log')]:
    col = log_col.split('_log')[0]
    Test[log_col] = np.log(Test[col])
    Test = Test.drop(col, axis=1)
Test = Test[test_scale.columns]
for col in Test.columns:
    if len(set(Train[col])) > 2 and '_log' not in col:
        Train[f'{col}_log'] = np.log(Train[col] + 1)
        Train = Train.drop(col, axis=1)
        Test[f'{col}_log'] = np.log(Test[col] + 1)
        Test = Test.drop(col, axis=1)

model = 'linear'
X_Train = Train.drop('SalePrice_log', axis=1)
Y_Train = Train[['SalePrice_log']]

def NN_model():
    horsepower = np.array(X_Train)
    horsepower_normalizer = preprocessing.Normalization(input_shape=[X_Train.shape[1]])
    horsepower_normalizer.adapt(horsepower)
    horsepower_model = tf.keras.Sequential([
        horsepower_normalizer,
        WeightNormalization(layers.Dense(units=8, activation='linear')),
        layers.BatchNormalization(),
        layers.Dense(units=1, activation='linear'),
    ])
    horsepower_model.summary()
    METRICS = [metrics.MeanSquaredError()]
    horsepower_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.1), loss='mse', metrics=METRICS)
    return horsepower_model

if model == 'linear':
    reg = Ridge(alpha=1.0).fit(Train.drop('SalePrice_log', axis=1), Train[['SalePrice_log']])
    predict = reg.predict(Test)
elif model == 'NN':
    nn_model = NN_model()
    filepath = 'best_weights.hdf5'
    Monitor = 'loss'
    checkpoint = ModelCheckpoint(filepath, monitor=Monitor, verbose=1, save_best_only=True, mode='min')
    nn_model.fit(X_Train, Y_Train, epochs=1000, callbacks=[checkpoint])
    nn_model = load_model('best_weights.hdf5')
    predict = nn_model.predict(Test)
predict = pd.DataFrame(predict)
predict.index = Test.index
predict.columns = ['SalePrice_log']
predict['SalePrice'] = np.e ** predict['SalePrice_log']
predict = predict.drop('SalePrice_log', axis=1)
predict
code
49116852/cell_5
[ "text_html_output_1.png" ]
import glob
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle
import os

Train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
Train = Train.set_index('Id')
Test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Test = Test.set_index('Id')
preprocessing_path = '../input/house-price-eda'
train_scale = pd.read_csv(f'{preprocessing_path}/train_scaled.csv')
train_scale = train_scale.set_index('Id')
test_scale = pd.read_csv(f'{preprocessing_path}/test_scaled.csv')
try:
    test_scale = test_scale.rename(columns={'Unnamed: 0': 'Id'})
except:
    pass
test_scale = test_scale.set_index('Id')
Train = Train.loc[train_scale.index]
listing = glob.glob(f'{preprocessing_path}/*_one_hot_pickle')
for model_file in listing:
    col = model_file.split('_one_')[0]
    col = col.split('/')[-1]
    enc = pickle.load(open(model_file, 'rb'))
    new_cols = pd.DataFrame(enc.transform(Train[[col]]).toarray(), columns=f'{col}_' + enc.categories_[0])
    new_cols.index = Train.index
    Train = pd.concat([Train, new_cols], axis=1)
    Train = Train.drop(col, axis=1)
for log_col in train_scale.columns[train_scale.columns.str.endswith('log')]:
    col = log_col.split('_log')[0]
    Train[log_col] = np.log(Train[col])
    Train = Train.drop(col, axis=1)
Train = Train[train_scale.columns]
for model_file in listing:
    col = model_file.split('_one_')[0]
    col = col.split('/')[-1]
    enc = pickle.load(open(model_file, 'rb'))
    new_cols = pd.DataFrame(enc.transform(Test[[col]]).toarray(), columns=f'{col}_' + enc.categories_[0])
    new_cols.index = Test.index
    Test = pd.concat([Test, new_cols], axis=1)
    Test = Test.drop(col, axis=1)
for log_col in test_scale.columns[test_scale.columns.str.endswith('log')]:
    col = log_col.split('_log')[0]
    Test[log_col] = np.log(Test[col])
    Test = Test.drop(col, axis=1)
Test = Test[test_scale.columns]
for col in Test.columns:
    if len(set(Train[col])) > 2 and '_log' not in col:
        Train[f'{col}_log'] = np.log(Train[col] + 1)
        Train = Train.drop(col, axis=1)
        Test[f'{col}_log'] = np.log(Test[col] + 1)
        Test = Test.drop(col, axis=1)
Test
code
130008207/cell_21
[ "text_plain_output_1.png" ]
import tensorflow as tf

tf.constant(u'Thanks 😊')
tf.constant([u"You're", u'welcome!']).shape
text_utf8 = tf.constant(u'语言处理')
text_utf8
text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE'))
text_utf16be
text_chars = tf.constant([ord(char) for char in u'语言处理'])
text_chars
tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8')
tf.strings.unicode_encode(text_chars, output_encoding='UTF-8')
tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE')
batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']]
batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8')
batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1)
batch_chars_sparse = batch_chars_ragged.to_sparse()
nrows, ncols = batch_chars_sparse.dense_shape.numpy()
elements = [['_' for i in range(ncols)] for j in range(nrows)]
for (row, col), value in zip(batch_chars_sparse.indices.numpy(), batch_chars_sparse.values.numpy()):
    elements[row][col] = str(value)
value_lengths = []
for row in elements:
    for value in row:
        value_lengths.append(len(value))
max_width = max(value_lengths)
tf.strings.unicode_encode([[99, 97, 116], [100, 111, 103], [99, 111, 119]], output_encoding='UTF-8')
tf.strings.unicode_encode(batch_chars_ragged, output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_sparse(batch_chars_sparse), output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_tensor(batch_chars_padded, padding=-1), output_encoding='UTF-8')
code
130008207/cell_13
[ "text_plain_output_1.png" ]
import tensorflow as tf

tf.constant(u'Thanks 😊')
tf.constant([u"You're", u'welcome!']).shape
text_utf8 = tf.constant(u'语言处理')
text_utf8
text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE'))
text_utf16be
text_chars = tf.constant([ord(char) for char in u'语言处理'])
text_chars
tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8')
tf.strings.unicode_encode(text_chars, output_encoding='UTF-8')
tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE')
code
130008207/cell_9
[ "text_plain_output_1.png" ]
import tensorflow as tf

tf.constant(u'Thanks 😊')
tf.constant([u"You're", u'welcome!']).shape
text_utf8 = tf.constant(u'语言处理')
text_utf8
text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE'))
text_utf16be
text_chars = tf.constant([ord(char) for char in u'语言处理'])
text_chars
code
130008207/cell_25
[ "text_plain_output_1.png" ]
import tensorflow as tf

tf.constant(u'Thanks 😊')
tf.constant([u"You're", u'welcome!']).shape
text_utf8 = tf.constant(u'语言处理')
text_utf8
text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE'))
text_utf16be
text_chars = tf.constant([ord(char) for char in u'语言处理'])
text_chars
tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8')
tf.strings.unicode_encode(text_chars, output_encoding='UTF-8')
tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE')
batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']]
batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8')
batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1)
batch_chars_sparse = batch_chars_ragged.to_sparse()
nrows, ncols = batch_chars_sparse.dense_shape.numpy()
elements = [['_' for i in range(ncols)] for j in range(nrows)]
for (row, col), value in zip(batch_chars_sparse.indices.numpy(), batch_chars_sparse.values.numpy()):
    elements[row][col] = str(value)
value_lengths = []
for row in elements:
    for value in row:
        value_lengths.append(len(value))
max_width = max(value_lengths)
tf.strings.unicode_encode([[99, 97, 116], [100, 111, 103], [99, 111, 119]], output_encoding='UTF-8')
tf.strings.unicode_encode(batch_chars_ragged, output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_sparse(batch_chars_sparse), output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_tensor(batch_chars_padded, padding=-1), output_encoding='UTF-8')
thanks = u'Thanks 😊'.encode('UTF-8')
num_bytes = tf.strings.length(thanks).numpy()
num_chars = tf.strings.length(thanks, unit='UTF8_CHAR').numpy()
tf.strings.substr(thanks, pos=7, len=1).numpy()
code
130008207/cell_4
[ "text_plain_output_1.png" ]
import tensorflow as tf

tf.constant(u'Thanks 😊')
code
130008207/cell_23
[ "text_plain_output_1.png" ]
import tensorflow as tf

tf.constant(u'Thanks 😊')
tf.constant([u"You're", u'welcome!']).shape
text_utf8 = tf.constant(u'语言处理')
text_utf8
text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE'))
text_utf16be
text_chars = tf.constant([ord(char) for char in u'语言处理'])
text_chars
tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8')
tf.strings.unicode_encode(text_chars, output_encoding='UTF-8')
tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE')
batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']]
batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8')
batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1)
batch_chars_sparse = batch_chars_ragged.to_sparse()
nrows, ncols = batch_chars_sparse.dense_shape.numpy()
elements = [['_' for i in range(ncols)] for j in range(nrows)]
for (row, col), value in zip(batch_chars_sparse.indices.numpy(), batch_chars_sparse.values.numpy()):
    elements[row][col] = str(value)
value_lengths = []
for row in elements:
    for value in row:
        value_lengths.append(len(value))
max_width = max(value_lengths)
tf.strings.unicode_encode([[99, 97, 116], [100, 111, 103], [99, 111, 119]], output_encoding='UTF-8')
tf.strings.unicode_encode(batch_chars_ragged, output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_sparse(batch_chars_sparse), output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_tensor(batch_chars_padded, padding=-1), output_encoding='UTF-8')
thanks = u'Thanks 😊'.encode('UTF-8')
num_bytes = tf.strings.length(thanks).numpy()
num_chars = tf.strings.length(thanks, unit='UTF8_CHAR').numpy()
print('{} bytes; {} UTF-8 characters'.format(num_bytes, num_chars))
code
130008207/cell_30
[ "text_plain_output_1.png" ]
import tensorflow as tf

tf.constant(u'Thanks 😊')
tf.constant([u"You're", u'welcome!']).shape
text_utf8 = tf.constant(u'语言处理')
text_utf8
text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE'))
text_utf16be
text_chars = tf.constant([ord(char) for char in u'语言处理'])
text_chars
tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8')
tf.strings.unicode_encode(text_chars, output_encoding='UTF-8')
tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE')
batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']]
batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8')
batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1)
batch_chars_sparse = batch_chars_ragged.to_sparse()
nrows, ncols = batch_chars_sparse.dense_shape.numpy()
elements = [['_' for i in range(ncols)] for j in range(nrows)]
for (row, col), value in zip(batch_chars_sparse.indices.numpy(), batch_chars_sparse.values.numpy()):
    elements[row][col] = str(value)
value_lengths = []
for row in elements:
    for value in row:
        value_lengths.append(len(value))
max_width = max(value_lengths)
tf.strings.unicode_encode([[99, 97, 116], [100, 111, 103], [99, 111, 119]], output_encoding='UTF-8')
tf.strings.unicode_encode(batch_chars_ragged, output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_sparse(batch_chars_sparse), output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_tensor(batch_chars_padded, padding=-1), output_encoding='UTF-8')
thanks = u'Thanks 😊'.encode('UTF-8')
num_bytes = tf.strings.length(thanks).numpy()
num_chars = tf.strings.length(thanks, unit='UTF8_CHAR').numpy()
tf.strings.substr(thanks, pos=7, len=1).numpy()
tf.strings.unicode_split(thanks, 'UTF-8').numpy()
codepoints, offsets = tf.strings.unicode_decode_with_offsets(u'🎈🎉🎊', 'UTF-8')
for codepoint, offset in zip(codepoints.numpy(), offsets.numpy()):
    print('At byte offset {}: codepoint {}'.format(offset, codepoint))
code
130008207/cell_20
[ "text_plain_output_1.png" ]
import tensorflow as tf

tf.constant(u'Thanks 😊')
tf.constant([u"You're", u'welcome!']).shape
text_utf8 = tf.constant(u'语言处理')
text_utf8
text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE'))
text_utf16be
text_chars = tf.constant([ord(char) for char in u'语言处理'])
text_chars
tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8')
tf.strings.unicode_encode(text_chars, output_encoding='UTF-8')
tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE')
batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']]
batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8')
batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1)
batch_chars_sparse = batch_chars_ragged.to_sparse()
nrows, ncols = batch_chars_sparse.dense_shape.numpy()
elements = [['_' for i in range(ncols)] for j in range(nrows)]
for (row, col), value in zip(batch_chars_sparse.indices.numpy(), batch_chars_sparse.values.numpy()):
    elements[row][col] = str(value)
value_lengths = []
for row in elements:
    for value in row:
        value_lengths.append(len(value))
max_width = max(value_lengths)
tf.strings.unicode_encode([[99, 97, 116], [100, 111, 103], [99, 111, 119]], output_encoding='UTF-8')
tf.strings.unicode_encode(batch_chars_ragged, output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_sparse(batch_chars_sparse), output_encoding='UTF-8')
code
130008207/cell_26
[ "text_plain_output_1.png" ]
import tensorflow as tf

tf.constant(u'Thanks 😊')
tf.constant([u"You're", u'welcome!']).shape
text_utf8 = tf.constant(u'语言处理')
text_utf8
text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE'))
text_utf16be
text_chars = tf.constant([ord(char) for char in u'语言处理'])
text_chars
tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8')
tf.strings.unicode_encode(text_chars, output_encoding='UTF-8')
tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE')
batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']]
batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8')
batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1)
batch_chars_sparse = batch_chars_ragged.to_sparse()
nrows, ncols = batch_chars_sparse.dense_shape.numpy()
elements = [['_' for i in range(ncols)] for j in range(nrows)]
for (row, col), value in zip(batch_chars_sparse.indices.numpy(), batch_chars_sparse.values.numpy()):
    elements[row][col] = str(value)
value_lengths = []
for row in elements:
    for value in row:
        value_lengths.append(len(value))
max_width = max(value_lengths)
tf.strings.unicode_encode([[99, 97, 116], [100, 111, 103], [99, 111, 119]], output_encoding='UTF-8')
tf.strings.unicode_encode(batch_chars_ragged, output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_sparse(batch_chars_sparse), output_encoding='UTF-8')
tf.strings.unicode_encode(tf.RaggedTensor.from_tensor(batch_chars_padded, padding=-1), output_encoding='UTF-8')
thanks = u'Thanks 😊'.encode('UTF-8')
num_bytes = tf.strings.length(thanks).numpy()
num_chars = tf.strings.length(thanks, unit='UTF8_CHAR').numpy()
tf.strings.substr(thanks, pos=7, len=1).numpy()
print(tf.strings.substr(thanks, pos=7, len=1, unit='UTF8_CHAR').numpy())
code
130008207/cell_2
[ "text_plain_output_1.png" ]
import tensorflow as tf import numpy as np
code
130008207/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape text_utf8 = tf.constant(u'语言处理') text_utf8 text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE')) text_utf16be text_chars = tf.constant([ord(char) for char in u'语言处理']) text_chars tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8')
code
130008207/cell_19
[ "text_plain_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape text_utf8 = tf.constant(u'语言处理') text_utf8 text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE')) text_utf16be text_chars = tf.constant([ord(char) for char in u'语言处理']) text_chars tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8') tf.strings.unicode_encode(text_chars, output_encoding='UTF-8') tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE') batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']] batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8') batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1) batch_chars_sparse = batch_chars_ragged.to_sparse() nrows, ncols = batch_chars_sparse.dense_shape.numpy() elements = [['_' for i in range(ncols)] for j in range(nrows)] for (row, col), value in zip(batch_chars_sparse.indices.numpy(), batch_chars_sparse.values.numpy()): elements[row][col] = str(value) value_lengths = [] for row in elements: for value in row: value_lengths.append(len(value)) max_width = max(value_lengths) tf.strings.unicode_encode([[99, 97, 116], [100, 111, 103], [99, 111, 119]], output_encoding='UTF-8') tf.strings.unicode_encode(batch_chars_ragged, output_encoding='UTF-8')
code
130008207/cell_7
[ "text_plain_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape text_utf8 = tf.constant(u'语言处理') text_utf8
code
130008207/cell_18
[ "text_plain_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape text_utf8 = tf.constant(u'语言处理') text_utf8 text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE')) text_utf16be text_chars = tf.constant([ord(char) for char in u'语言处理']) text_chars tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8') tf.strings.unicode_encode(text_chars, output_encoding='UTF-8') tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE') batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']] batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8') tf.strings.unicode_encode([[99, 97, 116], [100, 111, 103], [99, 111, 119]], output_encoding='UTF-8')
code
130008207/cell_28
[ "text_plain_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape text_utf8 = tf.constant(u'语言处理') text_utf8 text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE')) text_utf16be text_chars = tf.constant([ord(char) for char in u'语言处理']) text_chars tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8') tf.strings.unicode_encode(text_chars, output_encoding='UTF-8') tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE') batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']] batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8') batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1) batch_chars_sparse = batch_chars_ragged.to_sparse() nrows, ncols = batch_chars_sparse.dense_shape.numpy() elements = [['_' for i in range(ncols)] for j in range(nrows)] for (row, col), value in zip(batch_chars_sparse.indices.numpy(), batch_chars_sparse.values.numpy()): elements[row][col] = str(value) value_lengths = [] for row in elements: for value in row: value_lengths.append(len(value)) max_width = max(value_lengths) tf.strings.unicode_encode([[99, 97, 116], [100, 111, 103], [99, 111, 119]], output_encoding='UTF-8') tf.strings.unicode_encode(batch_chars_ragged, output_encoding='UTF-8') tf.strings.unicode_encode(tf.RaggedTensor.from_sparse(batch_chars_sparse), output_encoding='UTF-8') tf.strings.unicode_encode(tf.RaggedTensor.from_tensor(batch_chars_padded, padding=-1), output_encoding='UTF-8') thanks = u'Thanks 😊'.encode('UTF-8') num_bytes = tf.strings.length(thanks).numpy() num_chars = tf.strings.length(thanks, unit='UTF8_CHAR').numpy() tf.strings.substr(thanks, pos=7, len=1).numpy() tf.strings.unicode_split(thanks, 'UTF-8').numpy()
code
130008207/cell_8
[ "text_plain_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape text_utf8 = tf.constant(u'语言处理') text_utf8 text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE')) text_utf16be
code
130008207/cell_15
[ "text_plain_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape text_utf8 = tf.constant(u'语言处理') text_utf8 text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE')) text_utf16be text_chars = tf.constant([ord(char) for char in u'语言处理']) text_chars tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8') tf.strings.unicode_encode(text_chars, output_encoding='UTF-8') tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE') batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']] batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8') for sentence_chars in batch_chars_ragged.to_list(): print(sentence_chars)
code
130008207/cell_16
[ "text_plain_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape text_utf8 = tf.constant(u'语言处理') text_utf8 text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE')) text_utf16be text_chars = tf.constant([ord(char) for char in u'语言处理']) text_chars tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8') tf.strings.unicode_encode(text_chars, output_encoding='UTF-8') tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE') batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']] batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8') batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1) print(batch_chars_padded.numpy())
code
130008207/cell_17
[ "text_plain_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape text_utf8 = tf.constant(u'语言处理') text_utf8 text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE')) text_utf16be text_chars = tf.constant([ord(char) for char in u'语言处理']) text_chars tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8') tf.strings.unicode_encode(text_chars, output_encoding='UTF-8') tf.strings.unicode_transcode(text_utf8, input_encoding='UTF8', output_encoding='UTF-16-BE') batch_utf8 = [s.encode('UTF-8') for s in [u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']] batch_chars_ragged = tf.strings.unicode_decode(batch_utf8, input_encoding='UTF-8') batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1) batch_chars_sparse = batch_chars_ragged.to_sparse() nrows, ncols = batch_chars_sparse.dense_shape.numpy() elements = [['_' for i in range(ncols)] for j in range(nrows)] for (row, col), value in zip(batch_chars_sparse.indices.numpy(), batch_chars_sparse.values.numpy()): elements[row][col] = str(value) value_lengths = [] for row in elements: for value in row: value_lengths.append(len(value)) max_width = max(value_lengths) print('[%s]' % '\n '.join(('[%s]' % ', '.join((value.rjust(max_width) for value in row)) for row in elements)))
code
130008207/cell_12
[ "text_plain_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape text_utf8 = tf.constant(u'语言处理') text_utf8 text_utf16be = tf.constant(u'语言处理'.encode('UTF-16-BE')) text_utf16be text_chars = tf.constant([ord(char) for char in u'语言处理']) text_chars tf.strings.unicode_decode(text_utf8, input_encoding='UTF-8') tf.strings.unicode_encode(text_chars, output_encoding='UTF-8')
code
130008207/cell_5
[ "text_plain_output_1.png" ]
import tensorflow as tf tf.constant(u'Thanks 😊') tf.constant([u"You're", u'welcome!']).shape
code
18100689/cell_13
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd train_set = pd.read_csv('../input/train.csv') test_set = pd.read_csv('../input/test.csv') image_array = np.asfarray(train_set.iloc[3, 1:]).reshape((28, 28)) X_train = train_set.iloc[:, 1:].values y_train = train_set.label.values from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(criterion='entropy', n_estimators=10, max_depth=3, random_state=0) classifier.fit(X_train, y_train) classifier.score(X_train, y_train) pred = classifier.predict(test_set) pred
code
18100689/cell_9
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd train_set = pd.read_csv('../input/train.csv') test_set = pd.read_csv('../input/test.csv') image_array = np.asfarray(train_set.iloc[3, 1:]).reshape((28, 28)) X_train = train_set.iloc[:, 1:].values y_train = train_set.label.values from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(criterion='entropy', n_estimators=10, max_depth=3, random_state=0) classifier.fit(X_train, y_train)
code
18100689/cell_15
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd train_set = pd.read_csv('../input/train.csv') test_set = pd.read_csv('../input/test.csv') image_array = np.asfarray(train_set.iloc[3, 1:]).reshape((28, 28)) X_train = train_set.iloc[:, 1:].values y_train = train_set.label.values from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(criterion='entropy', n_estimators=10, max_depth=3, random_state=0) classifier.fit(X_train, y_train) classifier.score(X_train, y_train) pred = classifier.predict(test_set) pred sub_lines = [] for i in range(0, len(pred)): sub_lines.append([i + 1, pred[i]]) submission = pd.DataFrame(sub_lines, columns=['ImageId', 'Label']) submission.to_csv('submission.csv', index=False) submission.head()
code
18100689/cell_3
[ "text_html_output_1.png" ]
import pandas as pd train_set = pd.read_csv('../input/train.csv') test_set = pd.read_csv('../input/test.csv') train_set.head()
code
18100689/cell_10
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd train_set = pd.read_csv('../input/train.csv') test_set = pd.read_csv('../input/test.csv') image_array = np.asfarray(train_set.iloc[3, 1:]).reshape((28, 28)) X_train = train_set.iloc[:, 1:].values y_train = train_set.label.values from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(criterion='entropy', n_estimators=10, max_depth=3, random_state=0) classifier.fit(X_train, y_train) classifier.score(X_train, y_train)
code
18100689/cell_12
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd train_set = pd.read_csv('../input/train.csv') test_set = pd.read_csv('../input/test.csv') test_set.head()
code
18100689/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd train_set = pd.read_csv('../input/train.csv') test_set = pd.read_csv('../input/test.csv') print(train_set.iloc[3, 0]) image_array = np.asfarray(train_set.iloc[3, 1:]).reshape((28, 28)) plt.imshow(image_array, cmap='Greys', interpolation='None')
code
90109598/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import seaborn as sb data = pd.read_csv('../input/insurance/insurance.csv') data.nunique() data.isnull().sum() data.corr() cor = data.corr() sb.heatmap(cor, xticklabels=cor.columns, yticklabels=cor.columns, annot=True)
code
90109598/cell_4
[ "text_html_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/insurance/insurance.csv') data.info()
code
90109598/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/insurance/insurance.csv') data.nunique()
code
90109598/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/insurance/insurance.csv') data.nunique() data.isnull().sum()
code
90109598/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/insurance/insurance.csv') data.head()
code
90109598/cell_10
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sb data = pd.read_csv('../input/insurance/insurance.csv') data.nunique() data.isnull().sum() data.corr() cor = data.corr() sb.pairplot(data)
code
90109598/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/insurance/insurance.csv') data.describe()
code
328194/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import trueskill as ts
dfResults = pd.read_csv('../input/201608-SanFracisco-HydrofoilProTour.csv')

def doRating(numRaces, dfResults):
    # Every competitor starts from the default TrueSkill rating.
    dfResults['Rating'] = [ts.Rating() for _ in range(len(dfResults))]
    for raceCol in range(1, numRaces + 1):
        # ts.rate expects one rating group per competitor, so wrap each rating
        # in a 1-tuple; the column name needs str() since raceCol is an int.
        rated = ts.rate([(r,) for r in dfResults['Rating']], ranks=dfResults['R' + str(raceCol)].tolist())
        dfResults['Rating'] = [group[0] for group in rated]
    return dfResults
dfResults = doRating(16, dfResults)
dfResults['Rating']
code
328194/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import trueskill as ts
dfResults = pd.read_csv('../input/201608-SanFracisco-HydrofoilProTour.csv')

def doRating(numRaces, dfResults):
    # Every competitor starts from the default TrueSkill rating.
    dfResults['Rating'] = [ts.Rating() for _ in range(len(dfResults))]
    for raceCol in range(1, numRaces + 1):
        # ts.rate expects one rating group per competitor, so wrap each rating
        # in a 1-tuple; the column name needs str() since raceCol is an int.
        rated = ts.rate([(r,) for r in dfResults['Rating']], ranks=dfResults['R' + str(raceCol)].tolist())
        dfResults['Rating'] = [group[0] for group in rated]
    return dfResults
dfResults = doRating(16, dfResults)
code
328194/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import trueskill as ts
dfResults = pd.read_csv('../input/201608-SanFracisco-HydrofoilProTour.csv')

def doRating(numRaces, dfResults):
    dfResults['Rating'] = [ts.Rating() for _ in range(len(dfResults))]
    for raceCol in range(1, numRaces + 1):
        rated = ts.rate([(r,) for r in dfResults['Rating']], ranks=dfResults['R' + str(raceCol)].tolist())
        dfResults['Rating'] = [group[0] for group in rated]
    return dfResults
dfResults = doRating(16, dfResults)
dfResults['Rating'] = ts.Rating()  # resets every row to the default rating
# rate() takes rating *groups*: a two-player free-for-all is two 1-tuples.
ts.rate([(ts.Rating(),), (ts.Rating(),)], ranks=[1, 0])
code
328194/cell_12
[ "text_plain_output_1.png" ]
import trueskill as ts
# r1 was never defined in this cell's context; a fresh default rating is assumed here.
r1 = ts.Rating()
r1
code
33104348/cell_2
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) titanic_df = pd.read_csv('/kaggle/input/titanic/train.csv') test_df = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_df.describe() titanic_df.head()
code
33104348/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
33104348/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) titanic_df = pd.read_csv('/kaggle/input/titanic/train.csv') test_df = pd.read_csv('/kaggle/input/titanic/test.csv') plt.rcParams['figure.figsize'] = (15, 10) fig, axes = plt.subplots(nrows=2, ncols=2) ax0, ax1, ax2, ax3 = axes.flatten() lived = titanic_df.loc[titanic_df['Survived'] == 1, 'Age'] died = titanic_df.loc[titanic_df['Survived'] == 0, 'Age'] ax0.hist([lived, died], stacked=True) ax0.legend(['Lived', 'Died']) ax0.set_title('Age') lived = titanic_df.loc[titanic_df['Survived'] == 1, 'Pclass'] died = titanic_df.loc[titanic_df['Survived'] == 0, 'Pclass'] ax1.hist([lived, died], stacked=True) ax1.legend(['Lived', 'Died']) ax1.set_title('Class (1 = Highest)') lived = titanic_df.loc[titanic_df['Survived'] == 1, 'Fare'] died = titanic_df.loc[titanic_df['Survived'] == 0, 'Fare'] ax2.hist([lived, died], stacked=True) ax2.legend(['Lived', 'Died']) ax2.set_title('Fare') lived = titanic_df.loc[titanic_df['Survived'] == 1, 'Sex'] died = titanic_df.loc[titanic_df['Survived'] == 0, 'Sex'] ax3.hist([lived, died], stacked=True) ax3.legend(['Lived', 'Died']) ax3.set_title('Gender')
code
17113309/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') train_data.head()
code
17113309/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras import layers from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D from keras.layers import AveragePooling2D, MaxPooling2D, Dropout from keras.models import Model from keras.preprocessing.image import ImageDataGenerator
code
17113309/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') x_train = train_data.drop(labels='label', axis=1) x_train = x_train / 255 test_data = test_data / 255 X_train = x_train.values.reshape(x_train.shape[0], 28, 28, 1) X_test = test_data.values.reshape(test_data.shape[0], 28, 28, 1) print(X_train.shape) print(X_test.shape)
code
17113309/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
x_train = train_data.drop(labels='label', axis=1)
x_train = x_train / 255
test_data = test_data / 255
X_train = x_train.values.reshape(x_train.shape[0], 28, 28, 1)
X_test = test_data.values.reshape(test_data.shape[0], 28, 28, 1)
# The *_split variables were undefined in this cell: one-hot encode the labels
# and hold out a validation split (split fraction assumed).
encoder = OneHotEncoder()
Y_train = encoder.fit_transform(train_data['label'].values.reshape(-1, 1)).toarray()
X_train_split, X_test_split, y_train_split, y_test_split = train_test_split(X_train, Y_train, test_size=0.1)

def keras_model(input_shape):
    X_input = Input(input_shape)
    X = ZeroPadding2D((3, 3))(X_input)
    X = Conv2D(48, (5, 5), strides=(1, 1), name='conv0')(X)
    X = BatchNormalization(axis=3, name='bn0')(X)
    X = Activation('relu')(X)
    X = Conv2D(32, (3, 3), strides=(1, 1), name='conv1')(X)
    X = BatchNormalization(axis=3, name='bn1')(X)
    X = Activation('relu')(X)
    X = Conv2D(16, (3, 3), strides=(1, 1), name='conv2')(X)
    X = BatchNormalization(axis=3, name='bn2')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((2, 2), name='max_pool')(X)
    X = Flatten()(X)
    X = Dense(10, activation='sigmoid', name='fc1')(X)
    model = Model(inputs=X_input, outputs=X, name='digit_model')
    return model
model = keras_model(X_train_split[0].shape)
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False)
datagen.fit(X_train)
batch_size = 16
epochs = 15
history = model.fit_generator(datagen.flow(X_train_split, y_train_split, batch_size=batch_size), epochs=epochs, validation_data=(X_test_split, y_test_split), verbose=2, steps_per_epoch=X_train_split.shape[0] // batch_size)
preds = model.evaluate(x=X_test_split, y=y_test_split)
print('Loss=', preds[0])
print('Accuracy=', preds[1])
code
17113309/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input'))
code
17113309/cell_7
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') x_train = train_data.drop(labels='label', axis=1) print('number of training examples', x_train.shape[0]) print('number of test examples', test_data.shape[0]) print('number of pixels', x_train.shape[1]) print('number of rows and columns', int(np.sqrt(x_train.shape[1])))
code
17113309/cell_15
[ "text_plain_output_1.png" ]
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.models import Model

def keras_model(input_shape):
    X_input = Input(input_shape)
    X = ZeroPadding2D((3, 3))(X_input)
    X = Conv2D(48, (5, 5), strides=(1, 1), name='conv0')(X)
    X = BatchNormalization(axis=3, name='bn0')(X)
    X = Activation('relu')(X)
    X = Conv2D(32, (3, 3), strides=(1, 1), name='conv1')(X)
    X = BatchNormalization(axis=3, name='bn1')(X)
    X = Activation('relu')(X)
    X = Conv2D(16, (3, 3), strides=(1, 1), name='conv2')(X)
    X = BatchNormalization(axis=3, name='bn2')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((2, 2), name='max_pool')(X)
    X = Flatten()(X)
    X = Dense(10, activation='sigmoid', name='fc1')(X)
    model = Model(inputs=X_input, outputs=X, name='digit_model')
    return model
# X_train_split is not defined in this cell; each 28x28x1 MNIST image has that shape.
model = keras_model((28, 28, 1))
code
17113309/cell_17
[ "text_plain_output_1.png" ]
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
x_train = train_data.drop(labels='label', axis=1)
x_train = x_train / 255
test_data = test_data / 255
X_train = x_train.values.reshape(x_train.shape[0], 28, 28, 1)
X_test = test_data.values.reshape(test_data.shape[0], 28, 28, 1)
# The *_split variables were undefined in this cell: one-hot encode the labels
# and hold out a validation split (split fraction assumed).
encoder = OneHotEncoder()
Y_train = encoder.fit_transform(train_data['label'].values.reshape(-1, 1)).toarray()
X_train_split, X_test_split, y_train_split, y_test_split = train_test_split(X_train, Y_train, test_size=0.1)

def keras_model(input_shape):
    X_input = Input(input_shape)
    X = ZeroPadding2D((3, 3))(X_input)
    X = Conv2D(48, (5, 5), strides=(1, 1), name='conv0')(X)
    X = BatchNormalization(axis=3, name='bn0')(X)
    X = Activation('relu')(X)
    X = Conv2D(32, (3, 3), strides=(1, 1), name='conv1')(X)
    X = BatchNormalization(axis=3, name='bn1')(X)
    X = Activation('relu')(X)
    X = Conv2D(16, (3, 3), strides=(1, 1), name='conv2')(X)
    X = BatchNormalization(axis=3, name='bn2')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((2, 2), name='max_pool')(X)
    X = Flatten()(X)
    X = Dense(10, activation='sigmoid', name='fc1')(X)
    model = Model(inputs=X_input, outputs=X, name='digit_model')
    return model
model = keras_model(X_train_split[0].shape)
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False)
datagen.fit(X_train)
batch_size = 16
epochs = 15
history = model.fit_generator(datagen.flow(X_train_split, y_train_split, batch_size=batch_size), epochs=epochs, validation_data=(X_test_split, y_test_split), verbose=2, steps_per_epoch=X_train_split.shape[0] // batch_size)
code
17113309/cell_12
[ "text_html_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') y_train = train_data['label'] from sklearn.preprocessing import OneHotEncoder encoder = OneHotEncoder() Y_train = encoder.fit_transform(y_train.values.reshape(-1, 1)) Y_train = Y_train.toarray()
code
72072145/cell_8
[ "text_plain_output_1.png" ]
from summarizer import Summarizer,TransformerSummarizer body = '\n Scientists say they have discovered a new species of orangutans on Indonesia’s island of Sumatra.\nThe population differs in several ways from the two existing orangutan species found in Sumatra and the neighboring island of Borneo.\nThe orangutans were found inside North Sumatra’s Batang Toru forest, the science publication Current Biology reported.\nResearchers named the new species the Tapanuli orangutan. They say the animals are considered a new species because of genetic, skeletal and tooth differences.\nMichael Kruetzen is a geneticist with the University of Zurich who has studied the orangutans for several years. He said he was excited to be part of the unusual discovery of a new great ape in the present day. He noted that most great apes are currently considered endangered or severely endangered.\nGorillas, chimpanzees and bonobos also belong to the great ape species.\nOrangutan – which means person of the forest in the Indonesian and Malay languages - is the world’s biggest tree-living mammal. The orange-haired animals can move easily among the trees because their arms are longer than their legs. They live more lonely lives than other great apes, spending a lot of time sleeping and eating fruit in the forest.\nThe new study said fewer than 800 of the newly-described orangutans exist. Their low numbers make the group the most endangered of all the great ape species.\nThey live within an area covering about 1,000 square kilometers. The population is considered highly vulnerable. That is because the environment which they depend on is greatly threatened by development.\nResearchers say if steps are not taken quickly to reduce the current and future threats, the new species could become extinct “within our lifetime.”\nResearch into the new species began in 2013, when an orangutan protection group in Sumatra found an injured orangutan in an area far away from the other species. The adult male orangutan had been beaten by local villagers and died of his injuries. The complete skull was examined by researchers.\nAmong the physical differences of the new species are a notably smaller head and frizzier hair. The Tapanuli orangutans also have a different diet and are found only in higher forest areas.\nThere is no unified international system for recognizing new species. But to be considered, discovery claims at least require publication in a major scientific publication.\nRussell Mittermeier is head of the primate specialist group at the International Union for the Conservation of Nature. He called the finding a “remarkable discovery.” He said it puts responsibility on the Indonesian government to help the species survive.\nMatthew Nowak is one of the writers of the study. He told the Associated Press that there are three groups of the Tapanuli orangutans that are separated by non-protected land.He said forest land needs to connect the separated groups.\nIn addition, the writers of the study are recommending that plans for a hydropower center in the area be stopped by the government.\nIt also recommended that remaining forest in the Sumatran area where the orangutans live be protected.\nI’m Bryan Lynn.\n\n ' bert_model = Summarizer() bert_summary = ''.join(bert_model(body, min_length=60)) print(bert_summary)
code
72072145/cell_16
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from gensim.summarization.summarizer import summarize
from pysummarization.abstractabledoc.top_n_rank_abstractor import TopNRankAbstractor
from pysummarization.nlpbase.auto_abstractor import AutoAbstractor
from pysummarization.tokenizabledoc.simple_tokenizer import SimpleTokenizer
body = '\n Scientists say they have discovered a new species of orangutans on Indonesia’s island of Sumatra.\nThe population differs in several ways from the two existing orangutan species found in Sumatra and the neighboring island of Borneo.\nThe orangutans were found inside North Sumatra’s Batang Toru forest, the science publication Current Biology reported.\nResearchers named the new species the Tapanuli orangutan. They say the animals are considered a new species because of genetic, skeletal and tooth differences.\nMichael Kruetzen is a geneticist with the University of Zurich who has studied the orangutans for several years. He said he was excited to be part of the unusual discovery of a new great ape in the present day. He noted that most great apes are currently considered endangered or severely endangered.\nGorillas, chimpanzees and bonobos also belong to the great ape species.\nOrangutan – which means person of the forest in the Indonesian and Malay languages - is the world’s biggest tree-living mammal. The orange-haired animals can move easily among the trees because their arms are longer than their legs. They live more lonely lives than other great apes, spending a lot of time sleeping and eating fruit in the forest.\nThe new study said fewer than 800 of the newly-described orangutans exist. Their low numbers make the group the most endangered of all the great ape species.\nThey live within an area covering about 1,000 square kilometers. The population is considered highly vulnerable. That is because the environment which they depend on is greatly threatened by development.\nResearchers say if steps are not taken quickly to reduce the current and future threats, the new species could become extinct “within our lifetime.”\nResearch into the new species began in 2013, when an orangutan protection group in Sumatra found an injured orangutan in an area far away from the other species. The adult male orangutan had been beaten by local villagers and died of his injuries. The complete skull was examined by researchers.\nAmong the physical differences of the new species are a notably smaller head and frizzier hair. The Tapanuli orangutans also have a different diet and are found only in higher forest areas.\nThere is no unified international system for recognizing new species. But to be considered, discovery claims at least require publication in a major scientific publication.\nRussell Mittermeier is head of the primate specialist group at the International Union for the Conservation of Nature. He called the finding a “remarkable discovery.” He said it puts responsibility on the Indonesian government to help the species survive.\nMatthew Nowak is one of the writers of the study. He told the Associated Press that there are three groups of the Tapanuli orangutans that are separated by non-protected land.He said forest land needs to connect the separated groups.\nIn addition, the writers of the study are recommending that plans for a hydropower center in the area be stopped by the government.\nIt also recommended that remaining forest in the Sumatran area where the orangutans live be protected.\nI’m Bryan Lynn.\n\n '
from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords

def summarize_text(text):
    try:
        summarized_text = summarize(text, word_count=150)
        if summarized_text != '':
            if summarized_text != text:
                return summarized_text
            else:
                return ''
        else:
            return ''
    except Exception as e:
        return ''
if __name__ == '__main__':
    sentence = body
from pysummarization.nlpbase.auto_abstractor import AutoAbstractor
from pysummarization.tokenizabledoc.simple_tokenizer import SimpleTokenizer
from pysummarization.abstractabledoc.top_n_rank_abstractor import TopNRankAbstractor
auto_abstractor = AutoAbstractor()
auto_abstractor.tokenizable_doc = SimpleTokenizer()
auto_abstractor.delimiter_list = ['.', '\n']
abstractable_doc = TopNRankAbstractor()
result_dict = auto_abstractor.summarize(body, abstractable_doc)
for sentence in result_dict['summarize_result']:
    print(sentence)
code
72072145/cell_3
[ "text_plain_output_1.png" ]
!pip install bert-extractive-summarizer !pip install transformers !pip install spacy !pip install gensim==3.8.0 !pip install pysummarization
code
72072145/cell_14
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_7.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from gensim.summarization.summarizer import summarize
body = '\n Scientists say they have discovered a new species of orangutans on Indonesia’s island of Sumatra.\nThe population differs in several ways from the two existing orangutan species found in Sumatra and the neighboring island of Borneo.\nThe orangutans were found inside North Sumatra’s Batang Toru forest, the science publication Current Biology reported.\nResearchers named the new species the Tapanuli orangutan. They say the animals are considered a new species because of genetic, skeletal and tooth differences.\nMichael Kruetzen is a geneticist with the University of Zurich who has studied the orangutans for several years. He said he was excited to be part of the unusual discovery of a new great ape in the present day. He noted that most great apes are currently considered endangered or severely endangered.\nGorillas, chimpanzees and bonobos also belong to the great ape species.\nOrangutan – which means person of the forest in the Indonesian and Malay languages - is the world’s biggest tree-living mammal. The orange-haired animals can move easily among the trees because their arms are longer than their legs. They live more lonely lives than other great apes, spending a lot of time sleeping and eating fruit in the forest.\nThe new study said fewer than 800 of the newly-described orangutans exist. Their low numbers make the group the most endangered of all the great ape species.\nThey live within an area covering about 1,000 square kilometers. The population is considered highly vulnerable. That is because the environment which they depend on is greatly threatened by development.\nResearchers say if steps are not taken quickly to reduce the current and future threats, the new species could become extinct “within our lifetime.”\nResearch into the new species began in 2013, when an orangutan protection group in Sumatra found an injured orangutan in an area far away from the other species. The adult male orangutan had been beaten by local villagers and died of his injuries. The complete skull was examined by researchers.\nAmong the physical differences of the new species are a notably smaller head and frizzier hair. The Tapanuli orangutans also have a different diet and are found only in higher forest areas.\nThere is no unified international system for recognizing new species. But to be considered, discovery claims at least require publication in a major scientific publication.\nRussell Mittermeier is head of the primate specialist group at the International Union for the Conservation of Nature. He called the finding a “remarkable discovery.” He said it puts responsibility on the Indonesian government to help the species survive.\nMatthew Nowak is one of the writers of the study. He told the Associated Press that there are three groups of the Tapanuli orangutans that are separated by non-protected land.He said forest land needs to connect the separated groups.\nIn addition, the writers of the study are recommending that plans for a hydropower center in the area be stopped by the government.\nIt also recommended that remaining forest in the Sumatran area where the orangutans live be protected.\nI’m Bryan Lynn.\n\n '
from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords

def summarize_text(text):
    try:
        summarized_text = summarize(text, word_count=150)
        if summarized_text != '':
            if summarized_text != text:
                return summarized_text
            else:
                return ''
        else:
            return ''
    except Exception as e:
        print('Exception In summary-->')
        print(str(e))
        return ''
if __name__ == '__main__':
    sentence = body
    print(summarize_text(sentence))
code
72072145/cell_10
[ "text_plain_output_1.png" ]
from summarizer import Summarizer,TransformerSummarizer body = '\n Scientists say they have discovered a new species of orangutans on Indonesia’s island of Sumatra.\nThe population differs in several ways from the two existing orangutan species found in Sumatra and the neighboring island of Borneo.\nThe orangutans were found inside North Sumatra’s Batang Toru forest, the science publication Current Biology reported.\nResearchers named the new species the Tapanuli orangutan. They say the animals are considered a new species because of genetic, skeletal and tooth differences.\nMichael Kruetzen is a geneticist with the University of Zurich who has studied the orangutans for several years. He said he was excited to be part of the unusual discovery of a new great ape in the present day. He noted that most great apes are currently considered endangered or severely endangered.\nGorillas, chimpanzees and bonobos also belong to the great ape species.\nOrangutan – which means person of the forest in the Indonesian and Malay languages - is the world’s biggest tree-living mammal. The orange-haired animals can move easily among the trees because their arms are longer than their legs. They live more lonely lives than other great apes, spending a lot of time sleeping and eating fruit in the forest.\nThe new study said fewer than 800 of the newly-described orangutans exist. Their low numbers make the group the most endangered of all the great ape species.\nThey live within an area covering about 1,000 square kilometers. The population is considered highly vulnerable. That is because the environment which they depend on is greatly threatened by development.\nResearchers say if steps are not taken quickly to reduce the current and future threats, the new species could become extinct “within our lifetime.”\nResearch into the new species began in 2013, when an orangutan protection group in Sumatra found an injured orangutan in an area far away from the other species. The adult male orangutan had been beaten by local villagers and died of his injuries. The complete skull was examined by researchers.\nAmong the physical differences of the new species are a notably smaller head and frizzier hair. The Tapanuli orangutans also have a different diet and are found only in higher forest areas.\nThere is no unified international system for recognizing new species. But to be considered, discovery claims at least require publication in a major scientific publication.\nRussell Mittermeier is head of the primate specialist group at the International Union for the Conservation of Nature. He called the finding a “remarkable discovery.” He said it puts responsibility on the Indonesian government to help the species survive.\nMatthew Nowak is one of the writers of the study. He told the Associated Press that there are three groups of the Tapanuli orangutans that are separated by non-protected land.He said forest land needs to connect the separated groups.\nIn addition, the writers of the study are recommending that plans for a hydropower center in the area be stopped by the government.\nIt also recommended that remaining forest in the Sumatran area where the orangutans live be protected.\nI’m Bryan Lynn.\n\n ' GPT2_model = TransformerSummarizer(transformer_type='GPT2', transformer_model_key='gpt2-medium') full = ''.join(GPT2_model(body, min_length=60)) print(full)
code
72072145/cell_12
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_7.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from summarizer import Summarizer,TransformerSummarizer body = '\n Scientists say they have discovered a new species of orangutans on Indonesia’s island of Sumatra.\nThe population differs in several ways from the two existing orangutan species found in Sumatra and the neighboring island of Borneo.\nThe orangutans were found inside North Sumatra’s Batang Toru forest, the science publication Current Biology reported.\nResearchers named the new species the Tapanuli orangutan. They say the animals are considered a new species because of genetic, skeletal and tooth differences.\nMichael Kruetzen is a geneticist with the University of Zurich who has studied the orangutans for several years. He said he was excited to be part of the unusual discovery of a new great ape in the present day. He noted that most great apes are currently considered endangered or severely endangered.\nGorillas, chimpanzees and bonobos also belong to the great ape species.\nOrangutan – which means person of the forest in the Indonesian and Malay languages - is the world’s biggest tree-living mammal. The orange-haired animals can move easily among the trees because their arms are longer than their legs. They live more lonely lives than other great apes, spending a lot of time sleeping and eating fruit in the forest.\nThe new study said fewer than 800 of the newly-described orangutans exist. Their low numbers make the group the most endangered of all the great ape species.\nThey live within an area covering about 1,000 square kilometers. The population is considered highly vulnerable. That is because the environment which they depend on is greatly threatened by development.\nResearchers say if steps are not taken quickly to reduce the current and future threats, the new species could become extinct “within our lifetime.”\nResearch into the new species began in 2013, when an orangutan protection group in Sumatra found an injured orangutan in an area far away from the other species. The adult male orangutan had been beaten by local villagers and died of his injuries. The complete skull was examined by researchers.\nAmong the physical differences of the new species are a notably smaller head and frizzier hair. The Tapanuli orangutans also have a different diet and are found only in higher forest areas.\nThere is no unified international system for recognizing new species. But to be considered, discovery claims at least require publication in a major scientific publication.\nRussell Mittermeier is head of the primate specialist group at the International Union for the Conservation of Nature. He called the finding a “remarkable discovery.” He said it puts responsibility on the Indonesian government to help the species survive.\nMatthew Nowak is one of the writers of the study. He told the Associated Press that there are three groups of the Tapanuli orangutans that are separated by non-protected land.He said forest land needs to connect the separated groups.\nIn addition, the writers of the study are recommending that plans for a hydropower center in the area be stopped by the government.\nIt also recommended that remaining forest in the Sumatran area where the orangutans live be protected.\nI’m Bryan Lynn.\n\n ' GPT2_model = TransformerSummarizer(transformer_type='GPT2', transformer_model_key='gpt2-medium') full = ''.join(GPT2_model(body, min_length=60)) model = TransformerSummarizer(transformer_type='XLNet', transformer_model_key='xlnet-base-cased') full = ''.join(model(body, min_length=60)) print(full)
code
1008563/cell_21
[ "text_plain_output_1.png" ]
from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import ExtraTreesRegressor import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('../input/HR_comma_sep.csv') df.isnull().any() df = df.rename(columns={'sales': 'job'}) X = np.array(df.drop('left', 1)) y = np.array(df['left']) model = ExtraTreesClassifier() model.fit(X, y) feature_list = list(df.drop('left', 1).columns) feature_importance_dict = dict(zip(feature_list, model.feature_importances_)) from sklearn.ensemble import ExtraTreesRegressor model = ExtraTreesRegressor() X = df.drop(['left', 'satisfaction_level'], axis=1) y = df['satisfaction_level'] model.fit(X, y) feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns) feature_importance_dict = dict(zip(feature_list, model.feature_importances_)) df1 = df.copy() group_name = list(range(20)) df1['last_evaluation'] = pd.cut(df1['last_evaluation'], 20, labels=group_name) df1['average_montly_hours'] = pd.cut(df1['average_montly_hours'], 20, labels=group_name) "\n{0: '(149.5, 160.2]', 1: '(256.5, 267.2]', 2: '(267.2, 277.9]', 3: '(213.7, 224.4]', 4: '(245.8, 256.5]', 5: '(138.8, 149.5]',\n 6: '(128.1, 138.8]', 7: '(299.3, 310]', 8: '(224.4, 235.1]', 9: '(277.9, 288.6]', 10: '(235.1, 245.8]'\n , 11: '(117.4, 128.1]', 12: '(288.6, 299.3]', 13: '(181.6, 192.3]', 14: '(160.2, 170.9]',\n 15: '(170.9, 181.6]', 16: '(192.3, 203]', 17: '(203, 213.7]', 18: '(106.7, 117.4]',\n 19: '(95.786, 106.7]'}\n " sns.pointplot(df['number_project'], df['last_evaluation'])
code
1008563/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import ExtraTreesRegressor import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('../input/HR_comma_sep.csv') df.isnull().any() df = df.rename(columns={'sales': 'job'}) X = np.array(df.drop('left', 1)) y = np.array(df['left']) model = ExtraTreesClassifier() model.fit(X, y) feature_list = list(df.drop('left', 1).columns) feature_importance_dict = dict(zip(feature_list, model.feature_importances_)) from sklearn.ensemble import ExtraTreesRegressor model = ExtraTreesRegressor() X = df.drop(['left', 'satisfaction_level'], axis=1) y = df['satisfaction_level'] model.fit(X, y) feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns) feature_importance_dict = dict(zip(feature_list, model.feature_importances_)) plt.scatter(df['satisfaction_level'], df['average_montly_hours']) plt.ylabel('average_montly_hours') plt.xlabel('satisfaction_level')
code