Dataset columns:
path — string, length 13 to 17
screenshot_names — sequence, length 1 to 873
code — string, length 0 to 40.4k
cell_type — string, 1 class
90153696/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd

df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
code
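The cell above scores the model on the same rows it was fit to, so the R² it reports is an in-sample figure. A minimal sketch of a held-out evaluation under the same assumptions about bodyfat.csv (the split ratio and random_state are arbitrary choices, not from the notebook):

import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
X = df[['BodyFat', 'Age']]
y = df['Density']

# hold out a test split so the reported R^2 reflects generalization
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression().fit(X_train, y_train)
print(model.score(X_test, y_test))  # R^2 on unseen rows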
90153696/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df.head()
code
90153696/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd

df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
code
90153696/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
y.head()
code
90153696/cell_22
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd

df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
# note: BodyFat is a percentage in this dataset, so inputs like 6000
# lie far outside the observed range and these predictions are extrapolations
model.predict([[6000, 3]])
model.predict([[10000, 3]])
model.predict([[6000, 4]])
y_hat = model.predict(X)
y_hat
code
90153696/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
sns.lmplot(x='BodyFat', y='Density', data=df, ci=None)
code
50214099/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
image.save('opera_house.png', format='PNG')
new_img = Image.open('./opera_house.png')

gray_img = ImageOps.grayscale(new_img)
gray_img_arr = np.asarray(gray_img)

"""
Current image dimensions, width x height, are (640, 360). thumbnail() resizes the
larger dimension, i.e. 640, down to 100, and the other dimension is rescaled to
maintain the aspect ratio. Standard resampling algorithms are used to invent or
remove pixels when resizing, and you can specify a technique, although the default
bicubic resampling suits most general applications.
"""
new_img.thumbnail((100, 100))

hoz_flip = image.transpose(Image.FLIP_LEFT_RIGHT)
ver_flip = image.transpose(Image.FLIP_TOP_BOTTOM)

"""
In both rotations, the pixels are clipped to the original dimensions of the image
and the empty pixels are filled with black.
"""

"""
Here we create a cropped 100 x 100 pixel square, starting at (100, 100) and
extending down and to the right to (200, 200).
"""
cropped_img = image.crop((100, 100, 200, 200))

image = Image.open('../input/bridge-image/sydney_bridge.png')
img_arr = np.asarray(image)
print('Type %s' % img_arr.dtype)
print('min pixel value %s and max pixel value %s' % (img_arr.min(), img_arr.max()))
img_arr = img_arr.astype('float32')
print('Type : %s' % img_arr.dtype)
img_arr = img_arr / 255.0  # normalize pixel values to [0, 1]
print('min pixel value %.3f and max pixel value %.3f' % (img_arr.min(), img_arr.max()))
code
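The thumbnail docstring above notes that a resampling filter can be specified. A small sketch of passing one explicitly, assuming opera_house.png was already saved by an earlier cell and a Pillow version where the Image.LANCZOS constant is available (newer releases also expose it as Image.Resampling.LANCZOS):

from PIL import Image

img = Image.open('./opera_house.png')
# LANCZOS is a high-quality downsampling filter; the default bicubic
# resampling is faster and adequate for most uses
img.thumbnail((100, 100), Image.LANCZOS)
print(img.size)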
50214099/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
image.save('opera_house.png', format='PNG')
new_img = Image.open('./opera_house.png')

gray_img = ImageOps.grayscale(new_img)
gray_img_arr = np.asarray(gray_img)

"""
Current image dimensions, width x height, are (640, 360). thumbnail() resizes the
larger dimension, i.e. 640, down to 100, and the other dimension is rescaled to
maintain the aspect ratio. Standard resampling algorithms are used to invent or
remove pixels when resizing, and you can specify a technique, although the default
bicubic resampling suits most general applications.
"""
new_img.thumbnail((100, 100))

hoz_flip = image.transpose(Image.FLIP_LEFT_RIGHT)
ver_flip = image.transpose(Image.FLIP_TOP_BOTTOM)

"""
In both rotations, the pixels are clipped to the original dimensions of the image
and the empty pixels are filled with black.
"""

"""
Here we create a cropped 100 x 100 pixel square, starting at (100, 100) and
extending down and to the right to (200, 200).
"""
cropped_img = image.crop((100, 100, 200, 200))

image = Image.open('../input/bridge-image/sydney_bridge.png')
plt.imshow(image)
code
50214099/cell_20
[ "text_plain_output_1.png" ]
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
image.save('opera_house.png', format='PNG')
new_img = Image.open('./opera_house.png')

gray_img = ImageOps.grayscale(new_img)
gray_img_arr = np.asarray(gray_img)

"""
Current image dimensions, width x height, are (640, 360). thumbnail() resizes the
larger dimension, i.e. 640, down to 100, and the other dimension is rescaled to
maintain the aspect ratio. Standard resampling algorithms are used to invent or
remove pixels when resizing, and you can specify a technique, although the default
bicubic resampling suits most general applications.
"""
new_img.thumbnail((100, 100))

hoz_flip = image.transpose(Image.FLIP_LEFT_RIGHT)
ver_flip = image.transpose(Image.FLIP_TOP_BOTTOM)

"""
In both rotations, the pixels are clipped to the original dimensions of the image
and the empty pixels are filled with black.
"""

"""
Here we create a cropped 100 x 100 pixel square, starting at (100, 100) and
extending down and to the right to (200, 200).
"""
cropped_img = image.crop((100, 100, 200, 200))
plt.imshow(cropped_img)
code
50214099/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50214099/cell_7
[ "text_plain_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
print(img.mode)
print(img.size)
print(img.format)
code
50214099/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
image.save('opera_house.png', format='PNG')
new_img = Image.open('./opera_house.png')

gray_img = ImageOps.grayscale(new_img)
gray_img_arr = np.asarray(gray_img)

"""
Current image dimensions, width x height, are (640, 360). thumbnail() resizes the
larger dimension, i.e. 640, down to 100, and the other dimension is rescaled to
maintain the aspect ratio. Standard resampling algorithms are used to invent or
remove pixels when resizing, and you can specify a technique, although the default
bicubic resampling suits most general applications.
"""
new_img.thumbnail((100, 100))

hoz_flip = image.transpose(Image.FLIP_LEFT_RIGHT)
ver_flip = image.transpose(Image.FLIP_TOP_BOTTOM)

"""
In both rotations, the pixels are clipped to the original dimensions of the image
and the empty pixels are filled with black.
"""
plt.subplot(3, 1, 1)
plt.imshow(image)
plt.subplot(3, 1, 2)
plt.imshow(image.rotate(45))
plt.subplot(3, 1, 3)
plt.imshow(image.rotate(90))
code
50214099/cell_28
[ "text_plain_output_1.png" ]
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
image.save('opera_house.png', format='PNG')
new_img = Image.open('./opera_house.png')

gray_img = ImageOps.grayscale(new_img)
gray_img_arr = np.asarray(gray_img)

"""
Current image dimensions, width x height, are (640, 360). thumbnail() resizes the
larger dimension, i.e. 640, down to 100, and the other dimension is rescaled to
maintain the aspect ratio. Standard resampling algorithms are used to invent or
remove pixels when resizing, and you can specify a technique, although the default
bicubic resampling suits most general applications.
"""
new_img.thumbnail((100, 100))

hoz_flip = image.transpose(Image.FLIP_LEFT_RIGHT)
ver_flip = image.transpose(Image.FLIP_TOP_BOTTOM)

"""
In both rotations, the pixels are clipped to the original dimensions of the image
and the empty pixels are filled with black.
"""

"""
Here we create a cropped 100 x 100 pixel square, starting at (100, 100) and
extending down and to the right to (200, 200).
"""
cropped_img = image.crop((100, 100, 200, 200))

image = Image.open('../input/bridge-image/sydney_bridge.png')
img_arr = np.asarray(image)
img_arr = img_arr.astype('float32')
img_arr = img_arr / 255.0

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img_arr = img_arr.astype('float32')
mean = img_arr.mean()
print('Mean : %.3f' % mean)
print('Min : %.3f and Max: %.3f' % (img_arr.min(), img_arr.max()))
print('\nAfter applying global centering\n')
img_arr = img_arr - mean  # global centering: shift pixel values to zero mean
mean = img_arr.mean()
print('Mean : %.3f' % mean)
print('Min: %.3f and Max: %.3f' % (img_arr.min(), img_arr.max()))
code
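cell_28 above centers the pixel values around a zero mean. A natural follow-on, sketched here under the same assumptions about the input image, is global standardization, which also divides by the standard deviation so pixels end up with unit variance:

from PIL import Image
import numpy as np

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image).astype('float32')

# global standardization: zero mean and unit variance across all channels
mean, std = img_arr.mean(), img_arr.std()
img_arr = (img_arr - mean) / std
print('Mean: %.3f, Std: %.3f' % (img_arr.mean(), img_arr.std()))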
50214099/cell_16
[ "text_plain_output_1.png" ]
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
image.save('opera_house.png', format='PNG')
new_img = Image.open('./opera_house.png')

gray_img = ImageOps.grayscale(new_img)
gray_img_arr = np.asarray(gray_img)

"""
Current image dimensions, width x height, are (640, 360). thumbnail() resizes the
larger dimension, i.e. 640, down to 100, and the other dimension is rescaled to
maintain the aspect ratio. Standard resampling algorithms are used to invent or
remove pixels when resizing, and you can specify a technique, although the default
bicubic resampling suits most general applications.
"""
new_img.thumbnail((100, 100))

hoz_flip = image.transpose(Image.FLIP_LEFT_RIGHT)
ver_flip = image.transpose(Image.FLIP_TOP_BOTTOM)

plt.subplot(3, 1, 1)
plt.imshow(image)
plt.subplot(3, 1, 2)
plt.imshow(hoz_flip)
plt.subplot(3, 1, 3)
plt.imshow(ver_flip)
code
50214099/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image

image = Image.open('../input/pilimages/opera_house.jpg')
print(image.format)
print(image.mode)
print(image.size)
code
50214099/cell_14
[ "text_plain_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
image.save('opera_house.png', format='PNG')
new_img = Image.open('./opera_house.png')

"""
Current image dimensions, width x height, are (640, 360). thumbnail() resizes the
larger dimension, i.e. 640, down to 100, and the other dimension is rescaled to
maintain the aspect ratio. Standard resampling algorithms are used to invent or
remove pixels when resizing, and you can specify a technique, although the default
bicubic resampling suits most general applications.
"""
new_img.thumbnail((100, 100))
print('old image dim {} and new img dim {}'.format(image.size, new_img.size))
code
50214099/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
image.save('opera_house.png', format='PNG')
new_img = Image.open('./opera_house.png')

gray_img = ImageOps.grayscale(new_img)
gray_img_arr = np.asarray(gray_img)

"""
Current image dimensions, width x height, are (640, 360). thumbnail() resizes the
larger dimension, i.e. 640, down to 100, and the other dimension is rescaled to
maintain the aspect ratio. Standard resampling algorithms are used to invent or
remove pixels when resizing, and you can specify a technique, although the default
bicubic resampling suits most general applications.
"""
new_img.thumbnail((100, 100))

hoz_flip = image.transpose(Image.FLIP_LEFT_RIGHT)
ver_flip = image.transpose(Image.FLIP_TOP_BOTTOM)

"""
In both rotations, the pixels are clipped to the original dimensions of the image
and the empty pixels are filled with black.
"""

"""
Here we create a cropped 100 x 100 pixel square, starting at (100, 100) and
extending down and to the right to (200, 200).
"""
cropped_img = image.crop((100, 100, 200, 200))

image = Image.open('../input/bridge-image/sydney_bridge.png')
print(image.format)
print(image.mode)
print(image.size)
code
50214099/cell_10
[ "text_plain_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
image.save('opera_house.png', format='PNG')  # save first so the PNG exists to reopen
new_img = Image.open('./opera_house.png')
print(new_img.format)
print(new_img.size)
print(new_img.mode)
code
50214099/cell_12
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
img = Image.fromarray(img_arr)
image.save('opera_house.png', format='PNG')  # save first so the PNG exists to reopen
new_img = Image.open('./opera_house.png')

gray_img = ImageOps.grayscale(new_img)
gray_img_arr = np.asarray(gray_img)
plt.imshow(gray_img_arr)
code
50214099/cell_5
[ "text_plain_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

image = Image.open('../input/pilimages/opera_house.jpg')
img_arr = np.asarray(image)
print(img_arr.dtype)
print(img_arr.shape)
plt.imshow(img_arr)
code
1003162/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

base_folder = '../input/'
data = pd.read_csv(base_folder + 'train.csv')
data.head()
code
1003162/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt

# load the training data used throughout this notebook
data = pd.read_csv('../input/train.csv')

figure = plt.figure(figsize=(15, 8))
plt.hist([data[data['Survived'] == 1]['Age'], data[data['Survived'] == 0]['Age']],
         color=['g', 'r'], bins=10, label=['Survived', 'Dead'])
plt.xlabel('Age')
plt.ylabel('Number of passengers')
plt.legend()
code
1003162/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
1003162/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

base_folder = '../input/'
data = pd.read_csv(base_folder + 'train.csv')
survived_sex = data[data['Survived'] == 1]['Sex'].value_counts()
dead_sex = data[data['Survived'] == 0]['Sex'].value_counts()
df = pd.DataFrame([survived_sex, dead_sex])
df.index = ['Survived', 'Dead']
df.plot(kind='bar', figsize=(15, 8))
code
1003162/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt

base_folder = '../input/'
data = pd.read_csv(base_folder + 'train.csv')

# dead and survived based on age of passengers
figure = plt.figure(figsize=(15, 8))
plt.hist([data[data['Survived'] == 1]['Age'], data[data['Survived'] == 0]['Age']],
         color=['g', 'r'], bins=10, label=['Survived', 'Dead'])
plt.xlabel('Age')
plt.ylabel('Number of passengers')
plt.legend()
code
1003162/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

base_folder = '../input/'
data = pd.read_csv(base_folder + 'train.csv')
data.describe()
code
49127047/cell_4
[ "text_plain_output_1.png" ]
print('   *   ')
print('  ***  ')
print(' ***** ')
print('*******')
print(' ***** ')
print('  ***  ')
print('   *   ')
code
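The same diamond can be generated rather than hard-coded; a small sketch, where the size variable (rows in the top half, including the middle row) is an arbitrary choice:

size = 4
# row widths run 1, 3, 5, 7, 5, 3, 1; center() pads them to the widest row
for i in list(range(1, size + 1)) + list(range(size - 1, 0, -1)):
    print(('*' * (2 * i - 1)).center(2 * size - 1))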
49127047/cell_6
[ "text_plain_output_1.png" ]
# genap is Indonesian for "even"; range(50, 103, 4) starts at an even number
# with an even step, so every value is already even and the check is redundant
for genap in range(50, 103, 4):
    if genap % 2 == 0:
        print(genap)
code
49127047/cell_2
[ "text_plain_output_1.png" ]
A = int(input('Enter a number: '))  # int() is safer than eval() for numeric input
kuadrat = A * A  # kuadrat means "square"
print('The square of', A, 'is', kuadrat)
code
49127047/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
print('1 gram = 424,000 Rupiah')
gram = input('Enter grams of gold: ')
try:
    gram = int(gram)
except ValueError:
    exit('Input must be a whole number')
print(gram, 'grams of gold is worth', format(gram * 424000, ','), 'Rupiah')
code
49127047/cell_5
[ "text_plain_output_1.png" ]
n = int(input('Enter how many Fibonacci numbers to print: '))  # int() instead of eval()
n1 = 1
n2 = 1
for i in range(n):
    nth = n1 + n2
    n1 = n2
    n2 = nth
    print(n1, end='.')
code
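A slightly cleaner variant of the same loop, using tuple assignment to avoid the temporary variable; note it prints the conventional 1, 1, 2, 3, 5, ... sequence, which starts one term earlier than the cell above:

n = int(input('Enter how many Fibonacci numbers to print: '))
a, b = 1, 1
for _ in range(n):
    print(a, end='.')
    a, b = b, a + b  # advance the pair in one step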
49127148/cell_21
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import numpy as np
import pandas as pd
import warnings

warnings.filterwarnings('ignore')
n_jobs = -1
random_state = 42

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
y = pd.read_csv('/kaggle/input/modelling-ready-data/y.csv')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=random_state)

def rmse_cv(model):
    """
    Get Root Mean Square Error.
    Using KFold, split the data into 5 folds.
    Find the best score using cross_val_score().
    """
    score = cross_val_score(model, X.values, y, scoring='neg_mean_squared_error', cv=5)
    rmse = np.sqrt(-score)
    return rmse

def rmse_old(y, y_pred):
    """Get Root Mean Square Error without using cross_val_score."""
    return np.sqrt(mean_squared_error(y, y_pred))

models_scores = []

def rmse(model):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return mean_squared_error(y_test, y_pred, squared=False)  # squared=False returns the RMSE

linear_regression = make_pipeline(LinearRegression())
score = rmse(linear_regression)
models_scores.append(['LinearRegression', score])

lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=random_state))
score = rmse(lasso)
models_scores.append(['Lasso', score])

elastic_net = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=0.9, random_state=random_state))
score = rmse(elastic_net)
models_scores.append(['ElasticNet', score])
print(f'ElasticNet Score= {score}')
code
49127148/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
print(f'Shape of X= {X.shape}')
X.head()
code
49127148/cell_23
[ "text_plain_output_1.png" ]
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import numpy as np
import pandas as pd
import warnings

warnings.filterwarnings('ignore')
n_jobs = -1
random_state = 42

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
y = pd.read_csv('/kaggle/input/modelling-ready-data/y.csv')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=random_state)

def rmse_cv(model):
    """
    Get Root Mean Square Error.
    Using KFold, split the data into 5 folds.
    Find the best score using cross_val_score().
    """
    score = cross_val_score(model, X.values, y, scoring='neg_mean_squared_error', cv=5)
    rmse = np.sqrt(-score)
    return rmse

def rmse_old(y, y_pred):
    """Get Root Mean Square Error without using cross_val_score."""
    return np.sqrt(mean_squared_error(y, y_pred))

models_scores = []

def rmse(model):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return mean_squared_error(y_test, y_pred, squared=False)

linear_regression = make_pipeline(LinearRegression())
score = rmse(linear_regression)
models_scores.append(['LinearRegression', score])

lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=random_state))
score = rmse(lasso)
models_scores.append(['Lasso', score])

elastic_net = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=0.9, random_state=random_state))
score = rmse(elastic_net)
models_scores.append(['ElasticNet', score])

kernel_ridge = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
score = rmse(kernel_ridge)
models_scores.append(['KernelRidge', score])
print(f'KernelRidge Score= {score}')
code
49127148/cell_29
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import numpy as np
import pandas as pd
import warnings

warnings.filterwarnings('ignore')
n_jobs = -1
random_state = 42

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
y = pd.read_csv('/kaggle/input/modelling-ready-data/y.csv')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=random_state)

def rmse_cv(model):
    """
    Get Root Mean Square Error.
    Using KFold, split the data into 5 folds.
    Find the best score using cross_val_score().
    """
    score = cross_val_score(model, X.values, y, scoring='neg_mean_squared_error', cv=5)
    rmse = np.sqrt(-score)
    return rmse

def rmse_old(y, y_pred):
    """Get Root Mean Square Error without using cross_val_score."""
    return np.sqrt(mean_squared_error(y, y_pred))

models_scores = []

def rmse(model):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return mean_squared_error(y_test, y_pred, squared=False)

linear_regression = make_pipeline(LinearRegression())
score = rmse(linear_regression)
models_scores.append(['LinearRegression', score])

lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=random_state))
score = rmse(lasso)
models_scores.append(['Lasso', score])

elastic_net = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=0.9, random_state=random_state))
score = rmse(elastic_net)
models_scores.append(['ElasticNet', score])

kernel_ridge = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
score = rmse(kernel_ridge)
models_scores.append(['KernelRidge', score])

models = (linear_regression, lasso, elastic_net, kernel_ridge)
for model in models:
    model.fit(X_train, y_train)
# stack each model's predictions as columns, then average them row-wise
predictions = np.column_stack([model.predict(X_test) for model in models])
y_pred = np.mean(predictions, axis=1)
rmse_val = mean_squared_error(y_test, y_pred, squared=False)
models_scores.append(['Bagging', rmse_val])

gradient_boosting_regressor = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
                                                        max_depth=4, max_features='sqrt',
                                                        min_samples_leaf=15, min_samples_split=10,
                                                        loss='huber', random_state=random_state)
score = rmse(gradient_boosting_regressor)
models_scores.append(['GradientBoostingRegressor', score])
print(f'GradientBoostingRegressor Score= {score}')
code
49127148/cell_26
[ "text_html_output_1.png" ]
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import numpy as np
import pandas as pd
import warnings

warnings.filterwarnings('ignore')
n_jobs = -1
random_state = 42

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
y = pd.read_csv('/kaggle/input/modelling-ready-data/y.csv')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=random_state)

def rmse_cv(model):
    """
    Get Root Mean Square Error.
    Using KFold, split the data into 5 folds.
    Find the best score using cross_val_score().
    """
    score = cross_val_score(model, X.values, y, scoring='neg_mean_squared_error', cv=5)
    rmse = np.sqrt(-score)
    return rmse

def rmse_old(y, y_pred):
    """Get Root Mean Square Error without using cross_val_score."""
    return np.sqrt(mean_squared_error(y, y_pred))

models_scores = []

def rmse(model):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return mean_squared_error(y_test, y_pred, squared=False)

linear_regression = make_pipeline(LinearRegression())
score = rmse(linear_regression)
models_scores.append(['LinearRegression', score])

lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=random_state))
score = rmse(lasso)
models_scores.append(['Lasso', score])

elastic_net = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=0.9, random_state=random_state))
score = rmse(elastic_net)
models_scores.append(['ElasticNet', score])

kernel_ridge = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
score = rmse(kernel_ridge)
models_scores.append(['KernelRidge', score])

models = (linear_regression, lasso, elastic_net, kernel_ridge)
for model in models:
    model.fit(X_train, y_train)
# stack each model's predictions as columns, then average them row-wise
predictions = np.column_stack([model.predict(X_test) for model in models])
y_pred = np.mean(predictions, axis=1)
rmse_val = mean_squared_error(y_test, y_pred, squared=False)
models_scores.append(['Bagging', rmse_val])
print(f'rmse= {rmse_val}')
code
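The 'Bagging' entry above is a plain average of four fitted models' predictions (strictly, model averaging rather than bagging). One common refactor, sketched here as an assumption rather than the author's code, wraps that average in a scikit-learn-style estimator so it can be passed to the same rmse() helper:

import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone

class AveragingRegressor(BaseEstimator, RegressorMixin):
    """Fits a clone of each base model and averages their predictions."""

    def __init__(self, models):
        self.models = models

    def fit(self, X, y):
        # clone so the originals are left untouched
        self.fitted_ = [clone(m).fit(X, y) for m in self.models]
        return self

    def predict(self, X):
        preds = np.column_stack([m.predict(X) for m in self.fitted_])
        return preds.mean(axis=1)

With that in place, something like rmse(AveragingRegressor([linear_regression, lasso, elastic_net, kernel_ridge])) would score the ensemble the same way as the individual models.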
49127148/cell_19
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import numpy as np
import pandas as pd
import warnings

warnings.filterwarnings('ignore')
n_jobs = -1
random_state = 42

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
y = pd.read_csv('/kaggle/input/modelling-ready-data/y.csv')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=random_state)

def rmse_cv(model):
    """
    Get Root Mean Square Error.
    Using KFold, split the data into 5 folds.
    Find the best score using cross_val_score().
    """
    score = cross_val_score(model, X.values, y, scoring='neg_mean_squared_error', cv=5)
    rmse = np.sqrt(-score)
    return rmse

def rmse_old(y, y_pred):
    """Get Root Mean Square Error without using cross_val_score."""
    return np.sqrt(mean_squared_error(y, y_pred))

models_scores = []

def rmse(model):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return mean_squared_error(y_test, y_pred, squared=False)

linear_regression = make_pipeline(LinearRegression())
score = rmse(linear_regression)
models_scores.append(['LinearRegression', score])

lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=random_state))
score = rmse(lasso)
models_scores.append(['Lasso', score])
print(f'Lasso Score= {score}')
code
49127148/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
import numpy as np
import pandas as pd
import warnings

warnings.filterwarnings('ignore')
n_jobs = -1
random_state = 42

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
y = pd.read_csv('/kaggle/input/modelling-ready-data/y.csv')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=random_state)

def rmse_cv(model):
    """
    Get Root Mean Square Error.
    Using KFold, split the data into 5 folds.
    Find the best score using cross_val_score().
    """
    score = cross_val_score(model, X.values, y, scoring='neg_mean_squared_error', cv=5)
    rmse = np.sqrt(-score)
    return rmse

def rmse_old(y, y_pred):
    """Get Root Mean Square Error without using cross_val_score."""
    return np.sqrt(mean_squared_error(y, y_pred))

models_scores = []

def rmse(model):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return mean_squared_error(y_test, y_pred, squared=False)

linear_regression = make_pipeline(LinearRegression())
score = rmse(linear_regression)
models_scores.append(['LinearRegression', score])
print(f'LinearRegression Score= {score}')
code
49127148/cell_31
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import numpy as np
import pandas as pd
import warnings
import xgboost as xgb

warnings.filterwarnings('ignore')
n_jobs = -1
random_state = 42

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
y = pd.read_csv('/kaggle/input/modelling-ready-data/y.csv')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=random_state)

def rmse_cv(model):
    """
    Get Root Mean Square Error.
    Using KFold, split the data into 5 folds.
    Find the best score using cross_val_score().
    """
    score = cross_val_score(model, X.values, y, scoring='neg_mean_squared_error', cv=5)
    rmse = np.sqrt(-score)
    return rmse

def rmse_old(y, y_pred):
    """Get Root Mean Square Error without using cross_val_score."""
    return np.sqrt(mean_squared_error(y, y_pred))

models_scores = []

def rmse(model):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return mean_squared_error(y_test, y_pred, squared=False)

linear_regression = make_pipeline(LinearRegression())
score = rmse(linear_regression)
models_scores.append(['LinearRegression', score])

lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=random_state))
score = rmse(lasso)
models_scores.append(['Lasso', score])

elastic_net = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=0.9, random_state=random_state))
score = rmse(elastic_net)
models_scores.append(['ElasticNet', score])

kernel_ridge = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
score = rmse(kernel_ridge)
models_scores.append(['KernelRidge', score])

models = (linear_regression, lasso, elastic_net, kernel_ridge)
for model in models:
    model.fit(X_train, y_train)
# stack each model's predictions as columns, then average them row-wise
predictions = np.column_stack([model.predict(X_test) for model in models])
y_pred = np.mean(predictions, axis=1)
rmse_val = mean_squared_error(y_test, y_pred, squared=False)
models_scores.append(['Bagging', rmse_val])

gradient_boosting_regressor = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
                                                        max_depth=4, max_features='sqrt',
                                                        min_samples_leaf=15, min_samples_split=10,
                                                        loss='huber', random_state=random_state)
score = rmse(gradient_boosting_regressor)
models_scores.append(['GradientBoostingRegressor', score])

xgb_regressor = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468, learning_rate=0.05,
                                 max_depth=3, min_child_weight=1.7817, n_estimators=2200,
                                 reg_alpha=0.464, reg_lambda=0.8571, subsample=0.5213,
                                 verbosity=0, nthread=-1, random_state=random_state)
score = rmse(xgb_regressor)
models_scores.append(['XGBRegressor', score])
print(f'XGBRegressor Score= {score}')
code
49127148/cell_24
[ "text_plain_output_1.png" ]
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import numpy as np
import pandas as pd
import warnings

warnings.filterwarnings('ignore')
n_jobs = -1
random_state = 42

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
y = pd.read_csv('/kaggle/input/modelling-ready-data/y.csv')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=random_state)

def rmse_cv(model):
    """
    Get Root Mean Square Error.
    Using KFold, split the data into 5 folds.
    Find the best score using cross_val_score().
    """
    score = cross_val_score(model, X.values, y, scoring='neg_mean_squared_error', cv=5)
    rmse = np.sqrt(-score)
    return rmse

def rmse_old(y, y_pred):
    """Get Root Mean Square Error without using cross_val_score."""
    return np.sqrt(mean_squared_error(y, y_pred))

models_scores = []

def rmse(model):
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return mean_squared_error(y_test, y_pred, squared=False)

linear_regression = make_pipeline(LinearRegression())
score = rmse(linear_regression)
models_scores.append(['LinearRegression', score])

lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=random_state))
score = rmse(lasso)
models_scores.append(['Lasso', score])

elastic_net = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=0.9, random_state=random_state))
score = rmse(elastic_net)
models_scores.append(['ElasticNet', score])

kernel_ridge = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
score = rmse(kernel_ridge)
models_scores.append(['KernelRidge', score])

pd.DataFrame(models_scores).sort_values(by=[1], ascending=True)
code
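The scores table above sorts by the positional column label 1. Naming the columns makes the sort self-documenting; a cosmetic variant (not the author's code) that assumes the models_scores list from the cell above:

import pandas as pd

scores_df = pd.DataFrame(models_scores, columns=['model', 'rmse'])
print(scores_df.sort_values(by='rmse'))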
49127148/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
y = pd.read_csv('/kaggle/input/modelling-ready-data/y.csv')
print(f'Shape of y= {y.shape}')
y.head()
code
49127148/cell_12
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd
import warnings

warnings.filterwarnings('ignore')
n_jobs = -1
random_state = 42

X = pd.read_csv('/kaggle/input/modelling-ready-data/X.csv')
y = pd.read_csv('/kaggle/input/modelling-ready-data/y.csv')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=random_state)
print(f'Training set--> X_train shape= {X_train.shape}, y_train shape= {y_train.shape}')
print(f'Holdout set--> X_test shape= {X_test.shape}, y_test shape= {y_test.shape}')
code
1009303/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
1009303/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Rate.csv')
df.head().T
code
1009303/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Rate.csv')
df.head().T
df.loc[df['IssuerId'] == 11324, 'IndividualRate']
code
1003644/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('../input/train.csv')
df_train.columns
price = df_train['SalePrice']
sns.distplot(price)
code
1003644/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from subprocess import check_output
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('../input/train.csv')
df_train.columns
price = df_train['SalePrice']
correlation = df_train.corr()
k = 10
cols = correlation.nlargest(k, 'SalePrice')['SalePrice'].index
coef = np.corrcoef(df_train[cols].values.T)
sns.heatmap(coef, yticklabels=cols.values, annot=True, xticklabels=cols.values)
code
1003644/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('../input/train.csv')
df_train.columns
code
1003644/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

print(check_output(['ls', '../input']).decode('utf8'))
df_train = pd.read_csv('../input/train.csv')
code
1003644/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('../input/train.csv')
df_train.columns
price = df_train['SalePrice']
correlation = df_train.corr()
k = 10
cols = correlation.nlargest(k, 'SalePrice')['SalePrice'].index
coef = np.corrcoef(df_train[cols].values.T)
cols_low = correlation.nsmallest(k, 'SalePrice')['SalePrice'].index
coef_low = np.corrcoef(df_train[cols_low].values.T)
sns.heatmap(coef_low, yticklabels=cols_low.values)
code
1003644/cell_3
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('../input/train.csv')
df_train.columns
price = df_train['SalePrice']
price.describe()
code
1003644/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('../input/train.csv')
df_train.columns
price = df_train['SalePrice']
correlation = df_train.corr()
sns.heatmap(correlation, vmin=-0.8, vmax=0.8, cmap='YlGnBu', square=True)
code
34145078/cell_9
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

DataDir = '/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv'
campus_data = pd.read_csv(DataDir)
s = campus_data.dtypes == 'object'
object_cols = list(s[s].index)
drop_data = campus_data.select_dtypes(exclude=['object'])
# comparing against the string 'NaN' never matches a real missing value,
# so use isna() to locate rows with a missing salary
index_names = campus_data[campus_data['salary'].isna()].index
campus_data.drop(index_names, inplace=True)
campus_data.head()
code
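Dropping rows by collected index labels works, but pandas has a direct idiom for the same operation. A one-line equivalent, assuming the same campus_data frame:

import pandas as pd

DataDir = '/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv'
campus_data = pd.read_csv(DataDir)

# drop every row whose salary is missing, in one step
campus_data = campus_data.dropna(subset=['salary'])
campus_data.head()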
34145078/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

DataDir = '/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv'
campus_data = pd.read_csv(DataDir)
campus_data.describe()
code
34145078/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

DataDir = '/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv'
campus_data = pd.read_csv(DataDir)
s = campus_data.dtypes == 'object'
object_cols = list(s[s].index)
print('categorical columns:')
print(object_cols)
code
34145078/cell_2
[ "text_plain_output_1.png" ]
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34145078/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

DataDir = '/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv'
campus_data = pd.read_csv(DataDir)
s = campus_data.dtypes == 'object'
object_cols = list(s[s].index)
print('Unique values in the salary column: ', campus_data['salary'].unique())
code
34145078/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

DataDir = '/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv'
campus_data = pd.read_csv(DataDir)
s = campus_data.dtypes == 'object'
object_cols = list(s[s].index)
drop_data = campus_data.select_dtypes(exclude=['object'])
drop_data.head()
code
34145078/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

DataDir = '/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv'
campus_data = pd.read_csv(DataDir)
print('Successfully loaded the data.')
code
34145078/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

DataDir = '/kaggle/input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv'
campus_data = pd.read_csv(DataDir)
campus_data.head()
code
32071248/cell_21
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')

df_env.isna().sum()

# impute missing wind values country by country
df_env.loc[df_env['Country_Region'].isin(['Norway', 'Finland', 'Iceland', 'Estonia']), 'wind'] = 4.689151
df_env.loc[df_env['Country_Region'].isin(['Maldives']), 'wind'] = np.mean([2.698925, 3.494908])
df_env.loc[df_env['Country_Region'].isin(['Bahrain']), 'wind'] = np.mean([3.728877, 3.173667, 4.525724])
df_env.loc[df_env['Country_Region'].isin(['Antigua and Barbuda']), 'wind'] = np.mean([3.586282, 3.378886, 2.749947])
df_env.loc[df_env['Country_Region'].isin(['Saint Vincent and the Grenadines']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Malta']), 'wind'] = np.mean([3.078635, 2.648621])
df_env.loc[df_env['Country_Region'].isin(['Seychelles']), 'wind'] = 2.736786
df_env.loc[df_env['Country_Region'].isin(['Saint Kitts and Nevis']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Grenada']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Saint Lucia']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Barbados']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Monaco']), 'wind'] = np.mean([5.745106, 3.369222])

cols_na = ['accessibility_to_cities', 'elevation', 'aspect', 'slope', 'tree_canopy_cover',
           'isothermality', 'rain_coldestQuart', 'rain_driestMonth', 'rain_driestQuart',
           'rain_mean_annual', 'rain_seasonailty', 'rain_warmestQuart', 'rain_wettestMonth',
           'rain_wettestQuart', 'temp_annual_range', 'temp_coldestQuart', 'temp_diurnal_range',
           'temp_driestQuart', 'temp_max_warmestMonth', 'temp_mean_annual', 'temp_min_coldestMonth',
           'temp_seasonality', 'temp_warmestQuart', 'temp_wettestQuart', 'cloudiness']
for c in cols_na:
    country = df_env.loc[df_env[c].isna(), 'Country_Region'].unique()
    if 'Maldives' in country:
        df_env.loc[df_env[c].isna(), c] = df_env[df_env['Country_Region'] == 'India'][c].mean()
    else:
        df_env.loc[df_env[c].isna(), c] = df_env[df_env['Country_Region'] == 'Denmark'][c].mean()

df_pop = df_pop[~df_pop['Country_Region'].isna()]
df_pop.drop(['quarantine', 'schools', 'restrictions'], axis=1, inplace=True)
df_pop.isna().sum()
df_pop.dtypes

cols_na = ['pop', 'tests', 'testpop', 'density', 'medianage', 'urbanpop', 'hospibed', 'smokers',
           'sex0', 'sex14', 'sex25', 'sex54', 'sex64', 'sex65plus', 'sexratio', 'lung',
           'femalelung', 'malelung']
# first fill missing values with the per-country mean ...
for c in cols_na:
    df_pop[c] = df_pop.groupby(['Country_Region'])[c].transform(lambda x: x.fillna(x.mean()))
# ... then fall back to the global mean for countries with no data at all
for c in cols_na:
    df_pop[c].fillna(df_pop[c].mean(), inplace=True)
df_pop.columns.values
code
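cell_21 above fills missing population statistics with per-country means before falling back to the global mean. A tiny self-contained illustration of that two-stage pattern, on made-up data:

import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'Country_Region': ['A', 'A', 'B', 'B', 'C'],
    'pop': [1.0, np.nan, 3.0, np.nan, np.nan],
})

# stage 1: fill with the mean of the same country (fills A and B)
toy['pop'] = toy.groupby('Country_Region')['pop'].transform(lambda x: x.fillna(x.mean()))
# stage 2: a country with no value at all (C) falls back to the global mean
toy['pop'] = toy['pop'].fillna(toy['pop'].mean())
print(toy)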
32071248/cell_9
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit, cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')

dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape

date_valid_start = df_test_raw[df_test_raw['Date'].isin(df_train_raw['Date'])]['Date'].unique().min()
date_valid_end = df_test_raw[df_test_raw['Date'].isin(df_train_raw['Date'])]['Date'].unique().max()
print('valid start: ', date_valid_start)
print('valid end: ', date_valid_end)
code
32071248/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')

df_env.isna().sum()

df_env.loc[df_env['Country_Region'].isin(['Norway', 'Finland', 'Iceland', 'Estonia']), 'wind'] = 4.689151
df_env.loc[df_env['Country_Region'].isin(['Maldives']), 'wind'] = np.mean([2.698925, 3.494908])
df_env.loc[df_env['Country_Region'].isin(['Bahrain']), 'wind'] = np.mean([3.728877, 3.173667, 4.525724])
df_env.loc[df_env['Country_Region'].isin(['Antigua and Barbuda']), 'wind'] = np.mean([3.586282, 3.378886, 2.749947])
df_env.loc[df_env['Country_Region'].isin(['Saint Vincent and the Grenadines']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Malta']), 'wind'] = np.mean([3.078635, 2.648621])
df_env.loc[df_env['Country_Region'].isin(['Seychelles']), 'wind'] = 2.736786
df_env.loc[df_env['Country_Region'].isin(['Saint Kitts and Nevis']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Grenada']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Saint Lucia']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Barbados']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Monaco']), 'wind'] = np.mean([5.745106, 3.369222])

cols_na = ['accessibility_to_cities', 'elevation', 'aspect', 'slope', 'tree_canopy_cover',
           'isothermality', 'rain_coldestQuart', 'rain_driestMonth', 'rain_driestQuart',
           'rain_mean_annual', 'rain_seasonailty', 'rain_warmestQuart', 'rain_wettestMonth',
           'rain_wettestQuart', 'temp_annual_range', 'temp_coldestQuart', 'temp_diurnal_range',
           'temp_driestQuart', 'temp_max_warmestMonth', 'temp_mean_annual', 'temp_min_coldestMonth',
           'temp_seasonality', 'temp_warmestQuart', 'temp_wettestQuart', 'cloudiness']
for c in cols_na:
    country = df_env.loc[df_env[c].isna(), 'Country_Region'].unique()
    print(c, country)
    if 'Maldives' in country:
        df_env.loc[df_env[c].isna(), c] = df_env[df_env['Country_Region'] == 'India'][c].mean()
    else:
        df_env.loc[df_env[c].isna(), c] = df_env[df_env['Country_Region'] == 'Denmark'][c].mean()
code
32071248/cell_20
[ "text_plain_output_1.png" ]
import numpy as np
import os
import math
import pandas as pd
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit, cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')

df_env.isna().sum()

df_env.loc[df_env['Country_Region'].isin(['Norway', 'Finland', 'Iceland', 'Estonia']), 'wind'] = 4.689151
df_env.loc[df_env['Country_Region'].isin(['Maldives']), 'wind'] = np.mean([2.698925, 3.494908])
df_env.loc[df_env['Country_Region'].isin(['Bahrain']), 'wind'] = np.mean([3.728877, 3.173667, 4.525724])
df_env.loc[df_env['Country_Region'].isin(['Antigua and Barbuda']), 'wind'] = np.mean([3.586282, 3.378886, 2.749947])
df_env.loc[df_env['Country_Region'].isin(['Saint Vincent and the Grenadines']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Malta']), 'wind'] = np.mean([3.078635, 2.648621])
df_env.loc[df_env['Country_Region'].isin(['Seychelles']), 'wind'] = 2.736786
df_env.loc[df_env['Country_Region'].isin(['Saint Kitts and Nevis']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Grenada']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Saint Lucia']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Barbados']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Monaco']), 'wind'] = np.mean([5.745106, 3.369222])

cols_na = ['accessibility_to_cities', 'elevation', 'aspect', 'slope', 'tree_canopy_cover',
           'isothermality', 'rain_coldestQuart', 'rain_driestMonth', 'rain_driestQuart',
           'rain_mean_annual', 'rain_seasonailty', 'rain_warmestQuart', 'rain_wettestMonth',
           'rain_wettestQuart', 'temp_annual_range', 'temp_coldestQuart', 'temp_diurnal_range',
           'temp_driestQuart', 'temp_max_warmestMonth', 'temp_mean_annual', 'temp_min_coldestMonth',
           'temp_seasonality', 'temp_warmestQuart', 'temp_wettestQuart', 'cloudiness']
for c in cols_na:
    country = df_env.loc[df_env[c].isna(), 'Country_Region'].unique()
    if 'Maldives' in country:
        df_env.loc[df_env[c].isna(), c] = df_env[df_env['Country_Region'] == 'India'][c].mean()
    else:
        df_env.loc[df_env[c].isna(), c] = df_env[df_env['Country_Region'] == 'Denmark'][c].mean()

dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape

df_train_clean = df_train_enriched.drop(['Id'], axis=1)
df_test_clean = df_test_raw[~df_test_raw['Date'].isin(df_train_enriched['Date'])]
df_test_clean = df_test_clean.drop(['ForecastId'], axis=1)
base_date = pd.to_datetime('2020-01-01')
base_date
df_test_clean['days_since'] = (pd.to_datetime(df_test_clean['Date']) - base_date).dt.days
df_test_clean['days_since'].unique()

df = pd.concat([df_train_clean, df_test_clean], sort=False).reset_index(drop=True)
df = pd.merge(df, df_airpol.drop_duplicates(subset=['Country_Region']), how='left')
df.shape
df = pd.merge(df, df_env.drop_duplicates(subset=['Country_Region']), how='left')
df.shape
df.dtypes
code
32071248/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')

df_pop = df_pop[~df_pop['Country_Region'].isna()]
df_pop.drop(['quarantine', 'schools', 'restrictions'], axis=1, inplace=True)
df_pop.isna().sum()
df_pop.dtypes
code
32071248/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
print('env: ', df_env.shape)
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
print('pol: ', df_airpol.shape)
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
print('pop: ', df_pop.shape)
code
32071248/cell_11
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit, cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')

dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape

base_date = pd.to_datetime('2020-01-01')
base_date
code
32071248/cell_19
[ "text_plain_output_1.png" ]
import numpy as np
import os
import math
import pandas as pd
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit, cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')

df_env.isna().sum()

df_env.loc[df_env['Country_Region'].isin(['Norway', 'Finland', 'Iceland', 'Estonia']), 'wind'] = 4.689151
df_env.loc[df_env['Country_Region'].isin(['Maldives']), 'wind'] = np.mean([2.698925, 3.494908])
df_env.loc[df_env['Country_Region'].isin(['Bahrain']), 'wind'] = np.mean([3.728877, 3.173667, 4.525724])
df_env.loc[df_env['Country_Region'].isin(['Antigua and Barbuda']), 'wind'] = np.mean([3.586282, 3.378886, 2.749947])
df_env.loc[df_env['Country_Region'].isin(['Saint Vincent and the Grenadines']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Malta']), 'wind'] = np.mean([3.078635, 2.648621])
df_env.loc[df_env['Country_Region'].isin(['Seychelles']), 'wind'] = 2.736786
df_env.loc[df_env['Country_Region'].isin(['Saint Kitts and Nevis']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Grenada']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Saint Lucia']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Barbados']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Monaco']), 'wind'] = np.mean([5.745106, 3.369222])

cols_na = ['accessibility_to_cities', 'elevation', 'aspect', 'slope', 'tree_canopy_cover',
           'isothermality', 'rain_coldestQuart', 'rain_driestMonth', 'rain_driestQuart',
           'rain_mean_annual', 'rain_seasonailty', 'rain_warmestQuart', 'rain_wettestMonth',
           'rain_wettestQuart', 'temp_annual_range', 'temp_coldestQuart', 'temp_diurnal_range',
           'temp_driestQuart', 'temp_max_warmestMonth', 'temp_mean_annual', 'temp_min_coldestMonth',
           'temp_seasonality', 'temp_warmestQuart', 'temp_wettestQuart', 'cloudiness']
for c in cols_na:
    country = df_env.loc[df_env[c].isna(), 'Country_Region'].unique()
    if 'Maldives' in country:
        df_env.loc[df_env[c].isna(), c] = df_env[df_env['Country_Region'] == 'India'][c].mean()
    else:
        df_env.loc[df_env[c].isna(), c] = df_env[df_env['Country_Region'] == 'Denmark'][c].mean()

dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape

df_train_clean = df_train_enriched.drop(['Id'], axis=1)
df_test_clean = df_test_raw[~df_test_raw['Date'].isin(df_train_enriched['Date'])]
df_test_clean = df_test_clean.drop(['ForecastId'], axis=1)
base_date = pd.to_datetime('2020-01-01')
base_date
df_test_clean['days_since'] = (pd.to_datetime(df_test_clean['Date']) - base_date).dt.days
df_test_clean['days_since'].unique()

df = pd.concat([df_train_clean, df_test_clean], sort=False).reset_index(drop=True)
df = pd.merge(df, df_airpol.drop_duplicates(subset=['Country_Region']), how='left')
df.shape
df = pd.merge(df, df_env.drop_duplicates(subset=['Country_Region']), how='left')
df.shape
code
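Aside: a minimal sketch of the `days_since` date feature built in the cell above, shown on a tiny hypothetical frame — the dates and printed values here are invented for illustration, not taken from the competition data.

import pandas as pd

toy = pd.DataFrame({'Date': ['2020-01-01', '2020-01-15', '2020-02-01']})  # hypothetical dates
base_date = pd.to_datetime('2020-01-01')
# Subtracting a Timestamp from a datetime64 column yields Timedeltas;
# .dt.days extracts the integer day offsets used as a model feature.
toy['days_since'] = (pd.to_datetime(toy['Date']) - base_date).dt.days
print(toy['days_since'].tolist())  # [0, 14, 31]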
32071248/cell_1
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

# List every file available under the Kaggle input directory.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32071248/cell_18
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape
df_train_clean = df_train_enriched.drop(['Id'], axis=1)
df_test_clean = df_test_raw[~df_test_raw['Date'].isin(df_train_enriched['Date'])]
df_test_clean = df_test_clean.drop(['ForecastId'], axis=1)
base_date = pd.to_datetime('2020-01-01')
base_date
df_test_clean['days_since'] = (pd.to_datetime(df_test_clean['Date']) - base_date).dt.days
df_test_clean['days_since'].unique()
df = pd.concat([df_train_clean, df_test_clean], sort=False).reset_index(drop=True)
df = pd.merge(df, df_airpol.drop_duplicates(subset=['Country_Region']), how='left')
df.shape
code
32071248/cell_8
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape
code
32071248/cell_15
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape
df_train_clean = df_train_enriched.drop(['Id'], axis=1)
df_test_clean = df_test_raw[~df_test_raw['Date'].isin(df_train_enriched['Date'])]
df_test_clean = df_test_clean.drop(['ForecastId'], axis=1)
df_test_clean['ConfirmedCases'] = df_test_clean['ConfirmedCases'].astype('float')
df_test_clean['Fatalities'] = df_test_clean['Fatalities'].astype('float')
df_test_clean['Fatalities'].dtype
code
32071248/cell_16
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape
df_train_clean = df_train_enriched.drop(['Id'], axis=1)
df_test_clean = df_test_raw[~df_test_raw['Date'].isin(df_train_enriched['Date'])]
df_test_clean = df_test_clean.drop(['ForecastId'], axis=1)
base_date = pd.to_datetime('2020-01-01')
base_date
df_test_clean['days_since'] = (pd.to_datetime(df_test_clean['Date']) - base_date).dt.days
df_test_clean['days_since'].unique()
df = pd.concat([df_train_clean, df_test_clean], sort=False).reset_index(drop=True)
print(df.shape)
print(df.columns.values)
code
32071248/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
df_env.isna().sum()
code
32071248/cell_14
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape
df_train_clean = df_train_enriched.drop(['Id'], axis=1)
df_test_clean = df_test_raw[~df_test_raw['Date'].isin(df_train_enriched['Date'])]
df_test_clean = df_test_clean.drop(['ForecastId'], axis=1)
print('cc skew: {}'.format(df_train_clean['ConfirmedCases'].skew()))
print('ft skew: {}'.format(df_train_clean['Fatalities'].skew()))
code
32071248/cell_22
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
df_env.isna().sum()
df_env.loc[df_env['Country_Region'].isin(['Norway', 'Finland', 'Iceland', 'Estonia']), 'wind'] = 4.689151
df_env.loc[df_env['Country_Region'].isin(['Maldives']), 'wind'] = np.mean([2.698925, 3.494908])
df_env.loc[df_env['Country_Region'].isin(['Bahrain']), 'wind'] = np.mean([3.728877, 3.173667, 4.525724])
df_env.loc[df_env['Country_Region'].isin(['Antigua and Barbuda']), 'wind'] = np.mean([3.586282, 3.378886, 2.749947])
df_env.loc[df_env['Country_Region'].isin(['Saint Vincent and the Grenadines']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Malta']), 'wind'] = np.mean([3.078635, 2.648621])
df_env.loc[df_env['Country_Region'].isin(['Seychelles']), 'wind'] = 2.736786
df_env.loc[df_env['Country_Region'].isin(['Saint Kitts and Nevis']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Grenada']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Saint Lucia']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Barbados']), 'wind'] = 3.515223
df_env.loc[df_env['Country_Region'].isin(['Monaco']), 'wind'] = np.mean([5.745106, 3.369222])
cols_na = ['accessibility_to_cities', 'elevation', 'aspect', 'slope', 'tree_canopy_cover', 'isothermality',
           'rain_coldestQuart', 'rain_driestMonth', 'rain_driestQuart', 'rain_mean_annual', 'rain_seasonailty',
           'rain_warmestQuart', 'rain_wettestMonth', 'rain_wettestQuart', 'temp_annual_range', 'temp_coldestQuart',
           'temp_diurnal_range', 'temp_driestQuart', 'temp_max_warmestMonth', 'temp_mean_annual',
           'temp_min_coldestMonth', 'temp_seasonality', 'temp_warmestQuart', 'temp_wettestQuart', 'cloudiness']
for c in cols_na:
    country = df_env.loc[df_env[c].isna(), 'Country_Region'].unique()
    if 'Maldives' in country:
        df_env.loc[df_env[c].isna(), c] = df_env[df_env['Country_Region'] == 'India'][c].mean()
    else:
        df_env.loc[df_env[c].isna(), c] = df_env[df_env['Country_Region'] == 'Denmark'][c].mean()
df_pop = df_pop[~df_pop['Country_Region'].isna()]
df_pop.drop(['quarantine', 'schools', 'restrictions'], axis=1, inplace=True)
df_pop.isna().sum()
df_pop.dtypes
cols_na = ['pop', 'tests', 'testpop', 'density', 'medianage', 'urbanpop', 'hospibed', 'smokers', 'sex0', 'sex14',
           'sex25', 'sex54', 'sex64', 'sex65plus', 'sexratio', 'lung', 'femalelung', 'malelung']
# Two-stage imputation: per-country mean first, then the global mean for whatever is still missing.
for c in cols_na:
    df_pop[c] = df_pop.groupby(['Country_Region'])[c].transform(lambda x: x.fillna(x.mean()))
for c in cols_na:
    df_pop[c].fillna(df_pop[c].mean(), inplace=True)
dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape
df_train_clean = df_train_enriched.drop(['Id'], axis=1)
df_test_clean = df_test_raw[~df_test_raw['Date'].isin(df_train_enriched['Date'])]
df_test_clean = df_test_clean.drop(['ForecastId'], axis=1)
base_date = pd.to_datetime('2020-01-01')
base_date
df_test_clean['days_since'] = (pd.to_datetime(df_test_clean['Date']) - base_date).dt.days
df_test_clean['days_since'].unique()
df = pd.concat([df_train_clean, df_test_clean], sort=False).reset_index(drop=True)
df = pd.merge(df, df_airpol.drop_duplicates(subset=['Country_Region']), how='left')
df.shape
df = pd.merge(df, df_env.drop_duplicates(subset=['Country_Region']), how='left')
df.shape
df.dtypes
df_pop.columns.values
df = pd.merge(df, df_pop.drop_duplicates(subset=['Country_Region', 'Province_State']), how='left')
df.shape
code
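Aside: a small sketch of why the merges above call drop_duplicates on the join keys first — a hypothetical example, not the Kaggle inputs. A left merge fans out when the right-hand key repeats; deduplicating the right side keeps one row per key, so the left frame's row count is preserved (which is what the repeated df.shape checks verify).

import pandas as pd

left = pd.DataFrame({'Country_Region': ['A', 'B'], 'cases': [1, 2]})
right = pd.DataFrame({'Country_Region': ['A', 'A', 'B'], 'pm25': [10.0, 12.0, 20.0]})
fanned = pd.merge(left, right, how='left')  # key 'A' matches twice, giving 3 rows
safe = pd.merge(left, right.drop_duplicates(subset=['Country_Region']), how='left')  # 2 rows
print(len(fanned), len(safe))  # 3 2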
32071248/cell_10
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape
df_train_clean = df_train_enriched.drop(['Id'], axis=1)
df_test_clean = df_test_raw[~df_test_raw['Date'].isin(df_train_enriched['Date'])]
df_test_clean = df_test_clean.drop(['ForecastId'], axis=1)
print('train shape: ', df_train_clean.shape)
print('test shape: ', df_test_clean.shape)
code
32071248/cell_12
[ "text_plain_output_1.png" ]
import os
import math
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer, mean_squared_error
import lightgbm as lgb
from lightgbm import LGBMRegressor
from hyperopt import hp, tpe
from hyperopt.fmin import fmin

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
dirname = '/kaggle/input'
train_enriched_filename = 'covid19-forecasting-data-with-containment-measures/train-enriched-with-containment_v4.csv'
train_filename = 'covid19-global-forecasting-week-4/train.csv'
test_filename = 'covid19-global-forecasting-week-4/test.csv'
df_train_enriched = pd.read_csv(os.path.join(dirname, train_enriched_filename))
df_train_raw = pd.read_csv(os.path.join(dirname, train_filename))
df_test_raw = pd.read_csv(os.path.join(dirname, test_filename))
df_train_raw.shape
df_train_clean = df_train_enriched.drop(['Id'], axis=1)
df_test_clean = df_test_raw[~df_test_raw['Date'].isin(df_train_enriched['Date'])]
df_test_clean = df_test_clean.drop(['ForecastId'], axis=1)
base_date = pd.to_datetime('2020-01-01')
base_date
df_test_clean['days_since'] = (pd.to_datetime(df_test_clean['Date']) - base_date).dt.days
df_test_clean['days_since'].unique()
code
32071248/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df_env = pd.read_csv('/kaggle/input/global-environmental-factors/env.csv')
df_airpol = pd.read_csv('/kaggle/input/pm25-global-air-pollution/pm25-global-air-pollution-2017.csv')
df_pop = pd.read_csv('/kaggle/input/world-population-by-country-state/country_population.csv')
df_pop = df_pop[~df_pop['Country_Region'].isna()]
df_pop.drop(['quarantine', 'schools', 'restrictions'], axis=1, inplace=True)
df_pop.isna().sum()
code
32068979/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
by_ctry_prov = df_train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']]
# diff() turns cumulative counts into daily deltas within each country/province group.
df_train[['NewCases', 'NewFatalities']] = by_ctry_prov.transform(lambda x: x.diff().fillna(0))
df_train[['NewCasesPct', 'NewFatalitiesPct']] = by_ctry_prov.transform(lambda x: x.pct_change().fillna(0))
df_train.sort_values('NewCases', ascending=False).head()
df_train[df_train.NewCases < 0].sort_values('NewCases')
df_train[df_train.NewFatalities < 0].sort_values('NewFatalities')
# Sanity check: fatalities should never exceed confirmed cases.
df_train[df_train.Fatalities > df_train.ConfirmedCases]
code
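Aside: a toy illustration of the groupby/transform pattern in the cell above, with made-up numbers. transform applies the lambda per group and returns a result aligned to the original index, so it can be assigned straight back as new columns.

import pandas as pd

toy = pd.DataFrame({
    'Country_Region': ['X', 'X', 'X', 'Y', 'Y'],
    'ConfirmedCases': [1, 3, 6, 10, 10],
})
grp = toy.groupby('Country_Region')['ConfirmedCases']
# diff() turns cumulative counts into daily new counts inside each group;
# the first row of a group has no predecessor, so fillna(0) zeroes it.
toy['NewCases'] = grp.transform(lambda x: x.diff().fillna(0))
toy['NewCasesPct'] = grp.transform(lambda x: x.pct_change().fillna(0))
print(toy['NewCases'].tolist())  # [0.0, 2.0, 3.0, 0.0, 0.0]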
32068979/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
by_ctry_prov = df_train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']]
df_train[['NewCases', 'NewFatalities']] = by_ctry_prov.transform(lambda x: x.diff().fillna(0))
df_train[['NewCasesPct', 'NewFatalitiesPct']] = by_ctry_prov.transform(lambda x: x.pct_change().fillna(0))
df_train.sort_values('NewCases', ascending=False).head()
df_train[df_train.NewCases < 0].sort_values('NewCases')
code
32068979/cell_1
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import numpy as np
from scipy.interpolate import Rbf
from scipy.optimize import curve_fit
from scipy.stats import linregress
from datetime import timedelta
from sklearn.metrics import mean_squared_log_error
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.preprocessing import OrdinalEncoder
import category_encoders as ce
import xgboost
from catboost import Pool, CatBoostRegressor
import matplotlib.pyplot as plt

plt.style.use('ggplot')
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068979/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
df_test.head(3)
code
32068979/cell_18
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
by_ctry_prov = df_train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']]
df_train[['NewCases', 'NewFatalities']] = by_ctry_prov.transform(lambda x: x.diff().fillna(0))
df_train[['NewCasesPct', 'NewFatalitiesPct']] = by_ctry_prov.transform(lambda x: x.pct_change().fillna(0))
df_train.sort_values('NewCases', ascending=False).head()
df_train[df_train.NewCases < 0].sort_values('NewCases')
df_train[df_train.NewFatalities < 0].sort_values('NewFatalities')
df_train[df_train.Fatalities > df_train.ConfirmedCases]
df_train[(df_train.NewCasesPct > 0.4) & (df_train.NewCases > 1000)]
df_train[(df_train.NewFatalitiesPct > 0.8) & (df_train.NewFatalities > 50)]
# date zero-padded ('2020-02-8' -> '2020-02-08') so the string comparison against ISO-formatted dates behaves as intended
df_train[(df_train['Country_Region'] == 'China') & (df_train['Province_State'] == 'Hubei') & (df_train.Date > '2020-02-08')].head(8)
code
32068979/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
df_sub.head(3)
code
32068979/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
by_ctry_prov = df_train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']]
df_train[['NewCases', 'NewFatalities']] = by_ctry_prov.transform(lambda x: x.diff().fillna(0))
df_train[['NewCasesPct', 'NewFatalitiesPct']] = by_ctry_prov.transform(lambda x: x.pct_change().fillna(0))
df_train.sort_values('NewCases', ascending=False).head()
df_train[df_train.NewCases < 0].sort_values('NewCases')
df_train[df_train.NewFatalities < 0].sort_values('NewFatalities')
df_train[df_train.Fatalities > df_train.ConfirmedCases]
df_train[(df_train.NewCasesPct > 0.4) & (df_train.NewCases > 1000)]
df_train[(df_train.NewFatalitiesPct > 0.8) & (df_train.NewFatalities > 50)]
code
32068979/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
by_ctry_prov = df_train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']]
df_train[['NewCases', 'NewFatalities']] = by_ctry_prov.transform(lambda x: x.diff().fillna(0))
df_train[['NewCasesPct', 'NewFatalitiesPct']] = by_ctry_prov.transform(lambda x: x.pct_change().fillna(0))
df_train.sort_values('NewCases', ascending=False).head()
df_train[df_train.NewCases < 0].sort_values('NewCases')
df_train[df_train.NewFatalities < 0].sort_values('NewFatalities')
df_train[df_train.Fatalities > df_train.ConfirmedCases]
df_train[(df_train.NewCasesPct > 0.4) & (df_train.NewCases > 1000)]
df_train[(df_train.NewFatalitiesPct > 0.8) & (df_train.NewFatalities > 50)]
df_train[(df_train['Country_Region'] == 'China') & (df_train['Province_State'] == 'Shandong') & (df_train.Date > '2020-02-18')].head(5)
code
32068979/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
by_ctry_prov = df_train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']]
df_train[['NewCases', 'NewFatalities']] = by_ctry_prov.transform(lambda x: x.diff().fillna(0))
df_train[['NewCasesPct', 'NewFatalitiesPct']] = by_ctry_prov.transform(lambda x: x.pct_change().fillna(0))
df_train.sort_values('NewCases', ascending=False).head()
df_train[df_train.NewCases < 0].sort_values('NewCases')
df_train[df_train.NewFatalities < 0].sort_values('NewFatalities')
df_train[df_train.Fatalities > df_train.ConfirmedCases]
df_train[(df_train.NewCasesPct > 0.4) & (df_train.NewCases > 1000)]
code
32068979/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
by_ctry_prov = df_train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']]
df_train[['NewCases', 'NewFatalities']] = by_ctry_prov.transform(lambda x: x.diff().fillna(0))
df_train[['NewCasesPct', 'NewFatalitiesPct']] = by_ctry_prov.transform(lambda x: x.pct_change().fillna(0))
df_train.sort_values('NewCases', ascending=False).head()
code
32068979/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
by_ctry_prov = df_train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']]
df_train[['NewCases', 'NewFatalities']] = by_ctry_prov.transform(lambda x: x.diff().fillna(0))
df_train[['NewCasesPct', 'NewFatalitiesPct']] = by_ctry_prov.transform(lambda x: x.pct_change().fillna(0))
df_train.sort_values('NewCases', ascending=False).head()
df_train[df_train.NewCases < 0].sort_values('NewCases')
df_train[df_train.NewFatalities < 0].sort_values('NewFatalities')
code
32068979/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_sub = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
df_train.tail(3)
code
90156125/cell_13
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv); this re-binds the `pd` alias, so cudf is not actually used below

df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
previous_match.head()
code
90156125/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import cudf as pd
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
previous_match.shape
previous_match.isnull().sum()
previous_match.dropna()  # note: without inplace=True or reassignment this call has no effect
previous_match.season.unique()
plt.subplots(figsize=(15, 6))
sns.countplot(x=previous_match['season'], data=previous_match)
plt.show()
code
90156125/cell_6
[ "image_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
df_train.info()
code
90156125/cell_26
[ "text_plain_output_1.png" ]
import cudf as pd
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
previous_match.shape
previous_match.isnull().sum()
previous_match.dropna()
previous_match.season.unique()
# Each assignment below overwrites `order`; only the player_of_match ranking is used by the plot.
order = previous_match.city.value_counts().iloc[:10].index
order = previous_match.winner.value_counts().iloc[:10].index
order = previous_match.player_of_match.value_counts().iloc[:3].index
sns.countplot(x='player_of_match', data=previous_match, palette='rainbow', order=order)
plt.show()
code
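Aside: the ordering trick used above, shown on invented data — value_counts() sorts descending by default, so slicing its index yields the top-k categories in frequency order, ready to pass to countplot's order parameter.

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

toy = pd.DataFrame({'winner': ['MI', 'CSK', 'MI', 'KKR', 'MI', 'CSK']})  # hypothetical teams
order = toy['winner'].value_counts().iloc[:2].index  # top-2 by frequency: MI, CSK
sns.countplot(x='winner', data=toy, order=order)
plt.show()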
90156125/cell_11
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
df_train.isnull().sum()
df_train.describe().T
df_train['Players'].value_counts()
code
90156125/cell_7
[ "image_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
df_train.isnull().sum()
code
90156125/cell_18
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
previous_match.shape
previous_match.isnull().sum()
previous_match.dropna()
previous_match.season.unique()
code
90156125/cell_8
[ "image_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
df_train.isnull().sum()
df_train.describe().T
code
90156125/cell_15
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
previous_match.shape
sns.heatmap(previous_match.isnull(), yticklabels=False, cbar=False, cmap='viridis')
code
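Aside: a minimal sketch of the missing-value heatmap pattern in the last cell, on a hypothetical frame — df.isnull() yields a boolean grid, so NaNs show up as contrasting cells, one plot row per data row.

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

toy = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [np.nan, np.nan, 1.0]})
sns.heatmap(toy.isnull(), yticklabels=False, cbar=False, cmap='viridis')
plt.show()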