Schema (one row per notebook cell):
- path: string, 13 to 17 characters (notebook id / cell id, e.g. "17132381/cell_31")
- screenshot_names: sequence of strings, 1 to 873 entries (output screenshots for the cell)
- code: string, 0 to 40.4k characters (the cell's source code)
- cell_type: string, 1 class (always "code")
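For orientation, a minimal sketch of how rows with this schema could be inspected. The file name 'train.parquet' and the pandas-based loading are assumptions; the listing does not say how the dataset is distributed.

import pandas as pd

# Hypothetical file name; adjust to however the dataset is actually shipped.
rows = pd.read_parquet('train.parquet')
print(rows.columns.tolist())        # ['path', 'screenshot_names', 'code', 'cell_type']
print(rows['cell_type'].unique())   # a single class: 'code'
print(rows.loc[0, 'code'][:200])    # first 200 characters of a cell's source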
17132381/cell_31
[ "text_plain_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import os
import pandas as pd
# Used below but not imported in the original cell:
import random
import numpy as np
import torch
from fastai.vision import *  # fastai v1 API (get_transforms, ImageList, cnn_learner, ...)

os.listdir('../input')

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

SEED = 999
seed_everything(SEED)

base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir, 'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1).reset_index(drop=True)

bs = 64
sz = 224
tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5)
src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis')
data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats)

def quadratic_kappa(y_hat, y):
    return torch.tensor(cohen_kappa_score(y_hat.argmax(dim=-1), y, weights='quadratic'), device='cuda:0')

learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa])
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.fit_one_cycle(6, max_lr=slice(1e-06, 0.001))
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
idx = 1
im, cl = learn.data.dl(DatasetType.Valid).dataset[idx]
cl = int(cl)
im.show(title=f'pred. class: {interp.pred_class[idx]}, actual class: {learn.data.classes[cl]}')
code
17132381/cell_46
[ "text_plain_output_1.png" ]
# hook_g is a backward hook registered in an earlier cell (not shown here).
grad = hook_g.stored[0][0].cpu()
grad.shape
grad_chan = grad.mean(1).mean(1)
grad_chan.shape
code
17132381/cell_24
[ "image_output_2.png", "image_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import os
import pandas as pd
# Used below but not imported in the original cell:
import random
import numpy as np
import torch
from fastai.vision import *  # fastai v1 API

os.listdir('../input')

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

SEED = 999
seed_everything(SEED)

base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir, 'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1).reset_index(drop=True)

bs = 64
sz = 224
tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5)
src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis')
data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats)

def quadratic_kappa(y_hat, y):
    return torch.tensor(cohen_kappa_score(y_hat.argmax(dim=-1), y, weights='quadratic'), device='cuda:0')

learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa])
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.fit_one_cycle(6, max_lr=slice(1e-06, 0.001))
learn.recorder.plot_losses()
learn.recorder.plot_metrics()
code
17132381/cell_22
[ "image_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import os
import pandas as pd
# Used below but not imported in the original cell:
import random
import numpy as np
import torch
from fastai.vision import *  # fastai v1 API

os.listdir('../input')

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

SEED = 999
seed_everything(SEED)

base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir, 'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1).reset_index(drop=True)

bs = 64
sz = 224
tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5)
src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis')
data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats)

def quadratic_kappa(y_hat, y):
    return torch.tensor(cohen_kappa_score(y_hat.argmax(dim=-1), y, weights='quadratic'), device='cuda:0')

learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa])
learn.fit_one_cycle(4, max_lr=0.01)
learn.recorder.plot_losses()
learn.recorder.plot_metrics()
code
17132381/cell_10
[ "text_plain_output_1.png" ]
import os
import pandas as pd
# Used below but not imported in the original cell:
import random
import numpy as np
import torch

os.listdir('../input')

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

SEED = 999
seed_everything(SEED)

base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir, 'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1).reset_index(drop=True)
df.head(10)
code
17132381/cell_27
[ "image_output_2.png", "image_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import os
import pandas as pd
# Used below but not imported in the original cell:
import random
import numpy as np
import torch
from fastai.vision import *  # fastai v1 API

os.listdir('../input')

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

SEED = 999
seed_everything(SEED)

base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir, 'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1).reset_index(drop=True)

bs = 64
sz = 224
tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5)
src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis')
data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats)

def quadratic_kappa(y_hat, y):
    return torch.tensor(cohen_kappa_score(y_hat.argmax(dim=-1), y, weights='quadratic'), device='cuda:0')

learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa])
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.fit_one_cycle(6, max_lr=slice(1e-06, 0.001))
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
interp.plot_confusion_matrix(figsize=(12, 12), dpi=60)
code
17132381/cell_37
[ "image_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import os
import pandas as pd
# Used below but not imported in the original cell:
import random
import numpy as np
import torch
from fastai.vision import *  # fastai v1 API

os.listdir('../input')

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

SEED = 999
seed_everything(SEED)

base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir, 'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1).reset_index(drop=True)

bs = 64
sz = 224
tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5)
src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis')
data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats)

def quadratic_kappa(y_hat, y):
    return torch.tensor(cohen_kappa_score(y_hat.argmax(dim=-1), y, weights='quadratic'), device='cuda:0')

learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa])
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.fit_one_cycle(6, max_lr=slice(1e-06, 0.001))
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
idx = 1
im, cl = learn.data.dl(DatasetType.Valid).dataset[idx]
cl = int(cl)
m = learn.model.eval()
len(m)
code
17132381/cell_36
[ "image_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import os
import pandas as pd
# Used below but not imported in the original cell:
import random
import numpy as np
import torch
from fastai.vision import *  # fastai v1 API

os.listdir('../input')

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

SEED = 999
seed_everything(SEED)

base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir, 'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1).reset_index(drop=True)

bs = 64
sz = 224
tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5)
src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis')
data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats)

def quadratic_kappa(y_hat, y):
    return torch.tensor(cohen_kappa_score(y_hat.argmax(dim=-1), y, weights='quadratic'), device='cuda:0')

learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa])
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.fit_one_cycle(6, max_lr=slice(1e-06, 0.001))
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
idx = 1
im, cl = learn.data.dl(DatasetType.Valid).dataset[idx]
cl = int(cl)
m = learn.model.eval()
type(m)
code
2021876/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
train.isnull().sum().sum()
code
2021876/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
print('Number rows and columns:', train.shape)
print('Number rows and columns:', test.shape)
code
2021876/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
train.isnull().sum().sum()
test.isnull().sum().sum()
train_len = train.shape[0]
df = pd.concat([train, test], axis=0)
bin_col = [c for c in df.columns if df[c].nunique() == 2]
len(bin_col)
other_col = [c for c in df.columns if c not in bin_col]
other_col

def category_type(df):
    col = df.columns
    for i in col:
        if 2 < df[i].nunique() <= 53:
            df[i] = df[i].astype('category')

category_type(df)

fig, ax = plt.subplots(2, 2, figsize=(14, 8))
ax1, ax2, ax3, ax4 = ax.flatten()
sns.countplot(df['X0'], palette='rainbow', ax=ax1)
sns.countplot(df['X1'], palette='summer', ax=ax2)
sns.countplot(df['X2'], palette='rainbow', ax=ax3)
sns.countplot(df['X3'], palette='magma', ax=ax4)

fig, ax = plt.subplots(2, 2, figsize=(14, 8))
ax1, ax2, ax3, ax4 = ax.flatten()
sns.countplot(df['X4'], palette='magma', ax=ax1)
sns.countplot(df['X5'], palette='rainbow', ax=ax2)
sns.countplot(df['X6'], palette='summer', ax=ax3)
sns.countplot(df['X8'], palette='magma', ax=ax4)
code
2021876/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
train.isnull().sum().sum()
test.isnull().sum().sum()
train_len = train.shape[0]
df = pd.concat([train, test], axis=0)
bin_col = [c for c in df.columns if df[c].nunique() == 2]
len(bin_col)
other_col = [c for c in df.columns if c not in bin_col]
other_col

def category_type(df):
    col = df.columns
    for i in col:
        if 2 < df[i].nunique() <= 53:
            df[i] = df[i].astype('category')

category_type(df)

def OHE(df, columns):
    c2, c3 = ([], {})
    for c in columns:
        c2.append(c)
        c3[c] = 'ohe_' + c
    df1 = pd.get_dummies(df, prefix=c3, columns=c2, drop_first=True)
    return df1

col_ohe = ['X0', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X8']
df1 = OHE(df, col_ohe)
seed = 42  # NOTE: `seed` is undefined in this cell; 42 is an assumed placeholder value
pca = PCA(n_components=None, random_state=seed)
pca.fit(df1.drop(['y', 'ID'], axis=1))
code
2021876/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
code
2021876/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
plt.figure(figsize=(16, 10))
sns.heatmap(cor, cmap='viridis')
code
2021876/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
train.isnull().sum().sum()
test.isnull().sum().sum()
train_len = train.shape[0]
df = pd.concat([train, test], axis=0)
bin_col = [c for c in df.columns if df[c].nunique() == 2]
len(bin_col)
other_col = [c for c in df.columns if c not in bin_col]
other_col
df[other_col].nunique()
code
2021876/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
train.isnull().sum().sum()
test.isnull().sum().sum()
train_len = train.shape[0]
df = pd.concat([train, test], axis=0)
bin_col = [c for c in df.columns if df[c].nunique() == 2]
len(bin_col)
other_col = [c for c in df.columns if c not in bin_col]
other_col
code
2021876/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
train.isnull().sum().sum()
test.isnull().sum().sum()
train_len = train.shape[0]
df = pd.concat([train, test], axis=0)
bin_col = [c for c in df.columns if df[c].nunique() == 2]
len(bin_col)
other_col = [c for c in df.columns if c not in bin_col]
other_col

def category_type(df):
    col = df.columns
    for i in col:
        if 2 < df[i].nunique() <= 53:
            df[i] = df[i].astype('category')

category_type(df)

def OHE(df, columns):
    c2, c3 = ([], {})
    for c in columns:
        c2.append(c)
        c3[c] = 'ohe_' + c
    df1 = pd.get_dummies(df, prefix=c3, columns=c2, drop_first=True)
    return df1

col_ohe = ['X0', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X8']
df1 = OHE(df, col_ohe)
df1.head()
code
2021876/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
plt.figure(figsize=(12, 6))
sns.distplot(train['y'], bins=120)
plt.xlabel('y')
code
2021876/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
train.isnull().sum().sum()
test.isnull().sum().sum()
train_len = train.shape[0]
df = pd.concat([train, test], axis=0)
bin_col = [c for c in df.columns if df[c].nunique() == 2]
len(bin_col)
code
2021876/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
train.isnull().sum().sum()
test.isnull().sum().sum()
train_len = train.shape[0]
df = pd.concat([train, test], axis=0)
bin_col = [c for c in df.columns if df[c].nunique() == 2]
len(bin_col)
other_col = [c for c in df.columns if c not in bin_col]
other_col

def category_type(df):
    col = df.columns
    for i in col:
        if 2 < df[i].nunique() <= 53:
            df[i] = df[i].astype('category')

category_type(df)

fig, ax = plt.subplots(2, 2, figsize=(14, 8))
ax1, ax2, ax3, ax4 = ax.flatten()
sns.countplot(df['X0'], palette='rainbow', ax=ax1)
sns.countplot(df['X1'], palette='summer', ax=ax2)
sns.countplot(df['X2'], palette='rainbow', ax=ax3)
sns.countplot(df['X3'], palette='magma', ax=ax4)

fig, ax = plt.subplots(2, 2, figsize=(14, 8))
ax1, ax2, ax3, ax4 = ax.flatten()
sns.countplot(df['X4'], palette='magma', ax=ax1)
sns.countplot(df['X5'], palette='rainbow', ax=ax2)
sns.countplot(df['X6'], palette='summer', ax=ax3)
sns.countplot(df['X8'], palette='magma', ax=ax4)

plt.figure(figsize=(14, 80))
k = df[bin_col].sum().sort_values()
sns.barplot(k, k.index, orient='h', color='b')
code
2021876/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
test.isnull().sum().sum()
code
2021876/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
train.isnull().sum().sum()
test.isnull().sum().sum()
train_len = train.shape[0]
df = pd.concat([train, test], axis=0)
bin_col = [c for c in df.columns if df[c].nunique() == 2]
len(bin_col)
other_col = [c for c in df.columns if c not in bin_col]
other_col

def category_type(df):
    col = df.columns
    for i in col:
        if 2 < df[i].nunique() <= 53:
            df[i] = df[i].astype('category')

category_type(df)

fig, ax = plt.subplots(2, 2, figsize=(14, 8))
ax1, ax2, ax3, ax4 = ax.flatten()
sns.countplot(df['X0'], palette='rainbow', ax=ax1)
sns.countplot(df['X1'], palette='summer', ax=ax2)
sns.countplot(df['X2'], palette='rainbow', ax=ax3)
sns.countplot(df['X3'], palette='magma', ax=ax4)
code
2021876/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

path = '../input/'
train = pd.read_csv(path + 'train.csv', na_values=-1)
test = pd.read_csv(path + 'test.csv', na_values=-1)
train.head(5).T
cor = train.corr()
train.isnull().sum().sum()
test.isnull().sum().sum()
train_len = train.shape[0]
df = pd.concat([train, test], axis=0)
bin_col = [c for c in df.columns if df[c].nunique() == 2]
len(bin_col)
other_col = [c for c in df.columns if c not in bin_col]
other_col

def category_type(df):
    col = df.columns
    for i in col:
        if 2 < df[i].nunique() <= 53:
            df[i] = df[i].astype('category')

category_type(df)

def OHE(df, columns):
    c2, c3 = ([], {})
    for c in columns:
        c2.append(c)
        c3[c] = 'ohe_' + c
    df1 = pd.get_dummies(df, prefix=c3, columns=c2, drop_first=True)
    return df1

col_ohe = ['X0', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X8']
df1 = OHE(df, col_ohe)
code
50213631/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df = df[df['region'] != '-1']
df['region'].unique()
df = df[df['flavor_profile'] != '-1']
df['flavor_profile'].unique()
df = df[df['state'] != '-1']
df['state'].unique()
df_grouped = df.groupby('diet')['prep_time'].mean()
df_grouped.reset_index()
df_grouped = df.groupby('diet')['cook_time'].mean()
df_grouped.reset_index()
dfgrouped1 = df.groupby(['state', 'flavor_profile']).size().reset_index(name='counts')
dfgrouped1
dfgrouped2 = dfgrouped1.groupby(['state'])['counts'].max()
dfgrouped2
code
50213631/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df = df[df['region'] != '-1']
df['region'].unique()
df = df[df['flavor_profile'] != '-1']
df['flavor_profile'].unique()
df = df[df['state'] != '-1']
df['state'].unique()
df['diet'].value_counts().plot(kind='bar')
code
50213631/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
code
50213631/cell_20
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df = df[df['region'] != '-1']
df['region'].unique()
df = df[df['flavor_profile'] != '-1']
df['flavor_profile'].unique()
df = df[df['state'] != '-1']
df['state'].unique()
df_grouped = df.groupby('diet')['prep_time'].mean()
df_grouped.reset_index()
df_grouped = df.groupby('diet')['cook_time'].mean()
df_grouped.reset_index()
dfgrouped1 = df.groupby(['state', 'flavor_profile']).size().reset_index(name='counts')
dfgrouped1
code
50213631/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.head()
code
50213631/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df = df[df['region'] != '-1']
df['region'].unique()
df = df[df['flavor_profile'] != '-1']
df['flavor_profile'].unique()
code
50213631/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df = df[df['region'] != '-1']
df['region'].unique()
df = df[df['flavor_profile'] != '-1']
df['flavor_profile'].unique()
df = df[df['state'] != '-1']
df['state'].unique()
df_grouped = df.groupby('diet')['prep_time'].mean()
df_grouped.reset_index()
df_grouped = df.groupby('diet')['cook_time'].mean()
df_grouped.reset_index()
df['flavor_profile'].unique()
code
50213631/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50213631/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df['flavor_profile'].unique()
code
50213631/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df = df[df['region'] != '-1']
df['region'].unique()
df = df[df['flavor_profile'] != '-1']
df['flavor_profile'].unique()
df = df[df['state'] != '-1']
df['state'].unique()
df_grouped = df.groupby('diet')['prep_time'].mean()
df_grouped.reset_index()
code
50213631/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.info()
code
50213631/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df = df[df['region'] != '-1']
df['region'].unique()
df = df[df['flavor_profile'] != '-1']
df['flavor_profile'].unique()
df = df[df['state'] != '-1']
df['state'].unique()
df_grouped = df.groupby('diet')['prep_time'].mean()
df_grouped.reset_index()
df_grouped = df.groupby('diet')['cook_time'].mean()
df_grouped.reset_index()
code
50213631/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df = df[df['region'] != '-1']
df['region'].unique()
df = df[df['flavor_profile'] != '-1']
df['flavor_profile'].unique()
df = df[df['state'] != '-1']
df['state'].unique()
df_grouped = df.groupby('diet')['prep_time'].mean()
df_grouped.reset_index()
df_grouped = df.groupby('diet')['cook_time'].mean()
df_grouped.reset_index()
dfgrouped1 = df.groupby(['state', 'flavor_profile']).size().reset_index(name='counts')
dfgrouped1
dfgrouped2 = dfgrouped1.groupby(['state'])['counts'].max()
dfgrouped2
dfgrouped1.groupby(['state', 'flavor_profile', 'counts'], as_index=False).max()
code
50213631/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df = df[df['region'] != '-1']
df['region'].unique()
code
50213631/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/indian-food-101/indian_food.csv')
df.columns[df.isna().any()].tolist()
df = df[df['region'].notna()]
df = df[df['region'] != '-1']
df['region'].unique()
df = df[df['flavor_profile'] != '-1']
df['flavor_profile'].unique()
df = df[df['state'] != '-1']
df['state'].unique()
code
33104934/cell_13
[ "text_plain_output_1.png" ]
from scipy.cluster.hierarchy import ward, fcluster, linkage
from scipy.spatial.distance import pdist
import nltk
import numpy as np
import pandas as pd
import plotly.figure_factory as ff
import re
import tensorflow_hub as hub

tweets = pd.read_csv('/kaggle/input/data-trab-2/Tweets.csv', error_bad_lines=False)
tweets
doc = tweets['text']
doc
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('english')

def normalize_document(doc):
    doc = re.sub('[^a-zA-Z\\s]', '', doc)
    doc = doc.lower()
    doc = doc.strip()
    tokens = wpt.tokenize(doc)
    filtered_tokens = [token for token in tokens if token not in stop_words]
    doc = ' '.join(filtered_tokens)
    return doc

normalize_doc = np.vectorize(normalize_document)
norm_doc = normalize_doc(doc)
norm_doc

module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)

def embed(input):
    return model(input)

X = embed(norm_doc)
Y = pdist(X, 'cosine')
Z = linkage(y=Y)
ff.create_dendrogram(Z)
code
33104934/cell_9
[ "text_html_output_1.png" ]
from scipy.cluster.hierarchy import ward, fcluster, linkage
from scipy.spatial.distance import pdist
import nltk
import numpy as np
import pandas as pd
import re
import tensorflow_hub as hub

tweets = pd.read_csv('/kaggle/input/data-trab-2/Tweets.csv', error_bad_lines=False)
tweets
doc = tweets['text']
doc
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('english')

def normalize_document(doc):
    doc = re.sub('[^a-zA-Z\\s]', '', doc)
    doc = doc.lower()
    doc = doc.strip()
    tokens = wpt.tokenize(doc)
    filtered_tokens = [token for token in tokens if token not in stop_words]
    doc = ' '.join(filtered_tokens)
    return doc

normalize_doc = np.vectorize(normalize_document)
norm_doc = normalize_doc(doc)
norm_doc

module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)

def embed(input):
    return model(input)

X = embed(norm_doc)
Y = pdist(X, 'cosine')
Z = linkage(y=Y)
F = fcluster(Z, t=0.2)
F
code
33104934/cell_4
[ "text_plain_output_1.png" ]
import nltk
import numpy as np
import pandas as pd
import re

tweets = pd.read_csv('/kaggle/input/data-trab-2/Tweets.csv', error_bad_lines=False)
tweets
doc = tweets['text']
doc
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('english')

def normalize_document(doc):
    doc = re.sub('[^a-zA-Z\\s]', '', doc)
    doc = doc.lower()
    doc = doc.strip()
    tokens = wpt.tokenize(doc)
    filtered_tokens = [token for token in tokens if token not in stop_words]
    doc = ' '.join(filtered_tokens)
    return doc

normalize_doc = np.vectorize(normalize_document)
norm_doc = normalize_doc(doc)
norm_doc
code
33104934/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

tweets = pd.read_csv('/kaggle/input/data-trab-2/Tweets.csv', error_bad_lines=False)
tweets
code
33104934/cell_11
[ "text_plain_output_1.png" ]
from scipy.cluster.hierarchy import ward, fcluster, linkage
from scipy.spatial.distance import pdist
import nltk
import numpy as np
import pandas as pd
import re
import tensorflow_hub as hub

tweets = pd.read_csv('/kaggle/input/data-trab-2/Tweets.csv', error_bad_lines=False)
tweets
doc = tweets['text']
doc
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('english')

def normalize_document(doc):
    doc = re.sub('[^a-zA-Z\\s]', '', doc)
    doc = doc.lower()
    doc = doc.strip()
    tokens = wpt.tokenize(doc)
    filtered_tokens = [token for token in tokens if token not in stop_words]
    doc = ' '.join(filtered_tokens)
    return doc

normalize_doc = np.vectorize(normalize_document)
norm_doc = normalize_doc(doc)
norm_doc

module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)

def embed(input):
    return model(input)

X = embed(norm_doc)
Y = pdist(X, 'cosine')
Z = linkage(y=Y)
F = fcluster(Z, t=0.2)
F
df_texto = pd.DataFrame({'texto': norm_doc, 'cluster': F}, columns=['texto', 'cluster'])
filtro = df_texto.cluster == 2937
df_texto[filtro]
code
33104934/cell_1
[ "text_plain_output_1.png" ]
import os
import io
import re

import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import ward, fcluster, linkage
from scipy.spatial.distance import pdist
from scipy import stats
import nltk
import plotly.figure_factory as ff

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33104934/cell_7
[ "text_html_output_1.png" ]
from scipy.spatial.distance import pdist
import nltk
import numpy as np
import pandas as pd
import re
import tensorflow_hub as hub

tweets = pd.read_csv('/kaggle/input/data-trab-2/Tweets.csv', error_bad_lines=False)
tweets
doc = tweets['text']
doc
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('english')

def normalize_document(doc):
    doc = re.sub('[^a-zA-Z\\s]', '', doc)
    doc = doc.lower()
    doc = doc.strip()
    tokens = wpt.tokenize(doc)
    filtered_tokens = [token for token in tokens if token not in stop_words]
    doc = ' '.join(filtered_tokens)
    return doc

normalize_doc = np.vectorize(normalize_document)
norm_doc = normalize_doc(doc)
norm_doc

module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)

def embed(input):
    return model(input)

X = embed(norm_doc)
Y = pdist(X, 'cosine')
code
33104934/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

tweets = pd.read_csv('/kaggle/input/data-trab-2/Tweets.csv', error_bad_lines=False)
tweets
doc = tweets['text']
doc
code
33104934/cell_10
[ "text_html_output_1.png" ]
from scipy import stats
from scipy.cluster.hierarchy import ward, fcluster, linkage
from scipy.spatial.distance import pdist
import nltk
import numpy as np
import pandas as pd
import re
import tensorflow_hub as hub

tweets = pd.read_csv('/kaggle/input/data-trab-2/Tweets.csv', error_bad_lines=False)
tweets
doc = tweets['text']
doc
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('english')

def normalize_document(doc):
    doc = re.sub('[^a-zA-Z\\s]', '', doc)
    doc = doc.lower()
    doc = doc.strip()
    tokens = wpt.tokenize(doc)
    filtered_tokens = [token for token in tokens if token not in stop_words]
    doc = ' '.join(filtered_tokens)
    return doc

normalize_doc = np.vectorize(normalize_document)
norm_doc = normalize_doc(doc)
norm_doc

module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)

def embed(input):
    return model(input)

X = embed(norm_doc)
Y = pdist(X, 'cosine')
Z = linkage(y=Y)
F = fcluster(Z, t=0.2)
F
stats.mode(F)
code
33104934/cell_12
[ "text_plain_output_1.png" ]
from scipy.cluster.hierarchy import ward, fcluster, linkage
from scipy.spatial.distance import pdist
import nltk
import numpy as np
import pandas as pd
import re
import tensorflow_hub as hub

tweets = pd.read_csv('/kaggle/input/data-trab-2/Tweets.csv', error_bad_lines=False)
tweets
doc = tweets['text']
doc
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('english')

def normalize_document(doc):
    doc = re.sub('[^a-zA-Z\\s]', '', doc)
    doc = doc.lower()
    doc = doc.strip()
    tokens = wpt.tokenize(doc)
    filtered_tokens = [token for token in tokens if token not in stop_words]
    doc = ' '.join(filtered_tokens)
    return doc

normalize_doc = np.vectorize(normalize_document)
norm_doc = normalize_doc(doc)
norm_doc

module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)

def embed(input):
    return model(input)

X = embed(norm_doc)
Y = pdist(X, 'cosine')
Z = linkage(y=Y)
F = fcluster(Z, t=0.2)
F
df_texto = pd.DataFrame({'texto': norm_doc, 'cluster': F}, columns=['texto', 'cluster'])
filtro = df_texto.cluster == 2937
df_texto[filtro]
df_texto = pd.DataFrame({'texto': norm_doc, 'cluster': F}, columns=['texto', 'cluster'])
filtro = df_texto.cluster == 15
df_texto[filtro]
code
33104934/cell_5
[ "text_html_output_1.png" ]
import tensorflow_hub as hub

module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)
print('module %s loaded' % module_url)

def embed(input):
    return model(input)
code
130021822/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
train_df['HeatingQC'].value_counts()
code
130021822/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
train_df['OverallCond'].value_counts()
code
130021822/cell_9
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
plt.style.use('ggplot')
train_df['LotFrontage'].hist()
code
130021822/cell_25
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
train_df.plot.scatter(x='SalePrice', y='GarageArea')
code
130021822/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
plt.style.use('ggplot')
sns.catplot(kind='box', data=train_df, x='CentralAir', y='SalePrice')
plt.show()
code
130021822/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
code
130021822/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
plt.style.use('ggplot')
sns.stripplot(data=train_df, x='PavedDrive', y='SalePrice', jitter=True, dodge=True)
code
130021822/cell_19
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
train_df.plot.scatter(y='SalePrice', x='TotalBsmtSF')
code
130021822/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130021822/cell_18
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
train_df.plot.scatter(y='SalePrice', x='MasVnrArea')
code
130021822/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
sns.barplot(y='SalePrice', x='OverallCond', data=train_df)
code
130021822/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
train_df['ExterCond'].value_counts()
code
130021822/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
code
130021822/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
sns.barplot(y='SalePrice', x='ExterCond', data=train_df)
code
130021822/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
plt.style.use('ggplot')
features = ['LotArea', 'LotFrontage', 'HouseStyle', 'OverallCond', 'ExterCond', 'TotalBsmtSF', 'CentralAir', 'GarageArea', 'GrLivArea', 'PavedDrive']
X = train_df[features]
sns.heatmap(X.corr())
code
130021822/cell_14
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
sns.barplot(y='SalePrice', x='HouseStyle', data=train_df)
code
130021822/cell_22
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
sns.violinplot(data=train_df, y='SalePrice', x='HeatingQC')
code
130021822/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
train_df['Utilities'].value_counts()
code
130021822/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
plt.style.use('ggplot')
sns.histplot(data=train_df, x='GrLivArea', y='SalePrice', kde=True)
code
130021822/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.isnull().sum()
train_df['HouseStyle'].value_counts()
code
130021822/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
train_df.columns
train_df.describe()
code
105187067/cell_13
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
df_total = train.append(test)
df_total.drop('row_id', axis=1, inplace=True)
df_total['date'] = pd.to_datetime(df_total['date'], format='%Y-%m-%d')
df_total['year'] = df_total['date'].dt.year
df_total['month'] = df_total['date'].dt.month
df_total['day'] = df_total['date'].dt.day
df_total['day_of_week'] = df_total['date'].dt.day_of_week
df_total['day_of_year'] = df_total['date'].dt.day_of_year
df_total['is_weekend'] = np.where(df_total['day_of_week'].isin([5, 6]), 1, 0)
df_total.drop('date', axis=1, inplace=True)
df_total['country'].value_counts()
code
105187067/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
df_total = train.append(test)
df_total.head()
code
105187067/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
test.head()
code
105187067/cell_23
[ "text_html_output_1.png" ]
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV, cross_val_predict
from xgboost import XGBRegressor
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
df_total = train.append(test)
df_total.drop('row_id', axis=1, inplace=True)
df_total['date'] = pd.to_datetime(df_total['date'], format='%Y-%m-%d')
df_total['year'] = df_total['date'].dt.year
df_total['month'] = df_total['date'].dt.month
df_total['day'] = df_total['date'].dt.day
df_total['day_of_week'] = df_total['date'].dt.day_of_week
df_total['day_of_year'] = df_total['date'].dt.day_of_year
df_total['is_weekend'] = np.where(df_total['day_of_week'].isin([5, 6]), 1, 0)
df_total.drop('date', axis=1, inplace=True)
one_hot_encoded_data = pd.get_dummies(df_total, columns=['country', 'store', 'product'], drop_first=True)
df_train = one_hot_encoded_data[~one_hot_encoded_data.num_sold.isnull()]
df_test = one_hot_encoded_data[one_hot_encoded_data.num_sold.isnull()].drop(['num_sold'], axis=1)
X_train = df_train.drop('num_sold', axis=1)
y_train = df_train['num_sold']
X_test = df_test

def My_CV(_model):
    cv_score = round(cross_val_score(_model, X_train, y_train, cv=5, scoring='r2').mean(), 4)

xgb = XGBRegressor(random_state=2, objective='reg:squarederror')
My_CV(xgb)
code
105187067/cell_20
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
df_total = train.append(test)
df_total.drop('row_id', axis=1, inplace=True)
df_total['date'] = pd.to_datetime(df_total['date'], format='%Y-%m-%d')
df_total['year'] = df_total['date'].dt.year
df_total['month'] = df_total['date'].dt.month
df_total['day'] = df_total['date'].dt.day
df_total['day_of_week'] = df_total['date'].dt.day_of_week
df_total['day_of_year'] = df_total['date'].dt.day_of_year
df_total['is_weekend'] = np.where(df_total['day_of_week'].isin([5, 6]), 1, 0)
df_total.drop('date', axis=1, inplace=True)
one_hot_encoded_data = pd.get_dummies(df_total, columns=['country', 'store', 'product'], drop_first=True)
df_train = one_hot_encoded_data[~one_hot_encoded_data.num_sold.isnull()]
df_test = one_hot_encoded_data[one_hot_encoded_data.num_sold.isnull()].drop(['num_sold'], axis=1)
X_train = df_train.drop('num_sold', axis=1)
y_train = df_train['num_sold']
X_test = df_test
X_test.shape
code
105187067/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
train.head()
code
105187067/cell_2
[ "text_plain_output_1.png" ]
import warnings
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, PowerTransformer
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV, cross_val_predict
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, HistGradientBoostingRegressor, AdaBoostRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from xgboost import XGBRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import r2_score
from sklearn.compose import TransformedTargetRegressor

warnings.filterwarnings('ignore')
code
105187067/cell_19
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
df_total = train.append(test)
df_total.drop('row_id', axis=1, inplace=True)
df_total['date'] = pd.to_datetime(df_total['date'], format='%Y-%m-%d')
df_total['year'] = df_total['date'].dt.year
df_total['month'] = df_total['date'].dt.month
df_total['day'] = df_total['date'].dt.day
df_total['day_of_week'] = df_total['date'].dt.day_of_week
df_total['day_of_year'] = df_total['date'].dt.day_of_year
df_total['is_weekend'] = np.where(df_total['day_of_week'].isin([5, 6]), 1, 0)
df_total.drop('date', axis=1, inplace=True)
one_hot_encoded_data = pd.get_dummies(df_total, columns=['country', 'store', 'product'], drop_first=True)
df_train = one_hot_encoded_data[~one_hot_encoded_data.num_sold.isnull()]
df_test = one_hot_encoded_data[one_hot_encoded_data.num_sold.isnull()].drop(['num_sold'], axis=1)
X_train = df_train.drop('num_sold', axis=1)
y_train = df_train['num_sold']
X_test = df_test
X_test.head()
code
105187067/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105187067/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
train.info()
code
105187067/cell_16
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
df_total = train.append(test)
df_total.drop('row_id', axis=1, inplace=True)
df_total['date'] = pd.to_datetime(df_total['date'], format='%Y-%m-%d')
df_total['year'] = df_total['date'].dt.year
df_total['month'] = df_total['date'].dt.month
df_total['day'] = df_total['date'].dt.day
df_total['day_of_week'] = df_total['date'].dt.day_of_week
df_total['day_of_year'] = df_total['date'].dt.day_of_year
df_total['is_weekend'] = np.where(df_total['day_of_week'].isin([5, 6]), 1, 0)
df_total.drop('date', axis=1, inplace=True)
one_hot_encoded_data = pd.get_dummies(df_total, columns=['country', 'store', 'product'], drop_first=True)
one_hot_encoded_data.head()
code
105187067/cell_24
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, HistGradientBoostingRegressor, AdaBoostRegressor
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV, cross_val_predict
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
df_total = train.append(test)
df_total.drop('row_id', axis=1, inplace=True)
df_total['date'] = pd.to_datetime(df_total['date'], format='%Y-%m-%d')
df_total['year'] = df_total['date'].dt.year
df_total['month'] = df_total['date'].dt.month
df_total['day'] = df_total['date'].dt.day
df_total['day_of_week'] = df_total['date'].dt.day_of_week
df_total['day_of_year'] = df_total['date'].dt.day_of_year
df_total['is_weekend'] = np.where(df_total['day_of_week'].isin([5, 6]), 1, 0)
df_total.drop('date', axis=1, inplace=True)
one_hot_encoded_data = pd.get_dummies(df_total, columns=['country', 'store', 'product'], drop_first=True)
df_train = one_hot_encoded_data[~one_hot_encoded_data.num_sold.isnull()]
df_test = one_hot_encoded_data[one_hot_encoded_data.num_sold.isnull()].drop(['num_sold'], axis=1)
X_train = df_train.drop('num_sold', axis=1)
y_train = df_train['num_sold']
X_test = df_test

def My_CV(_model):
    cv_score = round(cross_val_score(_model, X_train, y_train, cv=5, scoring='r2').mean(), 4)

Hgb = HistGradientBoostingRegressor(random_state=2)
My_CV(Hgb)
code
105187067/cell_14
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
df_total = train.append(test)
df_total.drop('row_id', axis=1, inplace=True)
df_total['date'] = pd.to_datetime(df_total['date'], format='%Y-%m-%d')
df_total['year'] = df_total['date'].dt.year
df_total['month'] = df_total['date'].dt.month
df_total['day'] = df_total['date'].dt.day
df_total['day_of_week'] = df_total['date'].dt.day_of_week
df_total['day_of_year'] = df_total['date'].dt.day_of_year
df_total['is_weekend'] = np.where(df_total['day_of_week'].isin([5, 6]), 1, 0)
df_total.drop('date', axis=1, inplace=True)
df_total.info()
code
105187067/cell_22
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, HistGradientBoostingRegressor, AdaBoostRegressor
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV, cross_val_predict
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
df_total = train.append(test)
df_total.drop('row_id', axis=1, inplace=True)
df_total['date'] = pd.to_datetime(df_total['date'], format='%Y-%m-%d')
df_total['year'] = df_total['date'].dt.year
df_total['month'] = df_total['date'].dt.month
df_total['day'] = df_total['date'].dt.day
df_total['day_of_week'] = df_total['date'].dt.day_of_week
df_total['day_of_year'] = df_total['date'].dt.day_of_year
df_total['is_weekend'] = np.where(df_total['day_of_week'].isin([5, 6]), 1, 0)
df_total.drop('date', axis=1, inplace=True)
one_hot_encoded_data = pd.get_dummies(df_total, columns=['country', 'store', 'product'], drop_first=True)
df_train = one_hot_encoded_data[~one_hot_encoded_data.num_sold.isnull()]
df_test = one_hot_encoded_data[one_hot_encoded_data.num_sold.isnull()].drop(['num_sold'], axis=1)
X_train = df_train.drop('num_sold', axis=1)
y_train = df_train['num_sold']
X_test = df_test

def My_CV(_model):
    cv_score = round(cross_val_score(_model, X_train, y_train, cv=5, scoring='r2').mean(), 4)

rf = RandomForestRegressor(random_state=2)
My_CV(rf)
code
72107191/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import os, glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='white')
from statsmodels.distributions.empirical_distribution import ECDF
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf

print('Setup Complete')
code
72107191/cell_25
[ "text_html_output_1.png" ]
import glob  # used below but not imported in the original cell
import pandas as pd

path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]  # district id from the file name
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
districts_info.shape
districts_info.columns
code
72107191/cell_34
[ "text_plain_output_1.png" ]
import glob  # used below but not imported in the original cell
import pandas as pd

path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
engagement_data.shape
engagement_data.columns
print(f'Number of rows: {engagement_data.shape[0]}; Number of columns: {engagement_data.shape[1]}; No of missing values: {sum(engagement_data.isna().sum())}')
code
72107191/cell_30
[ "text_plain_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
districts_info.shape
districts_info.columns
districts_info.isnull().sum()
districts_info.duplicated().any()
code
72107191/cell_33
[ "text_html_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
engagement_data.shape
engagement_data.columns
code
72107191/cell_20
[ "text_plain_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
product_info.shape
product_info.columns
product_info.isnull().sum()
product_info.info()
code
72107191/cell_29
[ "text_plain_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
districts_info.shape
districts_info.columns
districts_info.isnull().sum()
districts_info.info()
code
72107191/cell_26
[ "text_plain_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
districts_info.shape
districts_info.columns
print(f'Number of rows: {districts_info.shape[0]}; Number of columns: {districts_info.shape[1]}; No of missing values: {sum(districts_info.isna().sum())}')
code
72107191/cell_48
[ "text_plain_output_1.png", "image_output_1.png" ]
import glob
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
product_info.shape
product_info.columns
product_info.isnull().sum()
product_info.duplicated().any()
# rename both columns in a single call
product_info = product_info.rename(columns={'Provider/Company Name': 'provider', 'Primary Essential Function': 'essential function'})
plt.figure(figsize=(8, 6))
sns.countplot(x='essential function', order=product_info['essential function'].value_counts().index[:3], data=product_info, color='blue')
plt.title("product's essential function")
code
72107191/cell_19
[ "text_plain_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
product_info.shape
product_info.columns
product_info.isnull().sum()
code
72107191/cell_32
[ "text_plain_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
engagement_data.head()
engagement_data.tail()
code
72107191/cell_28
[ "text_plain_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
districts_info.shape
districts_info.columns
districts_info.isnull().sum()
code
72107191/cell_15
[ "text_plain_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
product_info.head()
product_info.tail()
code
72107191/cell_16
[ "text_html_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
product_info.shape
product_info.columns
code
72107191/cell_38
[ "text_plain_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
engagement_data.shape
engagement_data.columns
engagement_data.isnull().sum()
engagement_data.duplicated().any()
code
72107191/cell_17
[ "text_plain_output_1.png" ]
import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
product_info.shape
product_info.columns
print(f'Number of rows: {product_info.shape[0]}; Number of columns: {product_info.shape[1]}; No of missing values: {sum(product_info.isna().sum())}')
code