path (string, lengths 13-17) | screenshot_names (sequence, lengths 1-873) | code (string, lengths 0-40.4k) | cell_type (string, 1 value) |
---|---|---|---|
74056627/cell_26 | [
"text_plain_output_1.png"
] | df = pd.DataFrame({'a': np.random.choice(list('abcd')), 'b': np.random.rand(10000000)}) | code |
74056627/cell_11 | [
"text_plain_output_1.png"
] | !pip install line_profiler | code |
74056627/cell_7 | [
"text_plain_output_1.png"
] | for i in range(5):
pd.Series(np.random.randint(10, 20, 10000)) | code |
74056627/cell_18 | [
"text_plain_output_1.png"
] | | code |
74056627/cell_32 | [
"text_plain_output_1.png"
] | from numba import vectorize, int64
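# numba's @vectorize compiles the scalar function below into a NumPy ufunc that runs elementwise over int64 arrays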
@vectorize([int64(int64)])
def vect_relu(n):
if n < 0:
return 0
else:
return n | code |
74056627/cell_15 | [
"text_plain_output_1.png"
] | total = 0
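# pure-Python loop over the Series; the vectorized s.sum() does the same reduction far faster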
for val in s:
total += val | code |
74056627/cell_16 | [
"text_plain_output_1.png"
] | | code |
74056627/cell_17 | [
"text_plain_output_1.png"
] | | code |
74056627/cell_31 | [
"text_plain_output_1.png"
] | s = pd.Series(np.random.randint(-3, 10, 1000000))
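# scalar ReLU meant to be applied elementwise (e.g. via s.apply(relu)); the numba-vectorized version appears in another cell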
def relu(n):
return 0 if n < 0 else n | code |
74056627/cell_24 | [
"text_plain_output_1.png"
] | | code |
74056627/cell_27 | [
"text_plain_output_1.png"
] | | code |
34126448/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
grid_df = pd.read_pickle('/kaggle/input/m5-simple-fe/grid_part_1.pkl')
grid_df = grid_df[['id', 'd', 'sales']].pivot(index='id', columns='d').reset_index()
ids = grid_df['id']
grid_df = grid_df['sales'].iloc[:, :1913]
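# recode each day's sales: keep NaN (item not yet listed), 1 where sales > 0, otherwise -1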
grid_df = pd.DataFrame(np.where(grid_df.isnull(), np.nan, np.where(grid_df > 0, 1, -1)))
grid_df.columns = [f'd_{i}' for i in range(1, 1914)]
d1_peak = grid_df.notnull().sum(axis=1)
cluster = d1_peak.copy()
grid_df['cluster'] = cluster
grid_df = grid_df.fillna(0)
grid_df['cluster'].value_counts() | code |
34126448/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
grid_df = pd.read_pickle('/kaggle/input/m5-simple-fe/grid_part_1.pkl')
grid_df = grid_df[['id', 'd', 'sales']].pivot(index='id', columns='d').reset_index()
ids = grid_df['id']
grid_df = grid_df['sales'].iloc[:, :1913]
grid_df = pd.DataFrame(np.where(grid_df.isnull(), np.nan, np.where(grid_df > 0, 1, -1)))
grid_df.columns = [f'd_{i}' for i in range(1, 1914)]
d1_peak = grid_df.notnull().sum(axis=1)
cluster = d1_peak.copy()
sns.kdeplot(d1_peak)  # note: d1_peak counts non-null days per item, despite the 'Nan' wording in the title
plt.title('# of Nan distribution')
plt.show() | code |
34126448/cell_1 | [
"text_plain_output_1.png"
] | import os
import warnings
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from scipy.cluster.hierarchy import linkage, fcluster
import warnings
warnings.filterwarnings('ignore') | code |
34126448/cell_10 | [
"image_output_1.png"
] | from scipy.cluster.hierarchy import linkage, fcluster
from tqdm import tqdm
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
grid_df = pd.read_pickle('/kaggle/input/m5-simple-fe/grid_part_1.pkl')
grid_df = grid_df[['id', 'd', 'sales']].pivot(index='id', columns='d').reset_index()
ids = grid_df['id']
grid_df = grid_df['sales'].iloc[:, :1913]
grid_df = pd.DataFrame(np.where(grid_df.isnull(), np.nan, np.where(grid_df > 0, 1, -1)))
grid_df.columns = [f'd_{i}' for i in range(1, 1914)]
d1_peak = grid_df.notnull().sum(axis=1)
cluster = d1_peak.copy()
grid_df['cluster'] = cluster
grid_df = grid_df.fillna(0)
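# within each cluster id, build an item-by-item dot-product matrix of the +1/-1 patterns; note scipy's linkage() treats a 2-D input as raw observation vectors, not as a precomputed distance matrix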
for clt in tqdm(range(4, 5)):
df_name = f'cluster_{clt}_df'
cluster_df = grid_df[grid_df['cluster'] == clt]
cluster_array = cluster_df.values
dist_matrix = np.dot(cluster_array, cluster_array.T)
Z = linkage(dist_matrix, method='single')
cluster_num = fcluster(Z, t=4, criterion='maxclust')
cluster_df['cluster'] = cluster_df['cluster'] * cluster_num
globals()[df_name] = cluster_df | code |
18160674/cell_13 | [
"text_plain_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path, ImageDataBunch, cnn_learner, models, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.densenet121, metrics=accuracy)
learn.fit_one_cycle(10)
learn.model_dir = '/kaggle/working'
learn.save('densenet256', return_path=True)
learn.unfreeze()
learn.lr_find()
learn.recorder.plot() | code |
18160674/cell_9 | [
"image_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path, ImageDataBunch, cnn_learner, models, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.densenet121, metrics=accuracy) | code |
18160674/cell_6 | [
"image_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path, ImageDataBunch, get_transforms, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes | code |
18160674/cell_11 | [
"text_plain_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path, ImageDataBunch, cnn_learner, models, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.densenet121, metrics=accuracy)
learn.fit_one_cycle(10)
learn.model_dir = '/kaggle/working'
learn.save('densenet256', return_path=True) | code |
18160674/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18160674/cell_7 | [
"text_html_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path, ImageDataBunch, get_transforms, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes
data.show_batch(rows=3, figsize=(7, 8)) | code |
18160674/cell_8 | [
"text_plain_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path, ImageDataBunch, get_transforms, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds)) | code |
18160674/cell_15 | [
"text_html_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path, ImageDataBunch, cnn_learner, models, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.densenet121, metrics=accuracy)
learn.fit_one_cycle(10)
learn.model_dir = '/kaggle/working'
learn.save('densenet256', return_path=True)
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.001))
learn.save('densenet121_256_s2', return_path=True) | code |
18160674/cell_16 | [
"text_plain_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (cnn_learner, ClassificationInterpretation, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.densenet121, metrics=accuracy)
learn.fit_one_cycle(10)
learn.model_dir = '/kaggle/working'
learn.save('densenet256', return_path=True)
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.001))
learn.save('densenet121_256_s2', return_path=True)
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix() | code |
18160674/cell_3 | [
"text_plain_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path and its .ls() helper)
path = Path('../input/dataset')
path.ls() | code |
18160674/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path, ImageDataBunch, cnn_learner, models, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.densenet121, metrics=accuracy)
learn.fit_one_cycle(10)
learn.model_dir = '/kaggle/working'
learn.save('densenet256', return_path=True)
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.001)) | code |
18160674/cell_10 | [
"text_plain_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path, ImageDataBunch, cnn_learner, models, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.densenet121, metrics=accuracy)
learn.fit_one_cycle(10) | code |
18160674/cell_12 | [
"image_output_1.png"
] | from fastai.vision import *  # assumed: the fastai v1 star-import this notebook relies on (Path, ImageDataBunch, cnn_learner, models, ...)
import numpy as np # linear algebra
path = Path('../input/dataset')
path.ls()
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=False), size=256, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.densenet121, metrics=accuracy)
learn.fit_one_cycle(10)
learn.model_dir = '/kaggle/working'
learn.save('densenet256', return_path=True)
learn.unfreeze()
learn.lr_find() | code |
73068375/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.nunique()
df.isnull().values.sum()
df.workclass.unique()
df.occupation.unique()
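# '?' is this dataset's missing-value marker; null_idx collects, per column, the row indices holding it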
def null_idx(df):
idx = []
for col in df.columns:
i = df[df[col] == '?'].index.values.tolist()
idx.append(i)
return idx
def null_col(df, null_idx=None):
if null_idx:
n = pd.DataFrame({'null': [len(i) for i in null_idx], 'null_per': [len(i) / df.shape[0] for i in null_idx]}, index=df.columns).sort_values('null', ascending=False)
return n[n.null > 0]
n = pd.DataFrame(df.isnull().sum(), columns=['nans'])
return n[n.nans > 0]
df[df.workclass == '?'].describe() | code |
73068375/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.nunique()
df.isnull().values.sum()
df.workclass.unique() | code |
73068375/cell_4 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.info() | code |
73068375/cell_2 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape | code |
73068375/cell_1 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73068375/cell_7 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.nunique()
df.isnull().values.sum() | code |
73068375/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.nunique()
df.isnull().values.sum()
df.workclass.unique()
df.occupation.unique()
def null_idx(df):
idx = []
for col in df.columns:
i = df[df[col] == '?'].index.values.tolist()
idx.append(i)
return idx
def null_col(df, null_idx=None):
if null_idx:
n = pd.DataFrame({'null': [len(i) for i in null_idx], 'null_per': [len(i) / df.shape[0] for i in null_idx]}, index=df.columns).sort_values('null', ascending=False)
return n[n.null > 0]
n = pd.DataFrame(df.isnull().sum(), columns=['nans'])
return n[n.nans > 0]
null_indx = null_idx(df)
nans = null_col(df, null_indx)
nans
idx_to_drop = []
for i in null_indx:
idx_to_drop = idx_to_drop + i
idx_to_drop = np.unique(idx_to_drop)
idx_to_drop.shape[0] / df.shape[0]
df = df.drop(idx_to_drop)
df.head() | code |
73068375/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.nunique()
df.isnull().values.sum()
df.workclass.unique()
df.occupation.unique()
def null_idx(df):
idx = []
for col in df.columns:
i = df[df[col] == '?'].index.values.tolist()
idx.append(i)
return idx
def null_col(df, null_idx=None):
if null_idx:
n = pd.DataFrame({'null': [len(i) for i in null_idx], 'null_per': [len(i) / df.shape[0] for i in null_idx]}, index=df.columns).sort_values('null', ascending=False)
return n[n.null > 0]
n = pd.DataFrame(df.isnull().sum(), columns=['nans'])
return n[n.nans > 0]
null_indx = null_idx(df)
nans = null_col(df, null_indx)
nans | code |
73068375/cell_3 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.head() | code |
73068375/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.nunique()
df.isnull().values.sum()
df.workclass.unique()
df.occupation.unique()
def null_idx(df):
idx = []
for col in df.columns:
i = df[df[col] == '?'].index.values.tolist()
idx.append(i)
return idx
def null_col(df, null_idx=None):
if null_idx:
n = pd.DataFrame({'null': [len(i) for i in null_idx], 'null_per': [len(i) / df.shape[0] for i in null_idx]}, index=df.columns).sort_values('null', ascending=False)
return n[n.null > 0]
n = pd.DataFrame(df.isnull().sum(), columns=['nans'])
return n[n.nans > 0]
null_indx = null_idx(df)
nans = null_col(df, null_indx)
nans
idx_to_drop = []
for i in null_indx:
idx_to_drop = idx_to_drop + i
idx_to_drop = np.unique(idx_to_drop)
idx_to_drop.shape[0] / df.shape[0] | code |
73068375/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.nunique()
df.isnull().values.sum()
df.workclass.unique()
df.occupation.unique() | code |
73068375/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.nunique()
df.isnull().values.sum()
df.workclass.unique()
df.occupation.unique()
def null_idx(df):
idx = []
for col in df.columns:
i = df[df[col] == '?'].index.values.tolist()
idx.append(i)
return idx
def null_col(df, null_idx=None):
if null_idx:
n = pd.DataFrame({'null': [len(i) for i in null_idx], 'null_per': [len(i) / df.shape[0] for i in null_idx]}, index=df.columns).sort_values('null', ascending=False)
return n[n.null > 0]
n = pd.DataFrame(df.isnull().sum(), columns=['nans'])
return n[n.nans > 0]
df[df.workclass == '?'].head() | code |
73068375/cell_5 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings as filt
from scipy.stats import skew, norm
pd.options.display.max_columns = None
filt('ignore')
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 6)
import os
df = pd.read_csv('/kaggle/input/adult-census-income/adult.csv')
df.shape
df.columns = [c.replace('.', '_') for c in df.columns]
df.nunique() | code |
88090498/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis') | code |
88090498/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape | code |
88090498/cell_23 | [
"text_plain_output_1.png"
# installing openpyxl so pd.read_excel can read the Excel country-code file
!pip install openpyxl | code |
88090498/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.head() | code |
88090498/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
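# left-join the country lookup so every restaurant row gains a readable Country name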
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes | code |
88090498/cell_41 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes
final_df.Country.value_counts()
country_names = final_df.Country.value_counts().index
country_val = final_df.Country.value_counts().values
final_df.columns | code |
88090498/cell_54 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes
final_df.Country.value_counts()
country_names = final_df.Country.value_counts().index
country_val = final_df.Country.value_counts().values
final_df.columns
final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size()
ratings = final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size().reset_index().rename(columns={0: 'Rating Count'})
plt.rcParams['figure.figsize'] = (12, 6)
sns.barplot(x='Aggregate rating', y='Rating Count', hue='Rating color', data=ratings, palette=['blue', 'red', 'orange', 'yellow', 'green', 'green']) | code |
88090498/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0] | code |
88090498/cell_52 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes
final_df.Country.value_counts()
country_names = final_df.Country.value_counts().index
country_val = final_df.Country.value_counts().values
final_df.columns
final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size()
ratings = final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size().reset_index().rename(columns={0: 'Rating Count'})
plt.rcParams['figure.figsize'] = (12, 6)
sns.barplot(x='Aggregate rating', y='Rating Count', hue='Rating color', data=ratings) | code |
88090498/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns | code |
88090498/cell_49 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes
final_df.Country.value_counts()
country_names = final_df.Country.value_counts().index
country_val = final_df.Country.value_counts().values
final_df.columns
final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size()
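# tally rows per (Aggregate rating, Rating color, Rating text) combination and name the count column 'Rating Count'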
ratings = final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size().reset_index().rename(columns={0: 'Rating Count'})
ratings.head() | code |
88090498/cell_32 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes
final_df.Country.value_counts() | code |
88090498/cell_51 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes
final_df.Country.value_counts()
country_names = final_df.Country.value_counts().index
country_val = final_df.Country.value_counts().values
final_df.columns
final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size()
ratings = final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size().reset_index().rename(columns={0: 'Rating Count'})
plt.rcParams['figure.figsize'] = (12, 6)
sns.barplot(x='Aggregate rating', y='Rating Count', data=ratings) | code |
88090498/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum() | code |
88090498/cell_38 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes
final_df.Country.value_counts()
country_names = final_df.Country.value_counts().index
country_val = final_df.Country.value_counts().values
plt.pie(country_val[:3], labels=country_names[:3], autopct='%1.2f%%') | code |
88090498/cell_35 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes
final_df.Country.value_counts()
country_names = final_df.Country.value_counts().index
country_names | code |
88090498/cell_43 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes
final_df.Country.value_counts()
country_names = final_df.Country.value_counts().index
country_val = final_df.Country.value_counts().values
final_df.columns
final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size() | code |
88090498/cell_46 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df.dtypes
final_df.Country.value_counts()
country_names = final_df.Country.value_counts().index
country_val = final_df.Country.value_counts().values
final_df.columns
final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size()
ratings = final_df.groupby(['Aggregate rating', 'Rating color', 'Rating text']).size().reset_index().rename(columns={0: 'Rating Count'})
ratings | code |
88090498/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country | code |
88090498/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.info() | code |
88090498/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.isnull().sum()
[features for features in df.columns if df[features].isnull().sum() > 0]
df_country = pd.read_excel('../input/zomato-restaurants-data/Country-Code.xlsx')
df_country
final_df = pd.merge(df, df_country, on='Country Code', how='left')
final_df | code |
88090498/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/zomato-restaurants-data/zomato.csv', encoding='latin-1')
df.columns
df.shape
df.describe() | code |
322662/cell_2 | [
"text_html_output_1.png"
] | from subprocess import check_output
import datetime
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import datetime
from subprocess import check_output
def dateparse(x):
try:
return datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')  # pd.datetime was removed from pandas; use the datetime module directly
except TypeError as err:
return None
d = pd.read_csv('../input/trainView.csv', header=0, names=['train_id', 'status', 'next_station', 'service', 'dest', 'lon', 'lat', 'source', 'track_change', 'track', 'date', 'timeStamp0', 'timeStamp1'], dtype={'train_id': str, 'status': str, 'next_station': str, 'service': str, 'dest': str, 'lon': str, 'lat': str, 'source': str, 'track_change': str, 'track': str, 'date': str, 'timeStamp0': datetime.datetime, 'timeStamp1': datetime.datetime})
d.head()
d['timeStamp0'] = pd.to_datetime(d['timeStamp0'], format='%Y-%m-%d %H:%M:%S')
d['timeStamp1'] = pd.to_datetime(d['timeStamp1'], format='%Y-%m-%d %H:%M:%S', errors='coerce')
d.head() | code |
322662/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import datetime
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import datetime
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
def dateparse(x):
try:
print('Inside DateParse')
return datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')  # pd.datetime was removed from pandas; use the datetime module directly
except TypeError as err:
print('My exception occurred:', err)  # TypeError has no .value attribute
return None
d = pd.read_csv('../input/trainView.csv', header=0, names=['train_id', 'status', 'next_station', 'service', 'dest', 'lon', 'lat', 'source', 'track_change', 'track', 'date', 'timeStamp0', 'timeStamp1'], dtype={'train_id': str, 'status': str, 'next_station': str, 'service': str, 'dest': str, 'lon': str, 'lat': str, 'source': str, 'track_change': str, 'track': str, 'date': str, 'timeStamp0': datetime.datetime, 'timeStamp1': datetime.datetime}) | code |
322662/cell_3 | [
"text_plain_output_1.png"
] | """
def getDeltaTime(x):
r=(x[1] - x[0]).total_seconds()
return r
# It might make sense to add delta_s to the next version
d['delta_s']=d[['timeStamp0','timeStamp1']].apply(getDeltaTime, axis=1)
""" | code |
2000224/cell_4 | [
"text_html_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
from subprocess import check_output
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
from subprocess import check_output
pakistan = pd.read_csv('../input/PakistanDroneAttacks.csv', encoding='ISO-8859-1')
pakistan.columns = pakistan.columns.str.lower()
pakistan = pakistan.dropna(subset=['date'])
separate = pakistan['date'].str.split(',')
day, month, years = zip(*separate)
pakistan['years'] = years
pakistan_years = np.asarray(pakistan['years'].unique())
pakistan_died = pakistan.groupby('years')['total died mix'].count()
pakistan_injured = pakistan.groupby('years')['injured max'].count()
labels = ['DIED', 'INJURED']
colors = ['rgb(255, 51, 0)', 'rgb(0, 51, 204)']
x_data = pakistan_years
y_data = [pakistan_died, pakistan_injured]
traces = []
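# one line per outcome series (died, injured), styled with the matching color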
for i in range(0, 2):
traces.append(go.Scatter(x=x_data, y=y_data[i], mode='lines', name=labels[i], line=dict(color=colors[i], width=1.5, shape='spline')))  # 'splines' is not a valid plotly mode; spline smoothing belongs on line.shape
layout = {'title': 'Died and Injured by Drone Attack (2004-2017)', 'xaxis': {'title': 'Years'}, 'yaxis': {'title': 'People'}}
figure = dict(data=traces, layout=layout)
iplot(figure) | code |
2000224/cell_6 | [
"text_html_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
from subprocess import check_output
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
from subprocess import check_output
pakistan = pd.read_csv('../input/PakistanDroneAttacks.csv', encoding='ISO-8859-1')
pakistan.columns = pakistan.columns.str.lower()
pakistan = pakistan.dropna(subset=['date'])
separate = pakistan['date'].str.split(',')
day, month, years = zip(*separate)
pakistan['years'] = years
pakistan_years = np.asarray(pakistan['years'].unique())
pakistan_died = pakistan.groupby('years')['total died mix'].count()
pakistan_injured = pakistan.groupby('years')['injured max'].count()
labels = ['DIED', 'INJURED']
colors = ['rgb(255, 51, 0)', 'rgb(0, 51, 204)']
x_data = pakistan_years
y_data = [pakistan_died, pakistan_injured]
traces = []
for i in range(0, 2):
traces.append(go.Scatter(x=x_data, y=y_data[i], mode='lines', name=labels[i], line=dict(color=colors[i], width=1.5, shape='spline')))  # 'splines' is not a valid plotly mode; spline smoothing belongs on line.shape
layout = {'title': 'Died and Injured by Drone Attack (2004-2017)', 'xaxis': {'title': 'Years'}, 'yaxis': {'title': 'People'}}
figure = dict(data=traces, layout=layout)
pakistan['injured max'] = pakistan['injured max'].fillna(0)
pakistan['total died mix'] = pakistan['total died mix'].fillna(0)
pakistan['text'] = pakistan['date'] + '<br>' + pakistan['total died mix'].astype(str) + ' Killed, ' + pakistan['injured max'].astype(str) + ' Injured' + '<br>' + 'City: ' + pakistan['city'].astype(str) + '<br>' + 'Location: ' + pakistan['location'].astype(str)
# note: plotly has no 'Pakistan' locationmode or scope; points are placed by lon/lat, and scope='asia' with the lon/lat axis ranges below reproduces the original view
died = dict(type='scattergeo', lon=pakistan[pakistan['total died mix'] > 0]['longitude'], lat=pakistan[pakistan['total died mix'] > 0]['latitude'], text=pakistan[pakistan['total died mix'] > 0]['text'], mode='markers', name='DIED', hoverinfo='text+name', marker=dict(size=pakistan[pakistan['total died mix'] > 0]['total died mix'] ** 0.255 * 8, opacity=0.95, color='rgb(240, 140, 45)'))
injuries = dict(type='scattergeo', lon=pakistan[pakistan['total died mix'] == 0]['longitude'], lat=pakistan[pakistan['total died mix'] == 0]['latitude'], text=pakistan[pakistan['total died mix'] == 0]['text'], mode='markers', name='INJURIES', hoverinfo='text+name', marker=dict(size=(pakistan[pakistan['total died mix'] == 0]['injured max'] + 1) ** 0.245 * 8, opacity=0.85, color='rgb(20, 150, 187)'))
layout = go.Layout(title='Drone Attacks by Latitude/Longitude in Pakistan (2004-2017)', showlegend=True, legend=dict(x=0.85, y=0.4), geo=dict(resolution=50, scope='asia', showframe=False, showcoastlines=True, showland=True, showcountries=True, landcolor='rgb(200,200,200)', countrycolor='rgb(1, 1, 1)', coastlinecolor='rgb(1, 1, 1)', projection=dict(type='mercator'), lonaxis=dict(range=[62.0, 78.0]), lataxis=dict(range=[24.0, 35]), domain=dict(x=[0, 1], y=[0, 1])))
data = [died, injuries]
figure = dict(data=data, layout=layout)
iplot(figure) | code |
2000224/cell_2 | [
"text_plain_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
from subprocess import check_output
import pandas as pd
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
pakistan = pd.read_csv('../input/PakistanDroneAttacks.csv', encoding='ISO-8859-1')
pakistan.columns = pakistan.columns.str.lower()
pakistan = pakistan.dropna(subset=['date'])
separate = pakistan['date'].str.split(',')
day, month, years = zip(*separate)
pakistan['years'] = years | code |
122253041/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
us_yt = pd.read_csv('../input/youtube-trending-video-dataset/US_youtube_trending_data.csv')
us_yt.categoryId.nunique()
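# slice out the single row with the maximum view_count (label slicing here relies on the default RangeIndex)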
us_yt[us_yt['view_count'].idxmax():us_yt['view_count'].idxmax() + 1] | code |
122253041/cell_2 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
us_yt = pd.read_csv('../input/youtube-trending-video-dataset/US_youtube_trending_data.csv')
display(us_yt.head())
print(us_yt.columns) | code |
122253041/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
us_yt = pd.read_csv('../input/youtube-trending-video-dataset/US_youtube_trending_data.csv')
us_yt.categoryId.nunique()
us_yt.head() | code |
122253041/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
us_yt = pd.read_csv('../input/youtube-trending-video-dataset/US_youtube_trending_data.csv')
us_yt.categoryId.nunique()
correlation_list = ['view_count', 'likes', 'dislikes', 'comment_count']
hm_data = us_yt[correlation_list].corr()
display(hm_data) | code |
2009496/cell_2 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing,neighbors
import numpy as np # linear algebra
import pandas as pd
import numpy as np
import pandas as pd
from sklearn import preprocessing, neighbors
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed; model_selection is its replacement
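# handle_non_numeric: integer-encode every non-numeric column with arbitrary codes (the mapping itself is discarded)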
def handle_non_numeric(df):
columns = df.columns.values
for col in columns:
text_digit_vals = {}
def convert_to_int(val):
return text_digit_vals[val]
if df[col].dtype != np.int64 and df[col].dtype != np.float64:
column_contents = df[col].values.tolist()
unique_elements = set(column_contents)
x = 0
for unique in unique_elements:
if unique not in text_digit_vals:
text_digit_vals[unique] = x
x += 1
df[col] = list(map(convert_to_int, df[col]))
return df
df_o = pd.read_csv('../input/Family Income and Expenditure.csv')
quants = list(df_o['Total Household Income'].quantile([0.25, 0.5, 0.75]))
print('quantiles', quants)
income_cat = []
for i in df_o['Total Household Income']:
if i < quants[0]:
income_cat.append('P')
elif i >= quants[0] and i < quants[1]:
income_cat.append('LM')
elif i >= quants[1] and i < quants[2]:
income_cat.append('HM')
else:
income_cat.append('R')
df = df_o.drop('Total Household Income', axis=1)
df['Income'] = income_cat
df = handle_non_numeric(df)
X = np.array(df.drop('Income', axis=1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['Income'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print('accuracy', accuracy) | code |
2009496/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd
import numpy as np
import pandas as pd
from sklearn import preprocessing, neighbors  # cross_validation no longer exists in scikit-learn and is unused in this cell
def handle_non_numeric(df):
columns = df.columns.values
for col in columns:
text_digit_vals = {}
def convert_to_int(val):
return text_digit_vals[val]
if df[col].dtype != np.int64 and df[col].dtype != np.float64:
column_contents = df[col].values.tolist()
unique_elements = set(column_contents)
x = 0
for unique in unique_elements:
if unique not in text_digit_vals:
text_digit_vals[unique] = x
x += 1
df[col] = list(map(convert_to_int, df[col]))
return df
df_o = pd.read_csv('../input/Family Income and Expenditure.csv')
print(df_o.head()) | code |
2009496/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn import preprocessing,neighbors
import numpy as np # linear algebra
import pandas as pd
import numpy as np
import pandas as pd
from sklearn import preprocessing, neighbors
from sklearn.model_selection import train_test_split  # replaces the removed sklearn.cross_validation
from sklearn.cluster import KMeans  # missing from the original cell, whose recorded output is a stderr trace
def handle_non_numeric(df):
columns = df.columns.values
for col in columns:
text_digit_vals = {}
def convert_to_int(val):
return text_digit_vals[val]
if df[col].dtype != np.int64 and df[col].dtype != np.float64:
column_contents = df[col].values.tolist()
unique_elements = set(column_contents)
x = 0
for unique in unique_elements:
if unique not in text_digit_vals:
text_digit_vals[unique] = x
x += 1
df[col] = list(map(convert_to_int, df[col]))
return df
df_o = pd.read_csv('../input/Family Income and Expenditure.csv')
quants = list(df_o['Total Household Income'].quantile([0.25, 0.5, 0.75]))
income_cat = []
for i in df_o['Total Household Income']:
if i < quants[0]:
income_cat.append('P')
elif i >= quants[0] and i < quants[1]:
income_cat.append('LM')
elif i >= quants[1] and i < quants[2]:
income_cat.append('HM')
else:
income_cat.append('R')
df = df_o.drop('Total Household Income', axis=1)
df['Income'] = income_cat
df = handle_non_numeric(df)
X = np.array(df.drop('Income', axis=1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['Income'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
clf = KMeans(n_clusters=4)
clf.fit(X)
correct = 0
for i in range(len(X)):
predict_me = np.array(X[i].astype(float))
predict_me = predict_me.reshape(-1, len(predict_me))
prediction = clf.predict(predict_me)
if prediction[0] == y[i]:
correct += 1
print('accuracy', correct / len(X)) | code |
332299/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
iris = pd.read_csv('../input/Iris.csv')
sns.jointplot(x='SepalLengthCm', y='SepalWidthCm', data=iris, size=5) | code |
332299/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
iris = pd.read_csv('../input/Iris.csv')
sns.FacetGrid(iris, hue='Species', size=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
sns.boxplot(x='Species', y='PetalLengthCm', data=iris) | code |
332299/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
iris = pd.read_csv('../input/Iris.csv')
iris['Species'].value_counts() | code |
332299/cell_1 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
iris = pd.read_csv('../input/Iris.csv')
iris.head() | code |
332299/cell_7 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
iris = pd.read_csv('../input/Iris.csv')
sns.FacetGrid(iris, hue='Species', size=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
ax = sns.boxplot(x='Species', y='PetalLengthCm', data=iris)
ax = sns.stripplot(x='Species', y='PetalLengthCm', data=iris, jitter=True, edgecolor='gray') | code |
332299/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
iris = pd.read_csv('../input/Iris.csv')
iris.plot(kind='scatter', x='SepalLengthCm', y='SepalWidthCm') | code |
332299/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
iris = pd.read_csv('../input/Iris.csv')
sns.FacetGrid(iris, hue='Species', size=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend() | code |
130017103/cell_9 | [
"text_html_output_4.png",
"text_html_output_2.png",
"text_html_output_1.png",
"text_html_output_3.png"
] | import pandas as pd
import warnings
train_clinical_all = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
proteins = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
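# pivot to one row per visit_id and one column per UniProt protein, values = summed NPX abundance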
proteins_features = pd.pivot_table(proteins, values='NPX', index='visit_id', columns='UniProt', aggfunc='sum')
train_clinical_all = train_clinical_all.merge(proteins_features, left_on='visit_id', right_index=True, how='left')
import warnings
warnings.filterwarnings('ignore')
train_clinical_all[proteins_features.columns] = train_clinical_all.groupby('patient_id')[proteins_features.columns].transform(lambda x: x.fillna(x.median()))
train_clinical_all['pred_month'] = train_clinical_all['visit_month']
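# build horizon targets: for each visit, attach the UPDRS scores recorded 6, 12 and 24 months later by shifting visit_month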
for plus_month in [6, 12, 24]:
train_shift = train_clinical_all[['patient_id', 'visit_month', 'pred_month', 'updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']].copy()
train_shift['visit_month'] -= plus_month
train_shift.rename(columns={f'updrs_{i}': f'updrs_{i}_plus_{plus_month}' for i in range(1, 5)}, inplace=True)
train_shift.rename(columns={'pred_month': f'pred_month_plus_{plus_month}'}, inplace=True)
train_clinical_all = train_clinical_all.merge(train_shift, how='left', on=['patient_id', 'visit_month'])
train_clinical_all.rename(columns={f'updrs_{i}': f'updrs_{i}_plus_0' for i in range(1, 5)}, inplace=True)
train_clinical_all.rename(columns={'pred_month': f'pred_month_plus_0'}, inplace=True)
train_clinical_all | code |
130017103/cell_19 | [
"text_html_output_4.png",
"text_html_output_2.png",
"text_html_output_1.png",
"text_html_output_3.png"
] | from scipy.optimize import minimize
from scipy.stats import mode
from tqdm.auto import tqdm
import numpy as np
import pandas as pd
import warnings
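# SMAPE computed on values shifted by +1, so zero-valued targets cannot blow up the denominator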
def smape_plus_1(y_true, y_pred):
y_true_plus_1 = y_true + 1
y_pred_plus_1 = y_pred + 1
metric = np.zeros(len(y_true_plus_1))
numerator = np.abs(y_true_plus_1 - y_pred_plus_1)
denominator = (np.abs(y_true_plus_1) + np.abs(y_pred_plus_1)) / 2
mask_not_zeros = (y_true_plus_1 != 0) | (y_pred_plus_1 != 0)
metric[mask_not_zeros] = numerator[mask_not_zeros] / denominator[mask_not_zeros]
return 100 * np.nanmean(metric)
train_clinical_all = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
proteins = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
proteins_features = pd.pivot_table(proteins, values='NPX', index='visit_id', columns='UniProt', aggfunc='sum')
train_clinical_all = train_clinical_all.merge(proteins_features, left_on='visit_id', right_index=True, how='left')
import warnings
warnings.filterwarnings('ignore')
train_clinical_all[proteins_features.columns] = train_clinical_all.groupby('patient_id')[proteins_features.columns].transform(lambda x: x.fillna(x.median()))
train_clinical_all['pred_month'] = train_clinical_all['visit_month']
for plus_month in [6, 12, 24]:
train_shift = train_clinical_all[['patient_id', 'visit_month', 'pred_month', 'updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']].copy()
train_shift['visit_month'] -= plus_month
train_shift.rename(columns={f'updrs_{i}': f'updrs_{i}_plus_{plus_month}' for i in range(1, 5)}, inplace=True)
train_shift.rename(columns={'pred_month': f'pred_month_plus_{plus_month}'}, inplace=True)
train_clinical_all = train_clinical_all.merge(train_shift, how='left', on=['patient_id', 'visit_month'])
train_clinical_all.rename(columns={f'updrs_{i}': f'updrs_{i}_plus_0' for i in range(1, 5)}, inplace=True)
train_clinical_all.rename(columns={'pred_month': f'pred_month_plus_0'}, inplace=True)
train_clinical_all
from scipy.stats import mode
fill_with_mode = lambda x: x.fillna(mode(x).mode[0])
def calculate_month_trend_predicitons(pred_month, trend):
if target == 'updrs_4':
pred_month = pred_month.clip(60, None)
return trend[0] + pred_month * trend[1]
target_to_trend = {'updrs_1': [5.394793062665313, 0.027091086167821344], 'updrs_2': [5.469498130092747, 0.02824188329658148], 'updrs_3': [21.182145576879183, 0.08897763331790556], 'updrs_4': [-4.434453480103724, 0.07531448585334258]}
def calculate_predicitons_protein(pred_month, protein_shift):
trend_pred_month = target_to_trend[target]
pred_month_trend = calculate_month_trend_predicitons(pred_month=pred_month, trend=trend_pred_month)
return np.round(pred_month_trend + protein_shift)
def function_to_minimize(x):
metric = smape_plus_1(y_true=y_true_array, y_pred=calculate_predicitons_protein(pred_month=pred_month_array, protein_shift=x[0]))
return metric
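# find_best_const: fit one additive shift on top of the global month trend by minimizing SMAPE with Powell's method over the pooled 0/6/12/24-month targets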
def find_best_const(train_clinical_all_filtered, target):
columns_with_target = [f'{target}_plus_{plus_month}' for plus_month in [0, 6, 12, 24]]
columns_with_pred_month = [f'pred_month_plus_{plus_month}' for plus_month in [0, 6, 12, 24]]
global y_true_array
global pred_month_array
global protein_array
y_true_array = train_clinical_all_filtered[columns_with_target].values.ravel()
pred_month_array = train_clinical_all_filtered[columns_with_pred_month].values.ravel()
result = minimize(fun=function_to_minimize, x0=[0.0], method='Powell').x[0]
return result
feature0 = 'O15240'
quantiles = [0, 0.05, 0.95, 1.0]
df_plot = []
for quantile_low, quantile_high in tqdm(zip(quantiles[:-1], quantiles[1:])):
item = {'quantile_low': quantile_low, 'quantile_high': quantile_high, 'quantile_middle': (quantile_low + quantile_high) / 2}
quantile_low_value = train_clinical_all[feature0].quantile(quantile_low)
quantile_high_value = train_clinical_all[feature0].quantile(quantile_high)
item['quantile_low_value'] = quantile_low_value
item['quantile_high_value'] = quantile_high_value
if quantile_high == 1:
quantile_high_value += 1e-05
train_clinical_all_filtered0 = train_clinical_all[(train_clinical_all[feature0] >= quantile_low_value) & (train_clinical_all[feature0] < quantile_high_value)]  # '<' matches the other two features and avoids double-counting rows on bin boundaries
for target in ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']:
item[f'{target}_shift'] = find_best_const(train_clinical_all_filtered0, target)
df_plot.append(item)
df_plot = pd.DataFrame(df_plot)
feature1 = 'O00533'
quantiles = [0, 0.05, 0.95, 1.0]
df_plot = []
for quantile_low, quantile_high in tqdm(zip(quantiles[:-1], quantiles[1:])):
item = {'quantile_low': quantile_low, 'quantile_high': quantile_high, 'quantile_middle': (quantile_low + quantile_high) / 2}
quantile_low_value = train_clinical_all[feature1].quantile(quantile_low)
quantile_high_value = train_clinical_all[feature1].quantile(quantile_high)
item['quantile_low_value'] = quantile_low_value
item['quantile_high_value'] = quantile_high_value
if quantile_high == 1:
quantile_high_value += 1e-05
train_clinical_all_filtered1 = train_clinical_all[(train_clinical_all[feature1] >= quantile_low_value) & (train_clinical_all[feature1] < quantile_high_value)]
for target in ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']:
item[f'{target}_shift'] = find_best_const(train_clinical_all_filtered1, target)
df_plot.append(item)
df_plot = pd.DataFrame(df_plot)
feature2 = 'O15394'
quantiles = [0, 0.05, 0.95, 1.0]
df_plot = []
for quantile_low, quantile_high in tqdm(zip(quantiles[:-1], quantiles[1:])):
item = {'quantile_low': quantile_low, 'quantile_high': quantile_high, 'quantile_middle': (quantile_low + quantile_high) / 2}
quantile_low_value = train_clinical_all[feature2].quantile(quantile_low)
quantile_high_value = train_clinical_all[feature2].quantile(quantile_high)
item['quantile_low_value'] = quantile_low_value
item['quantile_high_value'] = quantile_high_value
if quantile_high == 1:
quantile_high_value += 1e-05
train_clinical_all_filtered2 = train_clinical_all[(train_clinical_all[feature2] >= quantile_low_value) & (train_clinical_all[feature2] < quantile_high_value)]
for target in ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']:
item[f'{target}_shift'] = find_best_const(train_clinical_all_filtered2, target)
df_plot.append(item)
df_plot = pd.DataFrame(df_plot) | code |
130017103/cell_15 | [
"text_html_output_1.png"
] | from scipy.optimize import minimize
from scipy.stats import mode
from tqdm.auto import tqdm
import numpy as np
import pandas as pd
import warnings
def smape_plus_1(y_true, y_pred):
y_true_plus_1 = y_true + 1
y_pred_plus_1 = y_pred + 1
metric = np.zeros(len(y_true_plus_1))
numerator = np.abs(y_true_plus_1 - y_pred_plus_1)
denominator = (np.abs(y_true_plus_1) + np.abs(y_pred_plus_1)) / 2
mask_not_zeros = (y_true_plus_1 != 0) | (y_pred_plus_1 != 0)
metric[mask_not_zeros] = numerator[mask_not_zeros] / denominator[mask_not_zeros]
return 100 * np.nanmean(metric)
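# Shifting both arrays by +1 is the SMAPE+1 variant: it keeps the denominator positive
# even when the true UPDRS score is 0.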
train_clinical_all = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
proteins = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
proteins_features = pd.pivot_table(proteins, values='NPX', index='visit_id', columns='UniProt', aggfunc='sum')
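# One row per visit_id and one column per UniProt protein; NPX is summed when a protein
# has multiple rows for the same visit.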
train_clinical_all = train_clinical_all.merge(proteins_features, left_on='visit_id', right_index=True, how='left')
import warnings
warnings.filterwarnings('ignore')
train_clinical_all[proteins_features.columns] = train_clinical_all.groupby('patient_id')[proteins_features.columns].transform(lambda x: x.fillna(x.median()))
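# Impute missing protein values with each patient's own median NPX, so visits without a
# protein sample still carry patient-level features.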
train_clinical_all['pred_month'] = train_clinical_all['visit_month']
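# Build targets at +0/+6/+12/+24 months by self-merging: shifting visit_month back by
# plus_month aligns each visit with the UPDRS scores observed plus_month months later.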
for plus_month in [6, 12, 24]:
train_shift = train_clinical_all[['patient_id', 'visit_month', 'pred_month', 'updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']].copy()
train_shift['visit_month'] -= plus_month
train_shift.rename(columns={f'updrs_{i}': f'updrs_{i}_plus_{plus_month}' for i in range(1, 5)}, inplace=True)
train_shift.rename(columns={'pred_month': f'pred_month_plus_{plus_month}'}, inplace=True)
train_clinical_all = train_clinical_all.merge(train_shift, how='left', on=['patient_id', 'visit_month'])
train_clinical_all.rename(columns={f'updrs_{i}': f'updrs_{i}_plus_0' for i in range(1, 5)}, inplace=True)
train_clinical_all.rename(columns={'pred_month': 'pred_month_plus_0'}, inplace=True)
train_clinical_all
from scipy.stats import mode
fill_with_mode = lambda x: x.fillna(mode(x).mode[0])
def calculate_month_trend_predicitons(pred_month, trend):
if target == 'updrs_4':
pred_month = pred_month.clip(60, None)
return trend[0] + pred_month * trend[1]
target_to_trend = {'updrs_1': [5.394793062665313, 0.027091086167821344], 'updrs_2': [5.469498130092747, 0.02824188329658148], 'updrs_3': [21.182145576879183, 0.08897763331790556], 'updrs_4': [-4.434453480103724, 0.07531448585334258]}
def calculate_predicitons_protein(pred_month, protein_shift):
trend_pred_month = target_to_trend[target]
pred_month_trend = calculate_month_trend_predicitons(pred_month=pred_month, trend=trend_pred_month)
return np.round(pred_month_trend + protein_shift)
def function_to_minimize(x):
metric = smape_plus_1(y_true=y_true_array, y_pred=calculate_predicitons_protein(pred_month=pred_month_array, protein_shift=x[0]))
return metric
def find_best_const(train_clinical_all_filtered, target):
columns_with_target = [f'{target}_plus_{plus_month}' for plus_month in [0, 6, 12, 24]]
columns_with_pred_month = [f'pred_month_plus_{plus_month}' for plus_month in [0, 6, 12, 24]]
global y_true_array
global pred_month_array
global protein_array
y_true_array = train_clinical_all_filtered[columns_with_target].values.ravel()
pred_month_array = train_clinical_all_filtered[columns_with_pred_month].values.ravel()
result = minimize(fun=function_to_minimize, x0=[0.0], method='Powell').x[0]
return result
feature0 = 'O15240'
quantiles = [0, 0.05, 0.95, 1.0]
df_plot = []
for quantile_low, quantile_high in tqdm(zip(quantiles[:-1], quantiles[1:])):
item = {'quantile_low': quantile_low, 'quantile_high': quantile_high, 'quantile_middle': (quantile_low + quantile_high) / 2}
quantile_low_value = train_clinical_all[feature0].quantile(quantile_low)
quantile_high_value = train_clinical_all[feature0].quantile(quantile_high)
item['quantile_low_value'] = quantile_low_value
item['quantile_high_value'] = quantile_high_value
if quantile_high == 1:
quantile_high_value += 1e-05
train_clinical_all_filtered0 = train_clinical_all[(train_clinical_all[feature0] >= quantile_low_value) & (train_clinical_all[feature0] < quantile_high_value)]
for target in ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']:
item[f'{target}_shift'] = find_best_const(train_clinical_all_filtered0, target)
df_plot.append(item)
df_plot = pd.DataFrame(df_plot) | code |
130017103/cell_17 | [
"text_html_output_4.png",
"text_html_output_2.png",
"text_html_output_5.png",
"text_html_output_3.png"
] | from scipy.optimize import minimize
from scipy.stats import mode
from tqdm.auto import tqdm
import numpy as np
import pandas as pd
import warnings
def smape_plus_1(y_true, y_pred):
y_true_plus_1 = y_true + 1
y_pred_plus_1 = y_pred + 1
metric = np.zeros(len(y_true_plus_1))
numerator = np.abs(y_true_plus_1 - y_pred_plus_1)
denominator = (np.abs(y_true_plus_1) + np.abs(y_pred_plus_1)) / 2
mask_not_zeros = (y_true_plus_1 != 0) | (y_pred_plus_1 != 0)
metric[mask_not_zeros] = numerator[mask_not_zeros] / denominator[mask_not_zeros]
return 100 * np.nanmean(metric)
train_clinical_all = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
proteins = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
proteins_features = pd.pivot_table(proteins, values='NPX', index='visit_id', columns='UniProt', aggfunc='sum')
train_clinical_all = train_clinical_all.merge(proteins_features, left_on='visit_id', right_index=True, how='left')
import warnings
warnings.filterwarnings('ignore')
train_clinical_all[proteins_features.columns] = train_clinical_all.groupby('patient_id')[proteins_features.columns].transform(lambda x: x.fillna(x.median()))
train_clinical_all['pred_month'] = train_clinical_all['visit_month']
for plus_month in [6, 12, 24]:
train_shift = train_clinical_all[['patient_id', 'visit_month', 'pred_month', 'updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']].copy()
train_shift['visit_month'] -= plus_month
train_shift.rename(columns={f'updrs_{i}': f'updrs_{i}_plus_{plus_month}' for i in range(1, 5)}, inplace=True)
train_shift.rename(columns={'pred_month': f'pred_month_plus_{plus_month}'}, inplace=True)
train_clinical_all = train_clinical_all.merge(train_shift, how='left', on=['patient_id', 'visit_month'])
train_clinical_all.rename(columns={f'updrs_{i}': f'updrs_{i}_plus_0' for i in range(1, 5)}, inplace=True)
train_clinical_all.rename(columns={'pred_month': 'pred_month_plus_0'}, inplace=True)
train_clinical_all
from scipy.stats import mode
fill_with_mode = lambda x: x.fillna(mode(x).mode[0])
def calculate_month_trend_predicitons(pred_month, trend):
if target == 'updrs_4':
pred_month = pred_month.clip(60, None)
return trend[0] + pred_month * trend[1]
target_to_trend = {'updrs_1': [5.394793062665313, 0.027091086167821344], 'updrs_2': [5.469498130092747, 0.02824188329658148], 'updrs_3': [21.182145576879183, 0.08897763331790556], 'updrs_4': [-4.434453480103724, 0.07531448585334258]}
def calculate_predicitons_protein(pred_month, protein_shift):
trend_pred_month = target_to_trend[target]
pred_month_trend = calculate_month_trend_predicitons(pred_month=pred_month, trend=trend_pred_month)
return np.round(pred_month_trend + protein_shift)
def function_to_minimize(x):
metric = smape_plus_1(y_true=y_true_array, y_pred=calculate_predicitons_protein(pred_month=pred_month_array, protein_shift=x[0]))
return metric
def find_best_const(train_clinical_all_filtered, target):
columns_with_target = [f'{target}_plus_{plus_month}' for plus_month in [0, 6, 12, 24]]
columns_with_pred_month = [f'pred_month_plus_{plus_month}' for plus_month in [0, 6, 12, 24]]
global y_true_array
global pred_month_array
global protein_array
y_true_array = train_clinical_all_filtered[columns_with_target].values.ravel()
pred_month_array = train_clinical_all_filtered[columns_with_pred_month].values.ravel()
result = minimize(fun=function_to_minimize, x0=[0.0], method='Powell').x[0]
return result
feature0 = 'O15240'
quantiles = [0, 0.05, 0.95, 1.0]
df_plot = []
for quantile_low, quantile_high in tqdm(zip(quantiles[:-1], quantiles[1:])):
item = {'quantile_low': quantile_low, 'quantile_high': quantile_high, 'quantile_middle': (quantile_low + quantile_high) / 2}
quantile_low_value = train_clinical_all[feature0].quantile(quantile_low)
quantile_high_value = train_clinical_all[feature0].quantile(quantile_high)
item['quantile_low_value'] = quantile_low_value
item['quantile_high_value'] = quantile_high_value
if quantile_high == 1:
quantile_high_value += 1e-05
train_clinical_all_filtered0 = train_clinical_all[(train_clinical_all[feature0] >= quantile_low_value) & (train_clinical_all[feature0] < quantile_high_value)]
for target in ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']:
item[f'{target}_shift'] = find_best_const(train_clinical_all_filtered0, target)
df_plot.append(item)
df_plot = pd.DataFrame(df_plot)
feature1 = 'O00533'
quantiles = [0, 0.05, 0.95, 1.0]
df_plot = []
for quantile_low, quantile_high in tqdm(zip(quantiles[:-1], quantiles[1:])):
item = {'quantile_low': quantile_low, 'quantile_high': quantile_high, 'quantile_middle': (quantile_low + quantile_high) / 2}
quantile_low_value = train_clinical_all[feature1].quantile(quantile_low)
quantile_high_value = train_clinical_all[feature1].quantile(quantile_high)
item['quantile_low_value'] = quantile_low_value
item['quantile_high_value'] = quantile_high_value
if quantile_high == 1:
quantile_high_value += 1e-05
train_clinical_all_filtered1 = train_clinical_all[(train_clinical_all[feature1] >= quantile_low_value) & (train_clinical_all[feature1] < quantile_high_value)]
for target in ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']:
item[f'{target}_shift'] = find_best_const(train_clinical_all_filtered1, target)
df_plot.append(item)
df_plot = pd.DataFrame(df_plot) | code |
128020295/cell_21 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
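# League-wide total of three-pointers made per season.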
seasons_stats.sort_values('3P', ascending=False)[:25] | code |
128020295/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players['height'].corr(players['weight']) | code |
128020295/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players[players['height'] == players['height'].min()] | code |
128020295/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
seasons_stats.sort_values('3P', ascending=False)[:25]
seasons_stats[seasons_stats.GS < 41].sort_values('MP', ascending=False)[:25]
seasons_stats[(seasons_stats.GS > 50) & (seasons_stats.MP < 1000)] | code
128020295/cell_4 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum() | code |
128020295/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
seasons_stats.sort_values('3P', ascending=False)[:25]
seasons_stats['GS'].corr(seasons_stats['MP']) | code |
128020295/cell_30 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
seasons_stats.sort_values('3P', ascending=False)[:25]
seasons_stats[seasons_stats.GS < 41].sort_values('MP', ascending=False)[:25]
seasons_stats[(seasons_stats.GS > 50) & (seasons_stats.MP < 1000)]
real_players = seasons_stats[seasons_stats.MP > 1000]
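# Restrict to players with 1000+ minutes so per-position averages reflect regular
# rotation players rather than short stints.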
real_players.groupby('Pos').mean(numeric_only=True).sort_values('PTS', ascending=False)['PTS'].plot.pie() | code
128020295/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
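# League-wide 3P totals per season, printed and plotted below.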
print(triples)
triples.plot(figsize=(12, 8)) | code
128020295/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players.describe() | code |
128020295/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
seasons_stats.sort_values('3P', ascending=False)[:25]
seasons_stats[seasons_stats.GS < 41].sort_values('MP', ascending=False)[:25]
seasons_stats[(seasons_stats.GS > 50) & (seasons_stats.MP < 1000)]
real_players = seasons_stats[seasons_stats.MP > 1000]
real_players.groupby('Pos').mean(numeric_only=True).sort_values('AST', ascending=False)['AST'].plot.pie() | code
128020295/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
seasons_stats.sort_values('3P', ascending=False)[:25]
seasons_stats[seasons_stats.GS < 41].sort_values('MP', ascending=False)[:25]
seasons_stats[(seasons_stats.GS > 50) & (seasons_stats.MP < 1000)]
real_players = seasons_stats[seasons_stats.MP > 1000]
real_players.groupby('Pos').mean(numeric_only=True).sort_values('TRB', ascending=False)['TRB'].plot.pie() | code
128020295/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players[players['weight'] == players['weight'].min()] | code |
128020295/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
seasons_stats['3P'].corr(seasons_stats['3PA']) | code |