path: stringlengths (13 to 17)
screenshot_names: sequencelengths (1 to 873)
code: stringlengths (0 to 40.4k)
cell_type: stringclasses (1 value)
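A minimal sketch of how records with this schema could be loaded and sanity-checked, assuming the dump is exported as a JSON Lines file named cells.jsonl (the file name and field layout are assumptions, not part of the original dump):

import json
import pandas as pd

# Hypothetical file name; the dump itself does not say where the records live.
DUMP_PATH = 'cells.jsonl'

# Each record is assumed to carry the four fields described above:
# path (str), screenshot_names (list of str), code (str), cell_type (str).
records = []
with open(DUMP_PATH) as fh:
    for line in fh:
        records.append(json.loads(line))

df = pd.DataFrame(records, columns=['path', 'screenshot_names', 'code', 'cell_type'])

# Basic sanity checks against the schema ranges listed above.
assert df['cell_type'].nunique() == 1                # stringclasses: 1 value ("code")
assert df['path'].str.len().between(13, 17).all()    # stringlengths: 13 to 17
print(df.head())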
105210042/cell_22
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
df_v = pd.DataFrame(data['symboling'].value_counts()).reset_index().rename(columns={'index': 'symboling', 'symboling': 'count'})
sns.set_palette('bright')
fuel = data['fueltype'].value_counts()
labels = fuel.index
sizes = fuel.values
plt.figure(figsize=(12, 9))
colors = sns.color_palette('Paired')
plt.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, colors=colors, startangle=90)
plt.show()
code
105210042/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
sns.distplot(data['price'], kde=True)
code
105210042/cell_27
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression

Lr = LinearRegression()
Lr.fit(X_train, y_train)
Lr.score(X_test, y_test)
code
105210042/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
sns.boxplot(x='fueltype', y='price', data=data, palette='Pastel2')
code
105210042/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
data.isnull().sum()
code
122258576/cell_4
[ "text_plain_output_1.png" ]
import sys

sys.path.append('/kaggle/input/neutrinofiles')
sys.path
code
122258576/cell_3
[ "text_plain_output_1.png" ]
# Move software to working disk
import time
start = time.time()
!rm -r software
!scp -r /kaggle/input/graphnet-and-dependencies/software .
print(f'{time.time()-start:8.3f} copy')

# Install dependencies
!pip install /kaggle/working/software/dependencies/torch-1.11.0+cu115-cp37-cp37m-linux_x86_64.whl
!pip install /kaggle/working/software/dependencies/torch_cluster-1.6.0-cp37-cp37m-linux_x86_64.whl
!pip install /kaggle/working/software/dependencies/torch_scatter-2.0.9-cp37-cp37m-linux_x86_64.whl
!pip install /kaggle/working/software/dependencies/torch_sparse-0.6.13-cp37-cp37m-linux_x86_64.whl
!pip install /kaggle/working/software/dependencies/torch_geometric-2.0.4.tar.gz
print(f'{time.time()-start:8.3f} install')
code
122258576/cell_5
[ "text_plain_output_1.png" ]
import torch
import torch_geometric as geometric
import dogtrain
import dynotrain

torch.__version__
code
1003423/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
gender_submission = pd.read_csv('gender_submission.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)
code
1003423/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
gender_submission = pd.read_csv('gender_submission.csv')
code
105201657/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.drop('id', axis=1, inplace=True)
df.gender.value_counts()
df = df[df.gender != 'Other']
df.age.value_counts().iloc[-15:]
df = df[df.age > 18]
df.smoking_status.value_counts()
df.Residence_type.value_counts()
df[df.bmi.isna()]
df = df[df.bmi > 15]
df = df[df.bmi < 70]
corr = df.corr()
df.work_type.value_counts()
fig, ax = plt.subplots(2, 2, figsize=(12, 12))
sns.countplot(ax=ax[0, 0], data=df[df.work_type == 'Private'], x='stroke')
sns.countplot(ax=ax[0, 1], data=df[df.work_type == 'Govt_job'], x='stroke')
sns.countplot(ax=ax[1, 0], data=df[df.work_type == 'Self-employed'], x='stroke')
sns.countplot(ax=ax[1, 1], data=df[df.work_type == 'Never_worked'], x='stroke')
plt.show()
code
105201657/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.drop('id', axis=1, inplace=True)
df.gender.value_counts()
df = df[df.gender != 'Other']
df.describe()
code
105201657/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.head()
code
105201657/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.drop('id', axis=1, inplace=True)
df.gender.value_counts()
df = df[df.gender != 'Other']
df.age.value_counts().iloc[-15:]
df = df[df.age > 18]
df.smoking_status.value_counts()
df.Residence_type.value_counts()
df[df.bmi.isna()]
df = df[df.bmi > 15]
df = df[df.bmi < 70]
corr = df.corr()
df.work_type.value_counts()
code
105201657/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.drop('id', axis=1, inplace=True)
df.gender.value_counts()
code
105201657/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.drop('id', axis=1, inplace=True)
df.gender.value_counts()
df = df[df.gender != 'Other']
df.age.value_counts().iloc[-15:]
df = df[df.age > 18]
df.smoking_status.value_counts()
df.Residence_type.value_counts()
df[df.bmi.isna()]
df = df[df.bmi > 15]
df = df[df.bmi < 70]
corr = df.corr()
sns.heatmap(corr, annot=True, linewidths=0.5)
plt.show()
code
105201657/cell_8
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.drop('id', axis=1, inplace=True)
df.gender.value_counts()
df = df[df.gender != 'Other']
df.info()
code
105201657/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.drop('id', axis=1, inplace=True)
df.gender.value_counts()
df = df[df.gender != 'Other']
df.age.value_counts().iloc[-15:]
df = df[df.age > 18]
df.smoking_status.value_counts()
df.Residence_type.value_counts()
code
105201657/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.drop('id', axis=1, inplace=True)
df.gender.value_counts()
df = df[df.gender != 'Other']
df.age.value_counts().iloc[-15:]
df = df[df.age > 18]
df.smoking_status.value_counts()
df.Residence_type.value_counts()
df[df.bmi.isna()]
code
105201657/cell_3
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

import matplotlib.pyplot as plt
import seaborn as sns

plt.style.use('ggplot')
code
105201657/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.drop('id', axis=1, inplace=True)
df.gender.value_counts()
df = df[df.gender != 'Other']
df.age.value_counts().iloc[-15:]
df = df[df.age > 18]
df.smoking_status.value_counts()
code
105201657/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
df.drop('id', axis=1, inplace=True)
df.gender.value_counts()
df = df[df.gender != 'Other']
df.age.value_counts().iloc[-15:]
code
88092710/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from qlib.data.dataset.handler import DataHandlerLP from qlib.data.dataset.loader import StaticDataLoader from sklearn.model_selection import GroupKFold from typing import Union import gc import numpy as np import pandas as pd def read_data(path: Union[str, pd.DataFrame]='../input/train.pkl', proc_type='train'): """ Read data and turn it into Qlib's format""" df = pd.read_pickle(path) if isinstance(path, str) else path if proc_type == 'test': df['time_id'] = df['row_id'].apply(lambda x: int(x.split('_')[0])) del test_df['row_id'] else: assert proc_type == 'train' df = df.set_index(['time_id', 'investment_id']) df.columns = pd.MultiIndex.from_tuples([('label' if col == 'target' else 'feature', col) for col in df.columns]) df.index.names = ['datetime', 'instrument'] df = df.astype(np.float32) return df data_df = read_data('../input/ubiquant-market-prediction-half-precision-pickle/train.pkl') dh = DataHandlerLP(data_loader=StaticDataLoader(data_df), drop_raw=True) del data_df del dh.data_loader def split_kfold(idx, fold=5): kfold = GroupKFold(n_splits=fold) cv_index = [] for fold_id, (train_idx, valid_idx) in enumerate(kfold.split(idx, groups=idx.get_level_values('datetime'))): all_seg = {'train': idx[train_idx], 'valid': idx[valid_idx]} cv_index.append(all_seg) return cv_index idx = dh.fetch().index cv_index = split_kfold(idx) del idx gc.collect()
code
88092710/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from qlib.data.dataset.handler import DataHandlerLP
from qlib.data.dataset.loader import StaticDataLoader
from typing import Union
import numpy as np
import pandas as pd


def read_data(path: Union[str, pd.DataFrame]='../input/train.pkl', proc_type='train'):
    """ Read data and turn it into Qlib's format"""
    df = pd.read_pickle(path) if isinstance(path, str) else path
    if proc_type == 'test':
        df['time_id'] = df['row_id'].apply(lambda x: int(x.split('_')[0]))
        del df['row_id']
    else:
        assert proc_type == 'train'
    df = df.set_index(['time_id', 'investment_id'])
    df.columns = pd.MultiIndex.from_tuples([('label' if col == 'target' else 'feature', col) for col in df.columns])
    df.index.names = ['datetime', 'instrument']
    df = df.astype(np.float32)
    return df


data_df = read_data('../input/ubiquant-market-prediction-half-precision-pickle/train.pkl')
dh = DataHandlerLP(data_loader=StaticDataLoader(data_df), drop_raw=True)
del data_df
del dh.data_loader
code
88092710/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from qlib.contrib.model.pytorch_nn import DNNModelPytorch from qlib.data.dataset import DatasetH from qlib.data.dataset.handler import DataHandlerLP from qlib.data.dataset.loader import StaticDataLoader from qlib.workflow import R from sklearn.model_selection import GroupKFold from typing import Union import gc import numpy as np import pandas as pd def read_data(path: Union[str, pd.DataFrame]='../input/train.pkl', proc_type='train'): """ Read data and turn it into Qlib's format""" df = pd.read_pickle(path) if isinstance(path, str) else path if proc_type == 'test': df['time_id'] = df['row_id'].apply(lambda x: int(x.split('_')[0])) del test_df['row_id'] else: assert proc_type == 'train' df = df.set_index(['time_id', 'investment_id']) df.columns = pd.MultiIndex.from_tuples([('label' if col == 'target' else 'feature', col) for col in df.columns]) df.index.names = ['datetime', 'instrument'] df = df.astype(np.float32) return df data_df = read_data('../input/ubiquant-market-prediction-half-precision-pickle/train.pkl') dh = DataHandlerLP(data_loader=StaticDataLoader(data_df), drop_raw=True) del data_df del dh.data_loader def split_kfold(idx, fold=5): kfold = GroupKFold(n_splits=fold) cv_index = [] for fold_id, (train_idx, valid_idx) in enumerate(kfold.split(idx, groups=idx.get_level_values('datetime'))): all_seg = {'train': idx[train_idx], 'valid': idx[valid_idx]} cv_index.append(all_seg) return cv_index idx = dh.fetch().index cv_index = split_kfold(idx) del idx gc.collect() R.start_exp() kwargs = {'lr': 0.002, 'optimizer': 'adam', 'max_steps': 8000, 'batch_size': 8192, 'pt_model_kwargs': {'input_dim': 300, 'layers': (256,)}, 'scheduler': None} R.log_params(**kwargs) cv_models = [] for seg in cv_index: ds = DatasetH(handler=dh, segments=seg) m = DNNModelPytorch(**kwargs) m.fit(ds) cv_models.append(m) break R.save_objects(**{'cv_models.pkl': cv_models, 'handler.pkl': dh}) R.end_exp()
code
88092710/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from qlib.contrib.model.pytorch_nn import DNNModelPytorch from qlib.data.dataset import DatasetH from qlib.data.dataset.handler import DataHandlerLP from qlib.data.dataset.loader import StaticDataLoader from qlib.workflow import R from sklearn.model_selection import GroupKFold from typing import Union import gc import numpy as np import pandas as pd import ubiquant def read_data(path: Union[str, pd.DataFrame]='../input/train.pkl', proc_type='train'): """ Read data and turn it into Qlib's format""" df = pd.read_pickle(path) if isinstance(path, str) else path if proc_type == 'test': df['time_id'] = df['row_id'].apply(lambda x: int(x.split('_')[0])) del test_df['row_id'] else: assert proc_type == 'train' df = df.set_index(['time_id', 'investment_id']) df.columns = pd.MultiIndex.from_tuples([('label' if col == 'target' else 'feature', col) for col in df.columns]) df.index.names = ['datetime', 'instrument'] df = df.astype(np.float32) return df data_df = read_data('../input/ubiquant-market-prediction-half-precision-pickle/train.pkl') dh = DataHandlerLP(data_loader=StaticDataLoader(data_df), drop_raw=True) del data_df del dh.data_loader def split_kfold(idx, fold=5): kfold = GroupKFold(n_splits=fold) cv_index = [] for fold_id, (train_idx, valid_idx) in enumerate(kfold.split(idx, groups=idx.get_level_values('datetime'))): all_seg = {'train': idx[train_idx], 'valid': idx[valid_idx]} cv_index.append(all_seg) return cv_index idx = dh.fetch().index cv_index = split_kfold(idx) del idx gc.collect() R.start_exp() kwargs = {'lr': 0.002, 'optimizer': 'adam', 'max_steps': 8000, 'batch_size': 8192, 'pt_model_kwargs': {'input_dim': 300, 'layers': (256,)}, 'scheduler': None} R.log_params(**kwargs) cv_models = [] for seg in cv_index: ds = DatasetH(handler=dh, segments=seg) m = DNNModelPytorch(**kwargs) m.fit(ds) cv_models.append(m) break R.save_objects(**{'cv_models.pkl': cv_models, 'handler.pkl': dh}) R.end_exp() import ubiquant env = ubiquant.make_env() iter_test = env.iter_test() def get_avg(preds): return sum(preds) / len(preds) for test_df, sample_prediction_df in iter_test: dh.data_loader = StaticDataLoader(read_data(test_df, proc_type='test')) dh.setup_data(init_type=dh.IT_LS) ds = DatasetH(handler=dh, segments={'test': slice(None)}) preds = [] preds_nn = [] for m in cv_models: preds_nn.append(m.predict(ds).values) preds.append(get_avg(preds_nn)) sample_prediction_df.loc[:, 'target'] = get_avg(preds) env.predict(sample_prediction_df)
code
88092710/cell_5
[ "text_plain_output_1.png" ]
import gc
from typing import Union

import qlib
from qlib.workflow import R
import numpy as np
import pandas as pd
from qlib.data.dataset.handler import DataHandlerLP
from qlib.data.dataset.loader import StaticDataLoader
from qlib.data.dataset import DatasetH
from qlib.contrib.model.pytorch_nn import DNNModelPytorch
from sklearn.model_selection import GroupKFold

qlib.init()
code
16117153/cell_25
[ "text_plain_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T big.isna().sum() big.dropna(subset=['Value'], inplace=True) big.Value.fillna(big.Value.median(), inplace=True) big.duplicated().sum() big = big.drop_duplicates() big.duplicated().sum() big.loc[big['Value'].notnull(), 'Value'].apply(int) big.isna().sum()
code
16117153/cell_4
[ "text_html_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

warnings.filterwarnings('ignore')

big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
big.columns
big.shape
code
16117153/cell_30
[ "text_plain_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T big.isna().sum() big.dropna(subset=['Value'], inplace=True) big.Value.fillna(big.Value.median(), inplace=True) big.duplicated().sum() big = big.drop_duplicates() big.duplicated().sum() big.loc[big['Value'].notnull(), 'Value'].apply(int) big.isna().sum() big.drop(columns=['Methods', 'Notes'], inplace=True) groupvalues = big.groupby('Indicator Category').sum().reset_index() groupvalues.head()
code
16117153/cell_20
[ "text_html_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T big.isna().sum() big.dropna(subset=['Value'], inplace=True) big.Value.fillna(big.Value.median(), inplace=True) big.duplicated().sum() big = big.drop_duplicates() big.duplicated().sum()
code
16117153/cell_6
[ "text_plain_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

warnings.filterwarnings('ignore')

big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
big.columns
big.shape
big.info()
code
16117153/cell_2
[ "text_plain_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

warnings.filterwarnings('ignore')

print(os.listdir('../input'))
big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
code
16117153/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
16117153/cell_7
[ "text_html_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T
code
16117153/cell_18
[ "text_plain_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T big.isna().sum() big.dropna(subset=['Value'], inplace=True) big.Value.fillna(big.Value.median(), inplace=True) big.duplicated().sum()
code
16117153/cell_28
[ "text_plain_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T big.isna().sum() big.dropna(subset=['Value'], inplace=True) big.Value.fillna(big.Value.median(), inplace=True) big.duplicated().sum() big = big.drop_duplicates() big.duplicated().sum() big.loc[big['Value'].notnull(), 'Value'].apply(int) big.isna().sum() big.drop(columns=['Methods', 'Notes'], inplace=True) big['Indicator Category'].value_counts()
code
16117153/cell_8
[ "image_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T big.tail(10)
code
16117153/cell_3
[ "text_plain_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

warnings.filterwarnings('ignore')

big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
big.columns
code
16117153/cell_22
[ "text_html_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T big.isna().sum() big.dropna(subset=['Value'], inplace=True) big.Value.fillna(big.Value.median(), inplace=True) big.duplicated().sum() big = big.drop_duplicates() big.duplicated().sum() big.loc[big['Value'].notnull(), 'Value'].apply(int)
code
16117153/cell_10
[ "text_plain_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T big.isna().sum()
code
16117153/cell_27
[ "text_html_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T big.isna().sum() big.dropna(subset=['Value'], inplace=True) big.Value.fillna(big.Value.median(), inplace=True) big.duplicated().sum() big = big.drop_duplicates() big.duplicated().sum() big.loc[big['Value'].notnull(), 'Value'].apply(int) big.isna().sum() big.drop(columns=['Methods', 'Notes'], inplace=True) big.head(2)
code
16117153/cell_12
[ "text_plain_output_1.png" ]
import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import numpy as np import pandas as pd import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') import os big = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') big.columns big.shape big.head(10).T big.isna().sum() big.describe(include='all')
code
33114647/cell_13
[ "text_html_output_1.png" ]
import numpy as np # linear algebra import operator import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_x = pd.read_csv('/kaggle/input/iris/Iris.csv') def euclidian_distance(row1, row2, length): distance = 0 for x in range(length): distance += np.square(row1[x] - row2[x]) return np.sqrt(distance) def get_neighbors(dataset, sorted_distances, k): neighbors = [] for x in range(k): neighbors.append(sorted_distances[x][0]) return neighbors def get_sorted_distances(dataset, testInstance): distances = {} for x in range(len(dataset)): dist = euclidian_distance(testInstance, dataset.iloc[x], testInstance.shape[1]) distances[x] = dist[0] sorted_distances = sorted(distances.items(), key=operator.itemgetter(1)) return sorted_distances def get_sorted_votes(dataset, neighbors): class_votes = {} for x in range(len(neighbors)): response = dataset.iloc[neighbors[x]][-1] if response in class_votes: class_votes[response] += 1 else: class_votes[response] = 1 sorted_votes = sorted(class_votes.items(), key=operator.itemgetter(1), reverse=True) return sorted_votes def knn(dataset, testInstance, k): sorted_distances = get_sorted_distances(dataset, testInstance) neighbors = get_neighbors(dataset, sorted_distances, k) sorted_votes = get_sorted_votes(dataset, neighbors) return (sorted_votes[0][0], neighbors) iris = train_x[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm', 'Species']] row = pd.DataFrame([list(iris.iloc[0].to_numpy()[0:-1])]) knn(iris, row, 3)
code
33114647/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_x = pd.read_csv('/kaggle/input/iris/Iris.csv')
train_x.head()
code
33114647/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_x = pd.read_csv('/kaggle/input/iris/Iris.csv')
iris = train_x[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm', 'Species']]
iris.head()
code
33114647/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33114647/cell_15
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra import operator import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_x = pd.read_csv('/kaggle/input/iris/Iris.csv') def euclidian_distance(row1, row2, length): distance = 0 for x in range(length): distance += np.square(row1[x] - row2[x]) return np.sqrt(distance) def get_neighbors(dataset, sorted_distances, k): neighbors = [] for x in range(k): neighbors.append(sorted_distances[x][0]) return neighbors def get_sorted_distances(dataset, testInstance): distances = {} for x in range(len(dataset)): dist = euclidian_distance(testInstance, dataset.iloc[x], testInstance.shape[1]) distances[x] = dist[0] sorted_distances = sorted(distances.items(), key=operator.itemgetter(1)) return sorted_distances def get_sorted_votes(dataset, neighbors): class_votes = {} for x in range(len(neighbors)): response = dataset.iloc[neighbors[x]][-1] if response in class_votes: class_votes[response] += 1 else: class_votes[response] = 1 sorted_votes = sorted(class_votes.items(), key=operator.itemgetter(1), reverse=True) return sorted_votes def knn(dataset, testInstance, k): sorted_distances = get_sorted_distances(dataset, testInstance) neighbors = get_neighbors(dataset, sorted_distances, k) sorted_votes = get_sorted_votes(dataset, neighbors) return (sorted_votes[0][0], neighbors) iris = train_x[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm', 'Species']] row = pd.DataFrame([list(iris.iloc[0].to_numpy()[0:-1])]) result = [] for i in range(len(iris)): row = pd.DataFrame([list(iris.iloc[i].to_numpy()[0:-1])]) result.append(knn(iris, row, 3)) result
code
72070216/cell_21
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()
data = pd.read_csv('../input/water-potability/water_potability.csv')
data.isnull().sum()[data.isnull().sum() > 0]

fig = plt.figure(figsize=(18, 16))
for index, col in enumerate(data.drop('Potability', axis=1).columns):
    plt.subplot(5, 2, index + 1)
    sns.distplot(data.drop('Potability', axis=1).loc[:, col].dropna(), kde=False)
fig.tight_layout(pad=1.0)

fig = plt.figure(figsize=(14, 15))
for index, col in enumerate(data.drop('Potability', axis=1).columns):
    plt.subplot(5, 2, index + 1)
    sns.boxplot(y=col, data=data.drop('Potability', axis=1).dropna())
fig.tight_layout(pad=1.0)

fig = plt.figure(figsize=(10, 10))
sns.heatmap(data.corr(), annot=True, cmap='gray')
data.isnull().sum()[data.isnull().sum() > 0]
code
72070216/cell_13
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()
data = pd.read_csv('../input/water-potability/water_potability.csv')
data.isnull().sum()[data.isnull().sum() > 0]

fig = plt.figure(figsize=(18, 16))
for index, col in enumerate(data.drop('Potability', axis=1).columns):
    plt.subplot(5, 2, index + 1)
    sns.distplot(data.drop('Potability', axis=1).loc[:, col].dropna(), kde=False)
fig.tight_layout(pad=1.0)
code
72070216/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/water-potability/water_potability.csv')
data.isnull().sum()[data.isnull().sum() > 0]
code
72070216/cell_34
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) svc = SVC(probability=True) cv = cross_val_score(svc, X_train, y_train, cv=5) print(cv) print(cv.mean())
code
72070216/cell_30
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

gnb = GaussianNB()
cv = cross_val_score(gnb, X_train, y_train, cv=5)
print(cv)
print(cv.mean())
code
72070216/cell_33
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) print(cv) print(cv.mean())
code
72070216/cell_44
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) svc = SVC(probability=True) cv = cross_val_score(svc, X_train, y_train, cv=5) def performance(classifier, model_name): pass svc = SVC(probability=True) param_grid = tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.1, 0.5, 1, 2, 5, 10], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['poly'], 'degree': [2, 3, 4, 5], 'C': [0.1, 1, 10, 100, 1000]}] clf_svc = RandomizedSearchCV(svc, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_svc = clf_svc.fit(X_train, y_train) performance(best_clf_svc, 'SVC')
code
72070216/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/water-potability/water_potability.csv')
data.head()
code
72070216/cell_40
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import VotingClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from xgboost import XGBClassifier from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) svc = SVC(probability=True) cv = cross_val_score(svc, X_train, y_train, cv=5) from xgboost import XGBClassifier xgb = XGBClassifier(random_state=1) cv = cross_val_score(xgb, X_train, y_train, cv=5) from sklearn.ensemble import VotingClassifier voting_clf = VotingClassifier(estimators=[('lr', lr), ('knn', knn), ('rf', rf), ('gnb', gnb), ('svc', svc), ('xgb', xgb)], voting='soft') cv = cross_val_score(voting_clf, X_train, y_train, cv=5) from sklearn.metrics import accuracy_score voting_clf.fit(X_train, y_train) y_pred_vc_soft = voting_clf.predict(X_test).astype(int) accuracy_score(y_pred_vc_soft, y_test) voting_clf = VotingClassifier(estimators=[('rf', rf), ('svc', svc), ('xgb', xgb)], voting='soft') cv = cross_val_score(voting_clf, X_train, y_train, cv=5) print(cv) print(cv.mean())
code
72070216/cell_48
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from xgboost import XGBClassifier from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) svc = SVC(probability=True) cv = cross_val_score(svc, X_train, y_train, cv=5) from xgboost import XGBClassifier xgb = XGBClassifier(random_state=1) cv = cross_val_score(xgb, X_train, y_train, cv=5) def performance(classifier, model_name): pass svc = SVC(probability=True) param_grid = tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.1, 0.5, 1, 2, 5, 10], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['poly'], 'degree': [2, 3, 4, 5], 'C': [0.1, 1, 10, 100, 1000]}] clf_svc = RandomizedSearchCV(svc, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_svc = clf_svc.fit(X_train, y_train) performance(best_clf_svc, 'SVC') best_svc = best_clf_svc.best_estimator_.fit(X_train, y_train) y_pred = best_svc.predict(X_test) accuracy_score(y_pred, y_test) xgb = XGBClassifier(random_state=42) param_grid = {'n_estimators': [450, 500, 550], 'colsample_bytree': [0.75, 0.8, 0.85], 'max_depth': [None], 'reg_alpha': [1], 'reg_lambda': [2, 5, 10], 'subsample': [0.55, 0.6, 0.65], 'learning_rate': [0.5], 'gamma': [0.5, 1, 2], 'min_child_weight': [0.01], 'sampling_method': ['uniform']} clf_xgb = RandomizedSearchCV(xgb, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_xgb = clf_xgb.fit(X_train, y_train) performance(best_clf_xgb, 'XGB') best_xgb = best_clf_xgb.best_estimator_.fit(X_train, y_train) y_pred = best_xgb.predict(X_test) accuracy_score(y_pred, y_test)
code
72070216/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/water-potability/water_potability.csv')
data.isnull().sum()[data.isnull().sum() > 0]
data['Potability'].value_counts()
code
72070216/cell_50
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from xgboost import XGBClassifier from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) svc = SVC(probability=True) cv = cross_val_score(svc, X_train, y_train, cv=5) from xgboost import XGBClassifier xgb = XGBClassifier(random_state=1) cv = cross_val_score(xgb, X_train, y_train, cv=5) def performance(classifier, model_name): pass svc = SVC(probability=True) param_grid = tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.1, 0.5, 1, 2, 5, 10], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['poly'], 'degree': [2, 3, 4, 5], 'C': [0.1, 1, 10, 100, 1000]}] clf_svc = RandomizedSearchCV(svc, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_svc = clf_svc.fit(X_train, y_train) performance(best_clf_svc, 'SVC') best_svc = best_clf_svc.best_estimator_.fit(X_train, y_train) y_pred = best_svc.predict(X_test) accuracy_score(y_pred, y_test) xgb = XGBClassifier(random_state=42) param_grid = {'n_estimators': [450, 500, 550], 'colsample_bytree': [0.75, 0.8, 0.85], 'max_depth': [None], 'reg_alpha': [1], 'reg_lambda': [2, 5, 10], 'subsample': [0.55, 0.6, 0.65], 'learning_rate': [0.5], 'gamma': [0.5, 1, 2], 'min_child_weight': [0.01], 'sampling_method': ['uniform']} clf_xgb = RandomizedSearchCV(xgb, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_xgb = clf_xgb.fit(X_train, y_train) performance(best_clf_xgb, 'XGB') best_xgb = best_clf_xgb.best_estimator_.fit(X_train, y_train) y_pred = best_xgb.predict(X_test) accuracy_score(y_pred, y_test) rf = RandomForestClassifier(random_state=42) param_grid = {'n_estimators': [400, 450, 500, 550], 'criterion': ['gini', 'entropy'], 'bootstrap': [True], 'max_depth': [15, 20, 25], 'max_features': ['auto', 'sqrt', 10], 'min_samples_leaf': [2, 3], 'min_samples_split': [2, 3]} clf_rf = RandomizedSearchCV(rf, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_rf = clf_rf.fit(X_train, y_train) performance(best_clf_rf, 'Random Forest') best_rf = best_clf_rf.best_estimator_.fit(X_train, y_train) y_pred = best_rf.predict(X_test) accuracy_score(y_pred, y_test)
code
72070216/cell_52
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import VotingClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from xgboost import XGBClassifier from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) svc = SVC(probability=True) cv = cross_val_score(svc, X_train, y_train, cv=5) from xgboost import XGBClassifier xgb = XGBClassifier(random_state=1) cv = cross_val_score(xgb, X_train, y_train, cv=5) def performance(classifier, model_name): pass svc = SVC(probability=True) param_grid = tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.1, 0.5, 1, 2, 5, 10], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['poly'], 'degree': [2, 3, 4, 5], 'C': [0.1, 1, 10, 100, 1000]}] clf_svc = RandomizedSearchCV(svc, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_svc = clf_svc.fit(X_train, y_train) performance(best_clf_svc, 'SVC') best_svc = best_clf_svc.best_estimator_.fit(X_train, y_train) y_pred = best_svc.predict(X_test) accuracy_score(y_pred, y_test) xgb = XGBClassifier(random_state=42) param_grid = {'n_estimators': [450, 500, 550], 'colsample_bytree': [0.75, 0.8, 0.85], 'max_depth': [None], 'reg_alpha': [1], 'reg_lambda': [2, 5, 10], 'subsample': [0.55, 0.6, 0.65], 'learning_rate': [0.5], 'gamma': [0.5, 1, 2], 'min_child_weight': [0.01], 'sampling_method': ['uniform']} clf_xgb = RandomizedSearchCV(xgb, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_xgb = clf_xgb.fit(X_train, y_train) performance(best_clf_xgb, 'XGB') best_xgb = best_clf_xgb.best_estimator_.fit(X_train, y_train) y_pred = best_xgb.predict(X_test) accuracy_score(y_pred, y_test) rf = RandomForestClassifier(random_state=42) param_grid = {'n_estimators': [400, 450, 500, 550], 'criterion': ['gini', 'entropy'], 'bootstrap': [True], 'max_depth': [15, 20, 25], 'max_features': ['auto', 'sqrt', 10], 'min_samples_leaf': [2, 3], 'min_samples_split': [2, 3]} clf_rf = RandomizedSearchCV(rf, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_rf = clf_rf.fit(X_train, y_train) performance(best_clf_rf, 'Random Forest') best_rf = best_clf_rf.best_estimator_.fit(X_train, y_train) y_pred = best_rf.predict(X_test) accuracy_score(y_pred, y_test) model = VotingClassifier(estimators=[('SVC', best_svc), ('XGB', best_xgb), ('RF', best_rf)], voting='hard') model.fit(X_train, y_train)
code
72070216/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/water-potability/water_potability.csv')
data.describe()
code
72070216/cell_49
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from xgboost import XGBClassifier from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) svc = SVC(probability=True) cv = cross_val_score(svc, X_train, y_train, cv=5) from xgboost import XGBClassifier xgb = XGBClassifier(random_state=1) cv = cross_val_score(xgb, X_train, y_train, cv=5) def performance(classifier, model_name): pass svc = SVC(probability=True) param_grid = tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.1, 0.5, 1, 2, 5, 10], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['poly'], 'degree': [2, 3, 4, 5], 'C': [0.1, 1, 10, 100, 1000]}] clf_svc = RandomizedSearchCV(svc, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_svc = clf_svc.fit(X_train, y_train) performance(best_clf_svc, 'SVC') xgb = XGBClassifier(random_state=42) param_grid = {'n_estimators': [450, 500, 550], 'colsample_bytree': [0.75, 0.8, 0.85], 'max_depth': [None], 'reg_alpha': [1], 'reg_lambda': [2, 5, 10], 'subsample': [0.55, 0.6, 0.65], 'learning_rate': [0.5], 'gamma': [0.5, 1, 2], 'min_child_weight': [0.01], 'sampling_method': ['uniform']} clf_xgb = RandomizedSearchCV(xgb, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_xgb = clf_xgb.fit(X_train, y_train) performance(best_clf_xgb, 'XGB') rf = RandomForestClassifier(random_state=42) param_grid = {'n_estimators': [400, 450, 500, 550], 'criterion': ['gini', 'entropy'], 'bootstrap': [True], 'max_depth': [15, 20, 25], 'max_features': ['auto', 'sqrt', 10], 'min_samples_leaf': [2, 3], 'min_samples_split': [2, 3]} clf_rf = RandomizedSearchCV(rf, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_rf = clf_rf.fit(X_train, y_train) performance(best_clf_rf, 'Random Forest')
code
72070216/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

gnb = GaussianNB()
cv = cross_val_score(gnb, X_train, y_train, cv=5)

lr = LogisticRegression(max_iter=2000)
cv = cross_val_score(lr, X_train, y_train, cv=5)

knn = KNeighborsClassifier(n_neighbors=4)
cv = cross_val_score(knn, X_train, y_train, cv=5)
print(cv)
print(cv.mean())
code
72070216/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() data = pd.read_csv('../input/water-potability/water_potability.csv') data.isnull().sum()[data.isnull().sum() > 0] fig = plt.figure(figsize=(18,16)) for index,col in enumerate(data.drop('Potability',axis=1).columns): plt.subplot(5,2,index+1) sns.distplot(data.drop('Potability', axis=1).loc[:,col].dropna(), kde=False) fig.tight_layout(pad=1.0) fig = plt.figure(figsize=(14, 15)) for index, col in enumerate(data.drop('Potability', axis=1).columns): plt.subplot(5, 2, index + 1) sns.boxplot(y=col, data=data.drop('Potability', axis=1).dropna()) fig.tight_layout(pad=1.0)
code
72070216/cell_38
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import VotingClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from xgboost import XGBClassifier from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) svc = SVC(probability=True) cv = cross_val_score(svc, X_train, y_train, cv=5) from xgboost import XGBClassifier xgb = XGBClassifier(random_state=1) cv = cross_val_score(xgb, X_train, y_train, cv=5) from sklearn.ensemble import VotingClassifier voting_clf = VotingClassifier(estimators=[('lr', lr), ('knn', knn), ('rf', rf), ('gnb', gnb), ('svc', svc), ('xgb', xgb)], voting='soft') from sklearn.metrics import accuracy_score voting_clf.fit(X_train, y_train) y_pred_vc_soft = voting_clf.predict(X_test).astype(int) accuracy_score(y_pred_vc_soft, y_test)
code
72070216/cell_47
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from xgboost import XGBClassifier from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) svc = SVC(probability=True) cv = cross_val_score(svc, X_train, y_train, cv=5) from xgboost import XGBClassifier xgb = XGBClassifier(random_state=1) cv = cross_val_score(xgb, X_train, y_train, cv=5) def performance(classifier, model_name): pass svc = SVC(probability=True) param_grid = tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.1, 0.5, 1, 2, 5, 10], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['poly'], 'degree': [2, 3, 4, 5], 'C': [0.1, 1, 10, 100, 1000]}] clf_svc = RandomizedSearchCV(svc, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_svc = clf_svc.fit(X_train, y_train) performance(best_clf_svc, 'SVC') xgb = XGBClassifier(random_state=42) param_grid = {'n_estimators': [450, 500, 550], 'colsample_bytree': [0.75, 0.8, 0.85], 'max_depth': [None], 'reg_alpha': [1], 'reg_lambda': [2, 5, 10], 'subsample': [0.55, 0.6, 0.65], 'learning_rate': [0.5], 'gamma': [0.5, 1, 2], 'min_child_weight': [0.01], 'sampling_method': ['uniform']} clf_xgb = RandomizedSearchCV(xgb, param_grid, cv=5, verbose=True, n_jobs=-1) best_clf_xgb = clf_xgb.fit(X_train, y_train) performance(best_clf_xgb, 'XGB')
code
72070216/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() data = pd.read_csv('../input/water-potability/water_potability.csv') data.isnull().sum()[data.isnull().sum() > 0] fig = plt.figure(figsize=(18,16)) for index,col in enumerate(data.drop('Potability',axis=1).columns): plt.subplot(5,2,index+1) sns.distplot(data.drop('Potability', axis=1).loc[:,col].dropna(), kde=False) fig.tight_layout(pad=1.0) fig = plt.figure(figsize=(14,15)) for index,col in enumerate(data.drop('Potability', axis=1).columns): plt.subplot(5,2,index+1) sns.boxplot(y=col, data=data.drop('Potability', axis=1).dropna()) fig.tight_layout(pad=1.0) fig = plt.figure(figsize=(10, 10)) sns.heatmap(data.corr(), annot=True, cmap='gray')
code
72070216/cell_35
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from xgboost import XGBClassifier from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) gnb = GaussianNB() cv = cross_val_score(gnb, X_train, y_train, cv=5) lr = LogisticRegression(max_iter=2000) cv = cross_val_score(lr, X_train, y_train, cv=5) knn = KNeighborsClassifier(n_neighbors=4) cv = cross_val_score(knn, X_train, y_train, cv=5) rf = RandomForestClassifier(random_state=42) cv = cross_val_score(rf, X_train, y_train, cv=5) svc = SVC(probability=True) cv = cross_val_score(svc, X_train, y_train, cv=5) from xgboost import XGBClassifier xgb = XGBClassifier(random_state=1) cv = cross_val_score(xgb, X_train, y_train, cv=5) print(cv) print(cv.mean())
code
72070216/cell_31
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

gnb = GaussianNB()
cv = cross_val_score(gnb, X_train, y_train, cv=5)

lr = LogisticRegression(max_iter=2000)
cv = cross_val_score(lr, X_train, y_train, cv=5)
print(cv)
print(cv.mean())
code
72070216/cell_46
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
gnb = GaussianNB()
cv = cross_val_score(gnb, X_train, y_train, cv=5)
lr = LogisticRegression(max_iter=2000)
cv = cross_val_score(lr, X_train, y_train, cv=5)
knn = KNeighborsClassifier(n_neighbors=4)
cv = cross_val_score(knn, X_train, y_train, cv=5)
rf = RandomForestClassifier(random_state=42)
cv = cross_val_score(rf, X_train, y_train, cv=5)
svc = SVC(probability=True)
cv = cross_val_score(svc, X_train, y_train, cv=5)
def performance(classifier, model_name):
    pass
svc = SVC(probability=True)
param_grid = tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.1, 0.5, 1, 2, 5, 10], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['poly'], 'degree': [2, 3, 4, 5], 'C': [0.1, 1, 10, 100, 1000]}]
clf_svc = RandomizedSearchCV(svc, param_grid, cv=5, verbose=True, n_jobs=-1)
best_clf_svc = clf_svc.fit(X_train, y_train)
performance(best_clf_svc, 'SVC')
best_svc = best_clf_svc.best_estimator_.fit(X_train, y_train)
y_pred = best_svc.predict(X_test)
accuracy_score(y_pred, y_test)
code
72070216/cell_53
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
gnb = GaussianNB()
cv = cross_val_score(gnb, X_train, y_train, cv=5)
lr = LogisticRegression(max_iter=2000)
cv = cross_val_score(lr, X_train, y_train, cv=5)
knn = KNeighborsClassifier(n_neighbors=4)
cv = cross_val_score(knn, X_train, y_train, cv=5)
rf = RandomForestClassifier(random_state=42)
cv = cross_val_score(rf, X_train, y_train, cv=5)
svc = SVC(probability=True)
cv = cross_val_score(svc, X_train, y_train, cv=5)
from xgboost import XGBClassifier
xgb = XGBClassifier(random_state=1)
cv = cross_val_score(xgb, X_train, y_train, cv=5)
def performance(classifier, model_name):
    pass
svc = SVC(probability=True)
param_grid = tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.1, 0.5, 1, 2, 5, 10], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000]}, {'kernel': ['poly'], 'degree': [2, 3, 4, 5], 'C': [0.1, 1, 10, 100, 1000]}]
clf_svc = RandomizedSearchCV(svc, param_grid, cv=5, verbose=True, n_jobs=-1)
best_clf_svc = clf_svc.fit(X_train, y_train)
performance(best_clf_svc, 'SVC')
best_svc = best_clf_svc.best_estimator_.fit(X_train, y_train)
y_pred = best_svc.predict(X_test)
accuracy_score(y_pred, y_test)
xgb = XGBClassifier(random_state=42)
param_grid = {'n_estimators': [450, 500, 550], 'colsample_bytree': [0.75, 0.8, 0.85], 'max_depth': [None], 'reg_alpha': [1], 'reg_lambda': [2, 5, 10], 'subsample': [0.55, 0.6, 0.65], 'learning_rate': [0.5], 'gamma': [0.5, 1, 2], 'min_child_weight': [0.01], 'sampling_method': ['uniform']}
clf_xgb = RandomizedSearchCV(xgb, param_grid, cv=5, verbose=True, n_jobs=-1)
best_clf_xgb = clf_xgb.fit(X_train, y_train)
performance(best_clf_xgb, 'XGB')
best_xgb = best_clf_xgb.best_estimator_.fit(X_train, y_train)
y_pred = best_xgb.predict(X_test)
accuracy_score(y_pred, y_test)
rf = RandomForestClassifier(random_state=42)
param_grid = {'n_estimators': [400, 450, 500, 550], 'criterion': ['gini', 'entropy'], 'bootstrap': [True], 'max_depth': [15, 20, 25], 'max_features': ['auto', 'sqrt', 10], 'min_samples_leaf': [2, 3], 'min_samples_split': [2, 3]}
clf_rf = RandomizedSearchCV(rf, param_grid, cv=5, verbose=True, n_jobs=-1)
best_clf_rf = clf_rf.fit(X_train, y_train)
performance(best_clf_rf, 'Random Forest')
best_rf = best_clf_rf.best_estimator_.fit(X_train, y_train)
y_pred = best_rf.predict(X_test)
accuracy_score(y_pred, y_test)
model = VotingClassifier(estimators=[('SVC', best_svc), ('XGB', best_xgb), ('RF', best_rf)], voting='hard')
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy_score(y_pred, y_test)
code
72070216/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
data = pd.read_csv('../input/water-potability/water_potability.csv')
data.isnull().sum()[data.isnull().sum() > 0]
sns.countplot(data['Potability'])
code
72070216/cell_37
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
gnb = GaussianNB()
cv = cross_val_score(gnb, X_train, y_train, cv=5)
lr = LogisticRegression(max_iter=2000)
cv = cross_val_score(lr, X_train, y_train, cv=5)
knn = KNeighborsClassifier(n_neighbors=4)
cv = cross_val_score(knn, X_train, y_train, cv=5)
rf = RandomForestClassifier(random_state=42)
cv = cross_val_score(rf, X_train, y_train, cv=5)
svc = SVC(probability=True)
cv = cross_val_score(svc, X_train, y_train, cv=5)
from xgboost import XGBClassifier
xgb = XGBClassifier(random_state=1)
cv = cross_val_score(xgb, X_train, y_train, cv=5)
from sklearn.ensemble import VotingClassifier
voting_clf = VotingClassifier(estimators=[('lr', lr), ('knn', knn), ('rf', rf), ('gnb', gnb), ('svc', svc), ('xgb', xgb)], voting='soft')
cv = cross_val_score(voting_clf, X_train, y_train, cv=5)
print(cv)
print(cv.mean())
code
72070216/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/water-potability/water_potability.csv')
data.info()
code
128021914/cell_4
[ "text_plain_output_1.png" ]
!sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B53DC80D13EDEF05
code
128021914/cell_6
[ "text_plain_output_1.png" ]
# install_path for ease of use on changing install location
# "/kaggle" is best for all in one because of big storage ( 70 GB ++ )
# "/kaggle/working" is best for saving images and quickrun ( ~20 GB )
install_path= "/kaggle"
!git clone https://github.com/lllyasviel/ControlNet-v1-1-nightly $install_path/cnet1.1
# you can add hashtag # on the controlnet model you don't use for faster downloading
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/anything-v3-full.safetensors -d $install_path/cnet1.1/models -o anything-v3-full.safetensors
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned.ckpt -d $install_path/cnet1.1/models -o v1-5-pruned.ckpt
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11e_sd15_ip2p.pth -d $install_path/cnet1.1/models -o control_v11e_sd15_ip2p.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11e_sd15_shuffle.pth -d $install_path/cnet1.1/models -o control_v11e_sd15_shuffle.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_canny.pth -d $install_path/cnet1.1/models -o control_v11p_sd15_canny.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth.pth -d $install_path/cnet1.1/models -o control_v11p_sd15_depth.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_inpaint.pth -d $install_path/cnet1.1/models -o control_v11p_sd15_inpaint.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart.pth -d $install_path/cnet1.1/models -o control_v11p_sd15_lineart.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_mlsd.pth -d $install_path/cnet1.1/models -o control_v11p_sd15_mlsd.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_normalbae.pth -d $install_path/cnet1.1/models -o control_v11p_sd15_normalbae.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_openpose.pth -d $install_path/cnet1.1/models -o control_v11p_sd15_openpose.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_scribble.pth -d $install_path/cnet1.1/models -o control_v11p_sd15_scribble.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_seg.pth -d $install_path/cnet1.1/models -o control_v11p_sd15_seg.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge.pth -d $install_path/cnet1.1/models -o control_v11p_sd15_softedge.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15s2_lineart_anime.pth -d $install_path/cnet1.1/models -o control_v11p_sd15s2_lineart_anime.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11u_sd15_tile.pth -d $install_path/cnet1.1/models -o control_v11u_sd15_tile.pth
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_annotator.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_canny.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_depth.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_inpaint.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_ip2p.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_lineart.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_lineart_anime.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_mlsd.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_normalbae.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_openpose.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_scribble.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_scribble_interactive.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_seg.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_shuffle.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_softedge.py
!sed -i "s/block.launch(server_name='0.0.0.0')/block.launch(server_name='0.0.0.0', share=True)/g" gradio_tile.py
code
128021914/cell_8
[ "text_plain_output_1.png" ]
!python gradio_lineart_anime.py
code
128021914/cell_3
[ "text_plain_output_1.png" ]
!python --version
code
128034947/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
path = '/kaggle/input/h1bcsv/h1b.csv'
df = pd.read_csv(path)
# Counting the number of H-1B visa petitions for each case status
case_status_counts= df['CASE_STATUS'].value_counts()
# Creating a figure and axis object to plot the pie chart
fig, ax = plt.subplots(figsize=(8, 8))
# Plotting the pie chart with the case status counts and their respective percentages
plt.pie(case_status_counts, labels=case_status_counts.index, autopct='%1.1f%%', textprops={'fontsize': 8})
# Setting the title of the chart
plt.title('Petitions distributed by case statuses', y=1.1)
# Ensuring that the chart is properly displayed and not cut off
plt.tight_layout()
# Displaying the chart
plt.show()
top_employers = df['EMPLOYER_NAME'].value_counts().nlargest(20)
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.xticks(rotation=90)
top_companies = df.groupby('EMPLOYER_NAME').size().sort_values(ascending=False).head(15)
plt.barh(top_companies.index, top_companies.values, color=colors)
plt.title('Top 15 companies that filed the most visa apllication')
plt.show()
code
128034947/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
path = '/kaggle/input/h1bcsv/h1b.csv'
df = pd.read_csv(path)
case_status_counts = df['CASE_STATUS'].value_counts()
fig, ax = plt.subplots(figsize=(8, 8))
plt.pie(case_status_counts, labels=case_status_counts.index, autopct='%1.1f%%', textprops={'fontsize': 8})
plt.title('Petitions distributed by case statuses', y=1.1)
plt.tight_layout()
plt.show()
code
128034947/cell_6
[ "image_output_1.png" ]
import pandas as pd
path = '/kaggle/input/h1bcsv/h1b.csv'
df = pd.read_csv(path)
df.head(5)
code
128034947/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
path = '/kaggle/input/h1bcsv/h1b.csv'
df = pd.read_csv(path)
# Counting the number of H-1B visa petitions for each case status
case_status_counts= df['CASE_STATUS'].value_counts()
# Creating a figure and axis object to plot the pie chart
fig, ax = plt.subplots(figsize=(8, 8))
# Plotting the pie chart with the case status counts and their respective percentages
plt.pie(case_status_counts, labels=case_status_counts.index, autopct='%1.1f%%', textprops={'fontsize': 8})
# Setting the title of the chart
plt.title('Petitions distributed by case statuses', y=1.1)
# Ensuring that the chart is properly displayed and not cut off
plt.tight_layout()
# Displaying the chart
plt.show()
top_employers = df['EMPLOYER_NAME'].value_counts().nlargest(20)
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.bar(top_employers.index, top_employers.values, color=colors)
plt.xticks(rotation=90)
plt.title('Top employers filing the petitions')
plt.show()
code
128034947/cell_7
[ "image_output_1.png" ]
import pandas as pd
path = '/kaggle/input/h1bcsv/h1b.csv'
df = pd.read_csv(path)
df.info()
code
128034947/cell_15
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
path = '/kaggle/input/h1bcsv/h1b.csv'
df = pd.read_csv(path)
# Counting the number of H-1B visa petitions for each case status
case_status_counts= df['CASE_STATUS'].value_counts()
# Creating a figure and axis object to plot the pie chart
fig, ax = plt.subplots(figsize=(8, 8))
# Plotting the pie chart with the case status counts and their respective percentages
plt.pie(case_status_counts, labels=case_status_counts.index, autopct='%1.1f%%', textprops={'fontsize': 8})
# Setting the title of the chart
plt.title('Petitions distributed by case statuses', y=1.1)
# Ensuring that the chart is properly displayed and not cut off
plt.tight_layout()
# Displaying the chart
plt.show()
top_employers = df['EMPLOYER_NAME'].value_counts().nlargest(20)
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.xticks(rotation=90)
top_companies = df.groupby('EMPLOYER_NAME').size().sort_values(ascending=False).head(15)
plt.barh(top_companies.index, top_companies.values, color=colors)
top_wage_employers = df.groupby('EMPLOYER_NAME')['PREVAILING_WAGE'].mean().nlargest(10)
plt.barh(top_wage_employers.index, top_wage_employers.values, color=colors)
plt.title('Top employers grantingmaximum prevailing wages')
plt.show()
code
323931/cell_21
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
def addPrefix(df, suffix, exclude):
    for c in df.columns:
        if c not in exclude:
            df.rename(columns={c: suffix + c}, inplace=True)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
nonCategoricalColumns = ['people_id', 'activity_id', 'outcome', 'ppl_char_38', 'ppl_date', 'act_date']
valCounts = {}
def calcCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = len(df[c].value_counts())
            valCounts[c] = cnt
calcCountSuffix(trainUnique, nonCategoricalColumns)
def addCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = valCounts[c]
            df.rename(columns={c: c + '_cnt_' + str(cnt)}, inplace=True)
addCountSuffix(train, nonCategoricalColumns)
addCountSuffix(test, nonCategoricalColumns)
addCountSuffix(trainUnique, nonCategoricalColumns)
addCountSuffix(testUnique, nonCategoricalColumns)
def getColumnsBySuffix(df, minValue, maxValue, exclude):
    return [c for c in df.columns if c not in exclude if int(c.split('_')[-1]) >= minValue and int(c.split('_')[-1]) <= maxValue]
def drawViolin(df, minCnt, maxCnt, indexFrom, indexTo, size=3.5):
    g = sns.PairGrid(df, x_vars=getColumnsBySuffix(train, minCnt, maxCnt, nonCategoricalColumns)[indexFrom:indexTo], y_vars=['outcome'], aspect=0.75, size=size)
    g.map(sns.violinplot, palette='pastel')
sam10k = trainUnique.sample(10000)
sam100k = trainUnique.sample(100000)
sam500k = trainUnique.sample(500000)
drawViolin(sam10k, 2, 2, 21, 26)
code
323931/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
def addPrefix(df, suffix, exclude):
    for c in df.columns:
        if c not in exclude:
            df.rename(columns={c: suffix + c}, inplace=True)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
nonCategoricalColumns = ['people_id', 'activity_id', 'outcome', 'ppl_char_38', 'ppl_date', 'act_date']
valCounts = {}
def calcCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = len(df[c].value_counts())
            valCounts[c] = cnt
calcCountSuffix(trainUnique, nonCategoricalColumns)
def addCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = valCounts[c]
            df.rename(columns={c: c + '_cnt_' + str(cnt)}, inplace=True)
addCountSuffix(train, nonCategoricalColumns)
addCountSuffix(test, nonCategoricalColumns)
addCountSuffix(trainUnique, nonCategoricalColumns)
addCountSuffix(testUnique, nonCategoricalColumns)
code
323931/cell_9
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
train = pd.merge(act_train, people, on='people_id', how='left')
print(train.shape)
test = pd.merge(act_test, people, on='people_id', how='left')
print(test.shape)
print(train.columns)
code
323931/cell_25
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
def addPrefix(df, suffix, exclude):
    for c in df.columns:
        if c not in exclude:
            df.rename(columns={c: suffix + c}, inplace=True)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
nonCategoricalColumns = ['people_id', 'activity_id', 'outcome', 'ppl_char_38', 'ppl_date', 'act_date']
valCounts = {}
def calcCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = len(df[c].value_counts())
            valCounts[c] = cnt
calcCountSuffix(trainUnique, nonCategoricalColumns)
def addCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = valCounts[c]
            df.rename(columns={c: c + '_cnt_' + str(cnt)}, inplace=True)
addCountSuffix(train, nonCategoricalColumns)
addCountSuffix(test, nonCategoricalColumns)
addCountSuffix(trainUnique, nonCategoricalColumns)
addCountSuffix(testUnique, nonCategoricalColumns)
def getColumnsBySuffix(df, minValue, maxValue, exclude):
    return [c for c in df.columns if c not in exclude if int(c.split('_')[-1]) >= minValue and int(c.split('_')[-1]) <= maxValue]
def drawViolin(df, minCnt, maxCnt, indexFrom, indexTo, size=3.5):
    g = sns.PairGrid(df, x_vars=getColumnsBySuffix(train, minCnt, maxCnt, nonCategoricalColumns)[indexFrom:indexTo], y_vars=['outcome'], aspect=0.75, size=size)
    g.map(sns.violinplot, palette='pastel')
sam10k = trainUnique.sample(10000)
sam100k = trainUnique.sample(100000)
sam500k = trainUnique.sample(500000)
drawViolin(sam100k, 8, 8, 0, 5, 8.0)
code
323931/cell_4
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
print('{0} duplicate people rows'.format(people.drop('people_id', axis=1).duplicated().sum()))
print('{0} duplicate people ids'.format(people['people_id'].duplicated().sum()))
print('{0} duplicate train rows'.format(act_train.drop('activity_id', axis=1).duplicated().sum()))
print('{0} duplicate train rows with different outcome'.format(act_train.drop(['activity_id'], axis=1).drop_duplicates().drop('outcome', axis=1).duplicated().sum()))
print('{0} duplicate train activity id'.format(act_train['activity_id'].duplicated().sum()))
print('{0} duplicate test rows'.format(act_test.drop('activity_id', axis=1).duplicated().sum()))
print('{0} duplicate test activity id'.format(act_test['activity_id'].duplicated().sum()))
code
323931/cell_23
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
def addPrefix(df, suffix, exclude):
    for c in df.columns:
        if c not in exclude:
            df.rename(columns={c: suffix + c}, inplace=True)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
nonCategoricalColumns = ['people_id', 'activity_id', 'outcome', 'ppl_char_38', 'ppl_date', 'act_date']
valCounts = {}
def calcCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = len(df[c].value_counts())
            valCounts[c] = cnt
calcCountSuffix(trainUnique, nonCategoricalColumns)
def addCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = valCounts[c]
            df.rename(columns={c: c + '_cnt_' + str(cnt)}, inplace=True)
addCountSuffix(train, nonCategoricalColumns)
addCountSuffix(test, nonCategoricalColumns)
addCountSuffix(trainUnique, nonCategoricalColumns)
addCountSuffix(testUnique, nonCategoricalColumns)
def getColumnsBySuffix(df, minValue, maxValue, exclude):
    return [c for c in df.columns if c not in exclude if int(c.split('_')[-1]) >= minValue and int(c.split('_')[-1]) <= maxValue]
def drawViolin(df, minCnt, maxCnt, indexFrom, indexTo, size=3.5):
    g = sns.PairGrid(df, x_vars=getColumnsBySuffix(train, minCnt, maxCnt, nonCategoricalColumns)[indexFrom:indexTo], y_vars=['outcome'], aspect=0.75, size=size)
    g.map(sns.violinplot, palette='pastel')
sam10k = trainUnique.sample(10000)
sam100k = trainUnique.sample(100000)
sam500k = trainUnique.sample(500000)
drawViolin(sam100k, 3, 6, 0, 6, 8.0)
code
323931/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
def addPrefix(df, suffix, exclude):
    for c in df.columns:
        if c not in exclude:
            df.rename(columns={c: suffix + c}, inplace=True)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
nonCategoricalColumns = ['people_id', 'activity_id', 'outcome', 'ppl_char_38', 'ppl_date', 'act_date']
valCounts = {}
def calcCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = len(df[c].value_counts())
            valCounts[c] = cnt
calcCountSuffix(trainUnique, nonCategoricalColumns)
def addCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = valCounts[c]
            df.rename(columns={c: c + '_cnt_' + str(cnt)}, inplace=True)
addCountSuffix(train, nonCategoricalColumns)
addCountSuffix(test, nonCategoricalColumns)
addCountSuffix(trainUnique, nonCategoricalColumns)
addCountSuffix(testUnique, nonCategoricalColumns)
def getColumnsBySuffix(df, minValue, maxValue, exclude):
    return [c for c in df.columns if c not in exclude if int(c.split('_')[-1]) >= minValue and int(c.split('_')[-1]) <= maxValue]
def drawViolin(df, minCnt, maxCnt, indexFrom, indexTo, size=3.5):
    g = sns.PairGrid(df, x_vars=getColumnsBySuffix(train, minCnt, maxCnt, nonCategoricalColumns)[indexFrom:indexTo], y_vars=['outcome'], aspect=0.75, size=size)
    g.map(sns.violinplot, palette='pastel')
sam10k = trainUnique.sample(10000)
sam100k = trainUnique.sample(100000)
sam500k = trainUnique.sample(500000)
drawViolin(sam10k, 2, 2, 16, 21)
code
323931/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
print(trainUnique.shape)
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
print(testUnique.shape)
code
323931/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
def addPrefix(df, suffix, exclude):
    for c in df.columns:
        if c not in exclude:
            df.rename(columns={c: suffix + c}, inplace=True)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
nonCategoricalColumns = ['people_id', 'activity_id', 'outcome', 'ppl_char_38', 'ppl_date', 'act_date']
valCounts = {}
def calcCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = len(df[c].value_counts())
            valCounts[c] = cnt
calcCountSuffix(trainUnique, nonCategoricalColumns)
def addCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = valCounts[c]
            df.rename(columns={c: c + '_cnt_' + str(cnt)}, inplace=True)
addCountSuffix(train, nonCategoricalColumns)
addCountSuffix(test, nonCategoricalColumns)
addCountSuffix(trainUnique, nonCategoricalColumns)
addCountSuffix(testUnique, nonCategoricalColumns)
def getColumnsBySuffix(df, minValue, maxValue, exclude):
    return [c for c in df.columns if c not in exclude if int(c.split('_')[-1]) >= minValue and int(c.split('_')[-1]) <= maxValue]
def drawViolin(df, minCnt, maxCnt, indexFrom, indexTo, size=3.5):
    g = sns.PairGrid(df, x_vars=getColumnsBySuffix(train, minCnt, maxCnt, nonCategoricalColumns)[indexFrom:indexTo], y_vars=['outcome'], aspect=0.75, size=size)
    g.map(sns.violinplot, palette='pastel')
sam10k = trainUnique.sample(10000)
sam100k = trainUnique.sample(100000)
sam500k = trainUnique.sample(500000)
drawViolin(sam10k, 2, 2, 11, 16)
code
323931/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
def addPrefix(df, suffix, exclude):
    for c in df.columns:
        if c not in exclude:
            df.rename(columns={c: suffix + c}, inplace=True)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
nonCategoricalColumns = ['people_id', 'activity_id', 'outcome', 'ppl_char_38', 'ppl_date', 'act_date']
valCounts = {}
def calcCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = len(df[c].value_counts())
            valCounts[c] = cnt
calcCountSuffix(trainUnique, nonCategoricalColumns)
def addCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = valCounts[c]
            df.rename(columns={c: c + '_cnt_' + str(cnt)}, inplace=True)
addCountSuffix(train, nonCategoricalColumns)
addCountSuffix(test, nonCategoricalColumns)
addCountSuffix(trainUnique, nonCategoricalColumns)
addCountSuffix(testUnique, nonCategoricalColumns)
def getColumnsBySuffix(df, minValue, maxValue, exclude):
    return [c for c in df.columns if c not in exclude if int(c.split('_')[-1]) >= minValue and int(c.split('_')[-1]) <= maxValue]
def drawViolin(df, minCnt, maxCnt, indexFrom, indexTo, size=3.5):
    g = sns.PairGrid(df, x_vars=getColumnsBySuffix(train, minCnt, maxCnt, nonCategoricalColumns)[indexFrom:indexTo], y_vars=['outcome'], aspect=0.75, size=size)
    g.map(sns.violinplot, palette='pastel')
sam10k = trainUnique.sample(10000)
sam100k = trainUnique.sample(100000)
sam500k = trainUnique.sample(500000)
drawViolin(sam10k, 2, 2, 6, 11)
code
323931/cell_3
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
print(people.shape)
print(act_train.shape)
print(act_test.shape)
code
323931/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
def addPrefix(df, suffix, exclude):
    for c in df.columns:
        if c not in exclude:
            df.rename(columns={c: suffix + c}, inplace=True)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
nonCategoricalColumns = ['people_id', 'activity_id', 'outcome', 'ppl_char_38', 'ppl_date', 'act_date']
valCounts = {}
def calcCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = len(df[c].value_counts())
            valCounts[c] = cnt
calcCountSuffix(trainUnique, nonCategoricalColumns)
def addCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = valCounts[c]
            df.rename(columns={c: c + '_cnt_' + str(cnt)}, inplace=True)
addCountSuffix(train, nonCategoricalColumns)
addCountSuffix(test, nonCategoricalColumns)
addCountSuffix(trainUnique, nonCategoricalColumns)
addCountSuffix(testUnique, nonCategoricalColumns)
def getColumnsBySuffix(df, minValue, maxValue, exclude):
    return [c for c in df.columns if c not in exclude if int(c.split('_')[-1]) >= minValue and int(c.split('_')[-1]) <= maxValue]
def drawViolin(df, minCnt, maxCnt, indexFrom, indexTo, size=3.5):
    g = sns.PairGrid(df, x_vars=getColumnsBySuffix(train, minCnt, maxCnt, nonCategoricalColumns)[indexFrom:indexTo], y_vars=['outcome'], aspect=0.75, size=size)
    g.map(sns.violinplot, palette='pastel')
sam10k = trainUnique.sample(10000)
sam100k = trainUnique.sample(100000)
sam500k = trainUnique.sample(500000)
drawViolin(sam10k, 2, 2, 0, 6)
code
323931/cell_24
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
def addPrefix(df, suffix, exclude):
    for c in df.columns:
        if c not in exclude:
            df.rename(columns={c: suffix + c}, inplace=True)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
nonCategoricalColumns = ['people_id', 'activity_id', 'outcome', 'ppl_char_38', 'ppl_date', 'act_date']
valCounts = {}
def calcCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = len(df[c].value_counts())
            valCounts[c] = cnt
calcCountSuffix(trainUnique, nonCategoricalColumns)
def addCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = valCounts[c]
            df.rename(columns={c: c + '_cnt_' + str(cnt)}, inplace=True)
addCountSuffix(train, nonCategoricalColumns)
addCountSuffix(test, nonCategoricalColumns)
addCountSuffix(trainUnique, nonCategoricalColumns)
addCountSuffix(testUnique, nonCategoricalColumns)
def getColumnsBySuffix(df, minValue, maxValue, exclude):
    return [c for c in df.columns if c not in exclude if int(c.split('_')[-1]) >= minValue and int(c.split('_')[-1]) <= maxValue]
def drawViolin(df, minCnt, maxCnt, indexFrom, indexTo, size=3.5):
    g = sns.PairGrid(df, x_vars=getColumnsBySuffix(train, minCnt, maxCnt, nonCategoricalColumns)[indexFrom:indexTo], y_vars=['outcome'], aspect=0.75, size=size)
    g.map(sns.violinplot, palette='pastel')
sam10k = trainUnique.sample(10000)
sam100k = trainUnique.sample(100000)
sam500k = trainUnique.sample(500000)
drawViolin(sam100k, 6, 7, 0, 5, 5.0)
code
323931/cell_22
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
def addPrefix(df, suffix, exclude):
    for c in df.columns:
        if c not in exclude:
            df.rename(columns={c: suffix + c}, inplace=True)
train = pd.merge(act_train, people, on='people_id', how='left')
test = pd.merge(act_test, people, on='people_id', how='left')
trainUnique = train[~train.drop(['people_id', 'activity_id'], axis=1).duplicated()]
testUnique = test[~test.drop(['people_id', 'activity_id'], axis=1).duplicated()]
nonCategoricalColumns = ['people_id', 'activity_id', 'outcome', 'ppl_char_38', 'ppl_date', 'act_date']
valCounts = {}
def calcCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = len(df[c].value_counts())
            valCounts[c] = cnt
calcCountSuffix(trainUnique, nonCategoricalColumns)
def addCountSuffix(df, exclude):
    for c in df.columns:
        if c not in exclude:
            cnt = valCounts[c]
            df.rename(columns={c: c + '_cnt_' + str(cnt)}, inplace=True)
addCountSuffix(train, nonCategoricalColumns)
addCountSuffix(test, nonCategoricalColumns)
addCountSuffix(trainUnique, nonCategoricalColumns)
addCountSuffix(testUnique, nonCategoricalColumns)
def getColumnsBySuffix(df, minValue, maxValue, exclude):
    return [c for c in df.columns if c not in exclude if int(c.split('_')[-1]) >= minValue and int(c.split('_')[-1]) <= maxValue]
def drawViolin(df, minCnt, maxCnt, indexFrom, indexTo, size=3.5):
    g = sns.PairGrid(df, x_vars=getColumnsBySuffix(train, minCnt, maxCnt, nonCategoricalColumns)[indexFrom:indexTo], y_vars=['outcome'], aspect=0.75, size=size)
    g.map(sns.violinplot, palette='pastel')
sam10k = trainUnique.sample(10000)
sam100k = trainUnique.sample(100000)
sam500k = trainUnique.sample(500000)
drawViolin(sam10k, 2, 2, 26, 31)
code
323931/cell_5
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
baseDir = '../input/'
people = pd.read_csv('{0}people.csv'.format(baseDir)).drop_duplicates()
act_train = pd.read_csv('{0}act_train.csv'.format(baseDir)).drop_duplicates()
act_test = pd.read_csv('{0}act_test.csv'.format(baseDir)).drop_duplicates()
unqTrain = act_train.drop(['activity_id', 'outcome'], axis=1).drop_duplicates()
unqTest = act_test.drop(['activity_id'], axis=1).drop_duplicates()
total = pd.concat([unqTrain, unqTest], axis=0)
print('{0} rows duplicated between train and test'.format(len(total) - len(total.drop_duplicates())))
print('{0} columns diff between train and test'.format([c for c in act_train.columns if c not in act_test.columns and c != 'outcome']))
code