path (stringlengths 13–17) | screenshot_names (sequencelengths 1–873) | code (stringlengths 0–40.4k) | cell_type (stringclasses 1) |
---|---|---|---|
128031091/cell_21 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
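# sep=',\s' is a regex separator (comma plus whitespace), so pandas falls back to the slower python parsing engine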
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
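# this dataset encodes missing categorical values as '?'; convert them to NaN so pandas treats them as missing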
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique()
dataset.occupation.value_counts()
dataset['occupation'].replace('?', np.NaN, inplace=True)
dataset.occupation.value_counts()
dataset.native_country.unique()
dataset.native_country.value_counts()
dataset['native_country'].replace('?', np.NaN, inplace=True)
dataset.native_country.value_counts()
numerical = [var for var in dataset.columns if dataset[var].dtype != 'O']
dataset[numerical].isnull().sum() | code |
128031091/cell_13 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique()
dataset.occupation.value_counts()
dataset['occupation'].replace('?', np.NaN, inplace=True)
dataset.occupation.value_counts() | code |
128031091/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts() | code |
128031091/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
dataset.head() | code |
128031091/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique()
dataset.occupation.value_counts()
dataset['occupation'].replace('?', np.NaN, inplace=True)
dataset.occupation.value_counts()
dataset.native_country.unique()
dataset.native_country.value_counts()
dataset['native_country'].replace('?', np.NaN, inplace=True)
dataset.native_country.value_counts()
numerical = [var for var in dataset.columns if dataset[var].dtype != 'O']
dataset[numerical].head() | code |
128031091/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
print('There are {} categorical variables\n'.format(len(categorical)))
print('The categorical variables are :\n\n', categorical)
dataset[categorical].head() | code |
128031091/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns | code |
128031091/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique() | code |
128031091/cell_19 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique()
dataset.occupation.value_counts()
dataset['occupation'].replace('?', np.NaN, inplace=True)
dataset.occupation.value_counts()
dataset.native_country.unique()
dataset.native_country.value_counts()
dataset['native_country'].replace('?', np.NaN, inplace=True)
dataset.native_country.value_counts()
numerical = [var for var in dataset.columns if dataset[var].dtype != 'O']
print('There are {} numerical variables\n'.format(len(numerical)))
print('The numerical variables are :', numerical) | code |
128031091/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128031091/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset[categorical].isnull().sum() | code |
128031091/cell_18 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique()
dataset.occupation.value_counts()
dataset['occupation'].replace('?', np.NaN, inplace=True)
dataset.occupation.value_counts()
dataset.native_country.unique()
dataset.native_country.value_counts()
dataset['native_country'].replace('?', np.NaN, inplace=True)
dataset.native_country.value_counts()
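# cardinality check: high-label columns such as native_country may need grouping before one-hot encoding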
for var in categorical:
print(var, ' contains ', len(dataset[var].unique()), ' labels') | code |
128031091/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique() | code |
128031091/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique()
dataset.occupation.value_counts()
dataset['occupation'].replace('?', np.NaN, inplace=True)
dataset.occupation.value_counts()
dataset.native_country.unique()
dataset.native_country.value_counts() | code |
128031091/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique()
dataset.occupation.value_counts()
dataset['occupation'].replace('?', np.NaN, inplace=True)
dataset.occupation.value_counts()
dataset.native_country.unique()
dataset.native_country.value_counts()
dataset['native_country'].replace('?', np.NaN, inplace=True)
dataset.native_country.value_counts() | code |
128031091/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique()
dataset.occupation.value_counts()
dataset['occupation'].replace('?', np.NaN, inplace=True)
dataset.occupation.value_counts()
dataset.native_country.unique()
dataset.native_country.value_counts()
dataset['native_country'].replace('?', np.NaN, inplace=True)
dataset.native_country.value_counts()
dataset[categorical].isnull().sum() | code |
128031091/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique()
dataset.occupation.value_counts()
dataset['occupation'].replace('?', np.NaN, inplace=True)
dataset.occupation.value_counts()
dataset.native_country.unique() | code |
128031091/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts() | code |
128031091/cell_12 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
categorical = [var for var in dataset.columns if dataset[var].dtype == 'O']
dataset.workclass.unique()
dataset.workclass.value_counts()
dataset['workclass'].replace('?', np.NaN, inplace=True)
dataset.workclass.value_counts()
dataset.occupation.unique()
dataset.occupation.value_counts() | code |
128031091/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = '/kaggle/input/adult-dataset/adult.csv'
dataset = pd.read_csv(dataset, header=None, sep=',\\s')
dataset.shape
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
dataset.columns = col_names
dataset.columns
dataset.head() | code |
72072461/cell_4 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
X_full = pd.read_csv('../input/housingdataset/train.csv', index_col='Id')
X_test_full = pd.read_csv('../input/housingdataset/test.csv', index_col='Id')
X_full.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = X_full.SalePrice
X_full.drop(['SalePrice'], axis=1, inplace=True)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0)
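# keep only low-cardinality categorical columns (fewer than 10 unique values) so one-hot encoding stays manageable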
categorical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_test = X_test_full[my_cols].copy()
X_train = pd.get_dummies(X_train)
X_valid = pd.get_dummies(X_valid)
X_test = pd.get_dummies(X_test)
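# get_dummies can yield different columns per frame; align everything to the training columns ('left' join), filling gaps with NaN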
X_train, X_valid = X_train.align(X_valid, join='left', axis=1)
X_train, X_test = X_train.align(X_test, join='left', axis=1)
from xgboost import XGBRegressor
my_model_1 = XGBRegressor(random_state=0)
my_model_1.fit(X_train, y_train) | code |
72072461/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
X_full = pd.read_csv('../input/housingdataset/train.csv', index_col='Id')
X_test_full = pd.read_csv('../input/housingdataset/test.csv', index_col='Id')
X_full.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = X_full.SalePrice
X_full.drop(['SalePrice'], axis=1, inplace=True)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0)
categorical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_test = X_test_full[my_cols].copy()
X_train = pd.get_dummies(X_train)
X_valid = pd.get_dummies(X_valid)
X_test = pd.get_dummies(X_test)
X_train, X_valid = X_train.align(X_valid, join='left', axis=1)
X_train, X_test = X_train.align(X_test, join='left', axis=1)
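# second model: many more boosting rounds (900) with a smaller learning rate than the XGBoost defaults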
my_model_2 = XGBRegressor(n_estimators=900, learning_rate=0.09)
my_model_2.fit(X_train, y_train)
predictions_2 = my_model_2.predict(X_valid)
mae_2 = mean_absolute_error(y_valid, predictions_2)
print('Mean Absolute Error:', mae_2) | code |
72072461/cell_2 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
X_full = pd.read_csv('../input/housingdataset/train.csv', index_col='Id')
X_test_full = pd.read_csv('../input/housingdataset/test.csv', index_col='Id')
X_full.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = X_full.SalePrice
X_full.drop(['SalePrice'], axis=1, inplace=True)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0)
categorical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_test = X_test_full[my_cols].copy()
X_train.head() | code |
72072461/cell_5 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
X_full = pd.read_csv('../input/housingdataset/train.csv', index_col='Id')
X_test_full = pd.read_csv('../input/housingdataset/test.csv', index_col='Id')
X_full.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = X_full.SalePrice
X_full.drop(['SalePrice'], axis=1, inplace=True)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0)
categorical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_test = X_test_full[my_cols].copy()
X_train = pd.get_dummies(X_train)
X_valid = pd.get_dummies(X_valid)
X_test = pd.get_dummies(X_test)
X_train, X_valid = X_train.align(X_valid, join='left', axis=1)
X_train, X_test = X_train.align(X_test, join='left', axis=1)
from xgboost import XGBRegressor
my_model_1 = XGBRegressor(random_state=0)
my_model_1.fit(X_train, y_train)
from sklearn.metrics import mean_absolute_error
predictions_1 = my_model_1.predict(X_valid)
mae_1 = mean_absolute_error(y_valid, predictions_1)
print('Mean Absolute Error:', mae_1) | code |
2028129/cell_4 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/flights.csv')
df = df[df['MONTH'] == 1]
df.head() | code |
2028129/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('ggplot')
2028129/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('../input/flights.csv')
df = df[df['MONTH'] == 1]
airlineList = df['AIRLINE'].unique()
airlineList = airlineList.tolist()
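# helpers: return only the positive delays (in minutes) for one airline as a numpy array, ready for averaging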
def calculate_Airline_D_Delays(airlineName):
d = df[df['AIRLINE'] == airlineName]
d = d[d['DEPARTURE_DELAY'] > 0]
li = d['DEPARTURE_DELAY'].tolist()
li = np.array(li)
return li
def calculate_Airline_A_Delays(airlineName):
d = df[df['AIRLINE'] == airlineName]
d = d[d['ARRIVAL_DELAY'] > 0]
li = d['ARRIVAL_DELAY'].tolist()
li = np.array(li)
return li
avgAirlineDD = []
avgAirlineAD = []
for a in airlineList:
avgAirlineDD.append(calculate_Airline_D_Delays(a).mean())
avgAirlineAD.append(calculate_Airline_A_Delays(a).mean())
n_groups = len(airlineList)
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.25
opacity = 0.4
error_config = {'ecolor': '0.3'}
rects1 = plt.bar(index, avgAirlineDD, bar_width,
alpha=opacity,
color='b',
error_kw=error_config,
label='Departure')
rects2 = plt.bar(index + bar_width, avgAirlineAD, bar_width,
alpha=opacity,
color='r',
error_kw=error_config,
label='Arrival')
plt.margins(0.01)
plt.xlabel('Airlines')
plt.ylabel('Average Delays (Min)')
plt.title('Comparison of Departure/Arrival Delays')
plt.xticks(index + bar_width / 2, airlineList)
plt.legend(loc = 'upper left')
plt.tight_layout()
plt.show()
def calculate_Airport_D_Delays(airportName):
d = df[df['ORIGIN_AIRPORT'] == airportName]
d = d[d['DEPARTURE_DELAY'] > 0]
li = d['DEPARTURE_DELAY'].tolist()
li = np.array(li)
return li
def calculate_Airport_A_Delays(airportName):
d = df[df['DESTINATION_AIRPORT'] == airportName]
d = d[d['ARRIVAL_DELAY'] > 0]
li = d['ARRIVAL_DELAY'].tolist()
li = np.array(li)
return li
airportDepList = df['ORIGIN_AIRPORT'].unique()
airportDepList = airportDepList.tolist()
airportArrList = df['DESTINATION_AIRPORT'].unique()
airportArrList = airportArrList.tolist()
avgAirportDD = []
avgAirportAD = []
for a in airportDepList:
avgAirportDD.append(calculate_Airport_D_Delays(a).mean())
for a in airportArrList:
avgAirportAD.append(calculate_Airport_A_Delays(a).mean())
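# pair each airport with its mean departure delay, sort ascending, and keep the 20 largest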
x = zip(airportDepList, avgAirportDD)
x = sorted(x, key=lambda item: item[1])
names = []
values = []
x = x[-20:]
for i, j in x:
names.append(i)
values.append(j)
n_groups = len(names)
index = np.arange(n_groups)
bar_width = 0.6
opacity = 0.4
error_config = {'ecolor': '0.3'}
rects1 = plt.bar(index, values, bar_width, alpha=opacity, color='b', error_kw=error_config, label='Departure')
plt.margins(0.01)
plt.xlabel('Airports')
plt.ylabel('Average Delays (Min)')
plt.title('Top 20 Airports with most Departure Delays')
plt.xticks(index + bar_width / 2, names)
plt.legend(loc='upper left')
plt.tight_layout()
plt.show() | code |
2028129/cell_14 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('../input/flights.csv')
df = df[df['MONTH'] == 1]
airlineList = df['AIRLINE'].unique()
airlineList = airlineList.tolist()
def calculate_Airline_D_Delays(airlineName):
d = df[df['AIRLINE'] == airlineName]
d = d[d['DEPARTURE_DELAY'] > 0]
li = d['DEPARTURE_DELAY'].tolist()
li = np.array(li)
return li
def calculate_Airline_A_Delays(airlineName):
d = df[df['AIRLINE'] == airlineName]
d = d[d['ARRIVAL_DELAY'] > 0]
li = d['ARRIVAL_DELAY'].tolist()
li = np.array(li)
return li
avgAirlineDD = []
avgAirlineAD = []
for a in airlineList:
avgAirlineDD.append(calculate_Airline_D_Delays(a).mean())
avgAirlineAD.append(calculate_Airline_A_Delays(a).mean())
n_groups = len(airlineList)
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.25
opacity = 0.4
error_config = {'ecolor': '0.3'}
rects1 = plt.bar(index, avgAirlineDD, bar_width,
alpha=opacity,
color='b',
error_kw=error_config,
label='Departure')
rects2 = plt.bar(index + bar_width, avgAirlineAD, bar_width,
alpha=opacity,
color='r',
error_kw=error_config,
label='Arrival')
plt.margins(0.01)
plt.xlabel('Airlines')
plt.ylabel('Average Delays (Min)')
plt.title('Comparison of Departure/Arrival Delays')
plt.xticks(index + bar_width / 2, airlineList)
plt.legend(loc = 'upper left')
plt.tight_layout()
plt.show()
def calculate_Airport_D_Delays(airportName):
d = df[df['ORIGIN_AIRPORT'] == airportName]
d = d[d['DEPARTURE_DELAY'] > 0]
li = d['DEPARTURE_DELAY'].tolist()
li = np.array(li)
return li
def calculate_Airport_A_Delays(airportName):
d = df[df['DESTINATION_AIRPORT'] == airportName]
d = d[d['ARRIVAL_DELAY'] > 0]
li = d['ARRIVAL_DELAY'].tolist()
li = np.array(li)
return li
airportDepList = df['ORIGIN_AIRPORT'].unique()
airportDepList = airportDepList.tolist()
airportArrList = df['DESTINATION_AIRPORT'].unique()
airportArrList = airportArrList.tolist()
avgAirportDD = []
avgAirportAD = []
for a in airportDepList:
avgAirportDD.append(calculate_Airport_D_Delays(a).mean())
for a in airportArrList:
avgAirportAD.append(calculate_Airport_A_Delays(a).mean()) | code |
2028129/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('../input/flights.csv')
df = df[df['MONTH'] == 1]
airlineList = df['AIRLINE'].unique()
airlineList = airlineList.tolist()
def calculate_Airline_D_Delays(airlineName):
d = df[df['AIRLINE'] == airlineName]
d = d[d['DEPARTURE_DELAY'] > 0]
li = d['DEPARTURE_DELAY'].tolist()
li = np.array(li)
return li
def calculate_Airline_A_Delays(airlineName):
d = df[df['AIRLINE'] == airlineName]
d = d[d['ARRIVAL_DELAY'] > 0]
li = d['ARRIVAL_DELAY'].tolist()
li = np.array(li)
return li
avgAirlineDD = []
avgAirlineAD = []
for a in airlineList:
avgAirlineDD.append(calculate_Airline_D_Delays(a).mean())
avgAirlineAD.append(calculate_Airline_A_Delays(a).mean())
n_groups = len(airlineList)
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.25
opacity = 0.4
error_config = {'ecolor': '0.3'}
rects1 = plt.bar(index, avgAirlineDD, bar_width, alpha=opacity, color='b', error_kw=error_config, label='Departure')
rects2 = plt.bar(index + bar_width, avgAirlineAD, bar_width, alpha=opacity, color='r', error_kw=error_config, label='Arrival')
plt.margins(0.01)
plt.xlabel('Airlines')
plt.ylabel('Average Delays (Min)')
plt.title('Comparison of Departure/Arrival Delays')
plt.xticks(index + bar_width / 2, airlineList)
plt.legend(loc='upper left')
plt.tight_layout()
plt.show() | code |
2025927/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
df = pd.read_csv('../input/SkillCraft.csv')
y = df.LeagueIndex.astype(int)
X = df.drop(['LeagueIndex', 'GameID'], axis=1) | code |
2025927/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from subprocess import check_output
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
import time
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2025927/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | set(y_train) | code |
2025927/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
df = pd.read_csv('../input/SkillCraft.csv')
y = df.LeagueIndex.astype(int)
X = df.drop(['LeagueIndex', 'GameID'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) | code |
2025927/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
df = pd.read_csv('../input/SkillCraft.csv')
print(df.shape)
df.head() | code |
2025927/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
import pandas as pd
df = pd.read_csv('../input/SkillCraft.csv')
y = df.LeagueIndex.astype(int)
X = df.drop(['LeagueIndex', 'GameID'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
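# compare three off-the-shelf classifiers with hand-picked hyperparameters; no tuning or cross-validation here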
classifiers = [GradientBoostingClassifier(n_estimators=150, max_depth=4), RandomForestClassifier(n_estimators=200, max_depth=9), KNeighborsClassifier(15)]
target_names = [str(c) for c in sorted(set(y))]
for classifier in classifiers:
print(classifier.__class__.__name__)
start = time.time()
classifier.fit(X_train, y_train)
print(' -> Training time:', time.time() - start)
preds = classifier.predict(X_test)
print()
print(classification_report(y_test, preds, target_names=target_names)) | code |
128049391/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_columns', None)
import datetime
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
df = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/train.csv')
df_test = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/test.csv')
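# quick profiling helper: dtype, missing-value count, and unique-value count for every column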
def summary(df):
print(f'Dataset Shape: {df.shape}')
summary = pd.DataFrame(df.dtypes, columns=['dtypes'])
summary = summary.reset_index()
summary['Feature Name'] = summary['index']
summary = summary[['Feature Name', 'dtypes']]
summary['missing'] = df.isnull().sum().values
summary['Uniques'] = df.nunique().values
return summary
summary(df) | code |
128049391/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_columns', None)
import datetime
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
df = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/train.csv')
df_test = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/test.csv')
df.head(5) | code |
128049391/cell_29 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_columns', None)
import datetime
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
df = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/train.csv')
df_test = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/test.csv')
def summary(df):
summary = pd.DataFrame(df.dtypes, columns=['dtypes'])
summary = summary.reset_index()
summary['Feature Name'] = summary['index']
summary = summary[['Feature Name', 'dtypes']]
summary['missing'] = df.isnull().sum().values
summary['Uniques'] = df.nunique().values
return summary
summary(df)
df.drop(['Customer Id', 'Artist Name'], axis=1, inplace=True)
df.drop(['Scheduled Date', 'Delivery Date', 'duration'], axis=1, inplace=True)
df['State'] = df['Customer Location'].str.split(' ').str[-2]
df.drop(['Customer Location'], axis=1, inplace=True)
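# random-sample imputation: fill each missing value by drawing (with replacement) from the column's observed values, preserving its distribution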
imputed_height_values = np.random.choice(df[~df['Height'].isna()]['Height'].values, size=df['Height'].isna().sum())
height_null_indices = df[df['Height'].isna()].index
df.loc[height_null_indices, 'Height'] = imputed_height_values
imputed_width_values = np.random.choice(df[~df['Width'].isna()]['Width'].values, size=df['Width'].isna().sum())
width_null_indices = df[df['Width'].isna()].index
df.loc[width_null_indices, 'Width'] = imputed_width_values
imputed_artist_values = np.random.choice(df[~df['Artist Reputation'].isna()]['Artist Reputation'].values, size=df['Artist Reputation'].isna().sum())
artist_null_indices = df[df['Artist Reputation'].isna()].index
df.loc[artist_null_indices, 'Artist Reputation'] = imputed_artist_values
df.describe() | code |
128049391/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128049391/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_columns', None)
import datetime
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
df = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/train.csv')
df_test = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/test.csv')
def summary(df):
summary = pd.DataFrame(df.dtypes, columns=['dtypes'])
summary = summary.reset_index()
summary['Feature Name'] = summary['index']
summary = summary[['Feature Name', 'dtypes']]
summary['missing'] = df.isnull().sum().values
summary['Uniques'] = df.nunique().values
return summary
summary(df)
df.drop(['Customer Id', 'Artist Name'], axis=1, inplace=True)
df['duration'].value_counts(sort=True) | code |
128049391/cell_38 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_columns', None)
import datetime
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
df = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/train.csv')
df_test = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/test.csv')
def summary(df):
summary = pd.DataFrame(df.dtypes, columns=['dtypes'])
summary = summary.reset_index()
summary['Feature Name'] = summary['index']
summary = summary[['Feature Name', 'dtypes']]
summary['missing'] = df.isnull().sum().values
summary['Uniques'] = df.nunique().values
return summary
summary(df)
df.drop(['Customer Id', 'Artist Name'], axis=1, inplace=True)
df['Scheduled Date'] = pd.to_datetime(df['Scheduled Date'])
df['Delivery Date'] = pd.to_datetime(df['Delivery Date'])
df.drop(['Scheduled Date', 'Delivery Date', 'duration'], axis=1, inplace=True)
df['State'] = df['Customer Location'].str.split(' ').str[-2]
df.drop(['Customer Location'], axis=1, inplace=True)
imputed_height_values = np.random.choice(df[~df['Height'].isna()]['Height'].values, size=df['Height'].isna().sum())
height_null_indices = df[df['Height'].isna()].index
df.loc[height_null_indices, 'Height'] = imputed_height_values
imputed_width_values = np.random.choice(df[~df['Width'].isna()]['Width'].values, size=df['Width'].isna().sum())
width_null_indices = df[df['Width'].isna()].index
df.loc[width_null_indices, 'Width'] = imputed_width_values
imputed_artist_values = np.random.choice(df[~df['Artist Reputation'].isna()]['Artist Reputation'].values, size=df['Artist Reputation'].isna().sum())
artist_null_indices = df[df['Artist Reputation'].isna()].index
df.loc[artist_null_indices, 'Artist Reputation'] = imputed_artist_values
df.corr()
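# one-hot encode the categorical shipping columns; handle_unknown='ignore' keeps transform safe on unseen labels (the sparse= flag was renamed sparse_output= in scikit-learn 1.2)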
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(sparse=False, handle_unknown='ignore')
df_encoded = encoder.fit_transform(df[['Material', 'International', 'Express Shipment', 'Installation Included', 'Transport', 'Fragile', 'Customer Information', 'Remote Location']])
df_encoded = pd.DataFrame(df_encoded, columns=encoder.get_feature_names_out(['Material', 'International', 'Express Shipment', 'Installation Included', 'Transport', 'Fragile', 'Customer Information', 'Remote Location']))
df_encoded.sample(5) | code |
128049391/cell_31 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_columns', None)
import datetime
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
df = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/train.csv')
df_test = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/test.csv')
def summary(df):
summary = pd.DataFrame(df.dtypes, columns=['dtypes'])
summary = summary.reset_index()
summary['Feature Name'] = summary['index']
summary = summary[['Feature Name', 'dtypes']]
summary['missing'] = df.isnull().sum().values
summary['Uniques'] = df.nunique().values
return summary
summary(df)
df.drop(['Customer Id', 'Artist Name'], axis=1, inplace=True)
df.drop(['Scheduled Date', 'Delivery Date', 'duration'], axis=1, inplace=True)
df['State'] = df['Customer Location'].str.split(' ').str[-2]
df.drop(['Customer Location'], axis=1, inplace=True)
imputed_height_values = np.random.choice(df[~df['Height'].isna()]['Height'].values, size=df['Height'].isna().sum())
height_null_indices = df[df['Height'].isna()].index
df.loc[height_null_indices, 'Height'] = imputed_height_values
imputed_width_values = np.random.choice(df[~df['Width'].isna()]['Width'].values, size=df['Width'].isna().sum())
width_null_indices = df[df['Width'].isna()].index
df.loc[width_null_indices, 'Width'] = imputed_width_values
imputed_artist_values = np.random.choice(df[~df['Artist Reputation'].isna()]['Artist Reputation'].values, size=df['Artist Reputation'].isna().sum())
artist_null_indices = df[df['Artist Reputation'].isna()].index
df.loc[artist_null_indices, 'Artist Reputation'] = imputed_artist_values
df.corr() | code |
128049391/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_columns', None)
import datetime
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
df = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/train.csv')
df_test = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/test.csv')
df.info() | code |
128049391/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_columns', None)
import datetime
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
df = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/train.csv')
df_test = pd.read_csv('/kaggle/input/hackerearth-machine-learning-exhibit-art/dataset/test.csv')
df.describe() | code |
320866/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from dateutil.parser import parse
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pandas as pd
data = pd.read_csv('../input/3-Airplane_Crashes_Since_1908.txt')
import matplotlib.pyplot as plt
from dateutil.parser import parse
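# parse the free-form Date strings with dateutil and keep only the crash year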
years = []
for i in range(len(data)):
years.append(parse(data.Date[i]).year)
data.Fatalities = data.Fatalities.fillna(data.Fatalities.mean())
temp = zip(years, data.Fatalities)
temp = [(x, y) for x, y in temp if y > 50]
temp = pd.DataFrame(temp, columns=['massive_years', 'Fatalities'])
counts = temp.massive_years.value_counts()
plt.figure(figsize=(11, 7))
plt.bar(counts.index, counts.values)
plt.ylabel('Number of Massive Crashes(fatalities>50)', fontsize=15)
plt.xlabel('Year', fontsize=15)
plt.yticks(fontsize=15)
plt.xticks(fontsize=15) | code |
32071213/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
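# per-state totals via pivot_table; the daily rows appear to be cumulative counts, so aggfunc=sum likely overstates the true totals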
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidstatest = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
covidstatestpivot = pd.pivot_table(covidstatest, ['TotalSamples', 'Negative', 'Positive'], 'State', aggfunc=sum)
covidstatestpivot1 = covidstatestpivot.reset_index('State')
covidtest = pd.read_csv('/kaggle/input/covid19-in-india/ICMRTestingLabs.csv')
covidhosp = pd.read_csv('/kaggle/input/covid19-in-india/HospitalBedsIndia.csv')
covidhosp1 = covidhosp.drop([1, 36], axis=0)
covidindi = pd.read_csv('/kaggle/input/covid19-in-india/IndividualDetails.csv')
covidindi['current_status'].unique()
covidpopu = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
covidpopu.tail() | code |
32071213/cell_13 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidstatest = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
covidstatestpivot = pd.pivot_table(covidstatest, ['TotalSamples', 'Negative', 'Positive'], 'State', aggfunc=sum)
covidstatestpivot1 = covidstatestpivot.reset_index('State')
fig = px.bar(covidstatestpivot1, x='State', y='TotalSamples', hover_data=['Negative', 'Positive'], color='Positive', labels={'TotalSamples': 'Total Samples'}, height=400)
fig.show() | code |
32071213/cell_9 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidpivot.plot() | code |
32071213/cell_4 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.figure(figsize=(23, 10))
plt.bar(covid.Date, covid.Confirmed, label='Confirmed')
plt.xlabel('Date')
plt.ylabel('Count')
plt.xticks(rotation=45)
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed', fontsize=30)
plt.show()
plt.figure(figsize=(23, 10))
plt.bar(covid.Date, covid.Cured, label='Cured')
plt.xlabel('Date')
plt.ylabel('Count')
plt.xticks(rotation=90)
plt.legend(frameon=True, fontsize=12)
plt.title('Cured', fontsize=30)
plt.show()
plt.figure(figsize=(23, 10))
plt.bar(covid.Date, covid.Deaths, label='Deaths')
plt.xlabel('Date')
plt.ylabel('Count')
plt.xticks(rotation=45)
plt.legend(frameon=True, fontsize=12)
plt.title('Deaths', fontsize=30)
plt.show() | code |
32071213/cell_20 | [
"text_html_output_2.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidstatest = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
covidstatestpivot = pd.pivot_table(covidstatest, ['TotalSamples', 'Negative', 'Positive'], 'State', aggfunc=sum)
covidstatestpivot1 = covidstatestpivot.reset_index('State')
covidtest = pd.read_csv('/kaggle/input/covid19-in-india/ICMRTestingLabs.csv')
covidhosp = pd.read_csv('/kaggle/input/covid19-in-india/HospitalBedsIndia.csv')
covidhosp1 = covidhosp.drop([1, 36], axis=0)
covidindi = pd.read_csv('/kaggle/input/covid19-in-india/IndividualDetails.csv')
covidindi['current_status'].unique()
covidindigrp = covidindi.groupby(['current_status'])
covidindigrp.head() | code |
32071213/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage | code |
32071213/cell_11 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidstatest = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
covidstatest.tail() | code |
32071213/cell_19 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidstatest = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
covidstatestpivot = pd.pivot_table(covidstatest, ['TotalSamples', 'Negative', 'Positive'], 'State', aggfunc=sum)
covidstatestpivot1 = covidstatestpivot.reset_index('State')
covidtest = pd.read_csv('/kaggle/input/covid19-in-india/ICMRTestingLabs.csv')
covidhosp = pd.read_csv('/kaggle/input/covid19-in-india/HospitalBedsIndia.csv')
covidhosp1 = covidhosp.drop([1, 36], axis=0)
covidindi = pd.read_csv('/kaggle/input/covid19-in-india/IndividualDetails.csv')
covidindi.tail()
covidindi['current_status'].unique() | code |
32071213/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32071213/cell_7 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
plt.figure(figsize=(23, 10))
plt.bar(covidage.AgeGroup, covidage.TotalCases, label='Age Group')
plt.xlabel('Age Group')
plt.ylabel('Cases')
plt.legend(frameon=True, fontsize=25)
plt.title('Affected Age Group', fontsize=30)
plt.show() | code |
32071213/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidstatest = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
covidstatestpivot = pd.pivot_table(covidstatest, ['TotalSamples', 'Negative', 'Positive'], 'State', aggfunc=sum)
covidstatestpivot1 = covidstatestpivot.reset_index('State')
fig = px.bar(covidstatestpivot1, x='State', y='TotalSamples',
hover_data=['Negative', 'Positive'], color='Positive',labels={'TotalSamples':'Total Samples'},
height=400)
fig.show()
covidtest = pd.read_csv('/kaggle/input/covid19-in-india/ICMRTestingLabs.csv')
covidtestgrp = covidtest.groupby('state').count()
covidtestgrp = covidtestgrp.reset_index('state')
covidtestgrp
import plotly.graph_objects as go
fig = go.Figure([go.Bar(x=covidtestgrp['state'], y=covidtestgrp['lab'])])
fig.update_layout(title_text='Number of Testing Labs in Each State')
covidhosp = pd.read_csv('/kaggle/input/covid19-in-india/HospitalBedsIndia.csv')
covidhosp1 = covidhosp.drop([1, 36], axis=0)
fig = go.Figure()
fig.add_trace(go.Bar(x=covidhosp1['State/UT'], y=covidhosp1['NumUrbanHospitals_NHP18'], name='Urban Hospitals', marker_color='indianred'))
fig.add_trace(go.Bar(x=covidhosp1['State/UT'], y=covidhosp1['NumRuralHospitals_NHP18'], name='Rural Hospitals', marker_color='lightsalmon'))
fig.add_trace(go.Bar(x=covidhosp1['State/UT'], y=covidhosp1['TotalPublicHealthFacilities_HMIS'], name='Total Public Hospitals', marker_color='green'))
fig.update_layout(barmode='group', xaxis_tickangle=-45, title_text='Number of Urban, Rural and Total Public Hospitals in Each State')
fig.show() | code |
32071213/cell_8 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot | code |
32071213/cell_15 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidstatest = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
covidstatestpivot = pd.pivot_table(covidstatest, ['TotalSamples', 'Negative', 'Positive'], 'State', aggfunc=sum)
covidstatestpivot1 = covidstatestpivot.reset_index('State')
covidtest = pd.read_csv('/kaggle/input/covid19-in-india/ICMRTestingLabs.csv')
covidtestgrp = covidtest.groupby('state').count()
covidtestgrp = covidtestgrp.reset_index('state')
covidtestgrp | code |
32071213/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidstatest = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
covidstatestpivot = pd.pivot_table(covidstatest, ['TotalSamples', 'Negative', 'Positive'], 'State', aggfunc=sum)
covidstatestpivot1 = covidstatestpivot.reset_index('State')
fig = px.bar(covidstatestpivot1, x='State', y='TotalSamples',
hover_data=['Negative', 'Positive'], color='Positive',labels={'TotalSamples':'Total Samples'},
height=400)
fig.show()
covidtest = pd.read_csv('/kaggle/input/covid19-in-india/ICMRTestingLabs.csv')
covidtestgrp = covidtest.groupby('state').count()
covidtestgrp = covidtestgrp.reset_index('state')
covidtestgrp
import plotly.graph_objects as go
fig = go.Figure([go.Bar(x=covidtestgrp['state'], y=covidtestgrp['lab'])])
fig.update_layout(title_text='Number of Testing Labs in Each State')
fig.show() | code |
32071213/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
covid.tail() | code |
32071213/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidstatest = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
covidstatestpivot = pd.pivot_table(covidstatest, ['TotalSamples', 'Negative', 'Positive'], 'State', aggfunc=sum)
covidstatestpivot1 = covidstatestpivot.reset_index('State')
covidtest = pd.read_csv('/kaggle/input/covid19-in-india/ICMRTestingLabs.csv')
covidhosp = pd.read_csv('/kaggle/input/covid19-in-india/HospitalBedsIndia.csv')
covidhosp1 = covidhosp.drop([1, 36], axis=0)
covidhosp1.tail() | code |
32071213/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
covidstatest = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
covidstatestpivot = pd.pivot_table(covidstatest, ['TotalSamples', 'Negative', 'Positive'], 'State', aggfunc=sum)
covidstatestpivot1 = covidstatestpivot.reset_index('State')
covidtest = pd.read_csv('/kaggle/input/covid19-in-india/ICMRTestingLabs.csv')
covidtest.tail() | code |
32071213/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23,10))
plt.bar(covid.Date, covid.Confirmed,label="Confirmed")
plt.bar(covid.Date, covid.Cured,label="Cured")
plt.bar(covid.Date, covid.Deaths,label="Deaths")
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23,10))
ax=sns.scatterplot(x="Date", y="Confirmed", data=covid,
color="black",label = "Confirm")
ax=sns.scatterplot(x="Date", y="Cured", data=covid,
color="red",label = "Cured")
ax=sns.scatterplot(x="Date", y="Deaths", data=covid,
color="blue",label = "Deaths")
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed,zorder=1,color="black")
plt.plot(covid.Date, covid.Cured,zorder=1,color="red")
plt.plot(covid.Date, covid.Deaths,zorder=1,color="blue")
covidage = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
covidage
covidpivot = pd.pivot_table(covid, ['Cured', 'Confirmed', 'Deaths'], 'State/UnionTerritory', aggfunc=sum)
covidpivot
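# shade each column from low to high totals with an orange gradient for quick visual comparison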
cm = sns.light_palette('orange', as_cmap=True)
covidpivot.style.background_gradient(cmap=cm) | code |
32071213/cell_5 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
pd.set_option('display.max_rows', None)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
plt.xticks(rotation=45)
plt.xticks(rotation=90)
plt.xticks(rotation=45)
plt.figure(figsize=(23, 10))
plt.bar(covid.Date, covid.Confirmed, label='Confirmed')
plt.bar(covid.Date, covid.Cured, label='Cured')
plt.bar(covid.Date, covid.Deaths, label='Deaths')
plt.xlabel('Date')
plt.xticks(rotation=90)
plt.ylabel('Count')
plt.legend(frameon=True, fontsize=12)
plt.title('Confirmed vs Cured vs Deaths', fontsize=30)
plt.show()
f, ax = plt.subplots(figsize=(23, 10))
ax = sns.scatterplot(x='Date', y='Confirmed', data=covid, color='black', label='Confirm')
ax = sns.scatterplot(x='Date', y='Cured', data=covid, color='red', label='Cured')
ax = sns.scatterplot(x='Date', y='Deaths', data=covid, color='blue', label='Deaths')
plt.xticks(rotation=90)
plt.plot(covid.Date, covid.Confirmed, zorder=1, color='black')
plt.plot(covid.Date, covid.Cured, zorder=1, color='red')
plt.plot(covid.Date, covid.Deaths, zorder=1, color='blue') | code |
90127412/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | code |
|
90127412/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pathlib import Path
from torch import nn
from torch.utils.data import Dataset, ConcatDataset
from torchmetrics.functional import accuracy, f1_score, precision, recall
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torch_optimizer as optim
import transformers as T
TRAIN_DATASET = '../input/starpredict-dataset/train.parquet'
VAL_DATASET = '../input/starpredict-dataset/val.parquet'
TEST_DATASET = '../input/starpredict-dataset/test.parquet'
SAMPLE_DATASET = '../input/starpredict-dataset/sample.parquet'
class YelpDataset(Dataset):
def __init__(self, path: Path):
super().__init__()
self.data = pd.read_parquet(path)
def __getitem__(self, key):
row = self.data.iloc[key]
return {'user_id': row['user_id_encode'], 'product_id': row['business_id_encode'], 'input_ids': torch.tensor(row['input_ids']), 'attention_mask': torch.tensor(row['attention_mask']), 'stars': row['stars_transform']}
def __len__(self):
return self.data.shape[0]
train_dataset = YelpDataset(TRAIN_DATASET)
val_dataset = YelpDataset(VAL_DATASET)
test_dataset = YelpDataset(TEST_DATASET)
sample_dataset = YelpDataset(SAMPLE_DATASET)
class StarPredictSystem(pl.LightningModule):
def __init__(self, num_users, num_products, merge_size=512, lr=0.001, num_classes=5, from_='bert-base-uncased'):
super().__init__()
self.lr = lr
self.num_classes = num_classes
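# pretrained DistilBERT encoder for the review text, projected down to the shared merge size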
self.bert = T.DistilBertModel.from_pretrained('distilbert-base-uncased')
self.bert_classifier = nn.Linear(self.bert.config.hidden_size, merge_size)
self.user_embedding = nn.Embedding(num_users, merge_size)
self.product_embedding = nn.Embedding(num_products, merge_size)
self.classifier = nn.Sequential(nn.Linear(merge_size * 3, 64), nn.ReLU(), nn.Linear(64, num_classes), nn.Softmax(dim=1))
def forward(self, x):
user_x = x['user_id']
user_x = self.user_embedding(user_x)
product_x = x['product_id']
product_x = self.product_embedding(product_x)
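# encode the review text and take the final hidden state of the [CLS] token as its summary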
text_x = self.bert(input_ids=x['input_ids'], attention_mask=x['attention_mask']).last_hidden_state[:, 0]
text_x = self.bert_classifier(text_x)
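# fuse the user, product and text representations into a single feature vector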
x = torch.cat([user_x, product_x, text_x], dim=-1)
x = self.classifier(x)
return x
def configure_optimizers(self):
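# LAMB optimizer (from torch_optimizer) with decoupled weight decay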
return optim.Lamb(self.parameters(), lr=self.lr, weight_decay=0.02)
def training_step(self, batch, batch_idx):
y = batch['stars'].long()
y_hat = self(batch)
loss = F.cross_entropy(y_hat, y)
acc = accuracy(y_hat, y)
self.log('acc', acc, prog_bar=True, batch_size=batch['stars'].shape[0])
return loss
def validation_step(self, batch, batch_idx):
y = batch['stars'].long()
y_hat = self(batch)
loss = F.cross_entropy(y_hat, y)
metrics = {'val_loss': loss, 'val_acc': accuracy(y_hat, y), 'val_f1': f1_score(y_hat, y), 'val_prec': precision(y_hat, y), 'val_recall': recall(y_hat, y)}
self.log_dict(metrics, batch_size=batch['stars'].shape[0])
return metrics
def test_step(self, batch, batch_idx):
y = batch['stars'].long() - 1
y_hat = self(batch)
loss = F.cross_entropy(y_hat, y)
metrics = {'test_loss': loss, 'test_acc': accuracy(y_hat, y), 'test_f1': f1_score(y_hat, y), 'test_prec': precision(y_hat, y), 'test_recall': recall(y_hat, y)}
self.log_dict(metrics, batch_size=batch['stars'].shape[0])
return metrics
BATCH_SIZE = 8
LEARNING_RATE = 0.0003
EPOCHS = 3
MERGE_SIZE = 128
NUM_CLASSES = 5
NUM_WORKERS = 2
datasets = [train_dataset, val_dataset, test_dataset]
num_users = max((dataset.data['user_id_encode'].max() for dataset in datasets))
num_products = max((dataset.data['business_id_encode'].max() for dataset in datasets))
model = StarPredictSystem(num_users=num_users, num_products=num_products, merge_size=MERGE_SIZE, lr=LEARNING_RATE, num_classes=NUM_CLASSES, from_='bert-base-uncased')
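# from_datasets wires the three datasets into train/val/test dataloaders for the Trainer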
datamodule = pl.LightningDataModule.from_datasets(train_dataset=train_dataset, val_dataset=val_dataset, test_dataset=test_dataset, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS)
trainer = pl.Trainer(max_epochs=EPOCHS, gpus=-1)
trainer.fit(model, datamodule=datamodule) | code |
90127412/cell_12 | [
"text_plain_output_1.png"
] | from pathlib import Path
from torch.utils.data import Dataset, ConcatDataset
import pandas as pd
import torch
TRAIN_DATASET = '../input/starpredict-dataset/train.parquet'
VAL_DATASET = '../input/starpredict-dataset/val.parquet'
TEST_DATASET = '../input/starpredict-dataset/test.parquet'
SAMPLE_DATASET = '../input/starpredict-dataset/sample.parquet'
class YelpDataset(Dataset):
def __init__(self, path: Path):
super().__init__()
self.data = pd.read_parquet(path)
def __getitem__(self, key):
row = self.data.iloc[key]
return {'user_id': row['user_id_encode'], 'product_id': row['business_id_encode'], 'input_ids': torch.tensor(row['input_ids']), 'attention_mask': torch.tensor(row['attention_mask']), 'stars': row['stars_transform']}
def __len__(self):
return self.data.shape[0]
train_dataset = YelpDataset(TRAIN_DATASET)
val_dataset = YelpDataset(VAL_DATASET)
test_dataset = YelpDataset(TEST_DATASET)
sample_dataset = YelpDataset(SAMPLE_DATASET)
BATCH_SIZE = 8
LEARNING_RATE = 0.0003
EPOCHS = 3
MERGE_SIZE = 128
NUM_CLASSES = 5
NUM_WORKERS = 2
datasets = [train_dataset, val_dataset, test_dataset]
num_users = max((dataset.data['user_id_encode'].max() for dataset in datasets))
num_products = max((dataset.data['business_id_encode'].max() for dataset in datasets))
model = StarPredictSystem(num_users=num_users, num_products=num_products, merge_size=MERGE_SIZE, lr=LEARNING_RATE, num_classes=NUM_CLASSES, from_='bert-base-uncased') | code |
73061961/cell_21 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix,classification_report
y_train.value_counts()
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_resample(X_train, y_train.ravel())
from imblearn.under_sampling import NearMiss
nr = NearMiss()
X_train_miss, y_train_miss = nr.fit_resample(X_train, y_train.ravel())
lr2 = LogisticRegression()
lr2.fit(X_train_miss, y_train_miss.ravel())
predictions2 = lr2.predict(X_valid)
print(classification_report(y_valid, predictions2)) | code |
73061961/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
y_train.value_counts()
lr = LogisticRegression()
lr.fit(X_train, y_train) | code |
73061961/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
pd.set_option('display.max_columns', None)
data.drop(['Time', 'Amount'], axis=1, inplace=True)
data | code |
73061961/cell_20 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
y_train.value_counts()
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_resample(X_train, y_train.ravel())
print("Before Undersampling, counts of label '1': {}".format(sum(y_train == 1)))
print("Before Undersampling, counts of label '0': {} \n".format(sum(y_train == 0)))
from imblearn.under_sampling import NearMiss
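# NearMiss undersamples the majority class, keeping the majority samples closest to the minority class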
nr = NearMiss()
X_train_miss, y_train_miss = nr.fit_resample(X_train, y_train.ravel())
print('After Undersampling, the shape of train_X: {}'.format(X_train_miss.shape))
print('After Undersampling, the shape of train_y: {} \n'.format(y_train_miss.shape))
print("After Undersampling, counts of label '1': {}".format(sum(y_train_miss == 1)))
print("After Undersampling, counts of label '0': {}".format(sum(y_train_miss == 0))) | code |
73061961/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
pd.set_option('display.max_columns', None)
data['Class'].value_counts() | code |
73061961/cell_19 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix,classification_report
y_train.value_counts()
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_resample(X_train, y_train.ravel())
lr1 = LogisticRegression()
lr1.fit(X_train_res, y_train_res)
predictions1 = lr1.predict(X_valid)
confusion_matrix(y_valid, predictions1) | code |
73061961/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73061961/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix,classification_report
y_train.value_counts()
lr = LogisticRegression()
lr.fit(X_train, y_train)
predictions = lr.predict(X_valid)
confusion_matrix(y_valid, predictions) | code |
73061961/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
pd.set_option('display.max_columns', None)
data.head(10) | code |
73061961/cell_17 | [
"text_html_output_1.png"
] | from imblearn.over_sampling import SMOTE
y_train.value_counts()
print("Before OverSampling, counts of label '1': {}".format(sum(y_train == 1)))
print("Before OverSampling, counts of label '0': {} \n".format(sum(y_train == 0)))
from imblearn.over_sampling import SMOTE
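# SMOTE oversamples the minority class by interpolating between neighbouring minority samples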
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_resample(X_train, y_train.ravel())
print('After OverSampling, the shape of train_X: {}'.format(X_train_res.shape))
print('After OverSampling, the shape of train_y: {} \n'.format(y_train_res.shape))
print("After OverSampling, counts of label '1': {}".format(sum(y_train_res == 1)))
print("After OverSampling, counts of label '0': {}".format(sum(y_train_res == 0))) | code |
73061961/cell_22 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix,classification_report
y_train.value_counts()
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_resample(X_train, y_train.ravel())
from imblearn.under_sampling import NearMiss
nr = NearMiss()
X_train_miss, y_train_miss = nr.fit_resample(X_train, y_train.ravel())
lr2 = LogisticRegression()
lr2.fit(X_train_miss, y_train_miss.ravel())
predictions2 = lr2.predict(X_valid)
confusion_matrix(y_valid, predictions2) | code |
73061961/cell_12 | [
"text_html_output_1.png"
] | y_train.value_counts() | code |
73061961/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
pd.set_option('display.max_columns', None)
data.info() | code |
2035023/cell_9 | [
"image_output_1.png"
] | from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/ehresp_2014.csv', usecols=['erbmi', 'euexfreq', 'euwgt', 'euhgt', 'ertpreat'])
df = df[df['erbmi'] > 0]
x = df[['euexfreq', 'euwgt', 'euhgt', 'ertpreat']]
y = df['erbmi']
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid)
fig = QQ.qqplot(alpha=0.5, markersize=5, line='s')
plt.title('QQ plot');
model_norm_resid = model.get_influence().resid_studentized_internal
model_norm_resid_abs_sqrt = np.sqrt(np.abs(model_norm_resid))
model_leverage = model.get_influence().hat_matrix_diag
plt.xlim(xmin=-0.0005, xmax=0.013)
model.summary() | code |
2035023/cell_6 | [
"image_output_1.png"
] | from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/ehresp_2014.csv', usecols=['erbmi', 'euexfreq', 'euwgt', 'euhgt', 'ertpreat'])
df = df[df['erbmi'] > 0]
x = df[['euexfreq', 'euwgt', 'euhgt', 'ertpreat']]
y = df['erbmi']
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
QQ = ProbPlot(model.resid)
fig = QQ.qqplot(alpha=0.5, markersize=5, line='s')
plt.title('QQ plot') | code |
2035023/cell_2 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/ehresp_2014.csv', usecols=['erbmi', 'euexfreq', 'euwgt', 'euhgt', 'ertpreat'])
df.head() | code |
2035023/cell_11 | [
"image_output_1.png"
] | from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/ehresp_2014.csv', usecols=['erbmi', 'euexfreq', 'euwgt', 'euhgt', 'ertpreat'])
df = df[df['erbmi'] > 0]
x = df[['euexfreq', 'euwgt', 'euhgt', 'ertpreat']]
y = df['erbmi']
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid)
fig = QQ.qqplot(alpha=0.5, markersize=5, line='s')
plt.title('QQ plot');
model_norm_resid = model.get_influence().resid_studentized_internal
model_norm_resid_abs_sqrt = np.sqrt(np.abs(model_norm_resid))
model_leverage = model.get_influence().hat_matrix_diag
plt.xlim(xmin=-0.0005, xmax=0.013)
model.summary()
model2 = sm.GLM(y, x, family=sm.families.Gaussian()).fit()
fig = plt.figure(figsize=(12, 8))
fig = sm.graphics.plot_partregress_grid(model, fig=fig) | code |
2035023/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.graphics.gofplots import ProbPlot | code |
2035023/cell_7 | [
"text_html_output_1.png"
] | from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/ehresp_2014.csv', usecols=['erbmi', 'euexfreq', 'euwgt', 'euhgt', 'ertpreat'])
df = df[df['erbmi'] > 0]
x = df[['euexfreq', 'euwgt', 'euhgt', 'ertpreat']]
y = df['erbmi']
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid)
fig = QQ.qqplot(alpha=0.5, markersize=5, line='s')
plt.title('QQ plot');
model_norm_resid = model.get_influence().resid_studentized_internal
model_norm_resid_abs_sqrt = np.sqrt(np.abs(model_norm_resid))
sns.regplot(model.fittedvalues, model_norm_resid_abs_sqrt, lowess=True, line_kws={'color': 'r', 'lw': 1})
plt.xlabel('Fitted values')
plt.ylabel('Sqrt abs standardized residuals')
plt.title('Scale-location') | code |
2035023/cell_8 | [
"text_plain_output_1.png"
] | from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/ehresp_2014.csv', usecols=['erbmi', 'euexfreq', 'euwgt', 'euhgt', 'ertpreat'])
df = df[df['erbmi'] > 0]
x = df[['euexfreq', 'euwgt', 'euhgt', 'ertpreat']]
y = df['erbmi']
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid)
fig = QQ.qqplot(alpha=0.5, markersize=5, line='s')
plt.title('QQ plot');
model_norm_resid = model.get_influence().resid_studentized_internal
model_norm_resid_abs_sqrt = np.sqrt(np.abs(model_norm_resid))
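# leverage of each observation: the diagonal of the hat (projection) matrix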
model_leverage = model.get_influence().hat_matrix_diag
sns.regplot(model_leverage, model.resid_pearson, fit_reg=False)
plt.xlim(xmin=-0.0005, xmax=0.013)
plt.xlabel('Leverage')
plt.ylabel('Pearson residuals')
plt.title('Residuals vs leverage') | code |
2035023/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import statsmodels.api as sm
df = pd.read_csv('../input/ehresp_2014.csv', usecols=['erbmi', 'euexfreq', 'euwgt', 'euhgt', 'ertpreat'])
df = df[df['erbmi'] > 0]
x = df[['euexfreq', 'euwgt', 'euhgt', 'ertpreat']]
y = df['erbmi']
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
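# refit the same linear model as a Gaussian GLM to obtain null and residual deviance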
model2 = sm.GLM(y, x, family=sm.families.Gaussian()).fit()
print('Null deviance: {:.1f}'.format(model2.null_deviance))
print('Residual deviance: {:.1f}'.format(model2.deviance)) | code |
2035023/cell_5 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/ehresp_2014.csv', usecols=['erbmi', 'euexfreq', 'euwgt', 'euhgt', 'ertpreat'])
df = df[df['erbmi'] > 0]
x = df[['euexfreq', 'euwgt', 'euhgt', 'ertpreat']]
y = df['erbmi']
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
sns.residplot(model.fittedvalues, df['erbmi'], lowess=True, line_kws={'color': 'r', 'lw': 1})
plt.title('Residual plot')
plt.xlabel('Predicted values')
plt.ylabel('Residuals') | code |
72083691/cell_33 | [
"text_html_output_1.png"
] | from mlxtend.frequent_patterns import apriori, association_rules,fpgrowth,fpmax
def compute_association_rule(rule_matrix, metric='lift', min_thresh=1):
"""
Compute the final association rules.
rule_matrix: the frequent-itemset matrix produced by the chosen algorithm
metric: the metric to be used (default is lift)
min_thresh: the minimum threshold (default is 1)
Returns
rules: the association rules satisfying the given metric & threshold
"""
rules = association_rules(rule_matrix, metric=metric, min_threshold=min_thresh)
return rules
fp_growth_rule = compute_association_rule(fpgrowth_matrix, metric='confidence', min_thresh=1)
fp_growth_rule.head() | code |
72083691/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import mlxtend as ml
import mlxtend as ml
print(ml.__version__) | code |
72083691/cell_40 | [
"text_html_output_1.png"
] | from mlxtend.frequent_patterns import apriori, association_rules,fpgrowth,fpmax
def compute_association_rule(rule_matrix, metric='lift', min_thresh=1):
"""
Compute the final association rules.
rule_matrix: the frequent-itemset matrix produced by the chosen algorithm
metric: the metric to be used (default is lift)
min_thresh: the minimum threshold (default is 1)
Returns
rules: the association rules satisfying the given metric & threshold
"""
rules = association_rules(rule_matrix, metric=metric, min_threshold=min_thresh)
return rules
apriori_rule_lift = compute_association_rule(apriori_matrix)
apriori_rule_lift.head() | code |
72083691/cell_26 | [
"text_plain_output_1.png"
] | from mlxtend.frequent_patterns import apriori, association_rules,fpgrowth,fpmax
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.preprocessing import TransactionEncoder
import pandas as pd
import time
data = pd.read_csv('../input/groceries-dataset/Groceries_dataset.csv')
data.shape
all_transactions = [transaction[1]['itemDescription'].tolist() for transaction in list(data.groupby(['Member_number', 'Date']))]
trans_encoder = TransactionEncoder()
trans_encoder_matrix = trans_encoder.fit(all_transactions).transform(all_transactions)
trans_encoder_matrix = pd.DataFrame(trans_encoder_matrix, columns=trans_encoder.columns_)
def perform_rule_calculation(transact_items_matrix, rule_type, min_support=0.001):
"""
Run the selected frequent-itemset algorithm and measure its execution time.
"""
start_time = 0
total_execution = 0
if rule_type == 'fpmax':
start_time = time.time()
rule_items = fpmax(transact_items_matrix, min_support=min_support, use_colnames=True)
total_execution = time.time() - start_time
if rule_type == 'apriori':
start_time = time.time()
rule_items = apriori(transact_items_matrix, min_support=min_support, use_colnames=True)
total_execution = time.time() - start_time
if rule_type == 'Fpgrowth':
start_time = time.time()
rule_items = fpgrowth(transact_items_matrix, min_support=min_support, use_colnames=True)
total_execution = time.time() - start_time
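# record how many items each frequent itemset contains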
rule_items['number_of_items'] = rule_items['itemsets'].apply(lambda x: len(x))
return (rule_items, total_execution)
fpgrowth_matrix, fp_growth_exec_time = perform_rule_calculation(trans_encoder_matrix, rule_type='Fpgrowth')
print('Fp Growth execution took: {} seconds'.format(fp_growth_exec_time)) | code |
72083691/cell_48 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
def plot_metrics_relationship(rule_matrix, col1, col2):
"""
Show the relationship between the two input metric columns.
"""
fit = np.polyfit(rule_matrix[col1], rule_matrix[col2], 1)
fit_func = np.poly1d(fit)
# scatter the raw metric values and overlay the fitted first-degree line
plt.plot(rule_matrix[col1], rule_matrix[col2], 'yo', rule_matrix[col1], fit_func(rule_matrix[col1]))
plt.xlabel(col1)
plt.ylabel(col2)
def compare_time_exec(algo1, algo2, algo3):
"""
Compare execution times; each argument is a [name, seconds] pair:
- algo1: details of the first algorithm
- algo2: details of the second algorithm
- algo3: details of the third algorithm
"""
execution_times = [algo1[1], algo2[1], algo3[1]]
algo_names = (algo1[0], algo2[0], algo3[0])
y = np.arange(len(algo_names))
# bar chart comparing the execution time of each algorithm
plt.bar(y, execution_times)
plt.xticks(y, algo_names)
plt.ylabel('Execution time (s)')
algo1 = ['Fp Growth', fp_growth_exec_time]
algo2 = ['Apriori', apriori_exec_time]
algo3 = ['Fpmax', fpmax_exec_time]
compare_time_exec(algo1, algo2, algo3) | code |
72083691/cell_41 | [
"text_html_output_1.png"
] | from mlxtend.frequent_patterns import apriori, association_rules,fpgrowth,fpmax
import matplotlib.pyplot as plt
import numpy as np
def compute_association_rule(rule_matrix, metric='lift', min_thresh=1):
"""
Compute the final association rules.
rule_matrix: the frequent-itemset matrix produced by the chosen algorithm
metric: the metric to be used (default is lift)
min_thresh: the minimum threshold (default is 1)
Returns
rules: the association rules satisfying the given metric & threshold
"""
rules = association_rules(rule_matrix, metric=metric, min_threshold=min_thresh)
return rules
def plot_metrics_relationship(rule_matrix, col1, col2):
"""
Show the relationship between the two input metric columns.
"""
fit = np.polyfit(rule_matrix[col1], rule_matrix[col2], 1)
fit_func = np.poly1d(fit)
# scatter the raw metric values and overlay the fitted first-degree line
plt.plot(rule_matrix[col1], rule_matrix[col2], 'yo', rule_matrix[col1], fit_func(rule_matrix[col1]))
plt.xlabel(col1)
plt.ylabel(col2)
apriori_rule_lift = compute_association_rule(apriori_matrix)
plot_metrics_relationship(apriori_rule_lift, col1='lift', col2='confidence') | code |
72083691/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/groceries-dataset/Groceries_dataset.csv')
data.head()
data.shape | code |
72083691/cell_45 | [
"text_html_output_1.png"
] | from mlxtend.frequent_patterns import apriori, association_rules,fpgrowth,fpmax
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.preprocessing import TransactionEncoder
import pandas as pd
import time
data = pd.read_csv('../input/groceries-dataset/Groceries_dataset.csv')
data.shape
all_transactions = [transaction[1]['itemDescription'].tolist() for transaction in list(data.groupby(['Member_number', 'Date']))]
trans_encoder = TransactionEncoder()
trans_encoder_matrix = trans_encoder.fit(all_transactions).transform(all_transactions)
trans_encoder_matrix = pd.DataFrame(trans_encoder_matrix, columns=trans_encoder.columns_)
def perform_rule_calculation(transact_items_matrix, rule_type, min_support=0.001):
"""
Run the selected frequent-itemset algorithm and measure its execution time.
"""
start_time = 0
total_execution = 0
if rule_type == 'fpmax':
start_time = time.time()
rule_items = fpmax(transact_items_matrix, min_support=min_support, use_colnames=True)
total_execution = time.time() - start_time
if rule_type == 'apriori':
start_time = time.time()
rule_items = apriori(transact_items_matrix, min_support=min_support, use_colnames=True)
total_execution = time.time() - start_time
if rule_type == 'Fpgrowth':
start_time = time.time()
rule_items = fpgrowth(transact_items_matrix, min_support=min_support, use_colnames=True)
total_execution = time.time() - start_time
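# record how many items each frequent itemset contains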
rule_items['number_of_items'] = rule_items['itemsets'].apply(lambda x: len(x))
return (rule_items, total_execution)
fpmax_matrix, fpmax_exec_time = perform_rule_calculation(trans_encoder_matrix, rule_type='fpmax')
print('fpmax execution took: {} seconds'.format(fpmax_exec_time))
72083691/cell_28 | [
"text_html_output_1.png"
] | fpgrowth_matrix.tail() | code |
72083691/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/groceries-dataset/Groceries_dataset.csv')
data.shape
all_transactions = [transaction[1]['itemDescription'].tolist() for transaction in list(data.groupby(['Member_number', 'Date']))]
all_transactions[0:15] | code |
72083691/cell_17 | [
"text_plain_output_1.png"
] | from mlxtend.preprocessing import TransactionEncoder
from mlxtend.preprocessing import TransactionEncoder
import pandas as pd
data = pd.read_csv('../input/groceries-dataset/Groceries_dataset.csv')
data.shape
all_transactions = [transaction[1]['itemDescription'].tolist() for transaction in list(data.groupby(['Member_number', 'Date']))]
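# TransactionEncoder one-hot encodes each (member, date) basket into a boolean item matrix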
trans_encoder = TransactionEncoder()
trans_encoder_matrix = trans_encoder.fit(all_transactions).transform(all_transactions)
trans_encoder_matrix = pd.DataFrame(trans_encoder_matrix, columns=trans_encoder.columns_)
trans_encoder_matrix.head() | code |