path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class) |
---|---|---|---|
90133854/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
ge = pd.read_csv('../input/gender-classification-dataset/gender_classification_v7.csv')
ge
x = 0
for i in ge.columns:
x = x + 1
ge.describe().round(2).T
ge.isnull().sum()
ge1 = ge.copy()
cat_ge = list(ge.select_dtypes(exclude='float64').columns)
cat_ge | code |
90133854/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
ge = pd.read_csv('../input/gender-classification-dataset/gender_classification_v7.csv')
ge
x = 0
for i in ge.columns:
x = x + 1
ge.describe().round(2).T
ge.isnull().sum() | code |
90133854/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
ge = pd.read_csv('../input/gender-classification-dataset/gender_classification_v7.csv')
ge
x = 0
for i in ge.columns:
x = x + 1
ge.describe().round(2).T
ge.isnull().sum()
ge1 = ge.copy()
cat_ge = list(ge.select_dtypes(exclude='float64').columns)
num_ge = list(ge.select_dtypes(include='float64').columns)
i = ['gender']
num_ge = num_ge + i
for i in cat_ge:
plt.figure()
sns.countplot(x=i, data=ge1[cat_ge], hue='gender')
plt.title(i) | code |
90133854/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
ge = pd.read_csv('../input/gender-classification-dataset/gender_classification_v7.csv')
ge
x = 0
for i in ge.columns:
x = x + 1
ge.describe().round(2).T
ge.isnull().sum()
ge1 = ge.copy()
cat_ge = list(ge.select_dtypes(exclude='float64').columns)
num_ge = list(ge.select_dtypes(include='float64').columns)
i = ['gender']
num_ge = num_ge + i
num_ge | code |
90133854/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
ge = pd.read_csv('../input/gender-classification-dataset/gender_classification_v7.csv')
ge
x = 0
for i in ge.columns:
x = x + 1
ge.describe().round(2).T
ge.isnull().sum()
ge1 = ge.copy()
cat_ge = list(ge.select_dtypes(exclude='float64').columns)
num_ge = list(ge.select_dtypes(include='float64').columns)
i = ['gender']
num_ge = num_ge + i
sns.pairplot(data=ge1[num_ge], hue='gender', diag_kind='kde') | code |
90133854/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
ge = pd.read_csv('../input/gender-classification-dataset/gender_classification_v7.csv')
ge | code |
90133854/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
ge = pd.read_csv('../input/gender-classification-dataset/gender_classification_v7.csv')
ge
x = 0
for i in ge.columns:
x = x + 1
ge.describe().round(2).T | code |
2022945/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data.loc[2611, 'MasVnrType'] = 'BrkFace'
all_data['MasVnrType'].fillna('None', inplace=True)
all_data['MasVnrArea'].fillna(0, inplace=True)
bsmt_num_features = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
all_data[all_data['BsmtFullBath'].isnull()][bsmt_num_features]
all_data.loc[[2121, 2189], bsmt_num_features] = 0
all_data.loc[[2041, 2186, 2525, 2218, 2219]][bsmt_num_features]
all_data.loc[[2041, 2186, 2525], 'BsmtCond'] = all_data.loc[[2041, 2186, 2525], 'BsmtQual']
all_data.loc[[2218, 2219], 'BsmtQual'] = all_data.loc[[2218, 2219], 'BsmtCond']
all_data.loc[[949, 1488, 2349]][bsmt_num_features] | code |
2022945/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data.loc[2611, 'MasVnrType'] = 'BrkFace'
all_data['MasVnrType'].fillna('None', inplace=True)
all_data['MasVnrArea'].fillna(0, inplace=True)
bsmt_num_features = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
all_data[all_data['BsmtFullBath'].isnull()][bsmt_num_features]
all_data.loc[[2121, 2189], bsmt_num_features] = 0
all_data.loc[[2041, 2186, 2525, 2218, 2219]][bsmt_num_features]
all_data.loc[[2041, 2186, 2525], 'BsmtCond'] = all_data.loc[[2041, 2186, 2525], 'BsmtQual']
all_data.loc[[2218, 2219], 'BsmtQual'] = all_data.loc[[2218, 2219], 'BsmtCond']
all_data.loc[[949, 1488, 2349]][bsmt_num_features]
all_data.loc[[949, 1488, 2349], 'BsmtExposure'] = 'Av'
all_data.loc[333][bsmt_num_features] | code |
2022945/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data.loc[2611, 'MasVnrType'] = 'BrkFace'
all_data['MasVnrType'].fillna('None', inplace=True)
all_data['MasVnrArea'].fillna(0, inplace=True)
bsmt_num_features = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
all_data[all_data['BsmtFullBath'].isnull()][bsmt_num_features]
all_data.loc[[2121, 2189], bsmt_num_features] = 0
all_data.loc[[2041, 2186, 2525, 2218, 2219]][bsmt_num_features]
all_data.loc[[2041, 2186, 2525], 'BsmtCond'] = all_data.loc[[2041, 2186, 2525], 'BsmtQual']
all_data.loc[[2218, 2219], 'BsmtQual'] = all_data.loc[[2218, 2219], 'BsmtCond']
all_data.loc[[949, 1488, 2349]][bsmt_num_features]
all_data.loc[[949, 1488, 2349], 'BsmtExposure'] = 'Av'
all_data.loc[333][bsmt_num_features]
all_data.loc[333, 'BsmtFinType2'] = 'Unf'
print(all_data[all_data['MiscFeature'] == 'Gar2'].index)
print(all_data[all_data['SaleCondition'] == 'Alloca'].index) | code |
2022945/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data.loc[2611, 'MasVnrType'] = 'BrkFace'
all_data['MasVnrType'].fillna('None', inplace=True)
all_data['MasVnrArea'].fillna(0, inplace=True)
bsmt_num_features = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
all_data[all_data['BsmtFullBath'].isnull()][bsmt_num_features]
all_data.loc[[2121, 2189], bsmt_num_features] = 0
bsmt_cat_features = ['BsmtCond', 'BsmtQual', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2']
all_data.loc[[2041, 2186, 2525, 2218, 2219]][bsmt_num_features]
all_data.loc[[2041, 2186, 2525], 'BsmtCond'] = all_data.loc[[2041, 2186, 2525], 'BsmtQual']
all_data.loc[[2218, 2219], 'BsmtQual'] = all_data.loc[[2218, 2219], 'BsmtCond']
print(all_data[all_data['BsmtExposure'].isnull() & all_data['BsmtCond'].notnull()][bsmt_cat_features]) | code |
2022945/cell_29 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data.loc[2611, 'MasVnrType'] = 'BrkFace'
all_data['MasVnrType'].fillna('None', inplace=True)
all_data['MasVnrArea'].fillna(0, inplace=True)
bsmt_num_features = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
all_data[all_data['BsmtFullBath'].isnull()][bsmt_num_features]
all_data.loc[[2121, 2189], bsmt_num_features] = 0
all_data.loc[[2041, 2186, 2525, 2218, 2219]][bsmt_num_features]
all_data.loc[[2041, 2186, 2525], 'BsmtCond'] = all_data.loc[[2041, 2186, 2525], 'BsmtQual']
all_data.loc[[2218, 2219], 'BsmtQual'] = all_data.loc[[2218, 2219], 'BsmtCond']
all_data.loc[[949, 1488, 2349]][bsmt_num_features]
all_data.loc[[949, 1488, 2349], 'BsmtExposure'] = 'Av'
all_data.loc[333][bsmt_num_features]
all_data.loc[333, 'BsmtFinType2'] = 'Unf'
grg_num_features = ['GarageCars', 'GarageArea', 'GarageYrBlt']
grg_cat_features = ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']
all_data[all_data['GarageYrBlt'].isnull() & all_data['GarageType'].notnull()][grg_cat_features + grg_num_features] | code |
2022945/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data['MasVnrType'].value_counts() | code |
2022945/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
for n in nums:
if all_data[n].isnull().values.sum() > 0:
print(n, all_data[n].isnull().sum()) | code |
2022945/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data.loc[2611, 'MasVnrType'] = 'BrkFace'
all_data['MasVnrType'].fillna('None', inplace=True)
all_data['MasVnrArea'].fillna(0, inplace=True)
bsmt_num_features = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
all_data[all_data['BsmtFullBath'].isnull()][bsmt_num_features]
all_data.loc[[2121, 2189], bsmt_num_features] = 0
all_data.loc[[2041, 2186, 2525, 2218, 2219]][bsmt_num_features] | code |
2022945/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
for c in cat:
if all_data[c].isnull().values.sum() > 0:
print(c, all_data[c].isnull().sum()) | code |
2022945/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data.loc[2611, 'MasVnrType'] = 'BrkFace'
all_data['MasVnrType'].fillna('None', inplace=True)
all_data['MasVnrArea'].fillna(0, inplace=True)
bsmt_num_features = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
all_data[all_data['BsmtFullBath'].isnull()][bsmt_num_features]
all_data.loc[[2121, 2189], bsmt_num_features] = 0
bsmt_cat_features = ['BsmtCond', 'BsmtQual', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2']
print(all_data[all_data['BsmtCond'].isnull() & all_data['BsmtQual'].notnull()][bsmt_cat_features])
print(all_data[all_data['BsmtCond'].notnull() & all_data['BsmtQual'].isnull()][bsmt_cat_features]) | code |
2022945/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data.loc[2611, 'MasVnrType'] = 'BrkFace'
all_data['MasVnrType'].fillna('None', inplace=True)
all_data['MasVnrArea'].fillna(0, inplace=True)
bsmt_num_features = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
all_data[all_data['BsmtFullBath'].isnull()][bsmt_num_features]
all_data.loc[[2121, 2189], bsmt_num_features] = 0
bsmt_cat_features = ['BsmtCond', 'BsmtQual', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2']
all_data.loc[[2041, 2186, 2525, 2218, 2219]][bsmt_num_features]
all_data.loc[[2041, 2186, 2525], 'BsmtCond'] = all_data.loc[[2041, 2186, 2525], 'BsmtQual']
all_data.loc[[2218, 2219], 'BsmtQual'] = all_data.loc[[2218, 2219], 'BsmtCond']
all_data.loc[[949, 1488, 2349]][bsmt_num_features]
all_data.loc[[949, 1488, 2349], 'BsmtExposure'] = 'Av'
all_data[all_data['BsmtFinType2'].isnull() & all_data['BsmtFinType1'].notnull()][bsmt_cat_features] | code |
2022945/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data.loc[2611, 'MasVnrType'] = 'BrkFace'
all_data['MasVnrType'].fillna('None', inplace=True)
all_data['MasVnrArea'].fillna(0, inplace=True)
bsmt_num_features = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
all_data[all_data['BsmtFullBath'].isnull()][bsmt_num_features] | code |
2022945/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data.loc[2611, 'MasVnrType'] = 'BrkFace'
all_data['MasVnrType'].fillna('None', inplace=True)
all_data['MasVnrArea'].fillna(0, inplace=True)
bsmt_num_features = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
all_data[all_data['BsmtFullBath'].isnull()][bsmt_num_features]
all_data.loc[[2121, 2189], bsmt_num_features] = 0
all_data.loc[[2041, 2186, 2525, 2218, 2219]][bsmt_num_features]
all_data.loc[[2041, 2186, 2525], 'BsmtCond'] = all_data.loc[[2041, 2186, 2525], 'BsmtQual']
all_data.loc[[2218, 2219], 'BsmtQual'] = all_data.loc[[2218, 2219], 'BsmtCond']
all_data.loc[[949, 1488, 2349]][bsmt_num_features]
all_data[all_data['BsmtExposure'].notnull() & (all_data['BsmtExposure'] != 'No')]['BsmtExposure'].value_counts() | code |
2022945/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv', index_col=0)
test = pd.read_csv('../input/test.csv', index_col=0)
train = train[train['GrLivArea'] < 4000]
labels = train['SalePrice']
train = train.drop('SalePrice', axis=1)
all_data = pd.concat([train, test])
nums = all_data.select_dtypes(exclude=['object']).columns
cat = all_data.select_dtypes(include=['object']).columns
all_data[all_data['MasVnrType'].isnull() & all_data['MasVnrArea'].notnull()][['MasVnrType', 'MasVnrArea']] | code |
106196332/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
pokemon = pd.read_csv('../input/pokemon/Pokemon.csv')
print(pokemon.info())
print(pokemon.describe()) | code |
72063406/cell_13 | [
"text_plain_output_1.png"
] | from lightgbm import LGBMRegressor
from sklearn import model_selection
from sklearn.metrics import mean_squared_error
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
categorical_cols = [col for col in train.columns if 'cat' in col]
new_train = pd.get_dummies(train, columns=categorical_cols, prefix_sep='_')
new_test = pd.get_dummies(test, columns=categorical_cols, prefix_sep='_')
new_test = new_test.drop('id', axis=1)
new_train['kfold'] = -1
kf = model_selection.KFold(n_splits=5, shuffle=True, random_state=47)
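# tag each row with the fold number in which it is held out for validation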
for k, (train_idx, valid_idx) in enumerate(kf.split(X=new_train)):
new_train.loc[valid_idx, 'kfold'] = k
def train_test_data(df, fold):
x_train = df[df.kfold != fold].reset_index(drop=True)
x_valid = df[df.kfold == fold].reset_index(drop=True)
y_train = x_train.target
y_valid = x_valid.target
x_train = x_train.drop(['id', 'target', 'kfold'], axis=1)
x_valid = x_valid.drop(['id', 'target', 'kfold'], axis=1)
return {'x_train': x_train, 'y_train': y_train, 'x_valid': x_valid, 'y_valid': y_valid}
def n_trees_get_models():
models = dict()
n_trees = [10, 50, 100, 500, 1000]
for n in n_trees:
models[str(n)] = LGBMRegressor(n_estimators=n)
return models
def n_depth_get_models():
models = dict()
for i in range(1, 11):
models[str(i)] = LGBMRegressor(max_depth=i, num_leaves=2 ** i)
return models
def n_lr_get_models():
models = dict()
rates = [0.0001, 0.001, 0.01, 0.1, 1.0]
for r in rates:
key = '%.4f' % r
models[key] = LGBMRegressor(learning_rate=r)
return models
def n_boosting_types_get_models():
models = dict()
boosting_types = ['gbdt', 'dart', 'goss']
for t in boosting_types:
models[t] = LGBMRegressor(boosting_type=t)
return models
for fold in range(5):
datasets = train_test_data(new_train, fold)
x_train = datasets['x_train']
y_train = datasets['y_train']
x_valid = datasets['x_valid']
y_valid = datasets['y_valid']
models = n_trees_get_models()
for name, model in models.items():
model.fit(x_train, y_train)
preds = model.predict(x_valid)
preds_test = model.predict(new_test)
rmse = mean_squared_error(y_valid, preds, squared=False)
for fold in range(5):
datasets = train_test_data(new_train, fold)
x_train = datasets['x_train']
y_train = datasets['y_train']
x_valid = datasets['x_valid']
y_valid = datasets['y_valid']
models = n_depth_get_models()
for name, model in models.items():
model.fit(x_train, y_train)
preds = model.predict(x_valid)
preds_test = model.predict(new_test)
rmse = mean_squared_error(y_valid, preds, squared=False)
for fold in range(5):
datasets = train_test_data(new_train, fold)
x_train = datasets['x_train']
y_train = datasets['y_train']
x_valid = datasets['x_valid']
y_valid = datasets['y_valid']
models = n_lr_get_models()
print('************ FOLD: ' + str(fold + 1) + ' ************')
for name, model in models.items():
model.fit(x_train, y_train)
preds = model.predict(x_valid)
preds_test = model.predict(new_test)
rmse = mean_squared_error(y_valid, preds, squared=False)
print('For learning rate: ' + str(name))
print('RMSE Error for fold', fold + 1, ': ', rmse) | code |
72063406/cell_11 | [
"text_plain_output_1.png"
] | from lightgbm import LGBMRegressor
from sklearn import model_selection
from sklearn.metrics import mean_squared_error
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
categorical_cols = [col for col in train.columns if 'cat' in col]
new_train = pd.get_dummies(train, columns=categorical_cols, prefix_sep='_')
new_test = pd.get_dummies(test, columns=categorical_cols, prefix_sep='_')
new_test = new_test.drop('id', axis=1)
new_train['kfold'] = -1
kf = model_selection.KFold(n_splits=5, shuffle=True, random_state=47)
for k, (train_idx, valid_idx) in enumerate(kf.split(X=new_train)):
new_train.loc[valid_idx, 'kfold'] = k
def train_test_data(df, fold):
x_train = df[df.kfold != fold].reset_index(drop=True)
x_valid = df[df.kfold == fold].reset_index(drop=True)
y_train = x_train.target
y_valid = x_valid.target
x_train = x_train.drop(['id', 'target', 'kfold'], axis=1)
x_valid = x_valid.drop(['id', 'target', 'kfold'], axis=1)
return {'x_train': x_train, 'y_train': y_train, 'x_valid': x_valid, 'y_valid': y_valid}
def n_trees_get_models():
models = dict()
n_trees = [10, 50, 100, 500, 1000]
for n in n_trees:
models[str(n)] = LGBMRegressor(n_estimators=n)
return models
def n_depth_get_models():
models = dict()
for i in range(1, 11):
models[str(i)] = LGBMRegressor(max_depth=i, num_leaves=2 ** i)
return models
def n_lr_get_models():
models = dict()
rates = [0.0001, 0.001, 0.01, 0.1, 1.0]
for r in rates:
key = '%.4f' % r
models[key] = LGBMRegressor(learning_rate=r)
return models
def n_boosting_types_get_models():
models = dict()
boosting_types = ['gbdt', 'dart', 'goss']
for t in boosting_types:
models[t] = LGBMRegressor(boosting_type=t)
return models
for fold in range(5):
datasets = train_test_data(new_train, fold)
x_train = datasets['x_train']
y_train = datasets['y_train']
x_valid = datasets['x_valid']
y_valid = datasets['y_valid']
models = n_trees_get_models()
print('************ FOLD: ' + str(fold + 1) + ' ************')
for name, model in models.items():
model.fit(x_train, y_train)
preds = model.predict(x_valid)
preds_test = model.predict(new_test)
rmse = mean_squared_error(y_valid, preds, squared=False)
print('For ' + str(name) + ' trees: ')
print('RMSE Error for fold', fold + 1, ': ', rmse) | code |
72063406/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72063406/cell_12 | [
"text_plain_output_1.png"
] | from lightgbm import LGBMRegressor
from sklearn import model_selection
from sklearn.metrics import mean_squared_error
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_sub = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
categorical_cols = [col for col in train.columns if 'cat' in col]
new_train = pd.get_dummies(train, columns=categorical_cols, prefix_sep='_')
new_test = pd.get_dummies(test, columns=categorical_cols, prefix_sep='_')
new_test = new_test.drop('id', axis=1)
new_train['kfold'] = -1
kf = model_selection.KFold(n_splits=5, shuffle=True, random_state=47)
for k, (train_idx, valid_idx) in enumerate(kf.split(X=new_train)):
new_train.loc[valid_idx, 'kfold'] = k
def train_test_data(df, fold):
x_train = df[df.kfold != fold].reset_index(drop=True)
x_valid = df[df.kfold == fold].reset_index(drop=True)
y_train = x_train.target
y_valid = x_valid.target
x_train = x_train.drop(['id', 'target', 'kfold'], axis=1)
x_valid = x_valid.drop(['id', 'target', 'kfold'], axis=1)
return {'x_train': x_train, 'y_train': y_train, 'x_valid': x_valid, 'y_valid': y_valid}
def n_trees_get_models():
models = dict()
n_trees = [10, 50, 100, 500, 1000]
for n in n_trees:
models[str(n)] = LGBMRegressor(n_estimators=n)
return models
def n_depth_get_models():
models = dict()
for i in range(1, 11):
models[str(i)] = LGBMRegressor(max_depth=i, num_leaves=2 ** i)
return models
def n_lr_get_models():
models = dict()
rates = [0.0001, 0.001, 0.01, 0.1, 1.0]
for r in rates:
key = '%.4f' % r
models[key] = LGBMRegressor(learning_rate=r)
return models
def n_boosting_types_get_models():
models = dict()
boosting_types = ['gbdt', 'dart', 'goss']
for t in boosting_types:
models[t] = LGBMRegressor(boosting_type=t)
return models
for fold in range(5):
datasets = train_test_data(new_train, fold)
x_train = datasets['x_train']
y_train = datasets['y_train']
x_valid = datasets['x_valid']
y_valid = datasets['y_valid']
models = n_trees_get_models()
for name, model in models.items():
model.fit(x_train, y_train)
preds = model.predict(x_valid)
preds_test = model.predict(new_test)
rmse = mean_squared_error(y_valid, preds, squared=False)
for fold in range(5):
datasets = train_test_data(new_train, fold)
x_train = datasets['x_train']
y_train = datasets['y_train']
x_valid = datasets['x_valid']
y_valid = datasets['y_valid']
models = n_depth_get_models()
print('************ FOLD: ' + str(fold + 1) + ' ************')
for name, model in models.items():
model.fit(x_train, y_train)
preds = model.predict(x_valid)
preds_test = model.predict(new_test)
rmse = mean_squared_error(y_valid, preds, squared=False)
print('For ' + str(name) + ' depth: ')
print('RMSE Error for fold', fold + 1, ': ', rmse) | code |
18144904/cell_6 | [
"image_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Dense, Conv2D, Flatten
from keras.models import Sequential, Model
from keras.optimizers import Adagrad
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True, validation_split=0.2)
def get_generator(path, subset):
return train_datagen.flow_from_directory(path, target_size=(200, 400), batch_size=32, class_mode='categorical', subset=subset, color_mode='grayscale')
train_generator = get_generator('../input/transmittancy/transmittancy/', 'training')
validation_generator = get_generator('../input/transmittancy/transmittancy/', 'validation')
from keras.optimizers import Adagrad
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
adagrad = Adagrad(decay=0.001, lr=0.005)
earlyStopping = EarlyStopping(monitor='val_acc', patience=8, verbose=1, mode='min')
mcp_save = ModelCheckpoint('best_model.hdf5', save_best_only=True, monitor='val_loss', mode='min')
from keras.models import Sequential, Model
from keras.layers import Dense, Conv2D, Flatten
model = Sequential()
model.add(Conv2D(128, kernel_size=3, activation='relu', strides=(2, 4), input_shape=(200, 400, 1)))
model.add(Conv2D(64, kernel_size=3, activation='relu', strides=(2, 4)))
model.add(Conv2D(32, kernel_size=3, activation='relu', strides=(2, 4)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=adagrad, metrics=['acc'])
model.summary()
history = model.fit_generator(train_generator, steps_per_epoch=150, epochs=40, validation_data=validation_generator, validation_steps=15)
import matplotlib.pyplot as plt
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show() | code |
18144904/cell_2 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True, validation_split=0.2)
def get_generator(path, subset):
return train_datagen.flow_from_directory(path, target_size=(200, 400), batch_size=32, class_mode='categorical', subset=subset, color_mode='grayscale')
train_generator = get_generator('../input/transmittancy/transmittancy/', 'training')
validation_generator = get_generator('../input/transmittancy/transmittancy/', 'validation') | code |
18144904/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator | code |
18144904/cell_8 | [
"image_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Dense, Conv2D, Flatten
from keras.models import Sequential, Model
from keras.optimizers import Adagrad
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True, validation_split=0.2)
def get_generator(path, subset):
return train_datagen.flow_from_directory(path, target_size=(200, 400), batch_size=32, class_mode='categorical', subset=subset, color_mode='grayscale')
train_generator = get_generator('../input/transmittancy/transmittancy/', 'training')
validation_generator = get_generator('../input/transmittancy/transmittancy/', 'validation')
from keras.optimizers import Adagrad
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
adagrad = Adagrad(decay=0.001, lr=0.005)
earlyStopping = EarlyStopping(monitor='val_acc', patience=8, verbose=1, mode='min')
mcp_save = ModelCheckpoint('best_model.hdf5', save_best_only=True, monitor='val_loss', mode='min')
from keras.models import Sequential, Model
from keras.layers import Dense, Conv2D, Flatten
model = Sequential()
model.add(Conv2D(128, kernel_size=3, activation='relu', strides=(2, 4), input_shape=(200, 400, 1)))
model.add(Conv2D(64, kernel_size=3, activation='relu', strides=(2, 4)))
model.add(Conv2D(32, kernel_size=3, activation='relu', strides=(2, 4)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=adagrad, metrics=['acc'])
model.summary()
history = model.fit_generator(train_generator, steps_per_epoch=150, epochs=40, validation_data=validation_generator, validation_steps=15)
import matplotlib.pyplot as plt
def plot_batch(batch):
fig, axes = plt.subplots(4, 8, sharex=True, sharey=True, figsize=(16, 8))
for ind, ax in enumerate(axes.flatten()):
ax.imshow(batch[ind].reshape(200, 400), vmin=0, vmax=1, interpolation=None, cmap='gray')
fig.tight_layout()
plt.show()
batch, _ = train_generator.next()
plot_batch(batch) | code |
18144904/cell_5 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Dense, Conv2D, Flatten
from keras.models import Sequential, Model
from keras.optimizers import Adagrad
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True, validation_split=0.2)
def get_generator(path, subset):
return train_datagen.flow_from_directory(path, target_size=(200, 400), batch_size=32, class_mode='categorical', subset=subset, color_mode='grayscale')
train_generator = get_generator('../input/transmittancy/transmittancy/', 'training')
validation_generator = get_generator('../input/transmittancy/transmittancy/', 'validation')
from keras.optimizers import Adagrad
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
adagrad = Adagrad(decay=0.001, lr=0.005)
earlyStopping = EarlyStopping(monitor='val_acc', patience=8, verbose=1, mode='min')
mcp_save = ModelCheckpoint('best_model.hdf5', save_best_only=True, monitor='val_loss', mode='min')
from keras.models import Sequential, Model
from keras.layers import Dense, Conv2D, Flatten
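# small CNN: three strided Conv2D layers downsample the 200x400 grayscale input, then a dense head with softmax over 3 classes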
model = Sequential()
model.add(Conv2D(128, kernel_size=3, activation='relu', strides=(2, 4), input_shape=(200, 400, 1)))
model.add(Conv2D(64, kernel_size=3, activation='relu', strides=(2, 4)))
model.add(Conv2D(32, kernel_size=3, activation='relu', strides=(2, 4)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=adagrad, metrics=['acc'])
model.summary()
history = model.fit_generator(train_generator, steps_per_epoch=150, epochs=40, validation_data=validation_generator, validation_steps=15) | code |
128034943/cell_4 | [
"text_plain_output_4.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, StratifiedKFold, RandomizedSearchCV
import glob
import librosa
import numpy as np
import os
import pandas as pd
import random
import concurrent.futures  # needed for the ThreadPoolExecutor used in load_and_process_audio_files
def load_metadata(file_path):
return pd.read_csv(file_path)
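# split each recording into 5-second segments and, per segment, build a feature vector from the flattened 32-band mel spectrogram plus the mean of 12 MFCCs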
def process_audio_file(filename, species, train_audio_path, segment_duration=5):
file_path = os.path.join(train_audio_path, species, filename)
try:
audio_data, sr = librosa.load(file_path)
except Exception as e:
print(f'Could not load file {file_path}: {e}')
return None
segment_length = segment_duration * sr
segments = [audio_data[i:i + segment_length] for i in range(0, len(audio_data), segment_length)]
results = []
for segment in segments:
spectrogram = librosa.feature.melspectrogram(y=segment, sr=sr, n_mels=32)
mfccs = librosa.feature.mfcc(y=segment, sr=sr, n_mfcc=12)
mean_mfccs = mfccs.mean(axis=1)
file_features = np.concatenate([spectrogram.flatten(), mean_mfccs])
results.append((species, filename, file_features))
return results
def load_and_process_audio_files(metadata_df, selected_species_list, train_audio_path, sample_size=60):
audio_files = []
for species in selected_species_list:
species_path = os.path.join(train_audio_path, species, '*.ogg')
species_audio_files = glob.glob(species_path)
audio_files.extend([(os.path.basename(f), species) for f in species_audio_files])
if len(audio_files) < sample_size:
audio_files_sample = audio_files
else:
audio_files_sample = random.sample(audio_files, sample_size)
results = []
with concurrent.futures.ThreadPoolExecutor() as executor:
results_list = list(executor.map(lambda x: process_audio_file(x[0], x[1], train_audio_path), audio_files_sample))
for result in results_list:
if result is not None:
results.extend(result)
species_list, filenames, features_list = zip(*results)
max_features = max([len(features) for features in features_list])
padded_features = []
for features in features_list:
padding_length = max(0, max_features - len(features))
padded_features.append(np.pad(features, (0, padding_length), mode='constant'))
results_df = pd.DataFrame({'primary_label': species_list, 'filename': filenames})
features_df = pd.DataFrame(padded_features, columns=[f'feature_{i}' for i in range(len(padded_features[0]))])
metadata_features_df = pd.concat([results_df, features_df], axis=1)
X = metadata_features_df.drop(['primary_label', 'filename'], axis=1)
y = metadata_features_df['primary_label']
return (X, y)
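# randomized hyperparameter search for a RandomForestClassifier, cross-validated with the supplied stratified k-fold splitter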
def train_model(X_train, y_train, param_dist, stratified_kfold, n_iter=300, n_jobs=-1):
clf = RandomForestClassifier(random_state=42)
random_search = RandomizedSearchCV(estimator=clf, param_distributions=param_dist, n_iter=n_iter, cv=stratified_kfold, verbose=2, random_state=42, n_jobs=n_jobs)
random_search.fit(X_train, y_train)
best_clf = random_search.best_estimator_
return best_clf
def evaluate_model(model, X_test, y_test):
y_pred = model.predict(X_test)
return accuracy_score(y_test, y_pred)
def process_test_audio_file(filename, test_audio_path, segment_duration=5):
file_path = os.path.join(test_audio_path, filename)
try:
audio_data, sr = librosa.load(file_path)
except Exception as e:
print(f'Could not load file {file_path}: {e}')
return None
segment_length = segment_duration * sr
segments = [audio_data[i:i + segment_length] for i in range(0, len(audio_data), segment_length)]
results = []
for segment in segments:
spectrogram = librosa.feature.melspectrogram(y=segment, sr=sr, n_mels=32)
mfccs = librosa.feature.mfcc(y=segment, sr=sr, n_mfcc=12)
mean_mfccs = mfccs.mean(axis=1)
file_features = np.concatenate([spectrogram.flatten(), mean_mfccs])
results.append(file_features)
return results
def predict(model, test_features, max_features, species_columns):
test_padding_length = max(0, max_features - len(test_features))
test_features_padded = np.pad(test_features, (0, test_padding_length), mode='constant')
predictions = model.predict_proba(test_features_padded.reshape(1, -1))[0]
class_probabilities = dict(zip(model.classes_, predictions))
predictions_aligned = [class_probabilities.get(species, 0) for species in species_columns]
return predictions_aligned
def main():
metadata_file_path = os.path.join('/kaggle/input/birdclef-2023/train_metadata.csv')
metadata_df = load_metadata(metadata_file_path)
selected_species_list = metadata_df['primary_label'].unique()
train_audio_path = os.path.join('/kaggle/input/birdclef-2023/train_audio')
X, y = load_and_process_audio_files(metadata_df, selected_species_list, train_audio_path)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
param_dist = {'n_estimators': range(20, 400), 'max_depth': [None] + list(range(1, 10)), 'min_samples_split': range(2, 11), 'min_samples_leaf': range(1, 5), 'bootstrap': [True, False]}
stratified_kfold = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
best_clf = train_model(X_train, y_train, param_dist, stratified_kfold)
acc = evaluate_model(best_clf, X_test, y_test)
print('Model accuracy:', acc)
test_audio_path = os.path.join('/kaggle/input/birdclef-2023/test_soundscapes')
test_filename = 'soundscape_29201.ogg'
test_features_list = process_test_audio_file(test_filename, test_audio_path)
species_columns = sorted(metadata_df['primary_label'].unique())
submission_df = pd.DataFrame(columns=['row_id'] + species_columns)
for i, test_features in enumerate(test_features_list):
if test_features is not None:
predictions = predict(best_clf, test_features, X.shape[1], species_columns)
temp_df = pd.DataFrame([predictions], columns=species_columns)
test_filename_no_ext = os.path.splitext(test_filename)[0]
temp_df.insert(0, 'row_id', f'{test_filename_no_ext}_{(i + 1) * 5}')
submission_df = submission_df.append(temp_df, ignore_index=True)
else:
print(f'Could not process segment {i + 1} of the test audio file.')
submission_df.to_csv('submission.csv', index=False)
if __name__ == '__main__':
main() | code |
128034943/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import glob
import random
import concurrent.futures
import numpy as np
import pandas as pd
import librosa
from sklearn.model_selection import train_test_split, StratifiedKFold, RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from joblib import dump, load | code |
122257222/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.describe() | code |
122257222/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique()
features = data.copy()
features.drop(columns=['fueltype', 'aspiration', 'doornumber', 'drivewheel', 'enginelocation', 'symboling'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
features.drop(columns=['wheelbase', 'enginesize', 'boreratio', 'highwaympg'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
target = features['price']
target.head() | code |
122257222/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique()
features = data.copy()
features.drop(columns=['fueltype', 'aspiration', 'doornumber', 'drivewheel', 'enginelocation', 'symboling'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
features.drop(columns=['wheelbase', 'enginesize', 'boreratio', 'highwaympg'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
sns.heatmap(corr) | code |
122257222/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.head() | code |
122257222/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique()
features = data.copy()
features.drop(columns=['fueltype', 'aspiration', 'doornumber', 'drivewheel', 'enginelocation', 'symboling'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
features.drop(columns=['wheelbase', 'enginesize', 'boreratio', 'highwaympg'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
target = features['price']
features.drop(columns=['price'], inplace=True)
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import Ridge
model = make_pipeline(OneHotEncoder(), Ridge())
model.fit(features, target) | code |
122257222/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data.info() | code |
122257222/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique()
features = data.copy()
features.drop(columns=['fueltype', 'aspiration', 'doornumber', 'drivewheel', 'enginelocation', 'symboling'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
sns.heatmap(corr) | code |
122257222/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
122257222/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.tail() | code |
122257222/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique()
features = data.copy()
features.drop(columns=['fueltype', 'aspiration', 'doornumber', 'drivewheel', 'enginelocation', 'symboling'], inplace=True)
features.columns
features.select_dtypes('object').head() | code |
122257222/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape | code |
122257222/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique() | code |
122257222/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique()
features = data.copy()
features.head() | code |
122257222/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique()
features = data.copy()
features.drop(columns=['fueltype', 'aspiration', 'doornumber', 'drivewheel', 'enginelocation', 'symboling'], inplace=True)
features.columns | code |
122257222/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique()
features = data.copy()
features.drop(columns=['fueltype', 'aspiration', 'doornumber', 'drivewheel', 'enginelocation', 'symboling'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
features.drop(columns=['wheelbase', 'enginesize', 'boreratio', 'highwaympg'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
features.info() | code |
122257222/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()] | code |
122257222/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique()
features = data.copy()
features.drop(columns=['fueltype', 'aspiration', 'doornumber', 'drivewheel', 'enginelocation', 'symboling'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
features.drop(columns=['wheelbase', 'enginesize', 'boreratio', 'highwaympg'], inplace=True)
features.columns | code |
122257222/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample() | code |
122257222/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/car-price-prediction/car_price.csv')
data.shape
data.sample()
data[data.duplicated()]
data.nunique()
features = data.copy()
features.drop(columns=['fueltype', 'aspiration', 'doornumber', 'drivewheel', 'enginelocation', 'symboling'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
features.drop(columns=['wheelbase', 'enginesize', 'boreratio', 'highwaympg'], inplace=True)
features.columns
corr = features.select_dtypes('number').drop(columns=['price']).corr()
features.drop(columns=['price'], inplace=True)
features.head() | code |
128020140/cell_6 | [
"image_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold, cross_val_score, GridSearchCV
import pandas as pd
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 256
num_epochs = 1000
image_size = 72
patch_size = 6
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
transformer_units = [projection_dim * 2, projection_dim]
transformer_layers = 8
mlp_head_units = [2048, 1024]
input_shape = (72, 72, 3)
num_classes = 7
image_dir = '/kaggle/input/balanced-datasets/Smote_dataset'
df = pd.read_csv('/kaggle/input/balanced-datasets/Smote_dataset/labels.csv')
train_val_df, test_df = train_test_split(df, stratify=df['label'], test_size=0.1, random_state=42)
train_df, validation_df = train_test_split(train_val_df, stratify=train_val_df['label'], test_size=0.2, random_state=42)
augmented_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = augmented_datagen.flow_from_dataframe(train_df, directory=image_dir, batch_size=batch_size, target_size=(image_size, image_size), x_col='filename', y_col='label', class_mode='categorical', shuffle=True)
val_generator = datagen.flow_from_dataframe(validation_df, directory=image_dir, batch_size=batch_size, target_size=(image_size, image_size), x_col='filename', y_col='label', class_mode='categorical', shuffle=True)
test_generator = datagen.flow_from_dataframe(test_df, directory=image_dir, x_col='filename', y_col='label', target_size=(image_size, image_size), batch_size=20, class_mode='categorical') | code |
128020140/cell_16 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold, cross_val_score, GridSearchCV
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd
import tensorflow as tf
import tensorflow_addons as tfa
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 256
num_epochs = 1000
image_size = 72
patch_size = 6
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
transformer_units = [projection_dim * 2, projection_dim]
transformer_layers = 8
mlp_head_units = [2048, 1024]
input_shape = (72, 72, 3)
num_classes = 7
image_dir = '/kaggle/input/balanced-datasets/Smote_dataset'
df = pd.read_csv('/kaggle/input/balanced-datasets/Smote_dataset/labels.csv')
train_val_df, test_df = train_test_split(df, stratify=df['label'], test_size=0.1, random_state=42)
train_df, validation_df = train_test_split(train_val_df, stratify=train_val_df['label'], test_size=0.2, random_state=42)
augmented_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = augmented_datagen.flow_from_dataframe(train_df, directory=image_dir, batch_size=batch_size, target_size=(image_size, image_size), x_col='filename', y_col='label', class_mode='categorical', shuffle=True)
val_generator = datagen.flow_from_dataframe(validation_df, directory=image_dir, batch_size=batch_size, target_size=(image_size, image_size), x_col='filename', y_col='label', class_mode='categorical', shuffle=True)
test_generator = datagen.flow_from_dataframe(test_df, directory=image_dir, x_col='filename', y_col='label', target_size=(image_size, image_size), batch_size=20, class_mode='categorical')
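# feed-forward block used inside each transformer layer and in the classification head: Dense + GELU followed by dropout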
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=tf.nn.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
class Patches(layers.Layer):
def __init__(self, patch_size):
super().__init__()
self.patch_size = patch_size
def call(self, images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(images=images, sizes=[1, self.patch_size, self.patch_size, 1], strides=[1, self.patch_size, self.patch_size, 1], rates=[1, 1, 1, 1], padding='VALID')
patch_dims = patches.shape[-1]
patches = tf.reshape(patches, [batch_size, -1, patch_dims])
return patches
class PatchEncoder(layers.Layer):
def __init__(self, num_patches, projection_dim):
super().__init__()
self.num_patches = num_patches
self.projection = layers.Dense(units=projection_dim)
self.position_embedding = layers.Embedding(input_dim=num_patches, output_dim=projection_dim)
def call(self, patch):
positions = tf.range(start=0, limit=self.num_patches, delta=1)
encoded = self.projection(patch) + self.position_embedding(positions)
return encoded
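# ViT classifier: patch extraction and encoding, then a stack of pre-norm multi-head self-attention + MLP blocks with residual connections, followed by an MLP head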
def create_vit_classifier():
inputs = layers.Input(shape=input_shape)
patches = Patches(patch_size)(inputs)
encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
for _ in range(transformer_layers):
x1 = layers.LayerNormalization(epsilon=1e-06)(encoded_patches)
attention_output = layers.MultiHeadAttention(num_heads=num_heads, key_dim=projection_dim, dropout=0.1)(x1, x1)
x2 = layers.Add()([attention_output, encoded_patches])
x3 = layers.LayerNormalization(epsilon=1e-06)(x2)
x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
encoded_patches = layers.Add()([x3, x2])
representation = layers.LayerNormalization(epsilon=1e-06)(encoded_patches)
representation = layers.Flatten()(representation)
representation = layers.Dropout(0.5)(representation)
features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)
logits = layers.Dense(num_classes)(features)
model = keras.Model(inputs=inputs, outputs=logits)
return model
def run_experiment(model):
optimizer = tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=weight_decay)
model.compile(optimizer=optimizer, loss=keras.losses.CategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.CategoricalAccuracy(name='accuracy'), keras.metrics.TopKCategoricalAccuracy(3, name='top-3-accuracy')])
mc = keras.callbacks.ModelCheckpoint(filepath='/tmp/checkpoint', monitor='val_accuracy', save_best_only=True, save_weights_only=True)
cb = [mc]
history = model.fit(train_generator, steps_per_epoch=len(train_df) // batch_size, epochs=num_epochs, validation_data=val_generator, validation_steps=len(validation_df) // batch_size, callbacks=cb)
model.load_weights('/tmp/checkpoint')
_, accuracy, top_3_accuracy = model.evaluate(test_generator)
print(f'Test accuracy: {round(accuracy * 100, 2)}%')
print(f'Test top 3 accuracy: {round(top_3_accuracy * 100, 2)}%')
return history
vit_classifier = create_vit_classifier()
history = run_experiment(vit_classifier) | code |
128020140/cell_17 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold, cross_val_score, GridSearchCV
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import tensorflow_addons as tfa
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 256
num_epochs = 1000
image_size = 72
patch_size = 6
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
transformer_units = [projection_dim * 2, projection_dim]
transformer_layers = 8
mlp_head_units = [2048, 1024]
input_shape = (72, 72, 3)
num_classes = 7
image_dir = '/kaggle/input/balanced-datasets/Smote_dataset'
df = pd.read_csv('/kaggle/input/balanced-datasets/Smote_dataset/labels.csv')
train_val_df, test_df = train_test_split(df, stratify=df['label'], test_size=0.1, random_state=42)
train_df, validation_df = train_test_split(train_val_df, stratify=train_val_df['label'], test_size=0.2, random_state=42)
augmented_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = augmented_datagen.flow_from_dataframe(train_df, directory=image_dir, batch_size=batch_size, target_size=(image_size, image_size), x_col='filename', y_col='label', class_mode='categorical', shuffle=True)
val_generator = datagen.flow_from_dataframe(validation_df, directory=image_dir, batch_size=batch_size, target_size=(image_size, image_size), x_col='filename', y_col='label', class_mode='categorical', shuffle=True)
test_generator = datagen.flow_from_dataframe(test_df, directory=image_dir, x_col='filename', y_col='label', target_size=(image_size, image_size), batch_size=20, class_mode='categorical')
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=tf.nn.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
class Patches(layers.Layer):
def __init__(self, patch_size):
super().__init__()
self.patch_size = patch_size
def call(self, images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(images=images, sizes=[1, self.patch_size, self.patch_size, 1], strides=[1, self.patch_size, self.patch_size, 1], rates=[1, 1, 1, 1], padding='VALID')
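# extract_patches returns the flattened pixels of every patch; the reshape below gives (batch_size, num_patches, patch_dims)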
patch_dims = patches.shape[-1]
patches = tf.reshape(patches, [batch_size, -1, patch_dims])
return patches
class PatchEncoder(layers.Layer):
def __init__(self, num_patches, projection_dim):
super().__init__()
self.num_patches = num_patches
self.projection = layers.Dense(units=projection_dim)
self.position_embedding = layers.Embedding(input_dim=num_patches, output_dim=projection_dim)
def call(self, patch):
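# each flattened patch is linearly projected and a learned positional embedding is added so the Transformer keeps track of patch order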
positions = tf.range(start=0, limit=self.num_patches, delta=1)
encoded = self.projection(patch) + self.position_embedding(positions)
return encoded
def create_vit_classifier():
inputs = layers.Input(shape=input_shape)
patches = Patches(patch_size)(inputs)
encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
for _ in range(transformer_layers):
x1 = layers.LayerNormalization(epsilon=1e-06)(encoded_patches)
attention_output = layers.MultiHeadAttention(num_heads=num_heads, key_dim=projection_dim, dropout=0.1)(x1, x1)
x2 = layers.Add()([attention_output, encoded_patches])
x3 = layers.LayerNormalization(epsilon=1e-06)(x2)
x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
encoded_patches = layers.Add()([x3, x2])
representation = layers.LayerNormalization(epsilon=1e-06)(encoded_patches)
representation = layers.Flatten()(representation)
representation = layers.Dropout(0.5)(representation)
features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)
logits = layers.Dense(num_classes)(features)
model = keras.Model(inputs=inputs, outputs=logits)
return model
def run_experiment(model):
optimizer = tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=weight_decay)
model.compile(optimizer=optimizer, loss=keras.losses.CategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.CategoricalAccuracy(name='accuracy'), keras.metrics.TopKCategoricalAccuracy(3, name='top-3-accuracy')])
mc = keras.callbacks.ModelCheckpoint(filepath='/tmp/checkpoint', monitor='val_accuracy', save_best_only=True, save_weights_only=True)
cb = [mc]
history = model.fit(train_generator, steps_per_epoch=len(train_df) // batch_size, epochs=num_epochs, validation_data=val_generator, validation_steps=len(validation_df) // batch_size, callbacks=cb)
model.load_weights('/tmp/checkpoint')
_, accuracy, top_3_accuracy = model.evaluate(test_generator)
return history
vit_classifier = create_vit_classifier()
history = run_experiment(vit_classifier)
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['accuracy']
val_acc_values = history_dict['val_accuracy']
def smooth_curve(points, factor=0.8):
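# exponentially weighted moving average, used only to smooth the training curves for plotting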
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
epochs = range(1, len(loss_values) + 1)
plt.subplot(1, 2, 1)
plt.plot(epochs, smooth_curve(loss_values), 'bo', label='training loss')
plt.plot(epochs, smooth_curve(val_loss_values), 'b', label='validation loss')
plt.title('training and validation loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, smooth_curve(acc_values), 'ro', label='training accuracy')
plt.plot(epochs, smooth_curve(val_acc_values), 'r', label='validation accuracy')
plt.title('training and validation accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show() | code |
121153835/cell_13 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split, KFold
import lightgbm as lgbm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv')
df_test.drop(columns='id', inplace=True)
y = df.pop('Strength')
df['tot_comp'] = df.iloc[:, :7].sum(axis=1)
df['coarse_fine'] = df.CoarseAggregateComponent / df.FineAggregateComponent
df['Aggregate'] = df['CoarseAggregateComponent'] + df['FineAggregateComponent']
df['Slag_Cement'] = df['BlastFurnaceSlag'] / df['CementComponent']
df['Ash_Cement'] = df['FlyAshComponent'] / df['CementComponent']
df['Plastic_Cement'] = df['SuperplasticizerComponent'] / df['CementComponent']
df['Age_Water'] = df['AgeInDays'] / df['WaterComponent']
df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1)
df_test['coarse_fine'] = df_test.CoarseAggregateComponent / df_test.FineAggregateComponent
df_test['Aggregate'] = df_test['CoarseAggregateComponent'] + df_test['FineAggregateComponent']
df_test['Slag_Cement'] = df_test['BlastFurnaceSlag'] / df_test['CementComponent']
df_test['Ash_Cement'] = df_test['FlyAshComponent'] / df_test['CementComponent']
df_test['Plastic_Cement'] = df_test['SuperplasticizerComponent'] / df_test['CementComponent']
df_test['Age_Water'] = df_test['AgeInDays'] / df_test['WaterComponent']
splitter = KFold(n_splits=5, shuffle=True, random_state=231)
df_lgbm = lgbm.Dataset(df, label=y)
model_cv = lgbm.cv({'random_state': 97}, train_set=df_lgbm, num_boost_round=50, folds=splitter, nfold=5, metrics='rmse', return_cvbooster=True, eval_train_metric=True)
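# return_cvbooster=True keeps the booster fitted on each fold, so the per-fold predictions on the test set can be averaged below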
predictions = pd.DataFrame(np.array(model_cv['cvbooster'].predict(df_test)).T)
sns.kdeplot(y, label='true value')
sns.kdeplot(predictions.mean(axis=1), label='prediction')
plt.legend() | code |
121153835/cell_4 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv')
df_test.drop(columns='id', inplace=True)
df_test.head() | code |
121153835/cell_2 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
print(f'the competition dataset shape is {df.shape}') | code |
121153835/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split, KFold
import lightgbm as lgbm
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv')
df_test.drop(columns='id', inplace=True)
y = df.pop('Strength')
df['tot_comp'] = df.iloc[:, :7].sum(axis=1)
df['coarse_fine'] = df.CoarseAggregateComponent / df.FineAggregateComponent
df['Aggregate'] = df['CoarseAggregateComponent'] + df['FineAggregateComponent']
df['Slag_Cement'] = df['BlastFurnaceSlag'] / df['CementComponent']
df['Ash_Cement'] = df['FlyAshComponent'] / df['CementComponent']
df['Plastic_Cement'] = df['SuperplasticizerComponent'] / df['CementComponent']
df['Age_Water'] = df['AgeInDays'] / df['WaterComponent']
df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1)
df_test['coarse_fine'] = df_test.CoarseAggregateComponent / df_test.FineAggregateComponent
df_test['Aggregate'] = df_test['CoarseAggregateComponent'] + df_test['FineAggregateComponent']
df_test['Slag_Cement'] = df_test['BlastFurnaceSlag'] / df_test['CementComponent']
df_test['Ash_Cement'] = df_test['FlyAshComponent'] / df_test['CementComponent']
df_test['Plastic_Cement'] = df_test['SuperplasticizerComponent'] / df_test['CementComponent']
df_test['Age_Water'] = df_test['AgeInDays'] / df_test['WaterComponent']
splitter = KFold(n_splits=5, shuffle=True, random_state=231)
df_lgbm = lgbm.Dataset(df, label=y)
model_cv = lgbm.cv({'random_state': 97}, train_set=df_lgbm, num_boost_round=50, folds=splitter, nfold=5, metrics='rmse', return_cvbooster=True, eval_train_metric=True)
plt.plot(model_cv['train rmse-mean'], label='train RMSE')
plt.plot(model_cv['valid rmse-mean'], label='valid RMSE')
plt.xlabel('no. iteration')
plt.ylabel('score')
plt.legend() | code |
121153835/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import mean_squared_error
import lightgbm as lgbm
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
121153835/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv')
df_test.drop(columns='id', inplace=True)
y = df.pop('Strength')
df['tot_comp'] = df.iloc[:, :7].sum(axis=1)
df['coarse_fine'] = df.CoarseAggregateComponent / df.FineAggregateComponent
df['Aggregate'] = df['CoarseAggregateComponent'] + df['FineAggregateComponent']
df['Slag_Cement'] = df['BlastFurnaceSlag'] / df['CementComponent']
df['Ash_Cement'] = df['FlyAshComponent'] / df['CementComponent']
df['Plastic_Cement'] = df['SuperplasticizerComponent'] / df['CementComponent']
df['Age_Water'] = df['AgeInDays'] / df['WaterComponent']
df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1)
df_test['coarse_fine'] = df_test.CoarseAggregateComponent / df_test.FineAggregateComponent
df_test['Aggregate'] = df_test['CoarseAggregateComponent'] + df_test['FineAggregateComponent']
df_test['Slag_Cement'] = df_test['BlastFurnaceSlag'] / df_test['CementComponent']
df_test['Ash_Cement'] = df_test['FlyAshComponent'] / df_test['CementComponent']
df_test['Plastic_Cement'] = df_test['SuperplasticizerComponent'] / df_test['CementComponent']
df_test['Age_Water'] = df_test['AgeInDays'] / df_test['WaterComponent']
df.head() | code |
121153835/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split, KFold
import lightgbm as lgbm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv')
df_test.drop(columns='id', inplace=True)
y = df.pop('Strength')
df['tot_comp'] = df.iloc[:, :7].sum(axis=1)
df['coarse_fine'] = df.CoarseAggregateComponent / df.FineAggregateComponent
df['Aggregate'] = df['CoarseAggregateComponent'] + df['FineAggregateComponent']
df['Slag_Cement'] = df['BlastFurnaceSlag'] / df['CementComponent']
df['Ash_Cement'] = df['FlyAshComponent'] / df['CementComponent']
df['Plastic_Cement'] = df['SuperplasticizerComponent'] / df['CementComponent']
df['Age_Water'] = df['AgeInDays'] / df['WaterComponent']
df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1)
df_test['coarse_fine'] = df_test.CoarseAggregateComponent / df_test.FineAggregateComponent
df_test['Aggregate'] = df_test['CoarseAggregateComponent'] + df_test['FineAggregateComponent']
df_test['Slag_Cement'] = df_test['BlastFurnaceSlag'] / df_test['CementComponent']
df_test['Ash_Cement'] = df_test['FlyAshComponent'] / df_test['CementComponent']
df_test['Plastic_Cement'] = df_test['SuperplasticizerComponent'] / df_test['CementComponent']
df_test['Age_Water'] = df_test['AgeInDays'] / df_test['WaterComponent']
splitter = KFold(n_splits=5, shuffle=True, random_state=231)
df_lgbm = lgbm.Dataset(df, label=y)
model_cv = lgbm.cv({'random_state': 97}, train_set=df_lgbm, num_boost_round=50, folds=splitter, nfold=5, metrics='rmse', return_cvbooster=True, eval_train_metric=True)
predictions = pd.DataFrame(np.array(model_cv['cvbooster'].predict(df_test)).T)
subs = pd.read_csv('/kaggle/input/playground-series-s3e9/sample_submission.csv')
subs['Strength'] = predictions.mean(axis=1)
subs.head() | code |
121153835/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
print(f'the addition dataset shape is {df_add.shape}')
df = pd.concat([df, df_add])
print(f'the new dataset shape is {df.shape}') | code |
121153835/cell_14 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split, KFold
import lightgbm as lgbm
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv')
df_test.drop(columns='id', inplace=True)
y = df.pop('Strength')
df['tot_comp'] = df.iloc[:, :7].sum(axis=1)
df['coarse_fine'] = df.CoarseAggregateComponent / df.FineAggregateComponent
df['Aggregate'] = df['CoarseAggregateComponent'] + df['FineAggregateComponent']
df['Slag_Cement'] = df['BlastFurnaceSlag'] / df['CementComponent']
df['Ash_Cement'] = df['FlyAshComponent'] / df['CementComponent']
df['Plastic_Cement'] = df['SuperplasticizerComponent'] / df['CementComponent']
df['Age_Water'] = df['AgeInDays'] / df['WaterComponent']
df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1)
df_test['coarse_fine'] = df_test.CoarseAggregateComponent / df_test.FineAggregateComponent
df_test['Aggregate'] = df_test['CoarseAggregateComponent'] + df_test['FineAggregateComponent']
df_test['Slag_Cement'] = df_test['BlastFurnaceSlag'] / df_test['CementComponent']
df_test['Ash_Cement'] = df_test['FlyAshComponent'] / df_test['CementComponent']
df_test['Plastic_Cement'] = df_test['SuperplasticizerComponent'] / df_test['CementComponent']
df_test['Age_Water'] = df_test['AgeInDays'] / df_test['WaterComponent']
splitter = KFold(n_splits=5, shuffle=True, random_state=231)
df_lgbm = lgbm.Dataset(df, label=y)
model_cv = lgbm.cv({'random_state': 97}, train_set=df_lgbm, num_boost_round=50, folds=splitter, nfold=5, metrics='rmse', return_cvbooster=True, eval_train_metric=True)
predictions = pd.DataFrame(np.array(model_cv['cvbooster'].predict(df_test)).T)
subs = pd.read_csv('/kaggle/input/playground-series-s3e9/sample_submission.csv')
subs.head() | code |
121153835/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split, KFold
import lightgbm as lgbm
import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv')
df_test.drop(columns='id', inplace=True)
y = df.pop('Strength')
df['tot_comp'] = df.iloc[:, :7].sum(axis=1)
df['coarse_fine'] = df.CoarseAggregateComponent / df.FineAggregateComponent
df['Aggregate'] = df['CoarseAggregateComponent'] + df['FineAggregateComponent']
df['Slag_Cement'] = df['BlastFurnaceSlag'] / df['CementComponent']
df['Ash_Cement'] = df['FlyAshComponent'] / df['CementComponent']
df['Plastic_Cement'] = df['SuperplasticizerComponent'] / df['CementComponent']
df['Age_Water'] = df['AgeInDays'] / df['WaterComponent']
df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1)
df_test['coarse_fine'] = df_test.CoarseAggregateComponent / df_test.FineAggregateComponent
df_test['Aggregate'] = df_test['CoarseAggregateComponent'] + df_test['FineAggregateComponent']
df_test['Slag_Cement'] = df_test['BlastFurnaceSlag'] / df_test['CementComponent']
df_test['Ash_Cement'] = df_test['FlyAshComponent'] / df_test['CementComponent']
df_test['Plastic_Cement'] = df_test['SuperplasticizerComponent'] / df_test['CementComponent']
df_test['Age_Water'] = df_test['AgeInDays'] / df_test['WaterComponent']
splitter = KFold(n_splits=5, shuffle=True, random_state=231)
df_lgbm = lgbm.Dataset(df, label=y)
model_cv = lgbm.cv({'random_state': 97}, train_set=df_lgbm, num_boost_round=50, folds=splitter, nfold=5, metrics='rmse', return_cvbooster=True, eval_train_metric=True) | code |
121153835/cell_12 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split, KFold
import lightgbm as lgbm
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv')
df_test.drop(columns='id', inplace=True)
y = df.pop('Strength')
df['tot_comp'] = df.iloc[:, :7].sum(axis=1)
df['coarse_fine'] = df.CoarseAggregateComponent / df.FineAggregateComponent
df['Aggregate'] = df['CoarseAggregateComponent'] + df['FineAggregateComponent']
df['Slag_Cement'] = df['BlastFurnaceSlag'] / df['CementComponent']
df['Ash_Cement'] = df['FlyAshComponent'] / df['CementComponent']
df['Plastic_Cement'] = df['SuperplasticizerComponent'] / df['CementComponent']
df['Age_Water'] = df['AgeInDays'] / df['WaterComponent']
df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1)
df_test['coarse_fine'] = df_test.CoarseAggregateComponent / df_test.FineAggregateComponent
df_test['Aggregate'] = df_test['CoarseAggregateComponent'] + df_test['FineAggregateComponent']
df_test['Slag_Cement'] = df_test['BlastFurnaceSlag'] / df_test['CementComponent']
df_test['Ash_Cement'] = df_test['FlyAshComponent'] / df_test['CementComponent']
df_test['Plastic_Cement'] = df_test['SuperplasticizerComponent'] / df_test['CementComponent']
df_test['Age_Water'] = df_test['AgeInDays'] / df_test['WaterComponent']
splitter = KFold(n_splits=5, shuffle=True, random_state=231)
df_lgbm = lgbm.Dataset(df, label=y)
model_cv = lgbm.cv({'random_state': 97}, train_set=df_lgbm, num_boost_round=50, folds=splitter, nfold=5, metrics='rmse', return_cvbooster=True, eval_train_metric=True)
predictions = pd.DataFrame(np.array(model_cv['cvbooster'].predict(df_test)).T)
predictions.head() | code |
121153835/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df.head() | code |
106198993/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/a-dataset-of-art-and-history-book-pruchases/ArtHistBooks.csv')
df.describe() | code |
106198993/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
106198993/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from scipy.stats import binom
import numpy as np # linear algebra
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
sns.set(rc={'figure.figsize': (1.6 * 8, 8)})
from scipy.stats import binom
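# grid approximation of the posterior for the success probability p: evaluate the binomial likelihood of 301 successes in 1000 trials on a grid of p values, weight by a flat prior, and normalize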
x = np.arange(0, 1, 0.01)
L = binom.pmf(k=301, n=1000, p=x)
prior_prob = 1 / len(L)
delta_theta = 0.01
D = np.sum(L * prior_prob * delta_theta)
P = L * prior_prob / D
ax = sns.lineplot(x=x, y=P)
ax.set(xlabel='x', ylabel='f(x)', title=f'Probability Density Function for p (constant prior)') | code |
106198993/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/a-dataset-of-art-and-history-book-pruchases/ArtHistBooks.csv')
df | code |
106198993/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/a-dataset-of-art-and-history-book-pruchases/ArtHistBooks.csv')
df_ArtPurchase = df.loc[df['ArtBooks'] > 0]
df_ArtPurchase | code |
72094873/cell_4 | [
"image_output_1.png"
] | import os
flairs = t1ws = t2ws = t1gds = 0
study = {}
for p in os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'):
for i in os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p):
study[p] = {}
if i == 'FLAIR':
flairs = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T1w':
t1ws = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T2w':
t2ws = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T1wCE':
t1gds = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
study[p]['FLAIR'] = flairs
study[p]['T1w'] = t1ws
study[p]['T2w'] = t2ws
study[p]['T1wCE'] = t1gds
print(f"Total of {len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'))} patients")
print('Study Directory Created') | code |
72094873/cell_18 | [
"text_html_output_1.png"
] | from ipywidgets import interact
import matplotlib.pyplot as plt
import os
import pandas as pd
import pydicom as dcm
import seaborn as sns
flairs = t1ws = t2ws = t1gds = 0
study = {}
for p in os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'):
for i in os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p):
study[p] = {}
if i == 'FLAIR':
flairs = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T1w':
t1ws = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T2w':
t2ws = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T1wCE':
t1gds = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
study[p]['FLAIR'] = flairs
study[p]['T1w'] = t1ws
study[p]['T2w'] = t2ws
study[p]['T1wCE'] = t1gds
study = pd.DataFrame(study).transpose()
tmp = study.sort_values('FLAIR', ascending=False)['FLAIR'][:10]
tmp = study.sort_values('T1w', ascending=False)['T1w'][:10]
tmp = study.sort_values('T2w', ascending=False)['T2w'][:10]
tmp = study.sort_values('T1wCE', ascending=False)['T1wCE'][:10]
tmp = ['FLAIR', 'T1w', 'T2w', 'T1wCE']
tmp2 = []
for col in tmp:
tmp2.append(sum(study[col]))
df = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
df.columns = ['id', 'mgmt']
df['id'] = df['id'].apply(lambda x: str(x).zfill(5))
df = df.set_index('id')
def crit(x):
return int(x.split('-')[1].split('.')[0])
def imread(path):
return dcm.dcmread(path)
def imshow(arr):
plt.axis('off')
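# NOTE: Study is assumed to be a helper class defined in an earlier cell of the original notebook (not shown here); it appears to load a patient's DICOM series and expose get_3d() and show()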
s = Study('00000')
s = Study('00000')
arr3d = s.get_3d('FLAIR')
def explore_3d(layer):
imshow(arr3d[:, :, layer])
return layer
interact(explore_3d, layer=(1, 400)) | code |
72094873/cell_8 | [
"image_png_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
flairs = t1ws = t2ws = t1gds = 0
study = {}
for p in os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'):
for i in os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p):
study[p] = {}
if i == 'FLAIR':
flairs = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T1w':
t1ws = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T2w':
t2ws = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T1wCE':
t1gds = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
study[p]['FLAIR'] = flairs
study[p]['T1w'] = t1ws
study[p]['T2w'] = t2ws
study[p]['T1wCE'] = t1gds
study = pd.DataFrame(study).transpose()
tmp = study.sort_values('FLAIR', ascending=False)['FLAIR'][:10]
tmp = study.sort_values('T1w', ascending=False)['T1w'][:10]
tmp = study.sort_values('T2w', ascending=False)['T2w'][:10]
tmp = study.sort_values('T1wCE', ascending=False)['T1wCE'][:10]
tmp = ['FLAIR', 'T1w', 'T2w', 'T1wCE']
tmp2 = []
for col in tmp:
tmp2.append(sum(study[col]))
df = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
df.columns = ['id', 'mgmt']
df['id'] = df['id'].apply(lambda x: str(x).zfill(5))
df = df.set_index('id')
df.head() | code |
72094873/cell_17 | [
"image_output_2.png",
"image_output_1.png"
] | s = Study('00000')
s.show('FLAIR', 100) | code |
72094873/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
flairs = t1ws = t2ws = t1gds = 0
study = {}
for p in os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'):
for i in os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p):
study[p] = {}
if i == 'FLAIR':
flairs = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T1w':
t1ws = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T2w':
t2ws = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T1wCE':
t1gds = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
study[p]['FLAIR'] = flairs
study[p]['T1w'] = t1ws
study[p]['T2w'] = t2ws
study[p]['T1wCE'] = t1gds
study = pd.DataFrame(study).transpose()
tmp = study.sort_values('FLAIR', ascending=False)['FLAIR'][:10]
tmp = study.sort_values('T1w', ascending=False)['T1w'][:10]
tmp = study.sort_values('T2w', ascending=False)['T2w'][:10]
tmp = study.sort_values('T1wCE', ascending=False)['T1wCE'][:10]
tmp = ['FLAIR', 'T1w', 'T2w', 'T1wCE']
tmp2 = []
for col in tmp:
tmp2.append(sum(study[col]))
df = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
df.columns = ['id', 'mgmt']
df['id'] = df['id'].apply(lambda x: str(x).zfill(5))
df = df.set_index('id')
sns.countplot(x=df['mgmt'])
plt.show() | code |
72094873/cell_5 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
flairs = t1ws = t2ws = t1gds = 0
study = {}
for p in os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'):
for i in os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p):
study[p] = {}
if i == 'FLAIR':
flairs = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T1w':
t1ws = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T2w':
t2ws = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
elif i == 'T1wCE':
t1gds = len(os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/' + p + '/' + i))
study[p]['FLAIR'] = flairs
study[p]['T1w'] = t1ws
study[p]['T2w'] = t2ws
study[p]['T1wCE'] = t1gds
study = pd.DataFrame(study).transpose()
plt.figure(figsize=(30, 5))
tmp = study.sort_values('FLAIR', ascending=False)['FLAIR'][:10]
plt.subplot(141)
sns.barplot(x=tmp.index, y=tmp)
plt.title('FLAIR TOP 10')
tmp = study.sort_values('T1w', ascending=False)['T1w'][:10]
plt.subplot(142)
sns.barplot(x=tmp.index, y=tmp)
plt.title('T1w TOP 10')
tmp = study.sort_values('T2w', ascending=False)['T2w'][:10]
plt.subplot(143)
sns.barplot(x=tmp.index, y=tmp)
plt.title('T2w TOP 10')
tmp = study.sort_values('T1wCE', ascending=False)['T1wCE'][:10]
plt.subplot(144)
sns.barplot(x=tmp.index, y=tmp)
plt.title('T1wCE TOP 10')
plt.show()
plt.figure(figsize=(20, 5))
tmp = ['FLAIR', 'T1w', 'T2w', 'T1wCE']
tmp2 = []
for col in tmp:
tmp2.append(sum(study[col]))
sns.barplot(x=tmp, y=tmp2)
plt.show() | code |
88099239/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import numpy as np
import pandas as pd
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data.isnull().sum(axis=0)
customer_data = customer_data.drop(['prod_limit', 'report_date', 'update_date', 'prod_code', 'fea_1', 'fea_3', 'fea_5', 'fea_6', 'fea_7', 'fea_9'], axis=1)
data = customer_data.to_numpy(na_value=np.nan).astype(float)
imputer = SimpleImputer(verbose=1)
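# remaining missing values are filled with the column mean (SimpleImputer's default strategy)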
data = imputer.fit_transform(data)
data | code |
88099239/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data.isnull().sum(axis=0)
print('max missing = {}\n min missing = {}\n mean missing = {}'.format(customer_data.isnull().sum(axis=1).max(), customer_data.isnull().sum(axis=1).min(), customer_data.isnull().sum(axis=1).mean())) | code |
88099239/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import LinearSVC, SVC # Linear Support Vector Classification
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.svm import LinearSVC, SVC
RANDOM_STATE = 1234
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data.isnull().sum(axis=0)
customer_data = customer_data.drop(['prod_limit', 'report_date', 'update_date', 'prod_code', 'fea_1', 'fea_3', 'fea_5', 'fea_6', 'fea_7', 'fea_9'], axis=1)
data = customer_data.to_numpy(na_value=np.nan).astype(float)
imputer = SimpleImputer(verbose=1)
data = imputer.fit_transform(data)
data
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
X = data[:, 1:]
y = data[:, 0]
linear_model = LinearSVC(random_state=RANDOM_STATE)
cross_val_score(linear_model, X, y, cv=3, n_jobs=-1, scoring='accuracy')
pred_tags = cross_val_predict(linear_model, X, y, cv=3, n_jobs=-1, method='predict')
poly_svc = SVC(kernel='poly', random_state=RANDOM_STATE)
pred_tags = cross_val_predict(poly_svc, X, y, cv=3, n_jobs=-1, method='predict')
confusion_matrix(y, pred_tags)
gaussian_svc = SVC(kernel='rbf', random_state=RANDOM_STATE)
pred_tags = cross_val_predict(gaussian_svc, X, y, cv=3, n_jobs=-1, method='predict')
confusion_matrix(y, pred_tags) | code |
88099239/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import LinearSVC, SVC # Linear Support Vector Classification
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.svm import LinearSVC, SVC
RANDOM_STATE = 1234
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data.isnull().sum(axis=0)
customer_data = customer_data.drop(['prod_limit', 'report_date', 'update_date', 'prod_code', 'fea_1', 'fea_3', 'fea_5', 'fea_6', 'fea_7', 'fea_9'], axis=1)
data = customer_data.to_numpy(na_value=np.nan).astype(float)
imputer = SimpleImputer(verbose=1)
data = imputer.fit_transform(data)
data
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
X = data[:, 1:]
y = data[:, 0]
linear_model = LinearSVC(random_state=RANDOM_STATE)
cross_val_score(linear_model, X, y, cv=3, n_jobs=-1, scoring='accuracy') | code |
88099239/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data.isnull().sum(axis=0)
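# one-hot encode the categorical product code and the discrete fea_* columns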
for prod_code in customer_data['prod_code'].unique():
customer_data['prod_code_{}'.format(prod_code)] = customer_data['prod_code'] == prod_code
for feature_id in [1, 3, 5, 6, 7, 9]:
for value in customer_data['fea_{}'.format(feature_id)].unique():
customer_data['feature_{}_{}'.format(feature_id, value)] = customer_data['fea_{}'.format(feature_id)] == value
customer_data | code |
88099239/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data['label'].value_counts() | code |
88099239/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data.isnull().sum(axis=0) | code |
88099239/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import LinearSVC, SVC # Linear Support Vector Classification
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.svm import LinearSVC, SVC
RANDOM_STATE = 1234
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data.isnull().sum(axis=0)
customer_data = customer_data.drop(['prod_limit', 'report_date', 'update_date', 'prod_code', 'fea_1', 'fea_3', 'fea_5', 'fea_6', 'fea_7', 'fea_9'], axis=1)
data = customer_data.to_numpy(na_value=np.nan).astype(float)
imputer = SimpleImputer(verbose=1)
data = imputer.fit_transform(data)
data
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
X = data[:, 1:]
y = data[:, 0]
linear_model = LinearSVC(random_state=RANDOM_STATE)
cross_val_score(linear_model, X, y, cv=3, n_jobs=-1, scoring='accuracy')
pred_tags = cross_val_predict(linear_model, X, y, cv=3, n_jobs=-1, method='predict')
poly_svc = SVC(kernel='poly', random_state=RANDOM_STATE)
pred_tags = cross_val_predict(poly_svc, X, y, cv=3, n_jobs=-1, method='predict')
confusion_matrix(y, pred_tags) | code |
88099239/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import numpy as np
import pandas as pd
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data.isnull().sum(axis=0)
customer_data = customer_data.drop(['prod_limit', 'report_date', 'update_date', 'prod_code', 'fea_1', 'fea_3', 'fea_5', 'fea_6', 'fea_7', 'fea_9'], axis=1)
data = customer_data.to_numpy(na_value=np.nan).astype(float)
imputer = SimpleImputer(verbose=1)
data = imputer.fit_transform(data)
data
np.isnan(data).any() | code |
88099239/cell_22 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import LinearSVC, SVC # Linear Support Vector Classification
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.svm import LinearSVC, SVC
RANDOM_STATE = 1234
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data.isnull().sum(axis=0)
customer_data = customer_data.drop(['prod_limit', 'report_date', 'update_date', 'prod_code', 'fea_1', 'fea_3', 'fea_5', 'fea_6', 'fea_7', 'fea_9'], axis=1)
data = customer_data.to_numpy(na_value=np.nan).astype(float)
imputer = SimpleImputer(verbose=1)
data = imputer.fit_transform(data)
data
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
X = data[:, 1:]
y = data[:, 0]
linear_model = LinearSVC(random_state=RANDOM_STATE)
cross_val_score(linear_model, X, y, cv=3, n_jobs=-1, scoring='accuracy')
pred_tags = cross_val_predict(linear_model, X, y, cv=3, n_jobs=-1, method='predict')
confusion_matrix(y, pred_tags) | code |
88099239/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data
customer_data.isnull().sum(axis=0)
customer_data['prod_code'].value_counts() | code |
88099239/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
payments = pd.read_csv('../input/credit-risk-classification-dataset/payment_data.csv')
payments = payments.set_index('id')
customers = pd.read_csv('../input/credit-risk-classification-dataset/customer_data.csv')
customers = customers.set_index('id')
customer_data = customers.join(payments)
customer_data | code |
72068232/cell_25 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
df = pd.read_csv('/kaggle/input/emotion-detection-from-text/tweet_emotions.csv')
df.content.iloc[-10:]
df.sentiment.value_counts()
df = df[df.sentiment != 'anger']
df = df[df.sentiment != 'boredom']
df = df[df.sentiment != 'enthusiasm']
df = df[df.sentiment != 'empty']
df = df[df.sentiment != 'sentiment']
df.sentiment.value_counts()
possible_labels = df.sentiment.unique()
label_dict = {}
for index, possible_label in enumerate(possible_labels):
label_dict[possible_label] = index
df['label'] = df.sentiment.replace(label_dict)
X_train, X_val, y_train, y_val = train_test_split(df.index.values, df.label.values, test_size=0.15, stratify=df.label.values)
df['data_type'] = ['not_set'] * df.shape[0]
df.loc[X_train, 'data_type'] = 'train'
df.loc[X_val, 'data_type'] = 'val'
df.groupby(['sentiment', 'label', 'data_type']).count() | code |
72068232/cell_34 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from gingerit.gingerit import GingerIt
from tqdm.notebook import tqdm
from symspellpy.symspellpy import SymSpell, Verbosity
import pkg_resources
import re, string, json
import spacy
def normalization_pipeline(sentences):
sentences = simplify_punctuation_and_whitespace(sentences)
sentences = normalize_contractions(sentences)
return sentences
def simplify_punctuation_and_whitespace(sentence_list):
"""
using more than 4 ALLCAPS words will add EMPW and puntuation like !!!!! will get EMPP
"""
norm_sents = []
for sentence in tqdm(sentence_list):
sent = _replace_urls(sentence)
sent = _mention_hash(sent)
sent = _simplify_punctuation(sent)
sent = _reduce_repetitions(sent)
sent = _normalize_whitespace(sent)
norm_sents.append(sent)
return norm_sents
def _replace_urls(text):
url_regex = '(https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9]+\\.[^\\s]{2,}|www\\.[a-zA-Z0-9]+\\.[^\\s]{2,})'
text = re.sub(url_regex, '-URL-', text)
return text
def _mention_hash(in_str):
"""
replacing @MENTION and #HASHTAG
BEWARE OF USES OF # AND @ AND SPACES BETWEEN THEM
"""
in_str = str(in_str)
in_str = re.sub('@', '@MEN ', in_str, flags=re.IGNORECASE)
in_str = re.sub('#', '#HAS ', in_str, flags=re.IGNORECASE)
return in_str.strip()
def _simplify_punctuation(text):
"""
This function simplifies doubled or more complex punctuation. The exception is '...'. #?! ??? !!!
"""
corrected = str(text)
corrected = re.sub('([!?,;])\\1+', '\\1\\1 <-EMPP', corrected)
corrected = re.sub('\\.{2,}', '...', corrected)
return corrected
def _reduce_repetitions(text):
"""
Auxiliary function to help with exxagerated words.
Examples:
woooooords -> woords
yaaaaaaaaaaaaaaay -> yaay
door -> dor
"""
correction = str(text)
for index, words in enumerate(str(text).split()):
if _is_EMP_word(words) == True:
correction = correction.replace(words, words + ' <-EMPW')
if (len(words) > 4) & words.isupper() == True:
correction = correction.replace(words, words + ' <-EMPU')
return re.sub('([\\w])\\1+', '\\1\\1', correction)
def _is_EMP_word(word):
"""
True/False: checks whether the word contains three or more consecutive identical characters"""
count = 1
if len(word) > 1:
for i in range(1, len(word)):
if word[i] in string.punctuation:
return False
if word[i - 1] == word[i]:
count += 1
if count >= 3:
return True
else:
if count >= 3:
return True
count = 1
else:
return False
return False
def _normalize_whitespace(text):
"""
This function normalizes whitespaces, removing duplicates.
"""
corrected = str(text)
corrected = re.sub('//t', '\\t', corrected)
corrected = re.sub('( )\\1+', '\\1', corrected)
corrected = re.sub('(\\n)\\1+', '\\1', corrected)
corrected = re.sub('(\\r)\\1+', '\\1', corrected)
corrected = re.sub('(\\t)\\1+', '\\1', corrected)
return corrected.strip(' ')
def normalize_contractions(sentence_list):
contraction_list = json.loads(open('/kaggle/input/english-contractions/english_contractions.json.txt', 'r').read())
character_entity = {'<3': 'heart', '&': 'and', '"': ' quote '}
contraction_list = {**contraction_list, **character_entity}
norm_sents = []
for sentence in tqdm(sentence_list):
norm_sents.append(_normalize_contractions_slang_emoji_entity(sentence, contraction_list))
return norm_sents
def _normalize_contractions_slang_emoji_entity(text, contractions):
"""
part1:normalizes english contractions.
"""
for word in text.split():
if word.lower() in contractions:
text = text.replace(word, contractions[word.lower()])
'\n part 2: using gingerit slang correction:\n '
parser = GingerIt()
result = parser.parse(text)
sentence = result['result']
'\n part3: emoji and character entity reference conversion to meaning\n '
emoticons = emot_obj.emoticons(sentence)
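# emot returns parallel lists: 'value' holds the matched emoticons and 'mean' their textual descriptions, which are substituted into the sentence below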
printing = False
for i in range(0, len(emoticons['value'])):
sentence = sentence.replace(emoticons['value'][i], emoticons['mean'][i])
return sentence
original_examples = ['hi @someone WATCH me #proud :) ;) ...... !!!!! wanna go tHeRe bc my finls clooooose "bananas" & ']
preprocessed_examples = normalization_pipeline(original_examples)
for example_index, example in enumerate(preprocessed_examples):
print(original_examples[example_index])
print(example) | code |
72068232/cell_23 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
df = pd.read_csv('/kaggle/input/emotion-detection-from-text/tweet_emotions.csv')
df.content.iloc[-10:]
df.sentiment.value_counts()
df = df[df.sentiment != 'anger']
df = df[df.sentiment != 'boredom']
df = df[df.sentiment != 'enthusiasm']
df = df[df.sentiment != 'empty']
df = df[df.sentiment != 'sentiment']
df.sentiment.value_counts()
possible_labels = df.sentiment.unique()
label_dict = {}
for index, possible_label in enumerate(possible_labels):
label_dict[possible_label] = index
df['label'] = df.sentiment.replace(label_dict)
X_train, X_val, y_train, y_val = train_test_split(df.index.values, df.label.values, test_size=0.15, stratify=df.label.values)
df['data_type'] = ['not_set'] * df.shape[0]
df.head() | code |
72068232/cell_39 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from collections import Counter
from gingerit.gingerit import GingerIt
from sklearn.model_selection import train_test_split
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
df = pd.read_csv('/kaggle/input/emotion-detection-from-text/tweet_emotions.csv')
df.content.iloc[-10:]
df.sentiment.value_counts()
df = df[df.sentiment != 'anger']
df = df[df.sentiment != 'boredom']
df = df[df.sentiment != 'enthusiasm']
df = df[df.sentiment != 'empty']
df = df[df.sentiment != 'sentiment']
df.sentiment.value_counts()
possible_labels = df.sentiment.unique()
label_dict = {}
for index, possible_label in enumerate(possible_labels):
label_dict[possible_label] = index
df['label'] = df.sentiment.replace(label_dict)
X_train, X_val, y_train, y_val = train_test_split(df.index.values, df.label.values, test_size=0.15, stratify=df.label.values)
df['data_type'] = ['not_set'] * df.shape[0]
df.loc[X_train, 'data_type'] = 'train'
df.loc[X_val, 'data_type'] = 'val'
df.groupby(['sentiment', 'label', 'data_type']).count()
from symspellpy.symspellpy import SymSpell, Verbosity
import pkg_resources
import re, string, json
import spacy
def normalization_pipeline(sentences):
sentences = simplify_punctuation_and_whitespace(sentences)
sentences = normalize_contractions(sentences)
return sentences
def simplify_punctuation_and_whitespace(sentence_list):
"""
using more than 4 ALLCAPS words will add EMPW and puntuation like !!!!! will get EMPP
"""
norm_sents = []
for sentence in tqdm(sentence_list):
sent = _replace_urls(sentence)
sent = _mention_hash(sent)
sent = _simplify_punctuation(sent)
sent = _reduce_repetitions(sent)
sent = _normalize_whitespace(sent)
norm_sents.append(sent)
return norm_sents
def _replace_urls(text):
url_regex = '(https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9]+\\.[^\\s]{2,}|www\\.[a-zA-Z0-9]+\\.[^\\s]{2,})'
text = re.sub(url_regex, '-URL-', text)
return text
def _mention_hash(in_str):
"""
replacing @MENTION and #HASHTAG
BEWARE OF USES OF # AND @ AND SPACES BETWEEN THEM
"""
in_str = str(in_str)
in_str = re.sub('@', '@MEN ', in_str, flags=re.IGNORECASE)
in_str = re.sub('#', '#HAS ', in_str, flags=re.IGNORECASE)
return in_str.strip()
def _simplify_punctuation(text):
"""
This function simplifies doubled or more complex punctuation. The exception is '...'. #?! ??? !!!
"""
corrected = str(text)
corrected = re.sub('([!?,;])\\1+', '\\1\\1 <-EMPP', corrected)
corrected = re.sub('\\.{2,}', '...', corrected)
return corrected
def _reduce_repetitions(text):
"""
Auxiliary function to help with exxagerated words.
Examples:
woooooords -> woords
yaaaaaaaaaaaaaaay -> yaay
door -> dor
"""
correction = str(text)
for index, words in enumerate(str(text).split()):
if _is_EMP_word(words) == True:
correction = correction.replace(words, words + ' <-EMPW')
if (len(words) > 4) & words.isupper() == True:
correction = correction.replace(words, words + ' <-EMPU')
return re.sub('([\\w])\\1+', '\\1\\1', correction)
def _is_EMP_word(word):
"""
True/False: checks whether the word contains three or more consecutive identical characters"""
count = 1
if len(word) > 1:
for i in range(1, len(word)):
if word[i] in string.punctuation:
return False
if word[i - 1] == word[i]:
count += 1
if count >= 3:
return True
else:
if count >= 3:
return True
count = 1
else:
return False
return False
def _normalize_whitespace(text):
"""
This function normalizes whitespaces, removing duplicates.
"""
corrected = str(text)
corrected = re.sub('//t', '\\t', corrected)
corrected = re.sub('( )\\1+', '\\1', corrected)
corrected = re.sub('(\\n)\\1+', '\\1', corrected)
corrected = re.sub('(\\r)\\1+', '\\1', corrected)
corrected = re.sub('(\\t)\\1+', '\\1', corrected)
return corrected.strip(' ')
def normalize_contractions(sentence_list):
contraction_list = json.loads(open('/kaggle/input/english-contractions/english_contractions.json.txt', 'r').read())
character_entity = {'<3': 'heart', '&': 'and', '"': ' quote '}
contraction_list = {**contraction_list, **character_entity}
norm_sents = []
for sentence in tqdm(sentence_list):
norm_sents.append(_normalize_contractions_slang_emoji_entity(sentence, contraction_list))
return norm_sents
def _normalize_contractions_slang_emoji_entity(text, contractions):
"""
part1:normalizes english contractions.
"""
for word in text.split():
if word.lower() in contractions:
text = text.replace(word, contractions[word.lower()])
'\n part 2: using gingerit slang correction:\n '
parser = GingerIt()
result = parser.parse(text)
sentence = result['result']
'\n part3: emoji and character entity reference conversion to meaning\n '
emoticons = emot_obj.emoticons(sentence)
printing = False
for i in range(0, len(emoticons['value'])):
sentence = sentence.replace(emoticons['value'][i], emoticons['mean'][i])
return sentence
import matplotlib.pyplot as plt
from collections import Counter
tokenizee = []
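# NOTE: spacy_process is assumed to be a user-defined spaCy tokenization helper from an earlier cell of the original notebook; it is not defined in this snippet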
for words in tqdm(range(1, len(df.content) - 1)):
tokenizee.append(spacy_process(df.content[words]))
words = Counter()
for s in tokenizee:
for w in s:
words[w] += 1
sorted_words = list(words.keys())
sorted_words.sort(key=lambda w: words[w], reverse=True)
print(f'Number of different Tokens in our Dataset: {len(sorted_words)}')
print(sorted_words[:100])
count_occurences = sum(words.values())
accumulated = 0
counter = 0
while accumulated < count_occurences * 0.8:
accumulated += words[sorted_words[counter]]
counter += 1
print(f'The {counter * 100 / len(words)}% most common words account for the {accumulated * 100 / count_occurences}% of the occurrences')
plt.bar(range(100), [words[w] for w in sorted_words[:100]])
plt.show() | code |
72068232/cell_11 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
df = pd.read_csv('/kaggle/input/emotion-detection-from-text/tweet_emotions.csv')
df.content.iloc[-10:] | code |
72068232/cell_18 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
df = pd.read_csv('/kaggle/input/emotion-detection-from-text/tweet_emotions.csv')
df.content.iloc[-10:]
df.sentiment.value_counts()
df = df[df.sentiment != 'anger']
df = df[df.sentiment != 'boredom']
df = df[df.sentiment != 'enthusiasm']
df = df[df.sentiment != 'empty']
df = df[df.sentiment != 'sentiment']
df.sentiment.value_counts()
possible_labels = df.sentiment.unique()
label_dict = {}
for index, possible_label in enumerate(possible_labels):
label_dict[possible_label] = index
df['label'] = df.sentiment.replace(label_dict)
df.head() | code |
72068232/cell_28 | [
"text_plain_output_1.png"
] | # Install spaCy (run in terminal/prompt)
import sys
!{sys.executable} -m pip install spacy
# Download spaCy's 'en' Model
!{sys.executable} -m spacy download en
!pip install -U symspellpy
!pip install gingerit
from gingerit.gingerit import GingerIt
#emoticons
!pip install emot --upgrade
import emot
emot_obj = emot.core.emot() | code |
72068232/cell_17 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
df = pd.read_csv('/kaggle/input/emotion-detection-from-text/tweet_emotions.csv')
df.content.iloc[-10:]
df.sentiment.value_counts()
df = df[df.sentiment != 'anger']
df = df[df.sentiment != 'boredom']
df = df[df.sentiment != 'enthusiasm']
df = df[df.sentiment != 'empty']
df = df[df.sentiment != 'sentiment']
df.sentiment.value_counts()
possible_labels = df.sentiment.unique()
label_dict = {}
for index, possible_label in enumerate(possible_labels):
label_dict[possible_label] = index
label_dict | code |
72068232/cell_14 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
df = pd.read_csv('/kaggle/input/emotion-detection-from-text/tweet_emotions.csv')
df.content.iloc[-10:]
df.sentiment.value_counts()
df = df[df.sentiment != 'anger']
df = df[df.sentiment != 'boredom']
df = df[df.sentiment != 'enthusiasm']
df = df[df.sentiment != 'empty']
df = df[df.sentiment != 'sentiment']
df.sentiment.value_counts() | code |
72068232/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv('/kaggle/input/emotion-detection-from-text/tweet_emotions.csv')
df.head() | code |