path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses, 1 value) |
---|---|---|---|
73080198/cell_22 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
fig, ax = plt.subplots(figsize=(12, 6))
bars = ax.hist(train["target"],
               bins=40,
               range=(0, 11),
               color="orange",
               edgecolor="black")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train["target"],
               bins=3500,
               range=(6.9, 10.4),
               color="orange",
               edgecolor="orange")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train["target"],
               bins=100,
               range=(8.05, 8.15),
               color="orange",
               edgecolor="black")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
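# the target appears to be log-transformed; exponentiating maps it back to the original scale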
inverse_log = np.exp(train['target'])
fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(inverse_log, range=(0, 40000), bins=4000, color='orange', edgecolor='orange')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show() | code |
73080198/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
train.describe() | code |
73080198/cell_27 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
predictions_base = pd.read_csv('/kaggle/input/submissionstevenferrercsv/submissionStevenFerrer.csv', low_memory=False)
fig, ax = plt.subplots(figsize=(12, 6))
bars = ax.hist(train["target"],
               bins=40,
               range=(0, 11),
               color="orange",
               edgecolor="black")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train["target"],
               bins=3500,
               range=(6.9, 10.4),
               color="orange",
               edgecolor="orange")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train["target"],
               bins=100,
               range=(8.05, 8.15),
               color="orange",
               edgecolor="black")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
# inverse_log = np.power(10, train["target"])
inverse_log = np.exp(train["target"])  # swap with the commented line above to inspect 10**target instead of e**target
fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(inverse_log,
               range=(0, 40000),
               bins=4000,
               color="orange",
               edgecolor="orange")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(predictions_base['target'], bins=3500, range=(6.9, 10.4), color='orange', edgecolor='orange')
ax.set_title('Prediction distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Prediction value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show() | code |
73080198/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
fig, ax = plt.subplots(figsize=(12, 6))
bars = ax.hist(train['target'], bins=40, range=(0, 11), color='orange', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show() | code |
2041151/cell_6 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.svm import SVC
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
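# assumes earlier cells (not in this extract) encoded Sex numerically and engineered the Alone and Family columns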
X_train = np.array(train[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass', 'Sex', 'Alone', 'Family']])
y_train = np.array(train[['Survived']]).reshape(-1)
X_test = np.array(test[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass', 'Sex', 'Alone', 'Family']])
splitter = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
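# grid-search the SVC regularization strength C with stratified 5-fold cross-validation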
c_param = [1, 2, 5, 10, 20]
param_grid = dict(C=c_param)
svc = SVC()
gs = GridSearchCV(svc, param_grid=param_grid, scoring='accuracy', verbose=1, cv=splitter)
gs.fit(X_train, y_train)
print('Best: %f using %s' % (gs.best_score_, gs.best_params_)) | code |
122250913/cell_13 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df['MarketSize'].unique() | code |
122250913/cell_9 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count() | code |
122250913/cell_25 | [
"text_plain_output_1.png"
from scipy.stats import ttest_1samp, shapiro, levene, ttest_ind, mannwhitneyu, f_oneway  # truncated continuation reconstructed; f_oneway is used elsewhere in this notebook
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df_medium_week = df.groupby(['MarketSize', 'week', 'Promotion'])['SalesInThousands'].mean()
df_medium_week
df.groupby(['MarketSize', 'Promotion'])['SalesInThousands'].mean()
small_market = df[df['MarketSize'] == 'Small']
medium_market = df[df['MarketSize'] == 'Medium']
large_market = df[df['MarketSize'] == 'Large']
small_market.describe().T
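# Shapiro-Wilk normality check of sales within each promotion group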
for promo in list(small_market['Promotion'].unique()):
pvalue = shapiro(small_market.loc[small_market['Promotion'] == promo, 'SalesInThousands'])[1]
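# Levene's test for homogeneity of variances across the three promotions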
test_stat, pvalue = levene(small_market.loc[small_market['Promotion'] == 1, 'SalesInThousands'], small_market.loc[small_market['Promotion'] == 2, 'SalesInThousands'], small_market.loc[small_market['Promotion'] == 3, 'SalesInThousands'])
print('Test Stat = %.4f, p-value = %.4f' % (test_stat, pvalue)) | code |
122250913/cell_23 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df_medium_week = df.groupby(['MarketSize', 'week', 'Promotion'])['SalesInThousands'].mean()
df_medium_week
df.groupby(['MarketSize', 'Promotion'])['SalesInThousands'].mean()
small_market = df[df['MarketSize'] == 'Small']
medium_market = df[df['MarketSize'] == 'Medium']
large_market = df[df['MarketSize'] == 'Large']
small_market.describe().T | code |
122250913/cell_20 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df_medium_week = df.groupby(['MarketSize', 'week', 'Promotion'])['SalesInThousands'].mean()
df_medium_week
df.groupby(['MarketSize', 'Promotion'])['SalesInThousands'].mean()
small_market = df[df['MarketSize'] == 'Small']
medium_market = df[df['MarketSize'] == 'Medium']
large_market = df[df['MarketSize'] == 'Large']
large_market['MarketSize'].nunique() | code |
122250913/cell_6 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.head() | code |
122250913/cell_26 | [
"text_plain_output_1.png"
from scipy.stats import ttest_1samp, shapiro, levene, ttest_ind, mannwhitneyu, f_oneway  # truncated continuation reconstructed; f_oneway is used below
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df_medium_week = df.groupby(['MarketSize', 'week', 'Promotion'])['SalesInThousands'].mean()
df_medium_week
df.groupby(['MarketSize', 'Promotion'])['SalesInThousands'].mean()
small_market = df[df['MarketSize'] == 'Small']
medium_market = df[df['MarketSize'] == 'Medium']
large_market = df[df['MarketSize'] == 'Large']
small_market.describe().T
for promo in list(small_market['Promotion'].unique()):
pvalue = shapiro(small_market.loc[small_market['Promotion'] == promo, 'SalesInThousands'])[1]
test_stat, pvalue = levene(small_market.loc[small_market['Promotion'] == 1, 'SalesInThousands'], small_market.loc[small_market['Promotion'] == 2, 'SalesInThousands'], small_market.loc[small_market['Promotion'] == 3, 'SalesInThousands'])
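# one-way ANOVA: are mean sales equal across the three promotions?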
f_oneway(small_market.loc[small_market['Promotion'] == 1, 'SalesInThousands'], small_market.loc[small_market['Promotion'] == 2, 'SalesInThousands'], small_market.loc[small_market['Promotion'] == 3, 'SalesInThousands']) | code |
122250913/cell_11 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df_age_sales.corr() | code |
122250913/cell_19 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df_medium_week = df.groupby(['MarketSize', 'week', 'Promotion'])['SalesInThousands'].mean()
df_medium_week
df.groupby(['MarketSize', 'Promotion'])['SalesInThousands'].mean()
small_market = df[df['MarketSize'] == 'Small']
medium_market = df[df['MarketSize'] == 'Medium']
large_market = df[df['MarketSize'] == 'Large']
medium_market['MarketSize'].nunique() | code |
122250913/cell_7 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.info() | code |
122250913/cell_18 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df_medium_week = df.groupby(['MarketSize', 'week', 'Promotion'])['SalesInThousands'].mean()
df_medium_week
df.groupby(['MarketSize', 'Promotion'])['SalesInThousands'].mean()
small_market = df[df['MarketSize'] == 'Small']
medium_market = df[df['MarketSize'] == 'Medium']
large_market = df[df['MarketSize'] == 'Large']
small_market['MarketSize'].nunique() | code |
122250913/cell_8 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T | code |
122250913/cell_15 | [
"text_html_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df_medium_week = df.groupby(['MarketSize', 'week', 'Promotion'])['SalesInThousands'].mean()
df_medium_week | code |
122250913/cell_16 | [
"text_plain_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df_medium_week = df.groupby(['MarketSize', 'week', 'Promotion'])['SalesInThousands'].mean()
df_medium_week
df.groupby(['MarketSize', 'Promotion'])['SalesInThousands'].mean() | code |
122250913/cell_24 | [
"text_plain_output_1.png"
from scipy.stats import ttest_1samp, shapiro, levene, ttest_ind, mannwhitneyu, f_oneway  # truncated continuation reconstructed; f_oneway is used elsewhere in this notebook
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df_medium_week = df.groupby(['MarketSize', 'week', 'Promotion'])['SalesInThousands'].mean()
df_medium_week
df.groupby(['MarketSize', 'Promotion'])['SalesInThousands'].mean()
small_market = df[df['MarketSize'] == 'Small']
medium_market = df[df['MarketSize'] == 'Medium']
large_market = df[df['MarketSize'] == 'Large']
small_market.describe().T
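# Shapiro-Wilk normality test per promotion; p < 0.05 rejects normality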
for promo in list(small_market['Promotion'].unique()):
pvalue = shapiro(small_market.loc[small_market['Promotion'] == promo, 'SalesInThousands'])[1]
print('For the Small Markets Promotion : ' + str(promo), 'p-value: %.4f' % pvalue) | code |
122250913/cell_14 | [
"text_html_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean()
df['MarketSize'].value_counts() | code |
122250913/cell_10 | [
"text_html_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales | code |
122250913/cell_12 | [
"text_html_output_1.png"
import pandas as pd
df_ = pd.read_csv('/kaggle/input/fast-food-marketing-campaign-ab-test/WA_Marketing-Campaign.csv')
df = df_.copy()
df.describe().T
df.groupby('Promotion')['Promotion'].count()
df_age_sales = df.groupby(['MarketID', 'LocationID']).agg({'AgeOfStore': 'mean', 'SalesInThousands': 'mean'})
df_age_sales
df.groupby(['MarketID', 'Promotion'])['SalesInThousands'].mean() | code |
2022426/cell_13 | [
"text_plain_output_1.png"
] | from keras.preprocessing import text, sequence
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
max_features = 20000
maxlen = 100
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
list_sentences_train = train['comment_text'].fillna('unknown').values
list_classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = train[list_classes].values
list_sentences_test = test['comment_text'].fillna('unknown').values
test = ['This is your last warning. You will be blocked from editing the next time you vandalize a page, as you did with this edit to Geb. |Parlez ici ']
# tokenizer reconstructed here (assumption: the original fitting cell is not part of this extract)
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(test))
test_token = tokenizer.texts_to_sequences(test)
test_2 = sequence.pad_sequences(test_token, maxlen=maxlen) | code |
2022426/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPool1D, Dropout, concatenate
from keras.layers import Dense, Embedding, Input,GRU
from keras.models import Model
max_features = 20000
maxlen = 100
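# two Conv1D + max-pooling stages feed a GRU; six sigmoid units score the six toxicity labels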
def cnn_rnn():
embed_size = 256
inp = Input(shape=(maxlen,))
main = Embedding(max_features, embed_size)(inp)
main = Dropout(0.2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = GRU(32)(main)
main = Dense(16, activation='relu')(main)
main = Dense(6, activation='sigmoid')(main)
model = Model(inputs=inp, outputs=main)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
model = cnn_rnn()
model.summary() | code |
2022426/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print(train.head(10))
list_sentences_train = train['comment_text'].fillna('unknown').values
list_classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = train[list_classes].values
list_sentences_test = test['comment_text'].fillna('unknown').values | code |
2022426/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import Dense, Embedding, Input, GRU
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPool1D, Dropout, concatenate
from keras.preprocessing import text, sequence
from keras.callbacks import EarlyStopping, ModelCheckpoint | code |
2022426/cell_11 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPool1D, Dropout, concatenate
from keras.layers import Dense, Embedding, Input,GRU
from keras.models import Model
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
max_features = 20000
maxlen = 100
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
list_sentences_train = train['comment_text'].fillna('unknown').values
list_classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = train[list_classes].values
list_sentences_test = test['comment_text'].fillna('unknown').values
def cnn_rnn():
embed_size = 256
inp = Input(shape=(maxlen,))
main = Embedding(max_features, embed_size)(inp)
main = Dropout(0.2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = GRU(32)(main)
main = Dense(16, activation='relu')(main)
main = Dense(6, activation='sigmoid')(main)
model = Model(inputs=inp, outputs=main)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
model = cnn_rnn()
model.summary()
from sklearn.model_selection import train_test_split
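# count how many of the six labels are positive for each comment (0 = clean)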
any_category_positive = np.sum(y, 1)
# tokenization reconstructed (assumption: the original preprocessing cell is not part of this extract)
from keras.preprocessing import text, sequence
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
X_t = sequence.pad_sequences(tokenizer.texts_to_sequences(list_sentences_train), maxlen=maxlen)
X_t_train, X_t_test, y_train, y_test = train_test_split(X_t, y, test_size=0.1)
batch_size = 128
epochs = 3
file_path = 'model_best.h5'
checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor='val_loss', mode='min', patience=20)
callbacks_list = [checkpoint, early]
model.fit(X_t_train, y_train, validation_data=(X_t_test, y_test), batch_size=batch_size, epochs=epochs, shuffle=True, callbacks=callbacks_list)
model.save('Whole_model.h5') | code |
2022426/cell_1 | [
"text_plain_output_1.png"
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2022426/cell_15 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPool1D, Dropout, concatenate
from keras.layers import Dense, Embedding, Input,GRU
from keras.models import Model
from keras.preprocessing import text, sequence
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
max_features = 20000
maxlen = 100
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
list_sentences_train = train['comment_text'].fillna('unknown').values
list_classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = train[list_classes].values
list_sentences_test = test['comment_text'].fillna('unknown').values
def cnn_rnn():
embed_size = 256
inp = Input(shape=(maxlen,))
main = Embedding(max_features, embed_size)(inp)
main = Dropout(0.2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = GRU(32)(main)
main = Dense(16, activation='relu')(main)
main = Dense(6, activation='sigmoid')(main)
model = Model(inputs=inp, outputs=main)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
model = cnn_rnn()
model.summary()
from sklearn.model_selection import train_test_split
any_category_positive = np.sum(y, 1)
# tokenization reconstructed (assumption: the original preprocessing cell is not part of this extract)
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
X_t = sequence.pad_sequences(tokenizer.texts_to_sequences(list_sentences_train), maxlen=maxlen)
X_te = sequence.pad_sequences(tokenizer.texts_to_sequences(list_sentences_test), maxlen=maxlen)
X_t_train, X_t_test, y_train, y_test = train_test_split(X_t, y, test_size=0.1)
batch_size = 128
epochs = 3
file_path = 'model_best.h5'
checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor='val_loss', mode='min', patience=20)
callbacks_list = [checkpoint, early]
model.fit(X_t_train, y_train, validation_data=(X_t_test, y_test), batch_size=batch_size, epochs=epochs, shuffle=True, callbacks=callbacks_list)
model.save('Whole_model.h5')
model.load_weights(file_path)
y_test = model.predict(X_te)
sample_submission = pd.read_csv('../input/sample_submission.csv')
sample_submission[list_classes] = y_test
sample_submission.to_csv('predictions.csv', index=False)
test = ['This is your last warning. You will be blocked from editing the next time you vandalize a page, as you did with this edit to Geb. |Parlez ici ']
tokenizer.fit_on_texts(list(test))
test_token = tokenizer.texts_to_sequences(test)
test_2 = sequence.pad_sequences(test_token, maxlen=maxlen)
np.argmax(model.predict(test_2)) | code |
2022426/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPool1D, Dropout, concatenate
from keras.layers import Dense, Embedding, Input,GRU
from keras.models import Model
from keras.preprocessing import text, sequence
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
max_features = 20000
maxlen = 100
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
list_sentences_train = train['comment_text'].fillna('unknown').values
list_classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = train[list_classes].values
list_sentences_test = test['comment_text'].fillna('unknown').values
def cnn_rnn():
embed_size = 256
inp = Input(shape=(maxlen,))
main = Embedding(max_features, embed_size)(inp)
main = Dropout(0.2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = GRU(32)(main)
main = Dense(16, activation='relu')(main)
main = Dense(6, activation='sigmoid')(main)
model = Model(inputs=inp, outputs=main)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
model = cnn_rnn()
model.summary()
from sklearn.model_selection import train_test_split
any_category_positive = np.sum(y, 1)
# tokenization reconstructed (assumption: the original preprocessing cell is not part of this extract)
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
X_t = sequence.pad_sequences(tokenizer.texts_to_sequences(list_sentences_train), maxlen=maxlen)
X_te = sequence.pad_sequences(tokenizer.texts_to_sequences(list_sentences_test), maxlen=maxlen)
X_t_train, X_t_test, y_train, y_test = train_test_split(X_t, y, test_size=0.1)
batch_size = 128
epochs = 3
file_path = 'model_best.h5'
checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor='val_loss', mode='min', patience=20)
callbacks_list = [checkpoint, early]
model.fit(X_t_train, y_train, validation_data=(X_t_test, y_test), batch_size=batch_size, epochs=epochs, shuffle=True, callbacks=callbacks_list)
model.save('Whole_model.h5')
model.load_weights(file_path)
y_test = model.predict(X_te)
sample_submission = pd.read_csv('../input/sample_submission.csv')
sample_submission[list_classes] = y_test
sample_submission.to_csv('predictions.csv', index=False)
test = ['This is your last warning. You will be blocked from editing the next time you vandalize a page, as you did with this edit to Geb. |Parlez ici ']
tokenizer.fit_on_texts(list(test))
test_token = tokenizer.texts_to_sequences(test)
test_2 = sequence.pad_sequences(test_token, maxlen=maxlen)
np.argmax(model.predict(test_2))
model.predict(test_2) | code |
2022426/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPool1D, Dropout, concatenate
from keras.layers import Dense, Embedding, Input,GRU
from keras.models import Model
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
max_features = 20000
maxlen = 100
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
list_sentences_train = train['comment_text'].fillna('unknown').values
list_classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = train[list_classes].values
list_sentences_test = test['comment_text'].fillna('unknown').values
def cnn_rnn():
embed_size = 256
inp = Input(shape=(maxlen,))
main = Embedding(max_features, embed_size)(inp)
main = Dropout(0.2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = GRU(32)(main)
main = Dense(16, activation='relu')(main)
main = Dense(6, activation='sigmoid')(main)
model = Model(inputs=inp, outputs=main)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
model = cnn_rnn()
model.summary()
from sklearn.model_selection import train_test_split
any_category_positive = np.sum(y, 1)
# tokenization reconstructed (assumption: the original preprocessing cell is not part of this extract)
from keras.preprocessing import text, sequence
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
X_t = sequence.pad_sequences(tokenizer.texts_to_sequences(list_sentences_train), maxlen=maxlen)
X_te = sequence.pad_sequences(tokenizer.texts_to_sequences(list_sentences_test), maxlen=maxlen)
X_t_train, X_t_test, y_train, y_test = train_test_split(X_t, y, test_size=0.1)
batch_size = 128
epochs = 3
file_path = 'model_best.h5'
checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor='val_loss', mode='min', patience=20)
callbacks_list = [checkpoint, early]
model.fit(X_t_train, y_train, validation_data=(X_t_test, y_test), batch_size=batch_size, epochs=epochs, shuffle=True, callbacks=callbacks_list)
model.save('Whole_model.h5')
model.load_weights(file_path)
y_test = model.predict(X_te)
sample_submission = pd.read_csv('../input/sample_submission.csv')
sample_submission[list_classes] = y_test
sample_submission.to_csv('predictions.csv', index=False)
pred = pd.read_csv('predictions.csv')
pred.head() | code |
2022426/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
list_sentences_train = train['comment_text'].fillna('unknown').values
list_classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = train[list_classes].values
list_sentences_test = test['comment_text'].fillna('unknown').values
from sklearn.model_selection import train_test_split
print('Positive Labels ')
any_category_positive = np.sum(y, 1)
print(pd.value_counts(any_category_positive))
# tokenization reconstructed (assumption: the original preprocessing cell is not part of this extract)
from keras.preprocessing import text, sequence
max_features = 20000
maxlen = 100
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
X_t = sequence.pad_sequences(tokenizer.texts_to_sequences(list_sentences_train), maxlen=maxlen)
X_t_train, X_t_test, y_train, y_test = train_test_split(X_t, y, test_size=0.1)
print('Training:', X_t_train.shape)
print('Testing:', X_t_test.shape) | code |
2022426/cell_12 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPool1D, Dropout, concatenate
from keras.layers import Dense, Embedding, Input,GRU
from keras.models import Model
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
max_features = 20000
maxlen = 100
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
list_sentences_train = train['comment_text'].fillna('unknown').values
list_classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = train[list_classes].values
list_sentences_test = test['comment_text'].fillna('unknown').values
def cnn_rnn():
embed_size = 256
inp = Input(shape=(maxlen,))
main = Embedding(max_features, embed_size)(inp)
main = Dropout(0.2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(main)
main = MaxPooling1D(pool_size=2)(main)
main = GRU(32)(main)
main = Dense(16, activation='relu')(main)
main = Dense(6, activation='sigmoid')(main)
model = Model(inputs=inp, outputs=main)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
model = cnn_rnn()
model.summary()
from sklearn.model_selection import train_test_split
any_category_positive = np.sum(y, 1)
# tokenization reconstructed (assumption: the original preprocessing cell is not part of this extract)
from keras.preprocessing import text, sequence
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
X_t = sequence.pad_sequences(tokenizer.texts_to_sequences(list_sentences_train), maxlen=maxlen)
X_te = sequence.pad_sequences(tokenizer.texts_to_sequences(list_sentences_test), maxlen=maxlen)
X_t_train, X_t_test, y_train, y_test = train_test_split(X_t, y, test_size=0.1)
batch_size = 128
epochs = 3
file_path = 'model_best.h5'
checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor='val_loss', mode='min', patience=20)
callbacks_list = [checkpoint, early]
model.fit(X_t_train, y_train, validation_data=(X_t_test, y_test), batch_size=batch_size, epochs=epochs, shuffle=True, callbacks=callbacks_list)
model.save('Whole_model.h5')
model.load_weights(file_path)
y_test = model.predict(X_te)
sample_submission = pd.read_csv('../input/sample_submission.csv')
sample_submission[list_classes] = y_test
sample_submission.to_csv('predictions.csv', index=False) | code |
2022426/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
list_sentences_train = train['comment_text'].fillna('unknown').values
list_classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = train[list_classes].values
list_sentences_test = test['comment_text'].fillna('unknown').values
print(list_sentences_train[0])
y[0] | code |
122262044/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
df.corr()
df.isnull().sum()
df = df.drop(['Client_Num'], axis=1)
df['Gender'] = df['Gender'].replace({'M': 0, 'F': 1})
df = pd.get_dummies(df, columns=['Education_Level', 'Marital_Status', 'Income_Category', 'Card_Category'], drop_first=True)
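# standardize the numeric columns to zero mean and unit variance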
scaler = StandardScaler()
scaled_features = scaler.fit_transform(df[['Customer_Age', 'Dependent_Count', 'Months_on_Book', 'Total_Relationship_Count', 'Months_Inactive_12_mon', 'Contacts_Count_12_mon', 'Credit_Limit', 'Total_Revolving_Bal', 'Avg_Open_To_Buy', 'Total_Amt_Chng_Q4_Q1', 'Total_Trans_Amt', 'Total_Trans_Ct', 'Total_Ct_Chng_Q4_Q1', 'Avg_Utilization_Ratio']])
df_scaled = pd.DataFrame(scaled_features, columns=['Customer_Age', 'Dependent_Count', 'Months_on_Book', 'Total_Relationship_Count', 'Months_Inactive_12_mon', 'Contacts_Count_12_mon', 'Credit_Limit', 'Total_Revolving_Bal', 'Avg_Open_To_Buy', 'Total_Amt_Chng_Q4_Q1', 'Total_Trans_Amt', 'Total_Trans_Ct', 'Total_Ct_Chng_Q4_Q1', 'Avg_Utilization_Ratio'])
df.drop(['Customer_Age', 'Dependent_Count', 'Months_on_Book', 'Total_Relationship_Count', 'Months_Inactive_12_mon', 'Contacts_Count_12_mon', 'Credit_Limit', 'Total_Revolving_Bal', 'Avg_Open_To_Buy', 'Total_Amt_Chng_Q4_Q1', 'Total_Trans_Amt', 'Total_Trans_Ct', 'Total_Ct_Chng_Q4_Q1', 'Avg_Utilization_Ratio'], axis=1, inplace=True)
df = pd.concat([df_scaled, df], axis=1)
df.head() | code |
122262044/cell_13 | [
"image_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
import seaborn as sns
sns.scatterplot(x='Total_Trans_Amt', y='Total_Trans_Ct', data=df) | code |
122262044/cell_9 | [
"text_plain_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
plt.figure(figsize=(10, 6))
sns.countplot(data=df, x='Attrition_Flag', hue='Education_Level')
plt.title('Count of Attrition Flag by Education Level')
plt.xlabel('Attrition Flag')
plt.ylabel('Count')
plt.show() | code |
122262044/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
df.head() | code |
122262044/cell_11 | [
"text_html_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
import seaborn as sns
sns.countplot(x='Attrition_Flag', data=df) | code |
122262044/cell_7 | [
"text_html_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
plt.figure(figsize=(10, 6))
sns.kdeplot(data=df, x='Customer_Age', hue='Attrition_Flag', shade=True, alpha=0.8)
plt.title('Distribution of Age by Attrition Flag')
plt.xlabel('Age')
plt.ylabel('Density')
plt.show() | code |
122262044/cell_8 | [
"text_html_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
plt.figure(figsize=(10, 6))
sns.countplot(data=df, x='Attrition_Flag', hue='Gender')
plt.title('Count of Attrition Flag by Gender')
plt.xlabel('Attrition Flag')
plt.ylabel('Count')
plt.show() | code |
122262044/cell_15 | [
"image_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
df.corr() | code |
122262044/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
df.corr()
df.isnull().sum() | code |
122262044/cell_17 | [
"image_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
df.corr()
df.isnull().sum()
df.head() | code |
122262044/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import cross_val_predict
from sklearn.tree import DecisionTreeClassifier
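# note: X_train/y_train/X_test/y_test come from a preprocessing and train_test_split cell that is not part of this extract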
dt = DecisionTreeClassifier(random_state=42)
dt.fit(X_train, y_train)
y_predtrain_dt = cross_val_predict(estimator=dt, X=X_train, y=y_train, cv=10)
y_pred_dt = dt.predict(X_test)
rf = RandomForestClassifier(random_state=42)
rf.fit(X_train, y_train)
y_predtrain_rf = cross_val_predict(estimator=rf, X=X_train, y=y_train, cv=10)
y_pred_rf = rf.predict(X_test)
print('Accuracy Score (Decision Tree):', accuracy_score(y_test, y_pred_dt))
print('Validation Report (Decision Tree):\n ', classification_report(y_train, y_predtrain_dt))
print('Evaluation Report (Decision Tree):\n', classification_report(y_test, y_pred_dt))
print('Confusion Matrix (Decision Tree):\n', confusion_matrix(y_test, y_pred_dt))
print('\nAccuracy Score (Random Forest):', accuracy_score(y_test, y_pred_rf))
print('Validation Report (Decision Tree):\n ', classification_report(y_train, y_predtrain_rf))
print('Evaluation Report (Random Forest):\n', classification_report(y_test, y_pred_rf))
print('Confusion Matrix (Random Forest):\n', confusion_matrix(y_test, y_pred_rf)) | code |
122262044/cell_14 | [
"image_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
import seaborn as sns
sns.boxplot(x='Attrition_Flag', y='Total_Relationship_Count', data=df) | code |
122262044/cell_10 | [
"text_plain_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
plt.figure(figsize=(12, 8))
sns.heatmap(df.corr(), annot=True, cmap='coolwarm')
plt.title('Correlation Matrix')
plt.show() | code |
122262044/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
sns.set_style('whitegrid')
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df = df.rename(columns={'CLIENTNUM': 'Client_Num', 'Dependent_count': 'Dependent_Count', 'Months_on_book': 'Months_on_Book', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1': 'Naive_Bayes_Classifier_1', 'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2': 'Naive_Bayes_Classifier_2'})
import matplotlib.pyplot as plt
plt.hist(x='Customer_Age', data=df, bins=10)
plt.show() | code |
122262044/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv')
df.info() | code |
129016882/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
cat_cols = dataset.select_dtypes(include=['object']).columns.tolist()
value_counts_df = pd.DataFrame(columns=['Column Name', 'Value', 'Count'])
for col in cat_cols:
value_counts = dataset[col].value_counts().reset_index()
value_counts.columns = ['Value', 'Count']
value_counts['Column Name'] = col
value_counts_df = pd.concat([value_counts_df, value_counts], ignore_index=True)
quartiles = dataset['year'].describe()[['min', '25%', '50%', '75%', 'max']]
print(quartiles) | code |
129016882/cell_9 | [
"image_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
dataset.describe() | code |
129016882/cell_4 | [
"image_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
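# round-tripping through to_datetime validates the year values while keeping them numeric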
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.head() | code |
129016882/cell_6 | [
"image_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
print(f'number of duplicate entries : {dataset.duplicated().sum()}') | code |
129016882/cell_2 | [
"image_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.head() | code |
129016882/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
cat_cols = dataset.select_dtypes(include=['object']).columns.tolist()
value_counts_df = pd.DataFrame(columns=['Column Name', 'Value', 'Count'])
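# collect value counts for every categorical column into one long-format table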
for col in cat_cols:
value_counts = dataset[col].value_counts().reset_index()
value_counts.columns = ['Value', 'Count']
value_counts['Column Name'] = col
value_counts_df = pd.concat([value_counts_df, value_counts], ignore_index=True)
print(value_counts_df) | code |
129016882/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore') | code |
129016882/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
print(f'number of records in dataset : {len(dataset)}') | code |
129016882/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
cat_cols = dataset.select_dtypes(include=['object']).columns.tolist()
value_counts_df = pd.DataFrame(columns=['Column Name', 'Value', 'Count'])
for col in cat_cols:
value_counts = dataset[col].value_counts().reset_index()
value_counts.columns = ['Value', 'Count']
value_counts['Column Name'] = col
value_counts_df = pd.concat([value_counts_df, value_counts], ignore_index=True)
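# 1.5*IQR rule: values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are treated as outliers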
q1 = dataset['age'].quantile(0.25)
q3 = dataset['age'].quantile(0.75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
outliers = dataset[(dataset['age'] < lower_bound) | (dataset['age'] > upper_bound)]
print('Outliers:\n', outliers) | code |
129016882/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
dataset.info() | code |
129016882/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
cat_cols = dataset.select_dtypes(include=['object']).columns.tolist()
value_counts_df = pd.DataFrame(columns=['Column Name', 'Value', 'Count'])
for col in cat_cols:
value_counts = dataset[col].value_counts().reset_index()
value_counts.columns = ['Value', 'Count']
value_counts['Column Name'] = col
value_counts_df = pd.concat([value_counts_df, value_counts], ignore_index=True)
# pie chart to display percentage of each class of features
fig, ax = plt.subplots(2, 3, figsize = (12, 8))
ax[0][0].pie(dataset['released'].value_counts().values, labels=dataset['released'].value_counts().index, autopct='%1.1f%%')
ax[0][0].set_title('released counts')
ax[0][1].pie(dataset['colour'].value_counts().values, labels=dataset['colour'].value_counts().index, autopct='%1.1f%%')
ax[0][1].set_title('colour counts')
ax[0][2].pie(dataset['sex'].value_counts().values, labels=dataset['sex'].value_counts().index, autopct='%1.1f%%')
ax[0][2].set_title('sex counts')
ax[1][0].pie(dataset['employed'].value_counts().values, labels=dataset['employed'].value_counts().index, autopct='%1.1f%%')
ax[1][0].set_title('employed counts')
ax[1][1].pie(dataset['citizen'].value_counts().values, labels=dataset['citizen'].value_counts().index, autopct='%1.1f%%')
ax[1][1].set_title('citizen counts')
ax[1][2].axis('off')
plt.suptitle('value counts of each category')
plt.show()
plt.close()
sns.histplot(data=dataset, x='age')
plt.show()
plt.close() | code |
129016882/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
cat_cols = dataset.select_dtypes(include=['object']).columns.tolist()
value_counts_df = pd.DataFrame(columns=['Column Name', 'Value', 'Count'])
for col in cat_cols:
value_counts = dataset[col].value_counts().reset_index()
value_counts.columns = ['Value', 'Count']
value_counts['Column Name'] = col
value_counts_df = pd.concat([value_counts_df, value_counts], ignore_index=True)
# pie chart to display percentage of each class of features
fig, ax = plt.subplots(2, 3, figsize = (12, 8))
ax[0][0].pie(dataset['released'].value_counts().values, labels=dataset['released'].value_counts().index, autopct='%1.1f%%')
ax[0][0].set_title('released counts')
ax[0][1].pie(dataset['colour'].value_counts().values, labels=dataset['colour'].value_counts().index, autopct='%1.1f%%')
ax[0][1].set_title('colour counts')
ax[0][2].pie(dataset['sex'].value_counts().values, labels=dataset['sex'].value_counts().index, autopct='%1.1f%%')
ax[0][2].set_title('sex counts')
ax[1][0].pie(dataset['employed'].value_counts().values, labels=dataset['employed'].value_counts().index, autopct='%1.1f%%')
ax[1][0].set_title('employed counts')
ax[1][1].pie(dataset['citizen'].value_counts().values, labels=dataset['citizen'].value_counts().index, autopct='%1.1f%%')
ax[1][1].set_title('citizen counts')
ax[1][2].axis('off')
plt.suptitle('value counts of each category')
plt.show()
plt.close()
plt.close()
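# kernel-density estimate of age, with a rug of the individual observations along the axis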
sns.displot(data=dataset, x='age', kind='kde')
sns.rugplot(data=dataset, x='age')
plt.show()
plt.close() | code |
129016882/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset.head() | code |
129016882/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
cat_cols = dataset.select_dtypes(include=['object']).columns.tolist()
value_counts_df = pd.DataFrame(columns=['Column Name', 'Value', 'Count'])
for col in cat_cols:
value_counts = dataset[col].value_counts().reset_index()
value_counts.columns = ['Value', 'Count']
value_counts['Column Name'] = col
value_counts_df = pd.concat([value_counts_df, value_counts], ignore_index=True)
# quartiles of year
quartiles = dataset['year'].describe()[['min', '25%', '50%', '75%', 'max']]
print(quartiles)
quartiles = dataset['age'].describe()[['min', '25%', '50%', '75%', 'max']]
print(quartiles) | code |
129016882/cell_14 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
cat_cols = dataset.select_dtypes(include=['object']).columns.tolist()
value_counts_df = pd.DataFrame(columns=['Column Name', 'Value', 'Count'])
for col in cat_cols:
value_counts = dataset[col].value_counts().reset_index()
value_counts.columns = ['Value', 'Count']
value_counts['Column Name'] = col
value_counts_df = pd.concat([value_counts_df, value_counts], ignore_index=True)
# pie chart to display percentage of each class of features
fig, ax = plt.subplots(2, 3, figsize = (12, 8))
ax[0][0].pie(dataset['released'].value_counts().values, labels=dataset['released'].value_counts().index, autopct='%1.1f%%')
ax[0][0].set_title('released counts')
ax[0][1].pie(dataset['colour'].value_counts().values, labels=dataset['colour'].value_counts().index, autopct='%1.1f%%')
ax[0][1].set_title('colour counts')
ax[0][2].pie(dataset['sex'].value_counts().values, labels=dataset['sex'].value_counts().index, autopct='%1.1f%%')
ax[0][2].set_title('sex counts')
ax[1][0].pie(dataset['employed'].value_counts().values, labels=dataset['employed'].value_counts().index, autopct='%1.1f%%')
ax[1][0].set_title('employed counts')
ax[1][1].pie(dataset['citizen'].value_counts().values, labels=dataset['citizen'].value_counts().index, autopct='%1.1f%%')
ax[1][1].set_title('citizen counts')
ax[1][2].axis('off')
plt.suptitle('value counts of each category')
plt.show()
sns.boxplot(data=dataset, x='year')
plt.title('box plot for year feature')
plt.show()
plt.close() | code |
129016882/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
dataset.isna().sum()
duplicated_entries_df = dataset[dataset.duplicated()]
cat_cols = dataset.select_dtypes(include=['object']).columns.tolist()
value_counts_df = pd.DataFrame(columns=['Column Name', 'Value', 'Count'])
for col in cat_cols:
value_counts = dataset[col].value_counts().reset_index()
value_counts.columns = ['Value', 'Count']
value_counts['Column Name'] = col
value_counts_df = pd.concat([value_counts_df, value_counts], ignore_index=True)
fig, ax = plt.subplots(2, 3, figsize=(12, 8))
ax[0][0].pie(dataset['released'].value_counts().values, labels=dataset['released'].value_counts().index, autopct='%1.1f%%')
ax[0][0].set_title('released counts')
ax[0][1].pie(dataset['colour'].value_counts().values, labels=dataset['colour'].value_counts().index, autopct='%1.1f%%')
ax[0][1].set_title('colour counts')
ax[0][2].pie(dataset['sex'].value_counts().values, labels=dataset['sex'].value_counts().index, autopct='%1.1f%%')
ax[0][2].set_title('sex counts')
ax[1][0].pie(dataset['employed'].value_counts().values, labels=dataset['employed'].value_counts().index, autopct='%1.1f%%')
ax[1][0].set_title('employed counts')
ax[1][1].pie(dataset['citizen'].value_counts().values, labels=dataset['citizen'].value_counts().index, autopct='%1.1f%%')
ax[1][1].set_title('citizen counts')
ax[1][2].axis('off')
plt.suptitle('value counts of each category')
plt.show() | code |
129016882/cell_5 | [
"image_output_1.png"
] | import pandas as pd
dataset = pd.read_csv('/kaggle/input/arrests-for-marijuana-possession/Arrests.csv')
dataset.drop(dataset.columns[0], axis=1, inplace=True)
dataset['year'] = pd.to_datetime(dataset['year'], format='%Y').dt.year
print('number of empty records in each features : ')
dataset.isna().sum() | code |
18110097/cell_4 | [
"application_vnd.jupyter.stderr_output_2.png"
] | import datetime as dt
import pandas as pd
def astype_cat(dd, cols):
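    # each entry is either a plain column name or a (prefix, start, end) tuple
    # that expands to prefix{start}..prefix{end}, e.g. ('card', 1, 6) -> card1..card6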
for col in cols:
if isinstance(col, tuple):
col, idx1, idx2 = col
for idx in range(idx1, idx2 + 1):
full_col = col + str(idx)
dd[full_col] = dd[full_col].astype('category')
else:
dd[col] = dd[col].astype('category')
dd = pd.read_csv('../input/train_transaction.csv')
astype_cat(dd, ['ProductCD', ('card', 1, 6), 'addr1', 'addr2', 'P_emaildomain', 'R_emaildomain', ('M', 1, 9)])
ddid = pd.read_csv('../input/train_identity.csv')
astype_cat(ddid, ['DeviceType', 'DeviceInfo', ('id_', 12, 38)])
dd = dd.merge(ddid, 'left', 'TransactionID')
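# TransactionDT is an offset in seconds; adding it to a reference date yields an absolute timestamp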
dd['datetime'] = dd['TransactionDT'].apply(lambda x: dt.timedelta(seconds=x) + pd.Timestamp('2017-11-30'))
del ddid
dd.head() | code |
18110097/cell_2 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_html_output_1.png",
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.metrics import normalized_mutual_info_score, mutual_info_score
from tqdm import tqdm_notebook as tqdm
from itertools import combinations
import seaborn as sns
from functools import partial
print(os.listdir('../input')) | code |
18110097/cell_11 | [
"text_html_output_1.png"
] | from functools import partial
from itertools import combinations
from sklearn.metrics import normalized_mutual_info_score, mutual_info_score
import datetime as dt
import pandas as pd
import seaborn as sns
from tqdm import tqdm_notebook as tqdm  # used by calc_scores below
def astype_cat(dd, cols):
for col in cols:
if isinstance(col, tuple):
col, idx1, idx2 = col
for idx in range(idx1, idx2 + 1):
full_col = col + str(idx)
dd[full_col] = dd[full_col].astype('category')
else:
dd[col] = dd[col].astype('category')
dd = pd.read_csv('../input/train_transaction.csv')
astype_cat(dd, ['ProductCD', ('card', 1, 6), 'addr1', 'addr2', 'P_emaildomain', 'R_emaildomain', ('M', 1, 9)])
ddid = pd.read_csv('../input/train_identity.csv')
astype_cat(ddid, ['DeviceType', 'DeviceInfo', ('id_', 12, 38)])
dd = dd.merge(ddid, 'left', 'TransactionID')
dd['datetime'] = dd['TransactionDT'].apply(lambda x: dt.timedelta(seconds=x) + pd.Timestamp('2017-11-30'))
del ddid
cat_cols = dd.dtypes.loc[lambda x: x == 'category'].index
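# score every pair of categorical columns; the symmetric copy lets the result
# be pivoted into a square matrix for the clustermap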
def calc_scores(score_func):
scores = []
for col1, col2 in tqdm(list(combinations(cat_cols, 2))):
score = score_func(dd[col1].cat.codes, dd[col2].cat.codes)
scores.append((col1, col2, score))
scores = pd.DataFrame(scores, columns=['col1', 'col2', 'score'])
scores_sym = pd.concat([scores, scores.rename(columns={'col1': 'col2', 'col2': 'col1'})])
return scores_sym
scores1 = calc_scores(partial(normalized_mutual_info_score, average_method='arithmetic'))
sns.clustermap(scores1.pivot('col1', 'col2', 'score').fillna(scores1['score'].max()), figsize=(15, 15))
scores2 = calc_scores(mutual_info_score)
sns.clustermap(scores2.pivot('col1', 'col2', 'score').fillna(scores2['score'].max()) ** (1 / 3), figsize=(15, 15))
display(scores2.sort_values('score', ascending=False).iloc[:20]) | code |
18110097/cell_8 | [
"text_html_output_1.png",
"image_output_1.png"
] | from functools import partial
from itertools import combinations
from sklearn.metrics import normalized_mutual_info_score, mutual_info_score
import datetime as dt
import pandas as pd
import seaborn as sns
from tqdm import tqdm_notebook as tqdm  # used by calc_scores below
def astype_cat(dd, cols):
for col in cols:
if isinstance(col, tuple):
col, idx1, idx2 = col
for idx in range(idx1, idx2 + 1):
full_col = col + str(idx)
dd[full_col] = dd[full_col].astype('category')
else:
dd[col] = dd[col].astype('category')
dd = pd.read_csv('../input/train_transaction.csv')
astype_cat(dd, ['ProductCD', ('card', 1, 6), 'addr1', 'addr2', 'P_emaildomain', 'R_emaildomain', ('M', 1, 9)])
ddid = pd.read_csv('../input/train_identity.csv')
astype_cat(ddid, ['DeviceType', 'DeviceInfo', ('id_', 12, 38)])
dd = dd.merge(ddid, 'left', 'TransactionID')
dd['datetime'] = dd['TransactionDT'].apply(lambda x: dt.timedelta(seconds=x) + pd.Timestamp('2017-11-30'))
del ddid
cat_cols = dd.dtypes.loc[lambda x: x == 'category'].index
def calc_scores(score_func):
scores = []
for col1, col2 in tqdm(list(combinations(cat_cols, 2))):
score = score_func(dd[col1].cat.codes, dd[col2].cat.codes)
scores.append((col1, col2, score))
scores = pd.DataFrame(scores, columns=['col1', 'col2', 'score'])
scores_sym = pd.concat([scores, scores.rename(columns={'col1': 'col2', 'col2': 'col1'})])
return scores_sym
scores1 = calc_scores(partial(normalized_mutual_info_score, average_method='arithmetic'))
sns.clustermap(scores1.pivot('col1', 'col2', 'score').fillna(scores1['score'].max()), figsize=(15, 15))
display(scores1.sort_values('score', ascending=False).iloc[:20]) | code |
18110097/cell_10 | [
"text_plain_output_1.png"
] | from itertools import combinations
from sklearn.metrics import normalized_mutual_info_score, mutual_info_score
import datetime as dt
import pandas as pd
from tqdm import tqdm_notebook as tqdm  # used by calc_scores below
def astype_cat(dd, cols):
for col in cols:
if isinstance(col, tuple):
col, idx1, idx2 = col
for idx in range(idx1, idx2 + 1):
full_col = col + str(idx)
dd[full_col] = dd[full_col].astype('category')
else:
dd[col] = dd[col].astype('category')
dd = pd.read_csv('../input/train_transaction.csv')
astype_cat(dd, ['ProductCD', ('card', 1, 6), 'addr1', 'addr2', 'P_emaildomain', 'R_emaildomain', ('M', 1, 9)])
ddid = pd.read_csv('../input/train_identity.csv')
astype_cat(ddid, ['DeviceType', 'DeviceInfo', ('id_', 12, 38)])
dd = dd.merge(ddid, 'left', 'TransactionID')
dd['datetime'] = dd['TransactionDT'].apply(lambda x: dt.timedelta(seconds=x) + pd.Timestamp('2017-11-30'))
del ddid
cat_cols = dd.dtypes.loc[lambda x: x == 'category'].index
def calc_scores(score_func):
scores = []
for col1, col2 in tqdm(list(combinations(cat_cols, 2))):
score = score_func(dd[col1].cat.codes, dd[col2].cat.codes)
scores.append((col1, col2, score))
scores = pd.DataFrame(scores, columns=['col1', 'col2', 'score'])
scores_sym = pd.concat([scores, scores.rename(columns={'col1': 'col2', 'col2': 'col1'})])
return scores_sym
scores2 = calc_scores(mutual_info_score) | code |
105213782/cell_42 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt #plotting
import numpy as np
import pandas as pd
import seaborn as sns #visualization
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
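# random undersampling: keep every stroke case and an equal-sized random
# sample of non-stroke cases to balance the classes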
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
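# NOTE: X_train/X_test/Y_train/Y_test come from a notebook cell not captured in
# this record; a plausible (assumed) split of the undersampled data would be:
#   from sklearn.model_selection import train_test_split
#   X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)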
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
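# a single hypothetical patient, encoded in the same column order as the
# training features (gender, age, hypertension, heart_disease, ever_married,
# work_type, Residence_type, avg_glucose_level, bmi, smoking_status)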
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
y_pred_proba = lr.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(Y_test, y_pred_proba)
plt.plot(fpr, tpr)
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.title('ROC for Logistic Regression')
plt.show() | code |
105213782/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt #plotting
import pandas as pd
import seaborn as sns #visualization
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
sns.countplot(data=data, x='stroke')
plt.show() | code |
105213782/cell_13 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
data['gender'] = label_encoder.fit_transform(data['gender'])
data['gender'].unique() | code |
105213782/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum() | code |
105213782/cell_56 | [
"image_output_1.png"
from sklearn.tree import DecisionTreeClassifier
dt_classifier = DecisionTreeClassifier()
dt_classifier.fit(X_train, Y_train) | code |
105213782/cell_34 | [
"text_html_output_1.png"
] | Y_test | code |
105213782/cell_33 | [
"text_html_output_1.png"
] | Y_train | code |
105213782/cell_76 | [
"image_output_1.png"
from sklearn.svm import SVC
svm_classifier = SVC()
svm_classifier.fit(X_train, Y_train) | code |
105213782/cell_40 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, f1_score,accuracy_score #metrics
from sklearn.metrics import roc_auc_score, roc_curve #metrics
import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
print('Accuracy:', accuracy_score(Y_test, Y_pred))
print('Precision', precision_score(Y_test, Y_pred))
print('Recall', recall_score(Y_test, Y_pred))
print('F1 score', f1_score(Y_test, Y_pred))
print('ROC score', roc_auc_score(Y_test, Y_pred)) | code |
105213782/cell_29 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
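# split into features (all columns but the last) and target (the last column, 'stroke')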
X = udata.iloc[:, :-1]
Y = udata.iloc[:, -1]
Y | code |
105213782/cell_26 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
udata | code |
105213782/cell_48 | [
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
from sklearn.neighbors import KNeighborsClassifier
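# Minkowski distance with p=3; p=2 would be the usual Euclidean distance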
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
Y_pred_knn = knn_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = knn_classifier.predict(features)
print('Prediction: {}'.format(prediction)) | code |
105213782/cell_72 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, roc_curve #metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt #plotting
import numpy as np
import pandas as pd
import seaborn as sns #visualization
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
y_pred_proba = lr.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(Y_test, y_pred_proba)
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
Y_pred_knn = knn_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = knn_classifier.predict(features)
y_pred_proba = knn_classifier.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(Y_test, y_pred_proba)
from sklearn import tree
dt_classifier = DecisionTreeClassifier()
dt_classifier.fit(X_train, Y_train)
Y_pred_dtc = dt_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = dt_classifier.predict(features)
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_curve, roc_auc_score
from matplotlib import pyplot as plt
dstree = DecisionTreeClassifier()
dstree.fit(X_train, Y_train)
# positive-class probabilities from the fitted tree
dstree_prob = dstree.predict_proba(X_test)[:, 1]
dtree_auc = roc_auc_score(Y_test, dstree_prob)
fpr, tpr, _ = roc_curve(Y_test, dstree_prob)
from sklearn.naive_bayes import GaussianNB
gnb_classifier = GaussianNB()
gnb_classifier.fit(X_train, Y_train)
Y_pred_gnb = gnb_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = gnb_classifier.predict(features)
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_curve, roc_auc_score
from matplotlib import pyplot as plt
nb = GaussianNB(var_smoothing=0.15)
nb.fit(X_train, Y_train)
# positive-class probabilities from the fitted model
nb_prob = nb.predict_proba(X_test)[:, 1]
n_auc = roc_auc_score(Y_test, nb_prob)
fpr, tpr, _ = roc_curve(Y_test, nb_prob)
plt.plot(fpr, tpr)
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.title('ROC curve For Naive Bayes')
plt.show() | code |
105213782/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum() | code |
105213782/cell_60 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, f1_score,accuracy_score #metrics
from sklearn.metrics import roc_auc_score, roc_curve #metrics
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
Y_pred_knn = knn_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = knn_classifier.predict(features)
from sklearn.tree import DecisionTreeClassifier
dt_classifier = DecisionTreeClassifier()
dt_classifier.fit(X_train, Y_train)
Y_pred_dtc = dt_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = dt_classifier.predict(features)
print('Accuracy:', accuracy_score(Y_test, Y_pred_dtc))
print('Precision', precision_score(Y_test, Y_pred_dtc))
print('Recall', recall_score(Y_test, Y_pred_dtc))
print('F1 score', f1_score(Y_test, Y_pred_dtc))
print('ROC score', roc_auc_score(Y_test, Y_pred_dtc)) | code |
105213782/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
data.info() | code |
105213782/cell_50 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, f1_score,accuracy_score #metrics
from sklearn.metrics import roc_auc_score, roc_curve #metrics
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
Y_pred_knn = knn_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = knn_classifier.predict(features)
print('Accuracy:', accuracy_score(Y_test, Y_pred_knn))
print('Precision', precision_score(Y_test, Y_pred_knn))
print('Recall', recall_score(Y_test, Y_pred_knn))
print('F1 score', f1_score(Y_test, Y_pred_knn))
print('ROC score', roc_auc_score(Y_test, Y_pred_knn)) | code |
105213782/cell_52 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt #plotting
import numpy as np
import pandas as pd
import seaborn as sns #visualization
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
y_pred_proba = lr.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(Y_test, y_pred_proba)
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
Y_pred_knn = knn_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = knn_classifier.predict(features)
y_pred_proba = knn_classifier.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(Y_test, y_pred_proba)
plt.plot(fpr, tpr)
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.title('ROC for KNN')
plt.show() | code |
105213782/cell_32 | [
"text_plain_output_1.png"
] | X_test | code |
105213782/cell_68 | [
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
Y_pred_knn = knn_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = knn_classifier.predict(features)
from sklearn import tree
dt_classifier = DecisionTreeClassifier()
dt_classifier.fit(X_train, Y_train)
Y_pred_dtc = dt_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = dt_classifier.predict(features)
from sklearn.naive_bayes import GaussianNB
gnb_classifier = GaussianNB()
gnb_classifier.fit(X_train, Y_train)
Y_pred_gnb = gnb_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = gnb_classifier.predict(features)
print('Prediction: {}'.format(prediction)) | code |
105213782/cell_62 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, roc_curve #metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt #plotting
import numpy as np
import pandas as pd
import seaborn as sns #visualization
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
y_pred_proba = lr.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(Y_test, y_pred_proba)
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
Y_pred_knn = knn_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = knn_classifier.predict(features)
y_pred_proba = knn_classifier.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(Y_test, y_pred_proba)
from sklearn import tree
dt_classifier = DecisionTreeClassifier()
dt_classifier.fit(X_train, Y_train)
Y_pred_dtc = dt_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = dt_classifier.predict(features)
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_curve, roc_auc_score
from matplotlib import pyplot as plt
dstree = DecisionTreeClassifier()
dstree.fit(X_train, Y_train)
# positive-class probabilities from the fitted tree
dstree_prob = dstree.predict_proba(X_test)[:, 1]
dtree_auc = roc_auc_score(Y_test, dstree_prob)
fpr, tpr, _ = roc_curve(Y_test, dstree_prob)
plt.plot(fpr, tpr)
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.title('ROC curve for Decision Tree')
plt.show() | code |
105213782/cell_58 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
Y_pred_knn = knn_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = knn_classifier.predict(features)
from sklearn.tree import DecisionTreeClassifier
dt_classifier = DecisionTreeClassifier()
dt_classifier.fit(X_train, Y_train)
Y_pred_dtc = dt_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = dt_classifier.predict(features)
print('Prediction: {}'.format(prediction)) | code |
105213782/cell_28 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
X = udata.iloc[:, :-1]
Y = udata.iloc[:, -1]
X | code |
105213782/cell_78 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier(n_neighbors=3, metric='minkowski', p=3)
knn_classifier.fit(X_train, Y_train)
Y_pred_knn = knn_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = knn_classifier.predict(features)
from sklearn import tree
dt_classifier = DecisionTreeClassifier()
dt_classifier.fit(X_train, Y_train)
Y_pred_dtc = dt_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = dt_classifier.predict(features)
from sklearn.naive_bayes import GaussianNB
gnb_classifier = GaussianNB()
gnb_classifier.fit(X_train, Y_train)
Y_pred_gnb = gnb_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = gnb_classifier.predict(features)
from sklearn.svm import SVC
svm_classifier = SVC()
svm_classifier.fit(X_train, Y_train)
Y_pred_svm = svm_classifier.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = svm_classifier.predict(features)
print('Prediction: {}'.format(prediction)) | code |
105213782/cell_15 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
data['gender'] = label_encoder.fit_transform(data['gender'])
data['gender'].unique()
data['ever_married'] = label_encoder.fit_transform(data['ever_married'])
data['ever_married'].unique()
data['work_type'] = label_encoder.fit_transform(data['work_type'])
data['work_type'].unique() | code |
105213782/cell_16 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
data['gender'] = label_encoder.fit_transform(data['gender'])
data['gender'].unique()
data['ever_married'] = label_encoder.fit_transform(data['ever_married'])
data['ever_married'].unique()
data['work_type'] = label_encoder.fit_transform(data['work_type'])
data['work_type'].unique()
data['Residence_type'] = label_encoder.fit_transform(data['Residence_type'])
data['Residence_type'].unique() | code |
105213782/cell_38 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
strokes = len(data[data['stroke'] == 1])
no_strokes = data[data.stroke == 0].index
random_indices = np.random.choice(no_strokes, strokes, replace=False)
stroke_indices = data[data.stroke == 1].index
under_sample_indices = np.concatenate([stroke_indices, random_indices])
udata = data.loc[under_sample_indices]
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
features = np.array([[0, 78, 0, 0, 1, 3, 0, 60, 28.8, 1]])
prediction = lr.predict(features)
print('Prediction: {}'.format(prediction)) | code |
105213782/cell_17 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
import pandas as pd
data = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
data.drop(['id'], axis=1, inplace=True)
data.isnull().sum()
data.isnull().sum()
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
data['gender'] = label_encoder.fit_transform(data['gender'])
data['gender'].unique()
data['ever_married'] = label_encoder.fit_transform(data['ever_married'])
data['ever_married'].unique()
data['work_type'] = label_encoder.fit_transform(data['work_type'])
data['work_type'].unique()
data['Residence_type'] = label_encoder.fit_transform(data['Residence_type'])
data['Residence_type'].unique()
data['smoking_status'] = label_encoder.fit_transform(data['smoking_status'])
data['smoking_status'].unique() | code |