Columns:
path: string, length 13 to 17
screenshot_names: sequence, length 1 to 873
code: string, length 0 to 40.4k
cell_type: string, 1 distinct value
34142232/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/pm25-mean-annual-exposure/PM25_MAE.csv', index_col=0) df = df.drop(['Country Code', 'Indicator Name', 'Indicator Code'], axis=1) df = df.dropna(thresh=10, axis=1) before = df.shape[0] na_free = df.dropna(thresh=10, axis=0) only_na = df[~df.index.isin(na_free.index)] after = na_free.shape[0] namelist = only_na.index df = na_free.transpose() import matplotlib.pyplot as plt import datetime as dt latest = df.tail(1).transpose() latest
code
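The cell above relies on pandas' dropna(thresh=N), which keeps a row or column only if it holds at least N non-NA values. A minimal sketch of the same keep/drop bookkeeping, on a made-up frame (the threshold of 2 is illustrative; the notebook uses 10):

import numpy as np
import pandas as pd

# Toy frame: column 'b' is all NaN, row 1 is mostly NaN.
df = pd.DataFrame({'a': [1, 2, 3], 'b': [np.nan] * 3, 'c': [4, np.nan, 6]})
df = df.dropna(thresh=2, axis=1)              # keep columns with >= 2 non-NA values ('b' is dropped)
na_free = df.dropna(thresh=2, axis=0)         # keep rows with >= 2 non-NA values
only_na = df[~df.index.isin(na_free.index)]   # the rows that were dropped
print(df.shape[0] - na_free.shape[0], 'row(s) dropped:', list(only_na.index))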
34142232/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/pm25-mean-annual-exposure/PM25_MAE.csv', index_col=0) df = df.drop(['Country Code', 'Indicator Name', 'Indicator Code'], axis=1) df = df.dropna(thresh=10, axis=1) before = df.shape[0] na_free = df.dropna(thresh=10, axis=0) only_na = df[~df.index.isin(na_free.index)] after = na_free.shape[0] namelist = only_na.index df = na_free.transpose() import matplotlib.pyplot as plt import datetime as dt df.plot(y='World')
code
34142232/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/pm25-mean-annual-exposure/PM25_MAE.csv', index_col=0) df = df.drop(['Country Code', 'Indicator Name', 'Indicator Code'], axis=1) df = df.dropna(thresh=10, axis=1) before = df.shape[0] na_free = df.dropna(thresh=10, axis=0) only_na = df[~df.index.isin(na_free.index)] after = na_free.shape[0] print(str(before - after) + " countries don't have any PM2.5 data reported:") namelist = only_na.index for name in namelist: print(name)
code
34142232/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/pm25-mean-annual-exposure/PM25_MAE.csv', index_col=0) df = df.drop(['Country Code', 'Indicator Name', 'Indicator Code'], axis=1) df = df.dropna(thresh=10, axis=1) before = df.shape[0] na_free = df.dropna(thresh=10, axis=0) only_na = df[~df.index.isin(na_free.index)] after = na_free.shape[0] namelist = only_na.index df = na_free.transpose() import matplotlib.pyplot as plt import datetime as dt df[['World', 'China']].plot(kind='bar')
code
34142232/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
34142232/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/pm25-mean-annual-exposure/PM25_MAE.csv', index_col=0) df = df.drop(['Country Code', 'Indicator Name', 'Indicator Code'], axis=1) df = df.dropna(thresh=10, axis=1) before = df.shape[0] na_free = df.dropna(thresh=10, axis=0) only_na = df[~df.index.isin(na_free.index)] after = na_free.shape[0] namelist = only_na.index df = na_free.transpose() import matplotlib.pyplot as plt import datetime as dt #fetch the latest data for all countries latest = df.tail(1).transpose() latest latest['2017'].plot(kind='pie')
code
2036047/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) stats_df = pd.concat(stats_df, axis=0) for c in single_val_c.keys(): print('The column %s only has one unique value with %r.' % (c, single_val_c[c])) print('It does not work for the classification, so it will be removed.') feature_columns.remove(c) data_df = data_df[feature_columns + ['y']].copy()
code
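The loop in this cell builds, for every categorical column, a per-value summary: the mean of y within each group (the poisonous rate, since 'p' maps to 1) and each group's share of all rows. The groupby core in isolation, on hypothetical odor data:

import pandas as pd

data_df = pd.DataFrame({'odor': ['n', 'n', 'f', 'f', 'f'], 'y': [0, 0, 1, 1, 0]})
gb = data_df.groupby('odor')
m = gb['y'].mean()   # positive (poisonous) rate per value
s = gb.size()        # row count per value
summary = pd.DataFrame({'val': m.index.values,
                        'positive_percentage': m.values,
                        'quantity_percentage': s.values / s.sum()})
print(summary)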
2036047/cell_25
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.linear_model import RidgeClassifier, LogisticRegressionCV from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.naive_bayes import GaussianNB, MultinomialNB from sklearn.pipeline import Pipeline from sklearn.preprocessing import LabelEncoder, PolynomialFeatures from time import time import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) stats_df = pd.concat(stats_df, axis=0) for c in single_val_c.keys(): feature_columns.remove(c) data_df = data_df[feature_columns + ['y']].copy() data_df = pd.concat((data_df, data_df), axis=0, ignore_index=True) data_all = pd.get_dummies(data=data_df, columns=feature_columns, prefix=feature_columns) def grid_search(base_model, param_grid, X_train, y_train): gs_c = GridSearchCV(base_model, param_grid=param_grid, n_jobs=-1, cv=3) gs_c.fit(X_train, y_train) return gs_c def ridge_model(X_train, y_train): r_c = Pipeline([('poly', PolynomialFeatures(interaction_only=True)), ('clf', RidgeClassifier(random_state=1))]) params_pool = dict(poly__degree=[2], clf__alpha=[0.01, 0.03, 0.1, 0.3, 1]) return grid_search(r_c, params_pool, X_train, y_train) def randomForest_model(X_train, y_train): rf_c = RandomForestClassifier(random_state=1) params_pool = dict(max_depth=[5, 7, 9], max_features=[0.3, 0.5], n_estimators=[12, 20, 36, 50]) return grid_search(rf_c, params_pool, X_train, y_train) def gaussianNB_model(X_train, y_train): gnb = Pipeline([('poly', PolynomialFeatures(interaction_only=True)), ('clf', GaussianNB())]) gnb.fit(X_train, y_train) return gnb def multinomialNB_model(X_train, y_train): mnb = Pipeline([('poly', PolynomialFeatures(interaction_only=True)), ('clf', MultinomialNB(alpha=1e-05))]) mnb.fit(X_train, y_train) return mnb def do_model_train(model_name, X_train, y_train, X_test, y_test): bg = time() if 'Ridge' == model_name: model = ridge_model(X_train, y_train) elif 'RandomForest' == model_name: model = randomForest_model(X_train, y_train) elif 'GaussianNB' == model_name: model = gaussianNB_model(X_train, y_train) elif 'multinomialNB' == model_name: model = multinomialNB_model(X_train, y_train) y_hat = model.predict(X_train) y_hat = model.predict(X_test) def clean_data_df(df, threshold=0.02): feature_convert = dict() for col, sub in stats_df.groupby('col'): ns = sub[sub.quantity_percentage < threshold] n_ns = sub[sub.quantity_percentage >= threshold] for idx in ns.index: if ns.loc[idx, 'positive_percentage'] > 0.5: p_n_ns = n_ns[n_ns.positive_percentage > 0.5] if not p_n_ns.empty: feature_convert.setdefault(col, []).append((ns.loc[idx, 'val'], p_n_ns['val'].values[0])) df.loc[df[col] == ns.loc[idx, 'val'], col] = p_n_ns['val'].values[0] else: n_n_ns = n_ns[n_ns.positive_percentage <= 0.5] if not n_n_ns.empty: feature_convert.setdefault(col, []).append((ns.loc[idx, 'val'], n_n_ns['val'].values[0])) df.loc[df[col] == ns.loc[idx, 'val'], col] = n_n_ns['val'].values[0] return (pd.get_dummies(data=df, columns=feature_columns, prefix=feature_columns), feature_convert) cleaned_df, feature_convert = clean_data_df(data_df.copy()) X_all, y_all = (cleaned_df[[c for c in cleaned_df.columns if c != 'y']], cleaned_df['y']) X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.33, random_state=1) do_model_train('RandomForest', X_train, y_train, X_test, y_test)
code
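The clean_data_df helper in this cell folds rare category values (quantity share below the threshold) into a frequent value that lies on the same side of the 50% positive rate, before one-hot encoding. A condensed sketch of that remapping idea, using invented per-value stats:

import pandas as pd

# Hypothetical stats for one column: each value's share of rows and positive rate.
stats = pd.DataFrame({'val': ['a', 'b', 'c'],
                      'quantity_percentage': [0.90, 0.09, 0.01],
                      'positive_percentage': [0.20, 0.80, 0.75]})

threshold = 0.02
rare = stats[stats.quantity_percentage < threshold]       # only 'c'
frequent = stats[stats.quantity_percentage >= threshold]  # 'a' and 'b'

mapping = {}
for _, row in rare.iterrows():
    # Pick a frequent value whose positive rate falls on the same side of 0.5.
    same_side = frequent[(frequent.positive_percentage > 0.5) == (row.positive_percentage > 0.5)]
    if not same_side.empty:
        mapping[row.val] = same_side['val'].iloc[0]

print(mapping)  # {'c': 'b'}: the rare value is replaced before get_dummies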
2036047/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd data_df = pd.read_csv('../input/mushrooms.csv') data_df.info()
code
2036047/cell_23
[ "text_html_output_1.png" ]
import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) stats_df = pd.concat(stats_df, axis=0) for c in single_val_c.keys(): feature_columns.remove(c) data_df = data_df[feature_columns + ['y']].copy() data_df = pd.concat((data_df, data_df), axis=0, ignore_index=True) data_all = pd.get_dummies(data=data_df, columns=feature_columns, prefix=feature_columns) def clean_data_df(df, threshold=0.02): feature_convert = dict() for col, sub in stats_df.groupby('col'): ns = sub[sub.quantity_percentage < threshold] n_ns = sub[sub.quantity_percentage >= threshold] for idx in ns.index: if ns.loc[idx, 'positive_percentage'] > 0.5: p_n_ns = n_ns[n_ns.positive_percentage > 0.5] if not p_n_ns.empty: feature_convert.setdefault(col, []).append((ns.loc[idx, 'val'], p_n_ns['val'].values[0])) df.loc[df[col] == ns.loc[idx, 'val'], col] = p_n_ns['val'].values[0] else: n_n_ns = n_ns[n_ns.positive_percentage <= 0.5] if not n_n_ns.empty: feature_convert.setdefault(col, []).append((ns.loc[idx, 'val'], n_n_ns['val'].values[0])) df.loc[df[col] == ns.loc[idx, 'val'], col] = n_n_ns['val'].values[0] return (pd.get_dummies(data=df, columns=feature_columns, prefix=feature_columns), feature_convert) cleaned_df, feature_convert = clean_data_df(data_df.copy()) cleaned_df.info()
code
2036047/cell_20
[ "text_html_output_10.png", "text_html_output_16.png", "text_html_output_4.png", "text_html_output_6.png", "text_html_output_2.png", "text_html_output_15.png", "text_html_output_5.png", "text_html_output_14.png", "text_html_output_19.png", "text_html_output_9.png", "text_html_output_13.png", "text_html_output_20.png", "text_html_output_21.png", "text_html_output_1.png", "text_html_output_17.png", "text_html_output_18.png", "text_html_output_12.png", "text_html_output_11.png", "text_html_output_8.png", "text_html_output_3.png", "text_html_output_7.png" ]
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.linear_model import RidgeClassifier, LogisticRegressionCV from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.naive_bayes import GaussianNB, MultinomialNB from sklearn.pipeline import Pipeline from sklearn.preprocessing import LabelEncoder, PolynomialFeatures from time import time import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) stats_df = pd.concat(stats_df, axis=0) for c in single_val_c.keys(): feature_columns.remove(c) data_df = data_df[feature_columns + ['y']].copy() data_df = pd.concat((data_df, data_df), axis=0, ignore_index=True) data_all = pd.get_dummies(data=data_df, columns=feature_columns, prefix=feature_columns) def grid_search(base_model, param_grid, X_train, y_train): gs_c = GridSearchCV(base_model, param_grid=param_grid, n_jobs=-1, cv=3) gs_c.fit(X_train, y_train) return gs_c def ridge_model(X_train, y_train): r_c = Pipeline([('poly', PolynomialFeatures(interaction_only=True)), ('clf', RidgeClassifier(random_state=1))]) params_pool = dict(poly__degree=[2], clf__alpha=[0.01, 0.03, 0.1, 0.3, 1]) return grid_search(r_c, params_pool, X_train, y_train) def randomForest_model(X_train, y_train): rf_c = RandomForestClassifier(random_state=1) params_pool = dict(max_depth=[5, 7, 9], max_features=[0.3, 0.5], n_estimators=[12, 20, 36, 50]) return grid_search(rf_c, params_pool, X_train, y_train) def gaussianNB_model(X_train, y_train): gnb = Pipeline([('poly', PolynomialFeatures(interaction_only=True)), ('clf', GaussianNB())]) gnb.fit(X_train, y_train) return gnb def multinomialNB_model(X_train, y_train): mnb = Pipeline([('poly', PolynomialFeatures(interaction_only=True)), ('clf', MultinomialNB(alpha=1e-05))]) mnb.fit(X_train, y_train) return mnb def do_model_train(model_name, X_train, y_train, X_test, y_test): bg = time() if 'Ridge' == model_name: model = ridge_model(X_train, y_train) elif 'RandomForest' == model_name: model = randomForest_model(X_train, y_train) elif 'GaussianNB' == model_name: model = gaussianNB_model(X_train, y_train) elif 'multinomialNB' == model_name: model = multinomialNB_model(X_train, y_train) y_hat = model.predict(X_train) y_hat = model.predict(X_test) X_all, y_all = (data_all[[c for c in data_all.columns if c != 'y']], data_all['y']) X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.33, random_state=1) do_model_train('RandomForest', X_train, y_train, X_test, y_test)
code
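Grid-search keys such as poly__degree and clf__alpha in this cell follow scikit-learn's '<step name>__<parameter>' convention for addressing parameters inside a Pipeline. A self-contained sketch on synthetic data (the dataset and grid values are illustrative):

from sklearn.datasets import make_classification
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures

X, y = make_classification(n_samples=200, n_features=5, random_state=1)

pipe = Pipeline([('poly', PolynomialFeatures(interaction_only=True)),
                 ('clf', RidgeClassifier(random_state=1))])

# '<step name>__<parameter>' reaches a parameter of a step inside the pipeline.
grid = GridSearchCV(pipe,
                    param_grid={'poly__degree': [2], 'clf__alpha': [0.01, 0.1, 1]},
                    cv=3, n_jobs=-1)
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)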
2036047/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) pyo.iplot(fig) stats_df = pd.concat(stats_df, axis=0)
code
2036047/cell_8
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) stats_df = pd.concat(stats_df, axis=0) stats_df.describe()
code
2036047/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) stats_df = pd.concat(stats_df, axis=0) for c in single_val_c.keys(): feature_columns.remove(c) data_df = data_df[feature_columns + ['y']].copy() data_df = pd.concat((data_df, data_df), axis=0, ignore_index=True) data_all = pd.get_dummies(data=data_df, columns=feature_columns, prefix=feature_columns) data_all.info()
code
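pd.get_dummies here expands each categorical column into 0/1 indicator columns named '<prefix>_<value>', which is what lets the models consume the all-categorical mushroom features. A small sketch with a toy frame:

import pandas as pd

df = pd.DataFrame({'cap-shape': ['x', 'b', 'x'], 'y': [1, 0, 1]})
encoded = pd.get_dummies(data=df, columns=['cap-shape'], prefix=['cap-shape'])
print(encoded.columns.tolist())  # ['y', 'cap-shape_b', 'cap-shape_x']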
2036047/cell_3
[ "text_plain_output_1.png" ]
from subprocess import check_output np.set_printoptions(suppress=True, linewidth=300) pd.options.display.float_format = lambda x: '%0.6f' % x pyo.init_notebook_mode(connected=True) print(check_output(['ls', '../input']).decode('utf-8'))
code
2036047/cell_17
[ "text_html_output_1.png" ]
import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) stats_df = pd.concat(stats_df, axis=0) for c in single_val_c.keys(): feature_columns.remove(c) data_df = data_df[feature_columns + ['y']].copy() data_df = pd.concat((data_df, data_df), axis=0, ignore_index=True) data_all = pd.get_dummies(data=data_df, columns=feature_columns, prefix=feature_columns) data_all.head()
code
2036047/cell_24
[ "text_plain_output_1.png" ]
from pprint import pprint import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) stats_df = pd.concat(stats_df, axis=0) for c in single_val_c.keys(): feature_columns.remove(c) data_df = data_df[feature_columns + ['y']].copy() data_df = pd.concat((data_df, data_df), axis=0, ignore_index=True) data_all = pd.get_dummies(data=data_df, columns=feature_columns, prefix=feature_columns) def clean_data_df(df, threshold=0.02): feature_convert = dict() for col, sub in stats_df.groupby('col'): ns = sub[sub.quantity_percentage < threshold] n_ns = sub[sub.quantity_percentage >= threshold] for idx in ns.index: if ns.loc[idx, 'positive_percentage'] > 0.5: p_n_ns = n_ns[n_ns.positive_percentage > 0.5] if not p_n_ns.empty: feature_convert.setdefault(col, []).append((ns.loc[idx, 'val'], p_n_ns['val'].values[0])) df.loc[df[col] == ns.loc[idx, 'val'], col] = p_n_ns['val'].values[0] else: n_n_ns = n_ns[n_ns.positive_percentage <= 0.5] if not n_n_ns.empty: feature_convert.setdefault(col, []).append((ns.loc[idx, 'val'], n_n_ns['val'].values[0])) df.loc[df[col] == ns.loc[idx, 'val'], col] = n_n_ns['val'].values[0] return (pd.get_dummies(data=df, columns=feature_columns, prefix=feature_columns), feature_convert) cleaned_df, feature_convert = clean_data_df(data_df.copy()) pprint(feature_convert)
code
2036047/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) stats_df = pd.concat(stats_df, axis=0) for c in single_val_c.keys(): feature_columns.remove(c) data_df = data_df[feature_columns + ['y']].copy() data_df = pd.concat((data_df, data_df), axis=0, ignore_index=True) data_df.info()
code
2036047/cell_27
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.linear_model import RidgeClassifier, LogisticRegressionCV from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.naive_bayes import GaussianNB, MultinomialNB from sklearn.pipeline import Pipeline from sklearn.preprocessing import LabelEncoder, PolynomialFeatures from time import time import numpy as np import pandas as pd import plotly.graph_objs as go import plotly.offline as pyo data_df = pd.read_csv('../input/mushrooms.csv') data_df['y'] = data_df['class'].map({'p': 1, 'e': 0}) feature_columns = [c for c in data_df.columns if not c in ('class', 'y')] stats_df = [] single_val_c = {} for i, c in enumerate(feature_columns): if data_df[c].nunique() == 1: single_val_c[c] = data_df[c].unique()[0] continue gb = data_df.groupby(c) m = gb['y'].mean() s = gb.size() df = pd.DataFrame(index=range(len(m))) df['col'] = c df['val'] = m.index.values df['positive_percentage'] = m.values df['quantity_percentage'] = s.values / s.sum() stats_df.append(df) trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage') trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage') layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage')) fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout) stats_df = pd.concat(stats_df, axis=0) for c in single_val_c.keys(): feature_columns.remove(c) data_df = data_df[feature_columns + ['y']].copy() data_df = pd.concat((data_df, data_df), axis=0, ignore_index=True) data_all = pd.get_dummies(data=data_df, columns=feature_columns, prefix=feature_columns) def grid_search(base_model, param_grid, X_train, y_train): gs_c = GridSearchCV(base_model, param_grid=param_grid, n_jobs=-1, cv=3) gs_c.fit(X_train, y_train) return gs_c def ridge_model(X_train, y_train): r_c = Pipeline([('poly', PolynomialFeatures(interaction_only=True)), ('clf', RidgeClassifier(random_state=1))]) params_pool = dict(poly__degree=[2], clf__alpha=[0.01, 0.03, 0.1, 0.3, 1]) return grid_search(r_c, params_pool, X_train, y_train) def randomForest_model(X_train, y_train): rf_c = RandomForestClassifier(random_state=1) params_pool = dict(max_depth=[5, 7, 9], max_features=[0.3, 0.5], n_estimators=[12, 20, 36, 50]) return grid_search(rf_c, params_pool, X_train, y_train) def gaussianNB_model(X_train, y_train): gnb = Pipeline([('poly', PolynomialFeatures(interaction_only=True)), ('clf', GaussianNB())]) gnb.fit(X_train, y_train) return gnb def multinomialNB_model(X_train, y_train): mnb = Pipeline([('poly', PolynomialFeatures(interaction_only=True)), ('clf', MultinomialNB(alpha=1e-05))]) mnb.fit(X_train, y_train) return mnb def do_model_train(model_name, X_train, y_train, X_test, y_test): bg = time() if 'Ridge' == model_name: model = ridge_model(X_train, y_train) elif 'RandomForest' == model_name: model = randomForest_model(X_train, y_train) elif 'GaussianNB' == model_name: model = gaussianNB_model(X_train, y_train) elif 'multinomialNB' == model_name: model = multinomialNB_model(X_train, y_train) y_hat = model.predict(X_train) y_hat = model.predict(X_test) X_all, y_all = (data_all[[c for c in data_all.columns if c != 'y']], data_all['y']) X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.33, random_state=1) do_model_train('RandomForest', X_train, y_train, X_test, y_test) def clean_data_df(df, threshold=0.02): feature_convert = dict() for col, sub in stats_df.groupby('col'): ns = sub[sub.quantity_percentage < threshold] n_ns = sub[sub.quantity_percentage >= threshold] for idx in ns.index: if ns.loc[idx, 'positive_percentage'] > 0.5: p_n_ns = n_ns[n_ns.positive_percentage > 0.5] if not p_n_ns.empty: feature_convert.setdefault(col, []).append((ns.loc[idx, 'val'], p_n_ns['val'].values[0])) df.loc[df[col] == ns.loc[idx, 'val'], col] = p_n_ns['val'].values[0] else: n_n_ns = n_ns[n_ns.positive_percentage <= 0.5] if not n_n_ns.empty: feature_convert.setdefault(col, []).append((ns.loc[idx, 'val'], n_n_ns['val'].values[0])) df.loc[df[col] == ns.loc[idx, 'val'], col] = n_n_ns['val'].values[0] return (pd.get_dummies(data=df, columns=feature_columns, prefix=feature_columns), feature_convert) cleaned_df, feature_convert = clean_data_df(data_df.copy()) def get_features_importance(x, y): rf = RandomForestClassifier(n_estimators=500, class_weight={0: 1, 1: 1 / np.sqrt(np.mean(y))}, max_features=0.75, n_jobs=-1, random_state=1) rf.fit(x, y) feature_importance = pd.DataFrame(data={'columns': x.columns, 'importance': rf.feature_importances_}) feature_importance.sort_values(by='importance', axis=0, ascending=False, inplace=True) feature_importance.loc[:, 'cum_importance'] = feature_importance.importance.cumsum() return feature_importance def get_features_corr(df, ycol): corr_y = df.corr()[ycol].map(np.abs) corr_y = corr_y[[c for c in corr_y.index if c != ycol]] corr_y = corr_y / corr_y.sum() feature_importance = pd.DataFrame(data={'columns': corr_y.index.values, 'importance': corr_y.values}) feature_importance.sort_values(by='importance', axis=0, ascending=False, inplace=True) feature_importance.loc[:, 'cum_importance'] = feature_importance.importance.cumsum() return feature_importance data_all = pd.get_dummies(data=data_df, columns=feature_columns, prefix=feature_columns) X_all, y_all = (data_all[[c for c in data_all.columns if c != 'y']], data_all['y']) fi = get_features_importance(X_all, y_all) bg = time() accuracyScores, precisionScores, recallScores, f1Scores = ([], [], [], []) for i in range(len(fi)): cols = fi.iloc[:i + 1]['columns'].values model = Pipeline([('poly', PolynomialFeatures(interaction_only=True, degree=2)), ('clf', GaussianNB())]) model.fit(X_all[cols], y_all) y_p = model.predict(X_all[cols]) accuracyScores.append(accuracy_score(y_true=y_all, y_pred=y_p)) precisionScores.append(precision_score(y_true=y_all, y_pred=y_p)) recallScores.append(recall_score(y_true=y_all, y_pred=y_p)) f1Scores.append(f1_score(y_true=y_all, y_pred=y_p)) if accuracyScores[-1] == 1: break print() print('It took %.3f seconds.' % (time() - bg)) traces = [go.Scatter(x=np.arange(len(fi)) + 1, y=fi['cum_importance'][:i + 1], mode='markers+lines', name='importance', text=fi['columns']), go.Scatter(x=np.arange(len(fi)) + 1, y=accuracyScores, mode='markers+lines', name='accuracy Score'), go.Scatter(x=np.arange(len(fi)) + 1, y=precisionScores, mode='markers+lines', name='precision Score'), go.Scatter(x=np.arange(len(fi)) + 1, y=recallScores, mode='markers+lines', name='recall Score'), go.Scatter(x=np.arange(len(fi)) + 1, y=f1Scores, mode='markers+lines', name='F1 Score')] layout = go.Layout(title='Feature importance/accuracy/precision/recall/F1 Score on different number of features', xaxis=dict(title='number of features'), yaxis=dict(title='importance/accuracy/precision/recall/F1 Score')) pyo.iplot(go.Figure(data=traces, layout=layout))
code
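get_features_importance in this cell ranks the one-hot columns by a random forest's feature_importances_ and adds a cumulative sum, so the loop can grow the feature set in importance order until accuracy saturates. A compact sketch of the ranking step (simplified: no class weights, synthetic data):

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=300, n_features=6, random_state=1)
X = pd.DataFrame(X, columns=[f'f{i}' for i in range(6)])

rf = RandomForestClassifier(n_estimators=100, random_state=1).fit(X, y)
fi = pd.DataFrame({'columns': X.columns, 'importance': rf.feature_importances_})
fi = fi.sort_values(by='importance', ascending=False)
fi['cum_importance'] = fi.importance.cumsum()  # reaches 1.0 once all features are included
print(fi)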
2036047/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd data_df = pd.read_csv('../input/mushrooms.csv') data_df.head()
code
32070789/cell_21
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from IPython import display from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping from keras.layers import Conv2D,MaxPooling2D,GlobalMaxPooling2D from keras.layers import Dense,LSTM,GlobalAveragePooling1D,GlobalAveragePooling2D from keras.layers import Input,Activation,Flatten,Dropout,BatchNormalization from keras.layers.advanced_activations import PReLU,LeakyReLU from keras.metrics import top_k_categorical_accuracy,categorical_accuracy from keras.models import Sequential,load_model,Model from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder import warnings warnings.filterwarnings('ignore') import numpy as np, pandas as pd, pylab as pl, h5py from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from IPython import display from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.metrics import top_k_categorical_accuracy, categorical_accuracy from keras.models import Sequential, load_model, Model from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D from keras.layers.advanced_activations import PReLU, LeakyReLU from keras.layers import Input, Activation, Flatten, Dropout, BatchNormalization from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D fw = 'weights.best.letters.hdf5' def history_plot(fit_history): keys = list(fit_history.history.keys())[0:4] def ohe(x): return OneHotEncoder(categories='auto').fit(x.reshape(-1, 1)).transform(x.reshape(-1, 1)).toarray().astype('int64') def tts(X, y): x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) n = int(len(x_test) / 2) x_valid, y_valid = (x_test[:n], y_test[:n]) x_test, y_test = (x_test[n:], y_test[n:]) return (x_train, x_valid, x_test, y_train, y_valid, y_test) f = h5py.File('../input/LetterColorImages_123.h5', 'r') keys = list(f.keys()) keys letters = u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя' backgrounds = np.array(f[keys[0]]) labels = np.array(f[keys[2]]) images = np.array(f[keys[1]]) / 255 gray_images = np.dot(images[..., :3], [0.299, 0.587, 0.114]) rn = np.random.randint(10000) pl.xticks([]) pl.yticks([]) gray_images = gray_images.reshape(-1, 32, 32, 1) cbackgrounds, clabels = (ohe(backgrounds), ohe(labels)) ctargets = np.concatenate((clabels, cbackgrounds), axis=1) pd.DataFrame([clabels.shape, cbackgrounds.shape, ctargets.shape]) x_train1, x_valid1, x_test1, y_train1, y_valid1, y_test1 = tts(gray_images, clabels) x_train2, x_valid2, x_test2, y_train2, y_valid2, y_test2 = tts(gray_images, ctargets) y_train2_list = [y_train2[:, :33], y_train2[:, 33:]] y_test2_list = [y_test2[:, :33], y_test2[:, 33:]] y_valid2_list = [y_valid2[:, :33], y_valid2[:, 33:]] def top_3_categorical_accuracy(y_true, y_pred): return top_k_categorical_accuracy(y_true, y_pred, k=3) def gray_model(): model = Sequential() model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train1.shape[1:])) model.add(LeakyReLU(alpha=0.02)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(128, (5, 5))) model.add(LeakyReLU(alpha=0.02)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(GlobalMaxPooling2D()) model.add(Dense(1024)) model.add(LeakyReLU(alpha=0.02)) model.add(Dropout(0.25)) model.add(Dense(128)) model.add(LeakyReLU(alpha=0.02)) model.add(Dropout(0.25)) model.add(Dense(33)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=[categorical_accuracy, top_3_categorical_accuracy]) return model gray_model = gray_model() checkpointer = ModelCheckpoint(filepath=fw, verbose=2, save_best_only=True) lr_reduction = ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=2, factor=0.5) estopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2) history = gray_model.fit(x_train1, y_train1, epochs=200, batch_size=64, verbose=2, validation_data=(x_valid1, y_valid1), callbacks=[checkpointer, lr_reduction, estopping]) def gray_multi_model(): model_input = Input(shape=(32, 32, 1)) x = BatchNormalization()(model_input) x = Conv2D(32, (5, 5), padding='same')(model_input) x = LeakyReLU(alpha=0.02)(x) x = MaxPooling2D(pool_size=(2, 2))(x) x = Dropout(0.25)(x) x = Conv2D(256, (5, 5), padding='same')(x) x = LeakyReLU(alpha=0.02)(x) x = MaxPooling2D(pool_size=(2, 2))(x) x = Dropout(0.25)(x) x = GlobalMaxPooling2D()(x) x = Dense(1024)(x) x = LeakyReLU(alpha=0.02)(x) x = Dropout(0.25)(x) x = Dense(256)(x) x = LeakyReLU(alpha=0.02)(x) x = Dropout(0.025)(x) y1 = Dense(33, activation='softmax')(x) y2 = Dense(4, activation='softmax')(x) model = Model(inputs=model_input, outputs=[y1, y2]) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=[categorical_accuracy, top_3_categorical_accuracy]) return model gray_multi_model = gray_multi_model() checkpointer = ModelCheckpoint(filepath=fw, verbose=2, save_best_only=True) lr_reduction = ReduceLROnPlateau(monitor='val_loss', patience=5, verbose=2, factor=0.75) estopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2) history = gray_multi_model.fit(x_train2, y_train2_list, epochs=200, batch_size=128, verbose=2, validation_data=(x_valid2, y_valid2_list), callbacks=[checkpointer, lr_reduction, estopping]) gray_multi_model.evaluate(x_test2, y_test2_list)
code
32070789/cell_13
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping from keras.layers import Conv2D,MaxPooling2D,GlobalMaxPooling2D from keras.layers import Dense,LSTM,GlobalAveragePooling1D,GlobalAveragePooling2D from keras.layers import Input,Activation,Flatten,Dropout,BatchNormalization from keras.layers.advanced_activations import PReLU,LeakyReLU from keras.metrics import top_k_categorical_accuracy,categorical_accuracy from keras.models import Sequential,load_model,Model from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder import warnings warnings.filterwarnings('ignore') import numpy as np, pandas as pd, pylab as pl, h5py from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from IPython import display from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.metrics import top_k_categorical_accuracy, categorical_accuracy from keras.models import Sequential, load_model, Model from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D from keras.layers.advanced_activations import PReLU, LeakyReLU from keras.layers import Input, Activation, Flatten, Dropout, BatchNormalization from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D fw = 'weights.best.letters.hdf5' def history_plot(fit_history): keys = list(fit_history.history.keys())[0:4] def ohe(x): return OneHotEncoder(categories='auto').fit(x.reshape(-1, 1)).transform(x.reshape(-1, 1)).toarray().astype('int64') def tts(X, y): x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) n = int(len(x_test) / 2) x_valid, y_valid = (x_test[:n], y_test[:n]) x_test, y_test = (x_test[n:], y_test[n:]) return (x_train, x_valid, x_test, y_train, y_valid, y_test) def top_3_categorical_accuracy(y_true, y_pred): return top_k_categorical_accuracy(y_true, y_pred, k=3) def gray_model(): model = Sequential() model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train1.shape[1:])) model.add(LeakyReLU(alpha=0.02)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(128, (5, 5))) model.add(LeakyReLU(alpha=0.02)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(GlobalMaxPooling2D()) model.add(Dense(1024)) model.add(LeakyReLU(alpha=0.02)) model.add(Dropout(0.25)) model.add(Dense(128)) model.add(LeakyReLU(alpha=0.02)) model.add(Dropout(0.25)) model.add(Dense(33)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=[categorical_accuracy, top_3_categorical_accuracy]) return model gray_model = gray_model() checkpointer = ModelCheckpoint(filepath=fw, verbose=2, save_best_only=True) lr_reduction = ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=2, factor=0.5) estopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2) history = gray_model.fit(x_train1, y_train1, epochs=200, batch_size=64, verbose=2, validation_data=(x_valid1, y_valid1), callbacks=[checkpointer, lr_reduction, estopping]) history_plot(history) gray_model.load_weights(fw) gray_model.evaluate(x_test1, y_test1)
code
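top_3_categorical_accuracy above fixes k=3 in Keras' top_k_categorical_accuracy: a prediction counts as correct when the true class is among the three highest-scoring outputs. A numpy restatement of that semantics (an illustration, not the Keras implementation):

import numpy as np

def in_top_k(y_true, y_pred, k):
    # True when the index of the one-hot target is among the k largest scores.
    return int(np.argmax(y_true) in np.argsort(y_pred)[::-1][:k])

y_true = np.array([0., 0., 1., 0.])      # true class is index 2
y_pred = np.array([0.4, 0.3, 0.2, 0.1])  # index 2 has only the third-best score
print(in_top_k(y_true, y_pred, 1), in_top_k(y_true, y_pred, 3))  # 0 1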
32070789/cell_20
[ "text_plain_output_1.png" ]
from IPython import display from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping from keras.layers import Conv2D,MaxPooling2D,GlobalMaxPooling2D from keras.layers import Dense,LSTM,GlobalAveragePooling1D,GlobalAveragePooling2D from keras.layers import Input,Activation,Flatten,Dropout,BatchNormalization from keras.layers.advanced_activations import PReLU,LeakyReLU from keras.metrics import top_k_categorical_accuracy,categorical_accuracy from keras.models import Sequential,load_model,Model from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder import warnings warnings.filterwarnings('ignore') import numpy as np, pandas as pd, pylab as pl, h5py from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from IPython import display from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.metrics import top_k_categorical_accuracy, categorical_accuracy from keras.models import Sequential, load_model, Model from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D from keras.layers.advanced_activations import PReLU, LeakyReLU from keras.layers import Input, Activation, Flatten, Dropout, BatchNormalization from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D fw = 'weights.best.letters.hdf5' def history_plot(fit_history): keys = list(fit_history.history.keys())[0:4] def ohe(x): return OneHotEncoder(categories='auto').fit(x.reshape(-1, 1)).transform(x.reshape(-1, 1)).toarray().astype('int64') def tts(X, y): x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) n = int(len(x_test) / 2) x_valid, y_valid = (x_test[:n], y_test[:n]) x_test, y_test = (x_test[n:], y_test[n:]) return (x_train, x_valid, x_test, y_train, y_valid, y_test) f = h5py.File('../input/LetterColorImages_123.h5', 'r') keys = list(f.keys()) keys letters = u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя' backgrounds = np.array(f[keys[0]]) labels = np.array(f[keys[2]]) images = np.array(f[keys[1]]) / 255 gray_images = np.dot(images[..., :3], [0.299, 0.587, 0.114]) rn = np.random.randint(10000) pl.xticks([]) pl.yticks([]) gray_images = gray_images.reshape(-1, 32, 32, 1) cbackgrounds, clabels = (ohe(backgrounds), ohe(labels)) ctargets = np.concatenate((clabels, cbackgrounds), axis=1) pd.DataFrame([clabels.shape, cbackgrounds.shape, ctargets.shape]) x_train1, x_valid1, x_test1, y_train1, y_valid1, y_test1 = tts(gray_images, clabels) x_train2, x_valid2, x_test2, y_train2, y_valid2, y_test2 = tts(gray_images, ctargets) y_train2_list = [y_train2[:, :33], y_train2[:, 33:]] y_test2_list = [y_test2[:, :33], y_test2[:, 33:]] y_valid2_list = [y_valid2[:, :33], y_valid2[:, 33:]] def top_3_categorical_accuracy(y_true, y_pred): return top_k_categorical_accuracy(y_true, y_pred, k=3) def gray_model(): model = Sequential() model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train1.shape[1:])) model.add(LeakyReLU(alpha=0.02)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(128, (5, 5))) model.add(LeakyReLU(alpha=0.02)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(GlobalMaxPooling2D()) model.add(Dense(1024)) model.add(LeakyReLU(alpha=0.02)) model.add(Dropout(0.25)) model.add(Dense(128)) model.add(LeakyReLU(alpha=0.02)) model.add(Dropout(0.25)) model.add(Dense(33)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=[categorical_accuracy, top_3_categorical_accuracy]) return model gray_model = gray_model() checkpointer = ModelCheckpoint(filepath=fw, verbose=2, save_best_only=True) lr_reduction = ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=2, factor=0.5) estopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2) history = gray_model.fit(x_train1, y_train1, epochs=200, batch_size=64, verbose=2, validation_data=(x_valid1, y_valid1), callbacks=[checkpointer, lr_reduction, estopping]) def gray_multi_model(): model_input = Input(shape=(32, 32, 1)) x = BatchNormalization()(model_input) x = Conv2D(32, (5, 5), padding='same')(model_input) x = LeakyReLU(alpha=0.02)(x) x = MaxPooling2D(pool_size=(2, 2))(x) x = Dropout(0.25)(x) x = Conv2D(256, (5, 5), padding='same')(x) x = LeakyReLU(alpha=0.02)(x) x = MaxPooling2D(pool_size=(2, 2))(x) x = Dropout(0.25)(x) x = GlobalMaxPooling2D()(x) x = Dense(1024)(x) x = LeakyReLU(alpha=0.02)(x) x = Dropout(0.25)(x) x = Dense(256)(x) x = LeakyReLU(alpha=0.02)(x) x = Dropout(0.025)(x) y1 = Dense(33, activation='softmax')(x) y2 = Dense(4, activation='softmax')(x) model = Model(inputs=model_input, outputs=[y1, y2]) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=[categorical_accuracy, top_3_categorical_accuracy]) return model gray_multi_model = gray_multi_model() checkpointer = ModelCheckpoint(filepath=fw, verbose=2, save_best_only=True) lr_reduction = ReduceLROnPlateau(monitor='val_loss', patience=5, verbose=2, factor=0.75) estopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2) history = gray_multi_model.fit(x_train2, y_train2_list, epochs=200, batch_size=128, verbose=2, validation_data=(x_valid2, y_valid2_list), callbacks=[checkpointer, lr_reduction, estopping])
code
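gray_multi_model uses the Keras functional API to branch one shared feature extractor into two softmax heads (33 letters, 4 backgrounds), and the concatenated one-hot target is sliced back into a per-head list before fitting. A stripped-down sketch of the two-head pattern (layer sizes and data are arbitrary):

import numpy as np
from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(8,))
shared = Dense(16, activation='relu')(inp)            # shared trunk
head_a = Dense(3, activation='softmax', name='letter')(shared)
head_b = Dense(2, activation='softmax', name='background')(shared)

model = Model(inputs=inp, outputs=[head_a, head_b])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

x = np.random.rand(32, 8)
y_a = np.eye(3)[np.random.randint(3, size=32)]        # one-hot "letter" target
y_b = np.eye(2)[np.random.randint(2, size=32)]        # one-hot "background" target
y = np.concatenate((y_a, y_b), axis=1)                # stored concatenated, as in the cell
model.fit(x, [y[:, :3], y[:, 3:]], epochs=1, verbose=0)  # sliced per head before fitting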
32070789/cell_2
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import warnings warnings.filterwarnings('ignore') import numpy as np, pandas as pd, pylab as pl, h5py from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from IPython import display from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.metrics import top_k_categorical_accuracy, categorical_accuracy from keras.models import Sequential, load_model, Model from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D from keras.layers.advanced_activations import PReLU, LeakyReLU from keras.layers import Input, Activation, Flatten, Dropout, BatchNormalization from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D fw = 'weights.best.letters.hdf5'
code
32070789/cell_7
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder def history_plot(fit_history): keys = list(fit_history.history.keys())[0:4] def ohe(x): return OneHotEncoder(categories='auto').fit(x.reshape(-1, 1)).transform(x.reshape(-1, 1)).toarray().astype('int64') def tts(X, y): x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) n = int(len(x_test) / 2) x_valid, y_valid = (x_test[:n], y_test[:n]) x_test, y_test = (x_test[n:], y_test[n:]) return (x_train, x_valid, x_test, y_train, y_valid, y_test) f = h5py.File('../input/LetterColorImages_123.h5', 'r') keys = list(f.keys()) keys letters = u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя' backgrounds = np.array(f[keys[0]]) labels = np.array(f[keys[2]]) images = np.array(f[keys[1]]) / 255 gray_images = np.dot(images[..., :3], [0.299, 0.587, 0.114]) rn = np.random.randint(10000) pl.figure(figsize=(2, 3)) pl.title('Label: %s \n' % letters[labels[rn] - 1] + 'Background: %s' % backgrounds[rn], fontsize=18) pl.imshow(gray_images[rn], cmap=pl.cm.bone) pl.xticks([]) pl.yticks([]) pl.show() gray_images = gray_images.reshape(-1, 32, 32, 1)
code
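np.dot(images[..., :3], [0.299, 0.587, 0.114]) in this cell is the classic ITU-R BT.601 luma conversion: a weighted sum over the R, G, B channels that collapses the last axis, after which a singleton channel axis is restored for Conv2D. A shape-level sketch with random data:

import numpy as np

images = np.random.rand(10, 32, 32, 3)                 # 10 RGB images in [0, 1]
gray = np.dot(images[..., :3], [0.299, 0.587, 0.114])  # weighted R, G, B sum
print(gray.shape)                                      # (10, 32, 32): channel axis collapsed

gray = gray.reshape(-1, 32, 32, 1)                     # restore a channel axis for Conv2D
print(gray.shape)                                      # (10, 32, 32, 1)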
32070789/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from IPython import display from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder def history_plot(fit_history): keys = list(fit_history.history.keys())[0:4] def ohe(x): return OneHotEncoder(categories='auto').fit(x.reshape(-1, 1)).transform(x.reshape(-1, 1)).toarray().astype('int64') def tts(X, y): x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) n = int(len(x_test) / 2) x_valid, y_valid = (x_test[:n], y_test[:n]) x_test, y_test = (x_test[n:], y_test[n:]) return (x_train, x_valid, x_test, y_train, y_valid, y_test) f = h5py.File('../input/LetterColorImages_123.h5', 'r') keys = list(f.keys()) keys letters = u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя' backgrounds = np.array(f[keys[0]]) labels = np.array(f[keys[2]]) images = np.array(f[keys[1]]) / 255 gray_images = np.dot(images[..., :3], [0.299, 0.587, 0.114]) rn = np.random.randint(10000) pl.xticks([]) pl.yticks([]) gray_images = gray_images.reshape(-1, 32, 32, 1) cbackgrounds, clabels = (ohe(backgrounds), ohe(labels)) ctargets = np.concatenate((clabels, cbackgrounds), axis=1) display.display(pd.DataFrame([labels[97:103], clabels[97:103]]).T) pd.DataFrame([clabels.shape, cbackgrounds.shape, ctargets.shape])
code
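The ohe helper one-hot encodes an integer label vector, and concatenating the letter and background encodings along axis 1 yields the combined target used by the multi-output model. A minimal sketch with made-up ids:

import numpy as np
from sklearn.preprocessing import OneHotEncoder

def ohe(x):
    x = x.reshape(-1, 1)
    return OneHotEncoder(categories='auto').fit_transform(x).toarray().astype('int64')

labels = np.array([1, 2, 1, 3])       # e.g. letter ids (3 distinct values)
backgrounds = np.array([0, 1, 0, 1])  # e.g. background ids (2 distinct values)
targets = np.concatenate((ohe(labels), ohe(backgrounds)), axis=1)
print(targets.shape)  # (4, 5): 3 label columns + 2 background columns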
32070789/cell_15
[ "text_html_output_2.png", "text_html_output_1.png" ]
from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping from keras.layers import Conv2D,MaxPooling2D,GlobalMaxPooling2D from keras.layers import Dense,LSTM,GlobalAveragePooling1D,GlobalAveragePooling2D from keras.layers import Input,Activation,Flatten,Dropout,BatchNormalization from keras.layers.advanced_activations import PReLU,LeakyReLU from keras.metrics import top_k_categorical_accuracy,categorical_accuracy from keras.models import Sequential,load_model,Model from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder import warnings warnings.filterwarnings('ignore') import numpy as np, pandas as pd, pylab as pl, h5py from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from IPython import display from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.metrics import top_k_categorical_accuracy, categorical_accuracy from keras.models import Sequential, load_model, Model from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D from keras.layers.advanced_activations import PReLU, LeakyReLU from keras.layers import Input, Activation, Flatten, Dropout, BatchNormalization from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D fw = 'weights.best.letters.hdf5' def history_plot(fit_history): keys = list(fit_history.history.keys())[0:4] def ohe(x): return OneHotEncoder(categories='auto').fit(x.reshape(-1, 1)).transform(x.reshape(-1, 1)).toarray().astype('int64') def tts(X, y): x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) n = int(len(x_test) / 2) x_valid, y_valid = (x_test[:n], y_test[:n]) x_test, y_test = (x_test[n:], y_test[n:]) return (x_train, x_valid, x_test, y_train, y_valid, y_test) def top_3_categorical_accuracy(y_true, y_pred): return top_k_categorical_accuracy(y_true, y_pred, k=3) def gray_model(): model = Sequential() model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train1.shape[1:])) model.add(LeakyReLU(alpha=0.02)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(128, (5, 5))) model.add(LeakyReLU(alpha=0.02)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(GlobalMaxPooling2D()) model.add(Dense(1024)) model.add(LeakyReLU(alpha=0.02)) model.add(Dropout(0.25)) model.add(Dense(128)) model.add(LeakyReLU(alpha=0.02)) model.add(Dropout(0.25)) model.add(Dense(33)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=[categorical_accuracy, top_3_categorical_accuracy]) return model gray_model = gray_model() checkpointer = ModelCheckpoint(filepath=fw, verbose=2, save_best_only=True) lr_reduction = ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=2, factor=0.5) estopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2) history = gray_model.fit(x_train1, y_train1, epochs=200, batch_size=64, verbose=2, validation_data=(x_valid1, y_valid1), callbacks=[checkpointer, lr_reduction, estopping]) gray_model.load_weights(fw) gray_model.evaluate(x_test1, y_test1) steps, epochs = (1000, 10) igen = ImageDataGenerator(zoom_range=0.3, shear_range=0.3, rotation_range=30) generator = gray_model.fit_generator(igen.flow(x_train1, y_train1, batch_size=64), steps_per_epoch=steps, epochs=epochs, verbose=2, validation_data=(x_valid1, y_valid1), callbacks=[checkpointer, lr_reduction]) history_plot(generator) gray_model.load_weights(fw) gray_model.evaluate(x_test1, y_test1)
code
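ImageDataGenerator in this cell streams randomly zoomed, sheared, and rotated copies of the training images, so each of the 1000 steps per epoch sees a fresh batch. A minimal sketch of the flow pattern on random arrays:

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

x = np.random.rand(100, 32, 32, 1)               # fake grayscale images
y = np.eye(10)[np.random.randint(10, size=100)]  # fake one-hot labels

igen = ImageDataGenerator(zoom_range=0.3, shear_range=0.3, rotation_range=30)
batches = igen.flow(x, y, batch_size=16)
xb, yb = next(batches)           # one randomly augmented batch
print(xb.shape, yb.shape)        # (16, 32, 32, 1) (16, 10)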
32070789/cell_17
[ "text_plain_output_1.png" ]
from IPython import display from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping from keras.layers import Conv2D,MaxPooling2D,GlobalMaxPooling2D from keras.layers import Dense,LSTM,GlobalAveragePooling1D,GlobalAveragePooling2D from keras.layers import Input,Activation,Flatten,Dropout,BatchNormalization from keras.layers.advanced_activations import PReLU,LeakyReLU from keras.metrics import top_k_categorical_accuracy,categorical_accuracy from keras.models import Sequential,load_model,Model from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder import warnings warnings.filterwarnings('ignore') import numpy as np, pandas as pd, pylab as pl, h5py from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from IPython import display from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from keras.metrics import top_k_categorical_accuracy, categorical_accuracy from keras.models import Sequential, load_model, Model from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D from keras.layers.advanced_activations import PReLU, LeakyReLU from keras.layers import Input, Activation, Flatten, Dropout, BatchNormalization from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D fw = 'weights.best.letters.hdf5' def history_plot(fit_history): keys = list(fit_history.history.keys())[0:4] def ohe(x): return OneHotEncoder(categories='auto').fit(x.reshape(-1, 1)).transform(x.reshape(-1, 1)).toarray().astype('int64') def tts(X, y): x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) n = int(len(x_test) / 2) x_valid, y_valid = (x_test[:n], y_test[:n]) x_test, y_test = (x_test[n:], y_test[n:]) return (x_train, x_valid, x_test, y_train, y_valid, y_test) f = h5py.File('../input/LetterColorImages_123.h5', 'r') keys = list(f.keys()) keys letters = u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя' backgrounds = np.array(f[keys[0]]) labels = np.array(f[keys[2]]) images = np.array(f[keys[1]]) / 255 gray_images = np.dot(images[..., :3], [0.299, 0.587, 0.114]) rn = np.random.randint(10000) pl.xticks([]) pl.yticks([]) gray_images = gray_images.reshape(-1, 32, 32, 1) cbackgrounds, clabels = (ohe(backgrounds), ohe(labels)) ctargets = np.concatenate((clabels, cbackgrounds), axis=1) pd.DataFrame([clabels.shape, cbackgrounds.shape, ctargets.shape]) def top_3_categorical_accuracy(y_true, y_pred): return top_k_categorical_accuracy(y_true, y_pred, k=3) def gray_model(): model = Sequential() model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train1.shape[1:])) model.add(LeakyReLU(alpha=0.02)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(128, (5, 5))) model.add(LeakyReLU(alpha=0.02)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(GlobalMaxPooling2D()) model.add(Dense(1024)) model.add(LeakyReLU(alpha=0.02)) model.add(Dropout(0.25)) model.add(Dense(128)) model.add(LeakyReLU(alpha=0.02)) model.add(Dropout(0.25)) model.add(Dense(33)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=[categorical_accuracy, top_3_categorical_accuracy]) return model gray_model = gray_model() checkpointer = ModelCheckpoint(filepath=fw, verbose=2, save_best_only=True) lr_reduction = ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=2, factor=0.5) estopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2) history = gray_model.fit(x_train1, y_train1, epochs=200, batch_size=64, verbose=2, validation_data=(x_valid1, y_valid1), callbacks=[checkpointer, lr_reduction, estopping]) gray_model.load_weights(fw) gray_model.evaluate(x_test1, y_test1) steps, epochs = (1000, 10) igen = ImageDataGenerator(zoom_range=0.3, shear_range=0.3, rotation_range=30) generator = gray_model.fit_generator(igen.flow(x_train1, y_train1, batch_size=64), steps_per_epoch=steps, epochs=epochs, verbose=2, validation_data=(x_valid1, y_valid1), callbacks=[checkpointer, lr_reduction]) gray_model.load_weights(fw) gray_model.evaluate(x_test1, y_test1) py_test1 = gray_model.predict_classes(x_test1) fig = pl.figure(figsize=(12, 12)) for i, idx in enumerate(np.random.choice(x_test1.shape[0], size=16, replace=False)): ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[]) ax.imshow(np.squeeze(x_test1[idx]), cmap=pl.cm.bone) pred_idx = py_test1[idx] true_idx = np.argmax(y_test1[idx]) ax.set_title('{} ({})'.format(letters[pred_idx], letters[true_idx]), color='darkblue' if pred_idx == true_idx else 'darkred')
code
32070789/cell_14
[ "image_output_1.png" ]
from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping
from keras.layers import Conv2D,MaxPooling2D,GlobalMaxPooling2D
from keras.layers import Dense,LSTM,GlobalAveragePooling1D,GlobalAveragePooling2D
from keras.layers import Input,Activation,Flatten,Dropout,BatchNormalization
from keras.layers.advanced_activations import PReLU,LeakyReLU
from keras.metrics import top_k_categorical_accuracy,categorical_accuracy
from keras.models import Sequential,load_model,Model
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import warnings
warnings.filterwarnings('ignore')
import numpy as np, pandas as pd, pylab as pl, h5py
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from IPython import display
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.metrics import top_k_categorical_accuracy, categorical_accuracy
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D
from keras.layers.advanced_activations import PReLU, LeakyReLU
from keras.layers import Input, Activation, Flatten, Dropout, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D
fw = 'weights.best.letters.hdf5'
def history_plot(fit_history):
    keys = list(fit_history.history.keys())[0:4]
def ohe(x):
    return OneHotEncoder(categories='auto').fit(x.reshape(-1, 1)).transform(x.reshape(-1, 1)).toarray().astype('int64')
def tts(X, y):
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
    n = int(len(x_test) / 2)
    x_valid, y_valid = (x_test[:n], y_test[:n])
    x_test, y_test = (x_test[n:], y_test[n:])
    return (x_train, x_valid, x_test, y_train, y_valid, y_test)
def top_3_categorical_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=3)
def gray_model():
    model = Sequential()
    model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train1.shape[1:]))
    model.add(LeakyReLU(alpha=0.02))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, (5, 5)))
    model.add(LeakyReLU(alpha=0.02))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(GlobalMaxPooling2D())
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.02))
    model.add(Dropout(0.25))
    model.add(Dense(128))
    model.add(LeakyReLU(alpha=0.02))
    model.add(Dropout(0.25))
    model.add(Dense(33))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=[categorical_accuracy, top_3_categorical_accuracy])
    return model
gray_model = gray_model()
checkpointer = ModelCheckpoint(filepath=fw, verbose=2, save_best_only=True)
lr_reduction = ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=2, factor=0.5)
estopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)
# x_train1/x_valid1/x_test1 and the matching y arrays are assumed to be created
# in earlier cells of this notebook (via the tts() helper above)
history = gray_model.fit(x_train1, y_train1, epochs=200, batch_size=64, verbose=2, validation_data=(x_valid1, y_valid1), callbacks=[checkpointer, lr_reduction, estopping])
gray_model.load_weights(fw)
gray_model.evaluate(x_test1, y_test1)
steps, epochs = (1000, 10)
igen = ImageDataGenerator(zoom_range=0.3, shear_range=0.3, rotation_range=30)
generator = gray_model.fit_generator(igen.flow(x_train1, y_train1, batch_size=64), steps_per_epoch=steps, epochs=epochs, verbose=2, validation_data=(x_valid1, y_valid1), callbacks=[checkpointer, lr_reduction])
code
32070789/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping
from keras.layers import Conv2D,MaxPooling2D,GlobalMaxPooling2D
from keras.layers import Dense,LSTM,GlobalAveragePooling1D,GlobalAveragePooling2D
from keras.layers import Input,Activation,Flatten,Dropout,BatchNormalization
from keras.layers.advanced_activations import PReLU,LeakyReLU
from keras.metrics import top_k_categorical_accuracy,categorical_accuracy
from keras.models import Sequential,load_model,Model
import warnings
warnings.filterwarnings('ignore')
import numpy as np, pandas as pd, pylab as pl, h5py
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from IPython import display
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.metrics import top_k_categorical_accuracy, categorical_accuracy
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D
from keras.layers.advanced_activations import PReLU, LeakyReLU
from keras.layers import Input, Activation, Flatten, Dropout, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D
fw = 'weights.best.letters.hdf5'
def top_3_categorical_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=3)
def gray_model():
    model = Sequential()
    model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train1.shape[1:]))
    model.add(LeakyReLU(alpha=0.02))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, (5, 5)))
    model.add(LeakyReLU(alpha=0.02))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(GlobalMaxPooling2D())
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.02))
    model.add(Dropout(0.25))
    model.add(Dense(128))
    model.add(LeakyReLU(alpha=0.02))
    model.add(Dropout(0.25))
    model.add(Dense(33))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=[categorical_accuracy, top_3_categorical_accuracy])
    return model
gray_model = gray_model()
checkpointer = ModelCheckpoint(filepath=fw, verbose=2, save_best_only=True)
lr_reduction = ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=2, factor=0.5)
estopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)
history = gray_model.fit(x_train1, y_train1, epochs=200, batch_size=64, verbose=2, validation_data=(x_valid1, y_valid1), callbacks=[checkpointer, lr_reduction, estopping])
code
32070789/cell_5
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import h5py  # added: h5py is used below but was not imported in this cell
def history_plot(fit_history):
    keys = list(fit_history.history.keys())[0:4]
def ohe(x):
    return OneHotEncoder(categories='auto').fit(x.reshape(-1, 1)).transform(x.reshape(-1, 1)).toarray().astype('int64')
def tts(X, y):
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
    n = int(len(x_test) / 2)
    x_valid, y_valid = (x_test[:n], y_test[:n])
    x_test, y_test = (x_test[n:], y_test[n:])
    return (x_train, x_valid, x_test, y_train, y_valid, y_test)
f = h5py.File('../input/LetterColorImages_123.h5', 'r')
keys = list(f.keys())
keys
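# A minimal usage sketch, not part of the original cell: in this dataset the
# keys listed above are 'backgrounds', 'images' and 'labels' (an assumption
# taken from the sibling cells of this notebook), so the arrays would
# typically be pulled out and scaled like this:
# backgrounds = np.array(f[keys[0]])
# labels = np.array(f[keys[2]])
# images = np.array(f[keys[1]]) / 255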
code
16130893/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
X_data = []
for file in onlyfiles:
    face = cv2.imread('../input/utkface_aligned_cropped/UTKFace/' + file, cv2.IMREAD_COLOR)
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    face = cv2.resize(face, (32, 32))
    X_data.append(face)
X_data = np.array(X_data)
X_data.shape
X = np.squeeze(X_data)
plt.imshow(X[1], interpolation='bicubic')
(plt.xticks([]), plt.yticks([]))
plt.show()
print(y[1])
code
16130893/cell_6
[ "image_output_1.png" ]
from IPython.display import Image
from tensorflow.keras import layers
import tensorflow as tf
def gen_model():
    inputs = tf.keras.layers.Input(shape=(32, 32, 3))
    x = inputs
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(84, 3, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Flatten()(x)
    x1 = layers.Dense(64, activation='relu')(x)
    x2 = layers.Dense(64, activation='relu')(x)
    x1 = layers.Dense(1, activation='sigmoid', name='sex_out')(x1)
    x2 = layers.Dense(1, activation='relu', name='age_out')(x2)
    model = tf.keras.models.Model(inputs=inputs, outputs=[x1, x2])
    model.compile(optimizer='Adam', loss=['binary_crossentropy', 'mae'])
    tf.keras.utils.plot_model(model, 'model.png', show_shapes=True)
    return model
model = gen_model()
Image('model.png')
code
16130893/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import os
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
print(y.shape)
print(y[0])
code
16130893/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
print(os.listdir('../input/utkface_aligned_cropped/'))
code
16130893/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from IPython.display import Image
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
X_data = []
for file in onlyfiles:
    face = cv2.imread('../input/utkface_aligned_cropped/UTKFace/' + file, cv2.IMREAD_COLOR)
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    face = cv2.resize(face, (32, 32))
    X_data.append(face)
X_data = np.array(X_data)
X_data.shape
X = np.squeeze(X_data)
(plt.xticks([]), plt.yticks([]))
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33)
y_train = [y_train[:, 1], y_train[:, 0]]
y_valid = [y_valid[:, 1], y_valid[:, 0]]
def gen_model():
    inputs = tf.keras.layers.Input(shape=(32, 32, 3))
    x = inputs
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(84, 3, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Flatten()(x)
    x1 = layers.Dense(64, activation='relu')(x)
    x2 = layers.Dense(64, activation='relu')(x)
    x1 = layers.Dense(1, activation='sigmoid', name='sex_out')(x1)
    x2 = layers.Dense(1, activation='relu', name='age_out')(x2)
    model = tf.keras.models.Model(inputs=inputs, outputs=[x1, x2])
    model.compile(optimizer='Adam', loss=['binary_crossentropy', 'mae'])
    return model
model = gen_model()
Image('model.png')
model.summary()
model.fit(X_train, y_train, epochs=200, batch_size=120, validation_data=(X_valid, y_valid))
code
16130893/cell_8
[ "text_plain_output_1.png" ]
from IPython.display import Image
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
X_data = []
for file in onlyfiles:
    face = cv2.imread('../input/utkface_aligned_cropped/UTKFace/' + file, cv2.IMREAD_COLOR)
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    face = cv2.resize(face, (32, 32))
    X_data.append(face)
X_data = np.array(X_data)
X_data.shape
X = np.squeeze(X_data)
(plt.xticks([]), plt.yticks([]))
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33)
y_train = [y_train[:, 1], y_train[:, 0]]
y_valid = [y_valid[:, 1], y_valid[:, 0]]
def gen_model():
    inputs = tf.keras.layers.Input(shape=(32, 32, 3))
    x = inputs
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(84, 3, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Flatten()(x)
    x1 = layers.Dense(64, activation='relu')(x)
    x2 = layers.Dense(64, activation='relu')(x)
    x1 = layers.Dense(1, activation='sigmoid', name='sex_out')(x1)
    x2 = layers.Dense(1, activation='relu', name='age_out')(x2)
    model = tf.keras.models.Model(inputs=inputs, outputs=[x1, x2])
    model.compile(optimizer='Adam', loss=['binary_crossentropy', 'mae'])
    return model
model = gen_model()
Image('model.png')
model.summary()
model.fit(X_train, y_train, epochs=200, batch_size=120, validation_data=(X_valid, y_valid))
print(y_valid[0][24], y_valid[1][24])
print(model.predict([[X_valid[24]]]))
code
16130893/cell_3
[ "text_plain_output_1.png" ]
import cv2
import numpy as np
import os
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
X_data = []
for file in onlyfiles:
    face = cv2.imread('../input/utkface_aligned_cropped/UTKFace/' + file, cv2.IMREAD_COLOR)
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    face = cv2.resize(face, (32, 32))
    X_data.append(face)
X_data = np.array(X_data)
X_data.shape
code
90130201/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd
stroke_df = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
stroke_df.shape
na_count = []
for i in range(0, len(stroke_df.columns)):
    na_count.append(stroke_df[stroke_df.columns[i]].isna().sum())
na_df = pd.DataFrame(zip(stroke_df.columns, na_count))
na_df.columns = ['Variable', 'Counts (NA)']
na_df
stroke_df_copy = stroke_df.copy()
pd.crosstab(stroke_df.loc[stroke_df['bmi'].isna()].gender, stroke_df.loc[stroke_df['bmi'].isna()].stroke, margins=True)
pd.crosstab(stroke_df.loc[stroke_df['bmi'].isna()].smoking_status, stroke_df.loc[stroke_df['bmi'].isna()].stroke, margins=True)
code
90130201/cell_13
[ "text_html_output_1.png" ]
201 / 5110
code
90130201/cell_9
[ "text_html_output_1.png" ]
import pandas as pd
stroke_df = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
stroke_df.shape
code
90130201/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
stroke_df = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
stroke_df.shape
na_count = []
for i in range(0, len(stroke_df.columns)):
    na_count.append(stroke_df[stroke_df.columns[i]].isna().sum())
na_df = pd.DataFrame(zip(stroke_df.columns, na_count))
na_df.columns = ['Variable', 'Counts (NA)']
na_df
stroke_df_copy = stroke_df.copy()
pd.crosstab(stroke_df.loc[stroke_df['bmi'].isna()].gender, stroke_df.loc[stroke_df['bmi'].isna()].stroke, margins=True)
code
90130201/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
stroke_df = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
stroke_df.shape
stroke_df.describe()
code
90130201/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
stroke_df = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
stroke_df.shape
na_count = []
for i in range(0, len(stroke_df.columns)):
    na_count.append(stroke_df[stroke_df.columns[i]].isna().sum())
na_df = pd.DataFrame(zip(stroke_df.columns, na_count))
na_df.columns = ['Variable', 'Counts (NA)']
na_df
for i in ['gender', 'hypertension', 'heart_disease', 'ever_married', 'work_type', 'Residence_type', 'smoking_status', 'stroke']:
    print(stroke_df.loc[stroke_df['bmi'].isna()][i].value_counts(), '\n')
code
90130201/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
stroke_df = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
stroke_df.head(5)
code
90130201/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
stroke_df = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
stroke_df.shape
na_count = []
for i in range(0, len(stroke_df.columns)):
    na_count.append(stroke_df[stroke_df.columns[i]].isna().sum())
na_df = pd.DataFrame(zip(stroke_df.columns, na_count))
na_df.columns = ['Variable', 'Counts (NA)']
na_df
stroke_df.loc[stroke_df['bmi'].isna()].describe()
code
90130201/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
stroke_df = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
stroke_df.shape
na_count = []
for i in range(0, len(stroke_df.columns)):
    na_count.append(stroke_df[stroke_df.columns[i]].isna().sum())
na_df = pd.DataFrame(zip(stroke_df.columns, na_count))
na_df.columns = ['Variable', 'Counts (NA)']
na_df
for i in ['gender', 'hypertension', 'heart_disease', 'ever_married', 'work_type', 'Residence_type', 'smoking_status', 'stroke']:
    print(stroke_df[i].value_counts(), '\n')
code
90130201/cell_22
[ "text_html_output_1.png" ]
import pandas as pd
stroke_df = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
stroke_df.shape
na_count = []
for i in range(0, len(stroke_df.columns)):
    na_count.append(stroke_df[stroke_df.columns[i]].isna().sum())
na_df = pd.DataFrame(zip(stroke_df.columns, na_count))
na_df.columns = ['Variable', 'Counts (NA)']
na_df
stroke_df_copy = stroke_df.copy()
pd.crosstab(stroke_df.loc[stroke_df['bmi'].isna()].gender, stroke_df.loc[stroke_df['bmi'].isna()].stroke, margins=True)
pd.crosstab(stroke_df.loc[stroke_df['bmi'].isna()].smoking_status, stroke_df.loc[stroke_df['bmi'].isna()].stroke, margins=True)
pd.crosstab(stroke_df.loc[stroke_df['bmi'].isna()].hypertension, stroke_df.loc[stroke_df['bmi'].isna()].stroke, margins=True)
code
90130201/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
stroke_df = pd.read_csv('../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv')
stroke_df.shape
na_count = []
for i in range(0, len(stroke_df.columns)):
    na_count.append(stroke_df[stroke_df.columns[i]].isna().sum())
na_df = pd.DataFrame(zip(stroke_df.columns, na_count))
na_df.columns = ['Variable', 'Counts (NA)']
na_df
code
105174236/cell_13
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.isnull().sum()
le = LabelEncoder()
df['le_region'] = le.fit_transform(df.region)
df['le_smoker'] = le.fit_transform(df.smoker)
df['le_sex'] = le.fit_transform(df.sex)
ml_df = df.loc[:, ('age', 'le_sex', 'bmi', 'children', 'le_region', 'le_smoker', 'charges')]
ml_df.rename(columns={'le_region': 'region', 'le_smoker': 'smoker', 'le_sex': 'sex'}, inplace=True)
ml_df.head()
code
105174236/cell_9
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.describe()
code
105174236/cell_23
[ "text_html_output_1.png" ]
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.isnull().sum()
le = LabelEncoder()
df['le_region'] = le.fit_transform(df.region)
df['le_smoker'] = le.fit_transform(df.smoker)
df['le_sex'] = le.fit_transform(df.sex)
ml_df = df.loc[:, ('age', 'le_sex', 'bmi', 'children', 'le_region', 'le_smoker', 'charges')]
ml_df.rename(columns={'le_region': 'region', 'le_smoker': 'smoker', 'le_sex': 'sex'}, inplace=True)
columns = ml_df.columns
X = ml_df[columns[:-1]]
y = ml_df[columns[-1]]
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
lin_regr = linear_model.LinearRegression()
lin_regr.fit(X_train, y_train)
lw = lin_regr.coef_
lb = lin_regr.intercept_
ridge = linear_model.Ridge(alpha=0.5)
ridge.fit(X_train, y_train)
rw = ridge.coef_
rb = ridge.intercept_
print(f'The coefficient vector W is: {rw}')
print(f'The intercept b is: {rb}')
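# A hedged follow-up sketch (not in the original cell): both fitted estimators
# expose .score(), which returns R^2 on held-out data, so the two models could
# be compared on the test split like this:
# print(f'Linear R^2: {lin_regr.score(X_test, y_test):.4f}')
# print(f'Ridge  R^2: {ridge.score(X_test, y_test):.4f}')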
code
105174236/cell_20
[ "text_html_output_1.png" ]
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.isnull().sum()
le = LabelEncoder()
df['le_region'] = le.fit_transform(df.region)
df['le_smoker'] = le.fit_transform(df.smoker)
df['le_sex'] = le.fit_transform(df.sex)
ml_df = df.loc[:, ('age', 'le_sex', 'bmi', 'children', 'le_region', 'le_smoker', 'charges')]
ml_df.rename(columns={'le_region': 'region', 'le_smoker': 'smoker', 'le_sex': 'sex'}, inplace=True)
columns = ml_df.columns
X = ml_df[columns[:-1]]
y = ml_df[columns[-1]]
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
lin_regr = linear_model.LinearRegression()
lin_regr.fit(X_train, y_train)
lw = lin_regr.coef_
lb = lin_regr.intercept_
print(f'The coefficient vector W is: {lw}')
print(f'The intercept b is: {lb}')
code
105174236/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.head()
code
105174236/cell_11
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.isnull().sum()
le = LabelEncoder()
df['le_region'] = le.fit_transform(df.region)
df['le_smoker'] = le.fit_transform(df.smoker)
df['le_sex'] = le.fit_transform(df.sex)
df.head()
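# A small inspection sketch (an assumption, not part of the original cell).
# Note that `le` was refit three times above, so classes_ reflects only the
# last fit (on df.sex):
# print(dict(zip(le.classes_, le.transform(le.classes_))))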
code
105174236/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105174236/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
print(df.columns)
code
105174236/cell_18
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.isnull().sum()
le = LabelEncoder()
df['le_region'] = le.fit_transform(df.region)
df['le_smoker'] = le.fit_transform(df.smoker)
df['le_sex'] = le.fit_transform(df.sex)
ml_df = df.loc[:, ('age', 'le_sex', 'bmi', 'children', 'le_region', 'le_smoker', 'charges')]
ml_df.rename(columns={'le_region': 'region', 'le_smoker': 'smoker', 'le_sex': 'sex'}, inplace=True)
columns = ml_df.columns
X = ml_df[columns[:-1]]
y = ml_df[columns[-1]]
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
print(f'The size of training set is: {X_train.shape}x{y_train.shape}')
print(f'The size of testing set is: {X_test.shape}x{y_test.shape}')
code
105174236/cell_28
[ "text_plain_output_1.png" ]
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.isnull().sum()
le = LabelEncoder()
df['le_region'] = le.fit_transform(df.region)
df['le_smoker'] = le.fit_transform(df.smoker)
df['le_sex'] = le.fit_transform(df.sex)
ml_df = df.loc[:, ('age', 'le_sex', 'bmi', 'children', 'le_region', 'le_smoker', 'charges')]
ml_df.rename(columns={'le_region': 'region', 'le_smoker': 'smoker', 'le_sex': 'sex'}, inplace=True)
columns = ml_df.columns
X = ml_df[columns[:-1]]
y = ml_df[columns[-1]]
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
lin_regr = linear_model.LinearRegression()
lin_regr.fit(X_train, y_train)
lw = lin_regr.coef_
lb = lin_regr.intercept_
ridge = linear_model.Ridge(alpha=0.5)
ridge.fit(X_train, y_train)
rw = ridge.coef_
rb = ridge.intercept_
lw = np.array(lw).reshape(lw.shape[0], 1).reshape(-1)
rw = np.array(rw).reshape(rw.shape[0], 1).reshape(-1)
coefs = pd.DataFrame(data=np.array([lw, rw, [lb] * lw.shape[0], [rb] * lw.shape[0]]).T, columns=['Linear_W', 'Ridge_W', 'Linear_b', 'Ridge_b'])
coefs.describe()
code
105174236/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.info()
code
105174236/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.isnull().sum()
le = LabelEncoder()
df['le_region'] = le.fit_transform(df.region)
df['le_smoker'] = le.fit_transform(df.smoker)
df['le_sex'] = le.fit_transform(df.sex)
ml_df = df.loc[:, ('age', 'le_sex', 'bmi', 'children', 'le_region', 'le_smoker', 'charges')]
ml_df.rename(columns={'le_region': 'region', 'le_smoker': 'smoker', 'le_sex': 'sex'}, inplace=True)
ml_df.head()
code
105174236/cell_16
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.isnull().sum()
le = LabelEncoder()
df['le_region'] = le.fit_transform(df.region)
df['le_smoker'] = le.fit_transform(df.smoker)
df['le_sex'] = le.fit_transform(df.sex)
ml_df = df.loc[:, ('age', 'le_sex', 'bmi', 'children', 'le_region', 'le_smoker', 'charges')]
ml_df.rename(columns={'le_region': 'region', 'le_smoker': 'smoker', 'le_sex': 'sex'}, inplace=True)
columns = ml_df.columns
X = ml_df[columns[:-1]]
y = ml_df[columns[-1]]
print(f'The size of input is: {X.shape}')
print(f'The size of target is: {y.shape}')
code
105174236/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
print('Is there any missing value? \n')
df.isnull().sum()
code
105174236/cell_27
[ "text_plain_output_1.png" ]
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.isnull().sum()
le = LabelEncoder()
df['le_region'] = le.fit_transform(df.region)
df['le_smoker'] = le.fit_transform(df.smoker)
df['le_sex'] = le.fit_transform(df.sex)
ml_df = df.loc[:, ('age', 'le_sex', 'bmi', 'children', 'le_region', 'le_smoker', 'charges')]
ml_df.rename(columns={'le_region': 'region', 'le_smoker': 'smoker', 'le_sex': 'sex'}, inplace=True)
columns = ml_df.columns
X = ml_df[columns[:-1]]
y = ml_df[columns[-1]]
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
lin_regr = linear_model.LinearRegression()
lin_regr.fit(X_train, y_train)
lw = lin_regr.coef_
lb = lin_regr.intercept_
ridge = linear_model.Ridge(alpha=0.5)
ridge.fit(X_train, y_train)
rw = ridge.coef_
rb = ridge.intercept_
lw = np.array(lw).reshape(lw.shape[0], 1).reshape(-1)
rw = np.array(rw).reshape(rw.shape[0], 1).reshape(-1)
coefs = pd.DataFrame(data=np.array([lw, rw, [lb] * lw.shape[0], [rb] * lw.shape[0]]).T, columns=['Linear_W', 'Ridge_W', 'Linear_b', 'Ridge_b'])
coefs.head()
code
105174236/cell_12
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance/insurance.csv')
df.isnull().sum()
le = LabelEncoder()
df['le_region'] = le.fit_transform(df.region)
df['le_smoker'] = le.fit_transform(df.smoker)
df['le_sex'] = le.fit_transform(df.sex)
print(f'Before encoding: \n{df.smoker.value_counts()}')
print(f'After encoding: \n{df.le_smoker.value_counts()}')
code
73097374/cell_9
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
num_cols = [col for col in features.columns if features[col].dtype in ('int64', 'float64')]
X = features.copy()
X_test = test[features.columns].copy()
numerical_transformer = SimpleImputer(strategy='constant')
categorical_transformer = Pipeline(steps=[('imp', SimpleImputer(strategy='most_frequent')), ('OHen', OneHotEncoder(handle_unknown='ignore', sparse=False))])
processor = ColumnTransformer(transformers=[('num', numerical_transformer, num_cols), ('cat', categorical_transformer, object_cols)])
my_pipeline = Pipeline(steps=[('processor', processor)])
X_pre = pd.DataFrame(my_pipeline.fit_transform(X))
X_pre.index = X.index
X_test_pre = pd.DataFrame(my_pipeline.transform(X_test))
X_test_pre.index = X_test.index
X_train, X_valid, y_train, y_valid = train_test_split(X_pre, y, train_size=0.8, test_size=0.2, random_state=0)
print(X_train, '\n', X_valid)
code
73097374/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
print(features.nunique())
features.head()
code
73097374/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from xgboost import XGBRegressor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
num_cols = [col for col in features.columns if features[col].dtype in ('int64', 'float64')]
X = features.copy()
X_test = test[features.columns].copy()
numerical_transformer = SimpleImputer(strategy='constant')
categorical_transformer = Pipeline(steps=[('imp', SimpleImputer(strategy='most_frequent')), ('OHen', OneHotEncoder(handle_unknown='ignore', sparse=False))])
processor = ColumnTransformer(transformers=[('num', numerical_transformer, num_cols), ('cat', categorical_transformer, object_cols)])
my_pipeline = Pipeline(steps=[('processor', processor)])
X_pre = pd.DataFrame(my_pipeline.fit_transform(X))
X_pre.index = X.index
X_test_pre = pd.DataFrame(my_pipeline.transform(X_test))
X_test_pre.index = X_test.index
model = XGBRegressor(n_estimators=5000, n_jobs=4, learning_rate=0.005, max_depth=5, colsample_bytree=0.5, tree_method='hist', random_state=0)
model.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], eval_metric='rmse', verbose=False)
preds_valid = model.predict(X_valid)
predictions = model.predict(X_test_pre)
print(predictions)
output = pd.DataFrame({'Id': X_test_pre.index, 'target': predictions})
output.to_csv('submission.csv', index=False)
code
73097374/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import xgboost as xgb
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73097374/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
num_cols = [col for col in features.columns if features[col].dtype in ('int64', 'float64')]
X = features.copy()
X_test = test[features.columns].copy()
def remove_outlier(df):
    Q1 = df.quantile(0.25)
    Q3 = df.quantile(0.75)
    IQ_range = Q3 - Q1
    df_removed = df[~((df < Q1 - 1.5 * IQ_range) | (df > Q3 + 1.5 * IQ_range))]
    return df_removed
X[num_cols] = remove_outlier(X[num_cols])
X_test[num_cols] = remove_outlier(X_test[num_cols])
plt.figure(figsize=(10, 8))
sns.boxplot(data=X[num_cols]).set(title='Review X outlier-removed data')
plt.figure(figsize=(10, 8))
sns.boxplot(data=X_test[num_cols]).set(title='Review X_test outlier-removed data')
code
73097374/cell_8
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
num_cols = [col for col in features.columns if features[col].dtype in ('int64', 'float64')]
X = features.copy()
X_test = test[features.columns].copy()
numerical_transformer = SimpleImputer(strategy='constant')
categorical_transformer = Pipeline(steps=[('imp', SimpleImputer(strategy='most_frequent')), ('OHen', OneHotEncoder(handle_unknown='ignore', sparse=False))])
processor = ColumnTransformer(transformers=[('num', numerical_transformer, num_cols), ('cat', categorical_transformer, object_cols)])
my_pipeline = Pipeline(steps=[('processor', processor)])
X_pre = pd.DataFrame(my_pipeline.fit_transform(X))
X_pre.index = X.index
X_test_pre = pd.DataFrame(my_pipeline.transform(X_test))
X_test_pre.index = X_test.index
print(X_pre, '\n', X_test_pre)
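# Note (added sketch, an assumption rather than part of the original cell):
# ColumnTransformer returns a plain array, so X_pre loses the original column
# names. In recent scikit-learn versions they could be recovered with, e.g.:
# X_pre.columns = my_pipeline.named_steps['processor'].get_feature_names_out()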
code
73097374/cell_3
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.head()
print(train)
code
73097374/cell_10
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
# X_train/X_valid and the matching targets are assumed to come from the
# train_test_split performed in an earlier cell of this notebook
model = XGBRegressor(n_estimators=5000, n_jobs=4, learning_rate=0.005, max_depth=5, colsample_bytree=0.5, tree_method='hist', random_state=0)
model.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], eval_metric='rmse', verbose=False)
preds_valid = model.predict(X_valid)
print(mean_squared_error(y_valid, preds_valid, squared=False))
code
33111929/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/diamonds/diamonds.csv')
data.dtypes
data.head(10)
code
33111929/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/diamonds/diamonds.csv')
data.dtypes
data = data.drop(data.loc[data.x <= 0].index)
data = data.drop(data.loc[data.y <= 0].index)
data = data.drop(data.loc[data.z <= 0].index)
data['ratio'] = data.x / data.y
premium = ['D', 'E', 'F', 'G', 'H']
def data_split(status):
    if status in premium:
        return 'premium'
    else:
        return 'normal'
def data_split_num(status):
    if status in premium:
        return 1
    else:
        return 0
data['data_split'] = data['color'].apply(data_split)
data['data_split_num'] = data['color'].apply(data_split_num)
# correlation matrix for 15 variables with largest correlation
corrmat = data.corr()
f, ax = plt.subplots(figsize=(12, 9))
k = 8  # number of variables for heatmap
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(data[cols].values.T)
# Generate a mask for the upper triangle
mask = np.zeros_like(cm, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
hm = sns.heatmap(cm, vmax=1, mask=mask, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
sns.countplot(y=data.cut)
plt.show()
code
33111929/cell_1
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33111929/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/diamonds/diamonds.csv')
data.dtypes
code
33111929/cell_8
[ "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/diamonds/diamonds.csv')
data.dtypes
data.info()
code
33111929/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/diamonds/diamonds.csv')
data.dtypes
data = data.drop(data.loc[data.x <= 0].index)
data = data.drop(data.loc[data.y <= 0].index)
data = data.drop(data.loc[data.z <= 0].index)
data['ratio'] = data.x / data.y
premium = ['D', 'E', 'F', 'G', 'H']
def data_split(status):
    if status in premium:
        return 'premium'
    else:
        return 'normal'
def data_split_num(status):
    if status in premium:
        return 1
    else:
        return 0
data['data_split'] = data['color'].apply(data_split)
data['data_split_num'] = data['color'].apply(data_split_num)
corrmat = data.corr()
f, ax = plt.subplots(figsize=(12, 9))
k = 8
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(data[cols].values.T)
mask = np.zeros_like(cm, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
hm = sns.heatmap(cm, vmax=1, mask=mask, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
code
33111929/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/diamonds/diamonds.csv')
data.dtypes
data = data.drop(data.loc[data.x <= 0].index)
data = data.drop(data.loc[data.y <= 0].index)
data = data.drop(data.loc[data.z <= 0].index)
data['ratio'] = data.x / data.y
premium = ['D', 'E', 'F', 'G', 'H']
def data_split(status):
    if status in premium:
        return 'premium'
    else:
        return 'normal'
def data_split_num(status):
    if status in premium:
        return 1
    else:
        return 0
data['data_split'] = data['color'].apply(data_split)
data['data_split_num'] = data['color'].apply(data_split_num)
# correlation matrix for 15 variables with largest correlation
corrmat = data.corr()
f, ax = plt.subplots(figsize=(12, 9))
k = 8  # number of variables for heatmap
cols = corrmat.nlargest(k, 'price')['price'].index
cm = np.corrcoef(data[cols].values.T)
# Generate a mask for the upper triangle
mask = np.zeros_like(cm, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
hm = sns.heatmap(cm, vmax=1, mask=mask, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
print(' Diamond Carat = ' + str(np.mean(data.carat)))
plt.subplots(figsize=(10, 7))
sns.distplot(data.carat)
plt.show()
code
33111929/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/diamonds/diamonds.csv')
data.dtypes
data = data.drop(data.loc[data.x <= 0].index)
data = data.drop(data.loc[data.y <= 0].index)
data = data.drop(data.loc[data.z <= 0].index)
data['ratio'] = data.x / data.y
premium = ['D', 'E', 'F', 'G', 'H']
def data_split(status):
    if status in premium:
        return 'premium'
    else:
        return 'normal'
def data_split_num(status):
    if status in premium:
        return 1
    else:
        return 0
data['data_split'] = data['color'].apply(data_split)
data['data_split_num'] = data['color'].apply(data_split_num)
data.head()
code
33111929/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/diamonds/diamonds.csv')
data.dtypes
data.describe()
code
17144077/cell_2
[ "text_html_output_1.png" ]
import os
import os
import numpy as np
import pandas as pd
import os
import os
import cv2
import random
import numpy as np
import pandas as pd
import scipy as sp
import torch
from fastai.vision import *
import glob
print(os.listdir('../input/fastai-pretrained-models'))
code
17144077/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
17144077/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
SIZE = 224
# PATH is assumed to point at the aptos2019 input directory and is defined in
# an elided cell of this notebook
train_df = pd.read_csv(PATH + '/train.csv')
test_df = pd.read_csv(PATH + '/sample_submission.csv')
train = ImageList.from_df(train_df, path=PATH, cols='id_code', folder='train_images', suffix='.png')
test = ImageList.from_df(test_df, path=PATH, cols='id_code', folder='test_images', suffix='.png')
from sklearn.metrics import cohen_kappa_score
data = train.split_by_rand_pct(0.2).label_from_df(cols='diagnosis', label_cls=FloatList).add_test(test).transform(get_transforms(), size=SIZE).databunch(path=Path('.'), bs=32).normalize(imagenet_stats)
data.show_batch(rows=3, figsize=(7, 6))
code
17144077/cell_15
[ "text_html_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
SIZE = 224
train_df = pd.read_csv(PATH + '/train.csv')
test_df = pd.read_csv(PATH + '/sample_submission.csv')
train = ImageList.from_df(train_df, path=PATH, cols='id_code', folder='train_images', suffix='.png')
test = ImageList.from_df(test_df, path=PATH, cols='id_code', folder='test_images', suffix='.png')
def quadratic_kappa(y_hat, y):
    return torch.tensor(cohen_kappa_score(torch.round(y_hat), y, weights='quadratic'), device='cuda:0')
from sklearn.metrics import cohen_kappa_score
data = train.split_by_rand_pct(0.2).label_from_df(cols='diagnosis', label_cls=FloatList).add_test(test).transform(get_transforms(), size=SIZE).databunch(path=Path('.'), bs=32).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet101, metrics=[quadratic_kappa], pretrained=True)
learn.fit_one_cycle(6, slice(0.00275, 0.0275))
learn.save('stage1')
learn.unfreeze()
learn.lr_find()
lr = 0.00275
learn.fit_one_cycle(4, slice(1.5e-06, lr / 8), wd=0.05)
code
17144077/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
SIZE = 224
train_df = pd.read_csv(PATH + '/train.csv')
test_df = pd.read_csv(PATH + '/sample_submission.csv')
train = ImageList.from_df(train_df, path=PATH, cols='id_code', folder='train_images', suffix='.png')
test = ImageList.from_df(test_df, path=PATH, cols='id_code', folder='test_images', suffix='.png')
def quadratic_kappa(y_hat, y):
    return torch.tensor(cohen_kappa_score(torch.round(y_hat), y, weights='quadratic'), device='cuda:0')
from sklearn.metrics import cohen_kappa_score
data = train.split_by_rand_pct(0.2).label_from_df(cols='diagnosis', label_cls=FloatList).add_test(test).transform(get_transforms(), size=SIZE).databunch(path=Path('.'), bs=32).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet101, metrics=[quadratic_kappa], pretrained=True)
learn.fit_one_cycle(6, slice(0.00275, 0.0275))
learn.save('stage1')
learn.unfreeze()
learn.lr_find()
lr = 0.00275
learn.fit_one_cycle(4, slice(1.5e-06, lr / 8), wd=0.05)
valid_preds, valid_y = learn.TTA(ds_type=DatasetType.Valid)
test_preds, _ = learn.TTA(ds_type=DatasetType.Test)
code
17144077/cell_14
[ "image_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
SIZE = 224
train_df = pd.read_csv(PATH + '/train.csv')
test_df = pd.read_csv(PATH + '/sample_submission.csv')
train = ImageList.from_df(train_df, path=PATH, cols='id_code', folder='train_images', suffix='.png')
test = ImageList.from_df(test_df, path=PATH, cols='id_code', folder='test_images', suffix='.png')
def quadratic_kappa(y_hat, y):
    return torch.tensor(cohen_kappa_score(torch.round(y_hat), y, weights='quadratic'), device='cuda:0')
from sklearn.metrics import cohen_kappa_score
data = train.split_by_rand_pct(0.2).label_from_df(cols='diagnosis', label_cls=FloatList).add_test(test).transform(get_transforms(), size=SIZE).databunch(path=Path('.'), bs=32).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet101, metrics=[quadratic_kappa], pretrained=True)
learn.fit_one_cycle(6, slice(0.00275, 0.0275))
learn.save('stage1')
learn.unfreeze()
learn.lr_find()
learn.recorder.plot(suggestion=True)
code
17144077/cell_22
[ "text_plain_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import cv2
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy as sp
SIZE = 224
train_df = pd.read_csv(PATH + '/train.csv')
test_df = pd.read_csv(PATH + '/sample_submission.csv')
def crop_image(img, tol=7):
    mask = img > tol
    return img[np.ix_(mask.any(1), mask.any(0))]
def open_aptos2019_image(fn, convert_mode, after_open) -> Image:
    image = cv2.imread(fn)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = crop_image(image)
    image = cv2.resize(image, (SIZE, SIZE))
    image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), SIZE / 10), -4, 128)
    return Image(pil2tensor(image, np.float32).div_(255))
vision.data.open_image = open_aptos2019_image
class OptimizedRounder(object):
    def __init__(self):
        self.coef_ = 0
    def _kappa_loss(self, coef, X, y):
        X_p = np.copy(X)
        for i, pred in enumerate(X_p):
            if pred < coef[0]:
                X_p[i] = 0
            elif pred >= coef[0] and pred < coef[1]:
                X_p[i] = 1
            elif pred >= coef[1] and pred < coef[2]:
                X_p[i] = 2
            elif pred >= coef[2] and pred < coef[3]:
                X_p[i] = 3
            else:
                X_p[i] = 4
        ll = cohen_kappa_score(y, X_p, weights='quadratic')
        return -ll
    def fit(self, X, y):
        loss_partial = partial(self._kappa_loss, X=X, y=y)
        initial_coef = [0.5, 1.5, 2.5, 3.5]
        self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')
    def predict(self, X, coef):
        X_p = np.copy(X)
        for i, pred in enumerate(X_p):
            if pred < coef[0]:
                X_p[i] = 0
            elif pred >= coef[0] and pred < coef[1]:
                X_p[i] = 1
            elif pred >= coef[1] and pred < coef[2]:
                X_p[i] = 2
            elif pred >= coef[2] and pred < coef[3]:
                X_p[i] = 3
            else:
                X_p[i] = 4
        return X_p
    def coefficients(self):
        return self.coef_['x']
optR = OptimizedRounder()
optR.fit(valid_preds, valid_y)
coefficients = optR.coefficients()
valid_predictions = optR.predict(valid_preds, coefficients)[:, 0].astype(int)
test_predictions = optR.predict(test_preds, coefficients)[:, 0].astype(int)
valid_score = cohen_kappa_score(valid_y.numpy().astype(int), valid_predictions, weights='quadratic')
test_df.diagnosis = test_predictions
test_df.to_csv('submission.csv', index=None)
test_df.head()
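# Usage note (added sketch; the numbers are illustrative assumptions, not the
# cell's actual output): OptimizedRounder searches, via Nelder-Mead, for the
# four cut points that maximise quadratic kappa on the validation predictions.
# A fitted result might look like coefficients = [0.53, 1.48, 2.61, 3.37],
# after which predict() bins the raw regression outputs into grades 0-4.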
code
17144077/cell_12
[ "text_plain_output_1.png" ]
from sklearn.metrics import cohen_kappa_score
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
SIZE = 224
train_df = pd.read_csv(PATH + '/train.csv')
test_df = pd.read_csv(PATH + '/sample_submission.csv')
train = ImageList.from_df(train_df, path=PATH, cols='id_code', folder='train_images', suffix='.png')
test = ImageList.from_df(test_df, path=PATH, cols='id_code', folder='test_images', suffix='.png')
def quadratic_kappa(y_hat, y):
    return torch.tensor(cohen_kappa_score(torch.round(y_hat), y, weights='quadratic'), device='cuda:0')
from sklearn.metrics import cohen_kappa_score
data = train.split_by_rand_pct(0.2).label_from_df(cols='diagnosis', label_cls=FloatList).add_test(test).transform(get_transforms(), size=SIZE).databunch(path=Path('.'), bs=32).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet101, metrics=[quadratic_kappa], pretrained=True)
learn.fit_one_cycle(6, slice(0.00275, 0.0275))
learn.recorder.plot_losses()
learn.recorder.plot_metrics()
code
105190429/cell_9
[ "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.head()
code
105190429/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.describe().T
data.isnull().sum()
duplicate = data[data.duplicated()]
data.drop_duplicates(inplace=True)
data = data.reset_index(drop=True)
plt.figure(figsize=(12, 6))
sns.set(style='darkgrid')
ax = sns.countplot(y='output', data=data, palette='viridis')
ax.set_yticklabels(['Less Chance', 'Higher Chance'])
plt.ylabel('Chance to Have Heart Attack')
plt.xlabel('Number of Person')
plt.title('Output distribution', fontsize=20, loc='center')
plt.show()
fig, ax = plt.subplots(1, 2, figsize=(15, 8))
sns.histplot(x='age', data=data, kde=True, hue='output', ax=ax[0], palette='viridis')
ax[0].set_title('Age distribution on Output')
ax[0].set_ylabel('Number of Person')
ax[0].set_xlabel('age')
ax[0].legend(title='Heart Attack Chances', labels=['Higher Chance', 'Less Chance'], loc='upper right')
sns.boxplot(x='output', data=data, y='age', ax=ax[1], palette='viridis')
ax[1].set_title('Age distribution on Output')
ax[1].set_xticklabels(['Less Chance', 'Higher Chance'])
ax[1].set_xlabel('Chance to Have Heart Attack')
plt.tight_layout()
plt.show()
code
105190429/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.describe().T
data.isnull().sum()
duplicate = data[data.duplicated()]
data.drop_duplicates(inplace=True)
data = data.reset_index(drop=True)
plt.figure(figsize=(12, 6))
sns.set(style='darkgrid')
ax = sns.countplot(y='output', data=data, palette='viridis')
ax.set_yticklabels(['Less Chance', 'Higher Chance'])
plt.ylabel('Chance to Have Heart Attack')
plt.xlabel('Number of Person')
plt.title('Output distribution', fontsize=20, loc='center')
plt.show()
code
105190429/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.describe().T
data.info()
code
105190429/cell_1
[ "text_plain_output_1.png" ]
!pip install catboost
!pip install shap
code
105190429/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.describe().T
data.isnull().sum()
duplicate = data[data.duplicated()]
duplicate
code
105190429/cell_32
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.describe().T
data.isnull().sum()
duplicate = data[data.duplicated()]
data.drop_duplicates(inplace=True)
data = data.reset_index(drop=True)
plt.figure(figsize=(12, 6))
sns.set(style='darkgrid')
ax = sns.countplot(y='output', data=data, palette='viridis')
ax.set_yticklabels(['Less Chance', 'Higher Chance'])
plt.ylabel('Chance to Have Heart Attack')
plt.xlabel('Number of Person')
plt.title('Output distribution', fontsize=20, loc='center')
plt.show()
fig, ax = plt.subplots(1, 2, figsize=(15, 8))
sns.histplot(x='age', data=data, kde=True, hue='output', ax=ax[0], palette='viridis')
ax[0].set_title('Age distribution on Output')
ax[0].set_ylabel('Number of Person')
ax[0].set_xlabel('age')
ax[0].legend(title='Heart Attack Chances', labels=['Higher Chance', 'Less Chance'], loc='upper right')
sns.boxplot(x='output', data=data, y='age', ax=ax[1], palette='viridis')
ax[1].set_title('Age distribution on Output')
ax[1].set_xticklabels(['Less Chance', 'Higher Chance'])
ax[1].set_xlabel('Chance to Have Heart Attack')
plt.tight_layout()
plt.show()
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
sns.countplot(x='sex', data=data, hue='output', palette='viridis', ax=ax[0])
ax[0].set_title('Sex distribution on output', fontsize=15)
ax[0].legend(title='Heart Attack Chances', labels=['Less Chance', 'Higher Chance'], loc='upper left')
ax[0].set_ylabel('Number of Person')
sns.countplot(x='sex', data=data, palette='flare', ax=ax[1])
ax[1].set_title('Sex distribution', fontsize=15)
ax[1].set_ylabel('Number of Person')
plt.tight_layout()
plt.show()
code
105190429/cell_35
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.describe().T
data.isnull().sum()
duplicate = data[data.duplicated()]
data.drop_duplicates(inplace=True)
data = data.reset_index(drop=True)
plt.figure(figsize=(12, 6))
sns.set(style='darkgrid')
ax = sns.countplot(y='output', data=data, palette='viridis')
ax.set_yticklabels(['Less Chance', 'Higher Chance'])
plt.ylabel('Chance to Have Heart Attack')
plt.xlabel('Number of Person')
plt.title('Output distribution', fontsize=20, loc='center')
plt.show()
fig, ax = plt.subplots(1, 2, figsize=(15, 8))
sns.histplot(x='age', data=data, kde=True, hue='output', ax=ax[0], palette='viridis')
ax[0].set_title('Age distribution on Output')
ax[0].set_ylabel('Number of Person')
ax[0].set_xlabel('age')
ax[0].legend(title='Heart Attack Chances', labels=['Higher Chance', 'Less Chance'], loc='upper right')
sns.boxplot(x='output', data=data, y='age', ax=ax[1], palette='viridis')
ax[1].set_title('Age distribution on Output')
ax[1].set_xticklabels(['Less Chance', 'Higher Chance'])
ax[1].set_xlabel('Chance to Have Heart Attack')
plt.tight_layout()
plt.show()
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
sns.countplot(x='sex', data=data, hue='output', palette='viridis', ax=ax[0])
ax[0].set_title('Sex distribution on output', fontsize=15)
ax[0].legend(title='Heart Attack Chances', labels=['Less Chance', 'Higher Chance'], loc='upper left')
ax[0].set_ylabel('Number of Person')
sns.countplot(x='sex', data=data, palette='flare', ax=ax[1])
ax[1].set_title('Sex distribution', fontsize=15)
ax[1].set_ylabel('Number of Person')
plt.tight_layout()
plt.show()
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
sns.countplot(x='cp', data=data, hue='output', palette='viridis', ax=ax[0])
ax[0].set_title('Chest Pain Type Distribution on Output')
ax[0].set_ylabel('Number of Person')
ax[0].set_xlabel('Chest Pain Type')
ax[0].legend(title='Heart Attack Chances', labels=['Less Chance', 'Higher Chance'], loc='upper right')
ax[0].set_xticklabels(['typical angina', 'atypical agina', 'non-anginal pain', 'asymptomatic'], rotation=-45)
sns.countplot(x='cp', data=data, ax=ax[1], palette='flare')
ax[1].set_title('Chest Pain Type Distribution')
ax[1].set_ylabel('Number of Person')
ax[1].set_xlabel('Chest Pain Type')
ax[1].set_xticklabels(['typical angina', 'atypical agina', 'non-anginal pain', 'asymptomatic'], rotation=-45)
plt.tight_layout()
plt.show()
code
105190429/cell_24
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.describe().T
data.isnull().sum()
duplicate = data[data.duplicated()]
data.drop_duplicates(inplace=True)
data = data.reset_index(drop=True)
print(f'There are {data.shape[0]} records and {data.shape[1]} columns on this dataset.')
code
105190429/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.describe().T
data.isnull().sum()
code