Columns: path (string, 13-17 chars) · screenshot_names (sequence, 1-873 items) · code (string, 0-40.4k chars) · cell_type (string, 1 class)
90149707/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/housing-prices-dataset/Housing.csv')
df.head()
code
90149707/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd

df = pd.read_csv('../input/housing-prices-dataset/Housing.csv')
df = df.drop(columns=['parking', 'bedrooms', 'bathrooms'])
X = df[['area', 'stories']]
y = df['price']
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
code
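A natural follow-up to the cell above is to pair each coefficient with its feature name; a minimal sketch, reusing the fitted model and X from that cell:

import pandas as pd

# One coefficient per feature column; the intercept is the baseline price.
coefs = pd.Series(model.coef_, index=X.columns)
print(coefs)
print(model.intercept_)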
90149707/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/housing-prices-dataset/Housing.csv')
df = df.drop(columns=['parking', 'bedrooms', 'bathrooms'])
X = df[['area', 'stories']]
y = df['price']
y.head()
code
90149707/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/housing-prices-dataset/Housing.csv')
sns.lmplot(x='stories', y='price', data=df, ci=None)
code
1008271/cell_13
[ "text_plain_output_1.png" ]
from sklearn.metrics import log_loss
from sklearn.tree import DecisionTreeClassifier

# DecisionTreeClassifier takes no n_estimators argument; the original call
# DecisionTreeClassifier(n_estimators=1000) raises a TypeError. That parameter
# belongs to ensembles such as RandomForestClassifier.
clf = DecisionTreeClassifier()
# X_train, y_train, X_val, y_val are assumed to come from earlier cells.
clf.fit(X_train, y_train)
y_val_pred = clf.predict_proba(X_val)
log_loss(y_val, y_val_pred)
code
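If an ensemble of 1000 trees was what n_estimators was reaching for, a minimal sketch (assuming the same X_train/X_val/y_train/y_val as the cell above) would be:

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss

clf = RandomForestClassifier(n_estimators=1000)  # n_estimators is valid here
clf.fit(X_train, y_train)
log_loss(y_val, clf.predict_proba(X_val))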
1008271/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

trn = pd.read_json(open('../input/train.json', 'r'))
tst = pd.read_json(open('../input/test.json', 'r'))
list(trn.columns.values)
code
1008271/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

trn = pd.read_json(open('../input/train.json', 'r'))
tst = pd.read_json(open('../input/test.json', 'r'))
list(trn.columns.values)
code
1008271/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
1008271/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

trn = pd.read_json(open('../input/train.json', 'r'))
tst = pd.read_json(open('../input/test.json', 'r'))
list(trn.columns.values)
trn.head()
code
1008271/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

trn = pd.read_json(open('../input/train.json', 'r'))
tst = pd.read_json(open('../input/test.json', 'r'))
print('Train set: ', trn.shape)
print('Test set: ', tst.shape)
code
324967/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
cursor = con.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")

def load(what='NationalNames'):
    assert what in ('NationalNames', 'StateNames')
    cols = ['Name', 'Year', 'Gender', 'Count']
    if what == 'StateNames':
        cols.append('State')
    df = pd.read_sql_query('SELECT {} from {}'.format(','.join(cols), what), con)
    return df

df2 = load(what='StateNames')
tmp = df2.groupby(['Year', 'State']).agg({'Count': 'sum'}).reset_index()
largest_states = tmp.groupby('State').agg({'Count': 'sum'}).sort_values('Count', ascending=False).index[:5].tolist()
tmp.pivot(index='Year', columns='State', values='Count')[largest_states].plot()
code
324967/cell_4
[ "text_plain_output_1.png" ]
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
cursor = con.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
print(cursor.fetchall())
code
324967/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
324967/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
cursor = con.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")

def load(what='NationalNames'):
    assert what in ('NationalNames', 'StateNames')
    cols = ['Name', 'Year', 'Gender', 'Count']
    if what == 'StateNames':
        cols.append('State')
    df = pd.read_sql_query('SELECT {} from {}'.format(','.join(cols), what), con)
    return df

df = load(what='NationalNames')
df.query('Name=="Alice"')[['Year', 'Count']].groupby('Year').sum().plot()
code
105177221/cell_21
[ "text_plain_output_1.png" ]
from category_encoders import OneHotEncoder, WOEEncoder
from sklearn.base import clone, TransformerMixin, BaseEstimator
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
from sklearn.impute import SimpleImputer, IterativeImputer, KNNImputer, MissingIndicator
from sklearn.linear_model import BayesianRidge, Ridge, Lasso, HuberRegressor
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
import gc
import numpy as np
import pandas as pd
import time

RANDOM_SEED = 153

def prepreprocessing(df_train, df_test):
    # Missing-value flags and a derived area feature on the combined frame.
    data = pd.concat([df_train, df_test])
    data['m3_missing'] = data['measurement_3'].isnull().astype(np.int8)
    data['m5_missing'] = data['measurement_5'].isnull().astype(np.int8)
    data['area'] = data['attribute_2'] * data['attribute_3']
    feature = [f for f in df_test.columns if f.startswith('measurement') or f == 'loading']
    full_fill_dict = {}
    full_fill_dict['measurement_17'] = {
        'A': ['measurement_5', 'measurement_6', 'measurement_8'],
        'B': ['measurement_4', 'measurement_5', 'measurement_7'],
        'C': ['measurement_5', 'measurement_7', 'measurement_8', 'measurement_9'],
        'D': ['measurement_5', 'measurement_6', 'measurement_7', 'measurement_8'],
        'E': ['measurement_4', 'measurement_5', 'measurement_6', 'measurement_8'],
        'F': ['measurement_4', 'measurement_5', 'measurement_6', 'measurement_7'],
        'G': ['measurement_4', 'measurement_6', 'measurement_8', 'measurement_9'],
        'H': ['measurement_4', 'measurement_5', 'measurement_7', 'measurement_8', 'measurement_9'],
        'I': ['measurement_3', 'measurement_7', 'measurement_8']}
    col = [col for col in df_test.columns if 'measurement' not in col] + ['loading', 'm3_missing', 'm5_missing']
    # Rank measurement_3..16 by the sum of their three strongest correlations.
    a = []
    b = []
    for x in range(3, 17):
        corr = np.absolute(data.drop(col, axis=1).corr()[f'measurement_{x}']).sort_values(ascending=False)
        a.append(np.round(np.sum(corr[1:4]), 3))
        b.append(f'measurement_{x}')
    c = pd.DataFrame()
    c['Selected columns'] = b
    c['correlation total'] = a
    c = c.sort_values(by='correlation total', ascending=False).reset_index(drop=True)
    # For the ten best-correlated measurements, record their top-4 predictors per product code.
    for i in range(10):
        measurement_col = 'measurement_' + c.iloc[i, 0][12:]
        fill_dict = {}
        for x in data.product_code.unique():
            corr = np.absolute(data[data.product_code == x].drop(col, axis=1).corr()[measurement_col]).sort_values(ascending=False)
            measurement_col_dic = {}
            measurement_col_dic[measurement_col] = corr[1:5].index.tolist()
            fill_dict[x] = measurement_col_dic[measurement_col]
        full_fill_dict[measurement_col] = fill_dict
    feature = [f for f in data.columns if f.startswith('measurement') or f == 'loading']
    nullValue_cols = [col for col in df_train.columns if df_train[col].isnull().sum() != 0]
    # Per product code: regression fill via HuberRegressor, then KNN-impute the rest.
    for code in data.product_code.unique():
        for measurement_col in list(full_fill_dict.keys()):
            tmp = data[data.product_code == code]
            column = full_fill_dict[measurement_col][code]
            tmp_train = tmp[column + [measurement_col]].dropna(how='any')
            tmp_test = tmp[(tmp[column].isnull().sum(axis=1) == 0) & tmp[measurement_col].isnull()]
            model = HuberRegressor(epsilon=1.9)
            model.fit(tmp_train[column], tmp_train[measurement_col])
            data.loc[(data.product_code == code) & (data[column].isnull().sum(axis=1) == 0) & data[measurement_col].isnull(), measurement_col] = model.predict(tmp_test[column])
        NA = data.loc[data['product_code'] == code, nullValue_cols].isnull().sum().sum()
        model1 = KNNImputer(n_neighbors=3)
        data.loc[data.product_code == code, feature] = model1.fit_transform(data.loc[data.product_code == code, feature])
    data['measurement_avg'] = data[[f'measurement_{i}' for i in range(3, 17)]].mean(axis=1)
    df_train = data.iloc[:df_train.shape[0], :]
    df_test = data.iloc[df_train.shape[0]:, :]
    features = ['loading', 'attribute_0', 'measurement_17', 'measurement_0', 'measurement_1', 'measurement_2', 'area', 'm3_missing', 'm5_missing', 'measurement_avg']
    encoder = WOEEncoder(cols=['attribute_0'])
    df_train = encoder.fit_transform(df_train, target)
    df_test = encoder.transform(df_test)
    df_train['loading'] = np.log1p(df_train['loading'])
    df_test['loading'] = np.log1p(df_test['loading'])
    return (df_train, df_test, features)

submission = pd.read_csv('../input/tabular-playground-series-aug-2022/sample_submission.csv')
train = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv', index_col='id')
target = train['failure'].copy()
gc.collect()

class ProductSplitter3v2:

    def split(self, X: pd.DataFrame, _y=None, _groups=None):
        indices = list(X.groupby('product_code').indices.values())
        for i in range(len(indices)):
            for j in range(i + 1, len(indices)):
                yield [np.concatenate([ix for k, ix in enumerate(indices) if k not in [i, j]]),
                       np.concatenate([ix for k, ix in enumerate(indices) if k in [i, j]])]

cv = ProductSplitter3v2()
SPLITS = list(cv.split(train))

def score_pipeline(pipeline):
    # `columns` is assumed to come from prepreprocessing(...) in another cell.
    test_preds = np.zeros((test.shape[0],))
    oof_preds = np.zeros((train.shape[0],))
    scores = np.zeros(len(SPLITS))
    start = time.time()
    for fold, (train_idx, valid_idx) in enumerate(SPLITS):
        X_train = train[columns].iloc[train_idx]
        X_valid = train[columns].iloc[valid_idx]
        y_train = target.iloc[train_idx]
        y_valid = target.iloc[valid_idx]
        model = clone(pipeline)
        model.fit(X_train, y_train)
        valid_preds = model.predict_proba(X_valid)[:, 1]
        test_preds += model.predict_proba(test[columns])[:, 1] / len(SPLITS)
        oof_preds[valid_idx] = valid_preds
        scores[fold] = roc_auc_score(y_valid, valid_preds)
    end = time.time()
    temp = np.sort(scores)
    stats = np.array([np.mean(temp[:3]), np.mean(scores), np.median(scores), np.mean(temp[-3:])])
    return (np.array([*scores, *stats]), test_preds)

cv_scores = dict()
cv_scores['Scheme'] = [*[f'Split {i}' for i in range(len(SPLITS))], *['Worst 3', 'Average', 'Median', 'Best 3']]

model = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(splitter='random', random_state=RANDOM_SEED), random_state=RANDOM_SEED)
pipeline = make_pipeline(RobustScaler(), model)
scores, preds = score_pipeline(pipeline)
cv_scores[f'AdaboostExtraTree'] = scores

model = BaggingClassifier(base_estimator=DecisionTreeClassifier(splitter='random', random_state=RANDOM_SEED), n_jobs=-1, random_state=RANDOM_SEED)
pipeline = make_pipeline(RobustScaler(), model)
scores, preds = score_pipeline(pipeline)
cv_scores[f'BaggingExtraTree'] = scores

model = ExtraTreesClassifier(n_jobs=-1, random_state=RANDOM_SEED, max_features=None)
pipeline = make_pipeline(RobustScaler(), model)
scores, preds = score_pipeline(pipeline)
cv_scores[f'ExtraTrees'] = scores
code
105177221/cell_6
[ "text_plain_output_1.png" ]
from category_encoders import OneHotEncoder, WOEEncoder
from sklearn.impute import SimpleImputer, IterativeImputer, KNNImputer, MissingIndicator
from sklearn.linear_model import BayesianRidge, Ridge, Lasso, HuberRegressor
import gc
import numpy as np
import pandas as pd

def prepreprocessing(df_train, df_test):
    data = pd.concat([df_train, df_test])
    data['m3_missing'] = data['measurement_3'].isnull().astype(np.int8)
    data['m5_missing'] = data['measurement_5'].isnull().astype(np.int8)
    data['area'] = data['attribute_2'] * data['attribute_3']
    feature = [f for f in df_test.columns if f.startswith('measurement') or f == 'loading']
    full_fill_dict = {}
    full_fill_dict['measurement_17'] = {
        'A': ['measurement_5', 'measurement_6', 'measurement_8'],
        'B': ['measurement_4', 'measurement_5', 'measurement_7'],
        'C': ['measurement_5', 'measurement_7', 'measurement_8', 'measurement_9'],
        'D': ['measurement_5', 'measurement_6', 'measurement_7', 'measurement_8'],
        'E': ['measurement_4', 'measurement_5', 'measurement_6', 'measurement_8'],
        'F': ['measurement_4', 'measurement_5', 'measurement_6', 'measurement_7'],
        'G': ['measurement_4', 'measurement_6', 'measurement_8', 'measurement_9'],
        'H': ['measurement_4', 'measurement_5', 'measurement_7', 'measurement_8', 'measurement_9'],
        'I': ['measurement_3', 'measurement_7', 'measurement_8']}
    col = [col for col in df_test.columns if 'measurement' not in col] + ['loading', 'm3_missing', 'm5_missing']
    a = []
    b = []
    for x in range(3, 17):
        corr = np.absolute(data.drop(col, axis=1).corr()[f'measurement_{x}']).sort_values(ascending=False)
        a.append(np.round(np.sum(corr[1:4]), 3))
        b.append(f'measurement_{x}')
    c = pd.DataFrame()
    c['Selected columns'] = b
    c['correlation total'] = a
    c = c.sort_values(by='correlation total', ascending=False).reset_index(drop=True)
    for i in range(10):
        measurement_col = 'measurement_' + c.iloc[i, 0][12:]
        fill_dict = {}
        for x in data.product_code.unique():
            corr = np.absolute(data[data.product_code == x].drop(col, axis=1).corr()[measurement_col]).sort_values(ascending=False)
            measurement_col_dic = {}
            measurement_col_dic[measurement_col] = corr[1:5].index.tolist()
            fill_dict[x] = measurement_col_dic[measurement_col]
        full_fill_dict[measurement_col] = fill_dict
    feature = [f for f in data.columns if f.startswith('measurement') or f == 'loading']
    nullValue_cols = [col for col in df_train.columns if df_train[col].isnull().sum() != 0]
    for code in data.product_code.unique():
        for measurement_col in list(full_fill_dict.keys()):
            tmp = data[data.product_code == code]
            column = full_fill_dict[measurement_col][code]
            tmp_train = tmp[column + [measurement_col]].dropna(how='any')
            tmp_test = tmp[(tmp[column].isnull().sum(axis=1) == 0) & tmp[measurement_col].isnull()]
            model = HuberRegressor(epsilon=1.9)
            model.fit(tmp_train[column], tmp_train[measurement_col])
            data.loc[(data.product_code == code) & (data[column].isnull().sum(axis=1) == 0) & data[measurement_col].isnull(), measurement_col] = model.predict(tmp_test[column])
        NA = data.loc[data['product_code'] == code, nullValue_cols].isnull().sum().sum()
        model1 = KNNImputer(n_neighbors=3)
        data.loc[data.product_code == code, feature] = model1.fit_transform(data.loc[data.product_code == code, feature])
    data['measurement_avg'] = data[[f'measurement_{i}' for i in range(3, 17)]].mean(axis=1)
    df_train = data.iloc[:df_train.shape[0], :]
    df_test = data.iloc[df_train.shape[0]:, :]
    features = ['loading', 'attribute_0', 'measurement_17', 'measurement_0', 'measurement_1', 'measurement_2', 'area', 'm3_missing', 'm5_missing', 'measurement_avg']
    encoder = WOEEncoder(cols=['attribute_0'])
    df_train = encoder.fit_transform(df_train, target)
    df_test = encoder.transform(df_test)
    df_train['loading'] = np.log1p(df_train['loading'])
    df_test['loading'] = np.log1p(df_test['loading'])
    return (df_train, df_test, features)

submission = pd.read_csv('../input/tabular-playground-series-aug-2022/sample_submission.csv')
train = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv', index_col='id')
target = train['failure'].copy()
gc.collect()
train.head()
code
105177221/cell_19
[ "text_html_output_1.png" ]
from category_encoders import OneHotEncoder, WOEEncoder
from sklearn.base import clone, TransformerMixin, BaseEstimator
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from sklearn.impute import SimpleImputer, IterativeImputer, KNNImputer, MissingIndicator
from sklearn.linear_model import BayesianRidge, Ridge, Lasso, HuberRegressor
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
import gc
import numpy as np
import pandas as pd
import time

RANDOM_SEED = 153

def prepreprocessing(df_train, df_test):
    data = pd.concat([df_train, df_test])
    data['m3_missing'] = data['measurement_3'].isnull().astype(np.int8)
    data['m5_missing'] = data['measurement_5'].isnull().astype(np.int8)
    data['area'] = data['attribute_2'] * data['attribute_3']
    feature = [f for f in df_test.columns if f.startswith('measurement') or f == 'loading']
    full_fill_dict = {}
    full_fill_dict['measurement_17'] = {
        'A': ['measurement_5', 'measurement_6', 'measurement_8'],
        'B': ['measurement_4', 'measurement_5', 'measurement_7'],
        'C': ['measurement_5', 'measurement_7', 'measurement_8', 'measurement_9'],
        'D': ['measurement_5', 'measurement_6', 'measurement_7', 'measurement_8'],
        'E': ['measurement_4', 'measurement_5', 'measurement_6', 'measurement_8'],
        'F': ['measurement_4', 'measurement_5', 'measurement_6', 'measurement_7'],
        'G': ['measurement_4', 'measurement_6', 'measurement_8', 'measurement_9'],
        'H': ['measurement_4', 'measurement_5', 'measurement_7', 'measurement_8', 'measurement_9'],
        'I': ['measurement_3', 'measurement_7', 'measurement_8']}
    col = [col for col in df_test.columns if 'measurement' not in col] + ['loading', 'm3_missing', 'm5_missing']
    a = []
    b = []
    for x in range(3, 17):
        corr = np.absolute(data.drop(col, axis=1).corr()[f'measurement_{x}']).sort_values(ascending=False)
        a.append(np.round(np.sum(corr[1:4]), 3))
        b.append(f'measurement_{x}')
    c = pd.DataFrame()
    c['Selected columns'] = b
    c['correlation total'] = a
    c = c.sort_values(by='correlation total', ascending=False).reset_index(drop=True)
    for i in range(10):
        measurement_col = 'measurement_' + c.iloc[i, 0][12:]
        fill_dict = {}
        for x in data.product_code.unique():
            corr = np.absolute(data[data.product_code == x].drop(col, axis=1).corr()[measurement_col]).sort_values(ascending=False)
            measurement_col_dic = {}
            measurement_col_dic[measurement_col] = corr[1:5].index.tolist()
            fill_dict[x] = measurement_col_dic[measurement_col]
        full_fill_dict[measurement_col] = fill_dict
    feature = [f for f in data.columns if f.startswith('measurement') or f == 'loading']
    nullValue_cols = [col for col in df_train.columns if df_train[col].isnull().sum() != 0]
    for code in data.product_code.unique():
        for measurement_col in list(full_fill_dict.keys()):
            tmp = data[data.product_code == code]
            column = full_fill_dict[measurement_col][code]
            tmp_train = tmp[column + [measurement_col]].dropna(how='any')
            tmp_test = tmp[(tmp[column].isnull().sum(axis=1) == 0) & tmp[measurement_col].isnull()]
            model = HuberRegressor(epsilon=1.9)
            model.fit(tmp_train[column], tmp_train[measurement_col])
            data.loc[(data.product_code == code) & (data[column].isnull().sum(axis=1) == 0) & data[measurement_col].isnull(), measurement_col] = model.predict(tmp_test[column])
        NA = data.loc[data['product_code'] == code, nullValue_cols].isnull().sum().sum()
        model1 = KNNImputer(n_neighbors=3)
        data.loc[data.product_code == code, feature] = model1.fit_transform(data.loc[data.product_code == code, feature])
    data['measurement_avg'] = data[[f'measurement_{i}' for i in range(3, 17)]].mean(axis=1)
    df_train = data.iloc[:df_train.shape[0], :]
    df_test = data.iloc[df_train.shape[0]:, :]
    features = ['loading', 'attribute_0', 'measurement_17', 'measurement_0', 'measurement_1', 'measurement_2', 'area', 'm3_missing', 'm5_missing', 'measurement_avg']
    encoder = WOEEncoder(cols=['attribute_0'])
    df_train = encoder.fit_transform(df_train, target)
    df_test = encoder.transform(df_test)
    df_train['loading'] = np.log1p(df_train['loading'])
    df_test['loading'] = np.log1p(df_test['loading'])
    return (df_train, df_test, features)

submission = pd.read_csv('../input/tabular-playground-series-aug-2022/sample_submission.csv')
train = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv', index_col='id')
target = train['failure'].copy()
gc.collect()

class ProductSplitter3v2:

    def split(self, X: pd.DataFrame, _y=None, _groups=None):
        indices = list(X.groupby('product_code').indices.values())
        for i in range(len(indices)):
            for j in range(i + 1, len(indices)):
                yield [np.concatenate([ix for k, ix in enumerate(indices) if k not in [i, j]]),
                       np.concatenate([ix for k, ix in enumerate(indices) if k in [i, j]])]

cv = ProductSplitter3v2()
SPLITS = list(cv.split(train))

def score_pipeline(pipeline):
    test_preds = np.zeros((test.shape[0],))
    oof_preds = np.zeros((train.shape[0],))
    scores = np.zeros(len(SPLITS))
    start = time.time()
    for fold, (train_idx, valid_idx) in enumerate(SPLITS):
        X_train = train[columns].iloc[train_idx]
        X_valid = train[columns].iloc[valid_idx]
        y_train = target.iloc[train_idx]
        y_valid = target.iloc[valid_idx]
        model = clone(pipeline)
        model.fit(X_train, y_train)
        valid_preds = model.predict_proba(X_valid)[:, 1]
        test_preds += model.predict_proba(test[columns])[:, 1] / len(SPLITS)
        oof_preds[valid_idx] = valid_preds
        scores[fold] = roc_auc_score(y_valid, valid_preds)
    end = time.time()
    temp = np.sort(scores)
    stats = np.array([np.mean(temp[:3]), np.mean(scores), np.median(scores), np.mean(temp[-3:])])
    return (np.array([*scores, *stats]), test_preds)

cv_scores = dict()
cv_scores['Scheme'] = [*[f'Split {i}' for i in range(len(SPLITS))], *['Worst 3', 'Average', 'Median', 'Best 3']]

model = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(splitter='random', random_state=RANDOM_SEED), random_state=RANDOM_SEED)
pipeline = make_pipeline(RobustScaler(), model)
scores, preds = score_pipeline(pipeline)
cv_scores[f'AdaboostExtraTree'] = scores

model = BaggingClassifier(base_estimator=DecisionTreeClassifier(splitter='random', random_state=RANDOM_SEED), n_jobs=-1, random_state=RANDOM_SEED)
pipeline = make_pipeline(RobustScaler(), model)
scores, preds = score_pipeline(pipeline)
cv_scores[f'BaggingExtraTree'] = scores
code
105177221/cell_7
[ "text_plain_output_1.png" ]
train, test, columns = prepreprocessing(train, test)
code
105177221/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import time
import gc
import warnings

warnings.filterwarnings('ignore')

from sklearnex import patch_sklearn
patch_sklearn()

import sklearn
from sklearn.experimental import enable_iterative_imputer
from sklearn.base import clone, TransformerMixin, BaseEstimator
from sklearn.metrics import roc_auc_score
from sklearn.utils.extmath import softmax
from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler
from category_encoders import OneHotEncoder, WOEEncoder
from sklearn.impute import SimpleImputer, IterativeImputer, KNNImputer, MissingIndicator
from sklearn.pipeline import make_pipeline
from sklearn.compose import make_column_transformer, make_column_selector
from sklearn.preprocessing import FunctionTransformer
from sklearn.linear_model import LogisticRegression, PassiveAggressiveClassifier
from sklearn.linear_model import SGDClassifier, RidgeClassifier
from sklearn.linear_model import BayesianRidge, Ridge, Lasso, HuberRegressor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from xgboost import XGBClassifier, XGBRegressor, XGBRFClassifier
from lightgbm import LGBMClassifier, LGBMRegressor
from catboost import CatBoostClassifier
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
code
105177221/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
from category_encoders import OneHotEncoder, WOEEncoder
from sklearn.base import clone, TransformerMixin, BaseEstimator
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from sklearn.impute import SimpleImputer, IterativeImputer, KNNImputer, MissingIndicator
from sklearn.linear_model import BayesianRidge, Ridge, Lasso, HuberRegressor
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
import gc
import numpy as np
import pandas as pd
import time

RANDOM_SEED = 153

def prepreprocessing(df_train, df_test):
    data = pd.concat([df_train, df_test])
    data['m3_missing'] = data['measurement_3'].isnull().astype(np.int8)
    data['m5_missing'] = data['measurement_5'].isnull().astype(np.int8)
    data['area'] = data['attribute_2'] * data['attribute_3']
    feature = [f for f in df_test.columns if f.startswith('measurement') or f == 'loading']
    full_fill_dict = {}
    full_fill_dict['measurement_17'] = {
        'A': ['measurement_5', 'measurement_6', 'measurement_8'],
        'B': ['measurement_4', 'measurement_5', 'measurement_7'],
        'C': ['measurement_5', 'measurement_7', 'measurement_8', 'measurement_9'],
        'D': ['measurement_5', 'measurement_6', 'measurement_7', 'measurement_8'],
        'E': ['measurement_4', 'measurement_5', 'measurement_6', 'measurement_8'],
        'F': ['measurement_4', 'measurement_5', 'measurement_6', 'measurement_7'],
        'G': ['measurement_4', 'measurement_6', 'measurement_8', 'measurement_9'],
        'H': ['measurement_4', 'measurement_5', 'measurement_7', 'measurement_8', 'measurement_9'],
        'I': ['measurement_3', 'measurement_7', 'measurement_8']}
    col = [col for col in df_test.columns if 'measurement' not in col] + ['loading', 'm3_missing', 'm5_missing']
    a = []
    b = []
    for x in range(3, 17):
        corr = np.absolute(data.drop(col, axis=1).corr()[f'measurement_{x}']).sort_values(ascending=False)
        a.append(np.round(np.sum(corr[1:4]), 3))
        b.append(f'measurement_{x}')
    c = pd.DataFrame()
    c['Selected columns'] = b
    c['correlation total'] = a
    c = c.sort_values(by='correlation total', ascending=False).reset_index(drop=True)
    for i in range(10):
        measurement_col = 'measurement_' + c.iloc[i, 0][12:]
        fill_dict = {}
        for x in data.product_code.unique():
            corr = np.absolute(data[data.product_code == x].drop(col, axis=1).corr()[measurement_col]).sort_values(ascending=False)
            measurement_col_dic = {}
            measurement_col_dic[measurement_col] = corr[1:5].index.tolist()
            fill_dict[x] = measurement_col_dic[measurement_col]
        full_fill_dict[measurement_col] = fill_dict
    feature = [f for f in data.columns if f.startswith('measurement') or f == 'loading']
    nullValue_cols = [col for col in df_train.columns if df_train[col].isnull().sum() != 0]
    for code in data.product_code.unique():
        for measurement_col in list(full_fill_dict.keys()):
            tmp = data[data.product_code == code]
            column = full_fill_dict[measurement_col][code]
            tmp_train = tmp[column + [measurement_col]].dropna(how='any')
            tmp_test = tmp[(tmp[column].isnull().sum(axis=1) == 0) & tmp[measurement_col].isnull()]
            model = HuberRegressor(epsilon=1.9)
            model.fit(tmp_train[column], tmp_train[measurement_col])
            data.loc[(data.product_code == code) & (data[column].isnull().sum(axis=1) == 0) & data[measurement_col].isnull(), measurement_col] = model.predict(tmp_test[column])
        NA = data.loc[data['product_code'] == code, nullValue_cols].isnull().sum().sum()
        model1 = KNNImputer(n_neighbors=3)
        data.loc[data.product_code == code, feature] = model1.fit_transform(data.loc[data.product_code == code, feature])
    data['measurement_avg'] = data[[f'measurement_{i}' for i in range(3, 17)]].mean(axis=1)
    df_train = data.iloc[:df_train.shape[0], :]
    df_test = data.iloc[df_train.shape[0]:, :]
    features = ['loading', 'attribute_0', 'measurement_17', 'measurement_0', 'measurement_1', 'measurement_2', 'area', 'm3_missing', 'm5_missing', 'measurement_avg']
    encoder = WOEEncoder(cols=['attribute_0'])
    df_train = encoder.fit_transform(df_train, target)
    df_test = encoder.transform(df_test)
    df_train['loading'] = np.log1p(df_train['loading'])
    df_test['loading'] = np.log1p(df_test['loading'])
    return (df_train, df_test, features)

submission = pd.read_csv('../input/tabular-playground-series-aug-2022/sample_submission.csv')
train = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv', index_col='id')
target = train['failure'].copy()
gc.collect()

class ProductSplitter3v2:

    def split(self, X: pd.DataFrame, _y=None, _groups=None):
        indices = list(X.groupby('product_code').indices.values())
        for i in range(len(indices)):
            for j in range(i + 1, len(indices)):
                yield [np.concatenate([ix for k, ix in enumerate(indices) if k not in [i, j]]),
                       np.concatenate([ix for k, ix in enumerate(indices) if k in [i, j]])]

cv = ProductSplitter3v2()
SPLITS = list(cv.split(train))

def score_pipeline(pipeline):
    test_preds = np.zeros((test.shape[0],))
    oof_preds = np.zeros((train.shape[0],))
    scores = np.zeros(len(SPLITS))
    start = time.time()
    for fold, (train_idx, valid_idx) in enumerate(SPLITS):
        X_train = train[columns].iloc[train_idx]
        X_valid = train[columns].iloc[valid_idx]
        y_train = target.iloc[train_idx]
        y_valid = target.iloc[valid_idx]
        model = clone(pipeline)
        model.fit(X_train, y_train)
        valid_preds = model.predict_proba(X_valid)[:, 1]
        test_preds += model.predict_proba(test[columns])[:, 1] / len(SPLITS)
        oof_preds[valid_idx] = valid_preds
        scores[fold] = roc_auc_score(y_valid, valid_preds)
    end = time.time()
    temp = np.sort(scores)
    stats = np.array([np.mean(temp[:3]), np.mean(scores), np.median(scores), np.mean(temp[-3:])])
    return (np.array([*scores, *stats]), test_preds)

cv_scores = dict()
cv_scores['Scheme'] = [*[f'Split {i}' for i in range(len(SPLITS))], *['Worst 3', 'Average', 'Median', 'Best 3']]

model = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(splitter='random', random_state=RANDOM_SEED), random_state=RANDOM_SEED)
pipeline = make_pipeline(RobustScaler(), model)
scores, preds = score_pipeline(pipeline)
cv_scores[f'AdaboostExtraTree'] = scores
code
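The ProductSplitter3v2 scheme in the cells above holds out every pair of product codes exactly once. A minimal standalone sketch on hypothetical toy data (codes A-E are made up here) showing the resulting fold count:

import numpy as np
import pandas as pd

toy = pd.DataFrame({'product_code': list('AABBCCDDEE')})
indices = list(toy.groupby('product_code').indices.values())
# Every pair (i, j) of codes becomes the validation set of one fold.
folds = [(np.concatenate([ix for k, ix in enumerate(indices) if k not in (i, j)]),
          np.concatenate([ix for k, ix in enumerate(indices) if k in (i, j)]))
         for i in range(len(indices)) for j in range(i + 1, len(indices))]
print(len(folds))  # C(5, 2) = 10 train/validation splits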
74054242/cell_13
[ "text_html_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)

df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week['weekly_ret'] = np.log(df_week['close']).diff()
df_week.dropna(inplace=True)
df_week.weekly_ret.plot(kind='line', figsize=(12, 6))
code
74054242/cell_9
[ "image_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)

df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week.head()
code
74054242/cell_23
[ "text_plain_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb
import statsmodels.api as sm

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)

df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week['weekly_ret'] = np.log(df_week['close']).diff()
df_week.dropna(inplace=True)

udiff = df_week.drop(['close'], axis=1)
rolmean = udiff.rolling(20).mean()
rolstd = udiff.rolling(20).std()

plt.figure(figsize=(12, 6))
orig = plt.plot(udiff, color='blue', label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label='Rolling Std Deviation')
plt.title('Rolling Mean & Standard Deviation')
plt.legend(loc='best')
plt.show(block=False)

dftest = sm.tsa.adfuller(udiff.weekly_ret, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
for key, value in dftest[4].items():
    dfoutput['Critical Value ({0})'.format(key)] = value
dfoutput

from statsmodels.graphics.tsaplots import plot_acf

# the autocorrelation chart provides just the correlation at increasing lags
fig, ax = plt.subplots(figsize=(12, 5))
plot_acf(udiff.values, lags=10, ax=ax)
plt.show()

from statsmodels.graphics.tsaplots import plot_pacf

fig, ax = plt.subplots(figsize=(12, 5))
plot_pacf(udiff.values, lags=10, ax=ax)
plt.show()
code
74054242/cell_26
[ "image_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb
import statsmodels.api as sm

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)

df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week['weekly_ret'] = np.log(df_week['close']).diff()
df_week.dropna(inplace=True)

udiff = df_week.drop(['close'], axis=1)
rolmean = udiff.rolling(20).mean()
rolstd = udiff.rolling(20).std()

plt.figure(figsize=(12, 6))
orig = plt.plot(udiff, color='blue', label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label='Rolling Std Deviation')
plt.title('Rolling Mean & Standard Deviation')
plt.legend(loc='best')
plt.show(block=False)

dftest = sm.tsa.adfuller(udiff.weekly_ret, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
for key, value in dftest[4].items():
    dfoutput['Critical Value ({0})'.format(key)] = value
dfoutput

from statsmodels.graphics.tsaplots import plot_acf

# the autocorrelation chart provides just the correlation at increasing lags
fig, ax = plt.subplots(figsize=(12, 5))
plot_acf(udiff.values, lags=10, ax=ax)
plt.show()

from statsmodels.graphics.tsaplots import plot_pacf

fig, ax = plt.subplots(figsize=(12, 5))
plot_pacf(udiff.values, lags=10, ax=ax)
plt.show()

from statsmodels.tsa.arima_model import ARMA

ar1 = ARMA(tuple(udiff.values), (3, 1)).fit()
ar1.summary()
code
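Note that statsmodels.tsa.arima_model.ARMA, used in the cell above, was removed in statsmodels 0.13. A rough modern equivalent of the same ARMA(3, 1) fit, assuming the same udiff frame, is:

from statsmodels.tsa.arima.model import ARIMA

# ARMA(p, q) corresponds to ARIMA(p, 0, q) in the current statsmodels API.
ar1 = ARIMA(udiff.values, order=(3, 0, 1)).fit()
print(ar1.summary())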
74054242/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)

df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week['weekly_ret'] = np.log(df_week['close']).diff()
df_week.head()
code
74054242/cell_19
[ "text_html_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb
import statsmodels.api as sm

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)

df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week['weekly_ret'] = np.log(df_week['close']).diff()
df_week.dropna(inplace=True)

udiff = df_week.drop(['close'], axis=1)
rolmean = udiff.rolling(20).mean()
rolstd = udiff.rolling(20).std()

dftest = sm.tsa.adfuller(udiff.weekly_ret, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
for key, value in dftest[4].items():
    dfoutput['Critical Value ({0})'.format(key)] = value
dfoutput
code
74054242/cell_1
[ "text_plain_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb

print('The Pacific Time', time.strftime('%H:%M:%S'))

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74054242/cell_7
[ "image_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)
print(df.shape)
df.head()
code
74054242/cell_18
[ "image_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)

df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week['weekly_ret'] = np.log(df_week['close']).diff()
df_week.dropna(inplace=True)

udiff = df_week.drop(['close'], axis=1)
rolmean = udiff.rolling(20).mean()
rolstd = udiff.rolling(20).std()

plt.figure(figsize=(12, 6))
orig = plt.plot(udiff, color='blue', label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label='Rolling Std Deviation')
plt.title('Rolling Mean & Standard Deviation')
plt.legend(loc='best')
plt.show(block=False)
code
74054242/cell_28
[ "image_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb
import statsmodels.api as sm

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)

df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week['weekly_ret'] = np.log(df_week['close']).diff()
df_week.dropna(inplace=True)

udiff = df_week.drop(['close'], axis=1)
rolmean = udiff.rolling(20).mean()
rolstd = udiff.rolling(20).std()

plt.figure(figsize=(12, 6))
orig = plt.plot(udiff, color='blue', label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label='Rolling Std Deviation')
plt.title('Rolling Mean & Standard Deviation')
plt.legend(loc='best')
plt.show(block=False)

dftest = sm.tsa.adfuller(udiff.weekly_ret, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
for key, value in dftest[4].items():
    dfoutput['Critical Value ({0})'.format(key)] = value
dfoutput

from statsmodels.graphics.tsaplots import plot_acf

# the autocorrelation chart provides just the correlation at increasing lags
fig, ax = plt.subplots(figsize=(12, 5))
plot_acf(udiff.values, lags=10, ax=ax)
plt.show()

from statsmodels.graphics.tsaplots import plot_pacf

fig, ax = plt.subplots(figsize=(12, 5))
plot_pacf(udiff.values, lags=10, ax=ax)
plt.show()

from statsmodels.tsa.arima_model import ARMA

ar1 = ARMA(tuple(udiff.values), (3, 1)).fit()
ar1.summary()

plt.figure(figsize=(12, 8))
plt.plot(udiff.values, color='blue')
preds = ar1.fittedvalues
plt.plot(preds, color='red')
plt.show()
code
74054242/cell_14
[ "text_html_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)

df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week['weekly_ret'] = np.log(df_week['close']).diff()
df_week.dropna(inplace=True)

udiff = df_week.drop(['close'], axis=1)
udiff.head()
code
74054242/cell_22
[ "image_output_1.png" ]
import os
import time
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)

from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
pd.options.mode.chained_assignment = None

from math import radians, cos, sin, asin, sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from scipy import stats
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, VotingRegressor
from sklearn.metrics import mean_squared_log_error, mean_squared_error, jaccard_score, f1_score, roc_auc_score
import lightgbm as lgbm
import xgboost as xgb
import statsmodels.api as sm

df = pd.read_csv('/kaggle/input/aapl10y/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)

df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week['weekly_ret'] = np.log(df_week['close']).diff()
df_week.dropna(inplace=True)

udiff = df_week.drop(['close'], axis=1)
rolmean = udiff.rolling(20).mean()
rolstd = udiff.rolling(20).std()

plt.figure(figsize=(12, 6))
orig = plt.plot(udiff, color='blue', label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label='Rolling Std Deviation')
plt.title('Rolling Mean & Standard Deviation')
plt.legend(loc='best')
plt.show(block=False)

dftest = sm.tsa.adfuller(udiff.weekly_ret, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
for key, value in dftest[4].items():
    dfoutput['Critical Value ({0})'.format(key)] = value
dfoutput

from statsmodels.graphics.tsaplots import plot_acf

fig, ax = plt.subplots(figsize=(12, 5))
plot_acf(udiff.values, lags=10, ax=ax)
plt.show()
code
32068244/cell_13
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
tmp = data.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
X = data.drop(['Id', 'Species'], axis=1)
y = data['Species']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=5)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
code
32068244/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
tmp = data.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
g = sns.violinplot(y='Species', x='SepalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='SepalWidthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalWidthCm', data=data, inner='quartile')
plt.show()
code
32068244/cell_4
[ "image_output_1.png" ]
data.head()
code
32068244/cell_6
[ "text_plain_output_1.png" ]
data.describe()
code
32068244/cell_11
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
tmp = data.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
g = sns.violinplot(y='Species', x='SepalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='SepalWidthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalWidthCm', data=data, inner='quartile')
plt.show()
X = data.drop(['Id', 'Species'], axis=1)
y = data['Species']
k_range = list(range(1, 26))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X, y)
    y_pred = knn.predict(X)
    scores.append(metrics.accuracy_score(y, y_pred))
plt.plot(k_range, scores)
plt.xlabel('Value of k for KNN')
plt.ylabel('Accuracy Score')
plt.title('Accuracy Scores for Values of k of k-Nearest-Neighbors')
plt.show()
code
32068244/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068244/cell_7
[ "image_output_1.png" ]
data['Species'].value_counts()
code
32068244/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
tmp = data.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
code
32068244/cell_15
[ "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
tmp = data.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
g = sns.violinplot(y='Species', x='SepalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='SepalWidthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalWidthCm', data=data, inner='quartile')
plt.show()
X = data.drop(['Id', 'Species'], axis=1)
y = data['Species']
k_range = list(range(1, 26))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X, y)
    y_pred = knn.predict(X)
    scores.append(metrics.accuracy_score(y, y_pred))
logreg = LogisticRegression()
logreg.fit(X, y)
y_pred = logreg.predict(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=5)
k_range = list(range(1, 26))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(y_test, y_pred))
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
code
32068244/cell_16
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
tmp = data.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
g = sns.violinplot(y='Species', x='SepalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='SepalWidthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalWidthCm', data=data, inner='quartile')
plt.show()
X = data.drop(['Id', 'Species'], axis=1)
y = data['Species']
k_range = list(range(1, 26))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X, y)
    y_pred = knn.predict(X)
    scores.append(metrics.accuracy_score(y, y_pred))
logreg = LogisticRegression()
logreg.fit(X, y)
y_pred = logreg.predict(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=5)
k_range = list(range(1, 26))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(y_test, y_pred))
knn = KNeighborsClassifier(n_neighbors=12)
knn.fit(X, y)
knn.predict([[6, 3, 4, 2]])
code
32068244/cell_14
[ "image_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
tmp = data.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
g = sns.violinplot(y='Species', x='SepalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='SepalWidthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalWidthCm', data=data, inner='quartile')
plt.show()
X = data.drop(['Id', 'Species'], axis=1)
y = data['Species']
k_range = list(range(1, 26))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X, y)
    y_pred = knn.predict(X)
    scores.append(metrics.accuracy_score(y, y_pred))
logreg = LogisticRegression()
logreg.fit(X, y)
y_pred = logreg.predict(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=5)
k_range = list(range(1, 26))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(y_test, y_pred))
plt.plot(k_range, scores)
plt.xlabel('Value of k for KNN')
plt.ylabel('Accuracy Score')
plt.title('Accuracy Scores for Values of k of k-Nearest-Neighbors')
plt.show()
code
32068244/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
tmp = data.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
X = data.drop(['Id', 'Species'], axis=1)
y = data['Species']
print(X.shape)
print(y.shape)
code
32068244/cell_12
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
tmp = data.drop('Id', axis=1)
g = sns.pairplot(tmp, hue='Species', markers='+')
plt.show()
g = sns.violinplot(y='Species', x='SepalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='SepalWidthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalLengthCm', data=data, inner='quartile')
plt.show()
g = sns.violinplot(y='Species', x='PetalWidthCm', data=data, inner='quartile')
plt.show()
X = data.drop(['Id', 'Species'], axis=1)
y = data['Species']
k_range = list(range(1, 26))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X, y)
    y_pred = knn.predict(X)
    scores.append(metrics.accuracy_score(y, y_pred))
logreg = LogisticRegression()
logreg.fit(X, y)
y_pred = logreg.predict(X)
print(metrics.accuracy_score(y, y_pred))
code
32068244/cell_5
[ "text_plain_output_1.png" ]
data.info()
code
33112043/cell_21
[ "text_plain_output_1.png" ]
from keras import layers
from keras import losses
from keras import models
from keras import optimizers
from keras.utils import to_categorical
import pandas as pd
train_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
train_label = train_data['label']
train_data = train_data.drop('label', axis=1)
train_label_to_cat = to_categorical(train_label)
train_label_to_cat.shape
train_data.shape
train_data = train_data.values.reshape(-1, 28, 28, 1)
train_data.shape
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss=losses.categorical_crossentropy, metrics=['accuracy'])
model.fit(train_data, train_label_to_cat, epochs=60)
code
33112043/cell_9
[ "text_plain_output_1.png" ]
from keras.utils import to_categorical
import pandas as pd
train_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
train_label = train_data['label']
train_label_to_cat = to_categorical(train_label)
train_label_to_cat.shape
code
33112043/cell_19
[ "text_plain_output_1.png" ]
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
code
33112043/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import os
import numpy as np
import pandas as pd
from keras import optimizers
from keras import models
from keras import layers
from keras import losses
from keras.utils import to_categorical
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33112043/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
train_data.head(2)
code
33112043/cell_17
[ "text_plain_output_1.png" ]
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.summary()
code
33112043/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
train_data = train_data.drop('label', axis=1)
train_data.shape
train_data = train_data.values.reshape(-1, 28, 28, 1)
train_data.shape
code
33112043/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
train_data = train_data.drop('label', axis=1)
train_data.shape
code
33112043/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
test_data.head(2)
code
105210284/cell_33
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
games = pd.read_csv('../input/chess-games/games.csv')
games.columns
games.loc[:, 'first_move'] = games.moves.map(lambda x: x.split(' ')[0])
chosen_cols = ['winner', 'increment_code', 'white_rating', 'black_rating', 'opening_name', 'first_move']
increment_code_values = ['10+0', '15+0', '15+15']
rapid_games = games[games.increment_code.isin(increment_code_values)][chosen_cols]
rapid_games.set_index('opening_name', inplace=True)
appearances = rapid_games.groupby('opening_name').winner.count().rename('appearances')
rapid_games = rapid_games.merge(appearances, how='left', on='opening_name')
chosen_openings = rapid_games[rapid_games.appearances >= 20].index.unique()
chosen_games = rapid_games[rapid_games.appearances >= 20]
chosen_games
win_rate_all = rapid_games.winner.value_counts(normalize=True) * 100
fig, ax = plt.subplots(figsize=(10, 6))
patches, texts, pcts = ax.pie(x=win_rate_all, labels=win_rate_all.index, autopct='%.1f', wedgeprops={'linewidth': 3.0, 'edgecolor': 'white'}, startangle=90, counterclock=False)
for i, patch in enumerate(patches):
    texts[i].set_color(patch.get_facecolor())
plt.setp(pcts, color='white')
plt.setp(texts, fontweight=600)
ax.set_title('General win rate', fontsize=18)
plt.tight_layout()
plt.show()
code
105210284/cell_28
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
games = pd.read_csv('../input/chess-games/games.csv')
games.columns
games.loc[:, 'first_move'] = games.moves.map(lambda x: x.split(' ')[0])
chosen_cols = ['winner', 'increment_code', 'white_rating', 'black_rating', 'opening_name', 'first_move']
increment_code_values = ['10+0', '15+0', '15+15']
rapid_games = games[games.increment_code.isin(increment_code_values)][chosen_cols]
rapid_games.set_index('opening_name', inplace=True)
appearances = rapid_games.groupby('opening_name').winner.count().rename('appearances')
rapid_games = rapid_games.merge(appearances, how='left', on='opening_name')
plt.subplots(figsize=[12, 5])
sns.violinplot(data=appearances, x=appearances.index, color='cornflowerblue')
plt.title('Violin Plot of Openings by Appearances')
plt.ylabel('openings')
plt.subplots(figsize=[12, 5])
sns.boxenplot(data=appearances, x=appearances.index, color='cornflowerblue')
plt.title('Boxen plot of Openings by Appearances')
plt.ylabel('openings')
plt.subplots(figsize=[12, 5])
sns.stripplot(data=appearances, x=appearances.index, color='cornflowerblue')
plt.title('Strip plot of Openings by Appearances')
plt.ylabel('openings')
plt.show()
code
105210284/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
games = pd.read_csv('../input/chess-games/games.csv')
games.describe()
code
105210284/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
games = pd.read_csv('../input/chess-games/games.csv')
games.columns
code
105210284/cell_43
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
games = pd.read_csv('../input/chess-games/games.csv')
games.columns
games.loc[:, 'first_move'] = games.moves.map(lambda x: x.split(' ')[0])
chosen_cols = ['winner', 'increment_code', 'white_rating', 'black_rating', 'opening_name', 'first_move']
increment_code_values = ['10+0', '15+0', '15+15']
rapid_games = games[games.increment_code.isin(increment_code_values)][chosen_cols]
rapid_games.set_index('opening_name', inplace=True)
appearances = rapid_games.groupby('opening_name').winner.count().rename('appearances')
rapid_games = rapid_games.merge(appearances, how='left', on='opening_name')
chosen_openings = rapid_games[rapid_games.appearances >= 20].index.unique()
chosen_games = rapid_games[rapid_games.appearances >= 20]
chosen_games
win_rate_all = rapid_games.winner.value_counts(normalize=True) * 100
fig, ax = plt.subplots(figsize=(10, 6))
patches, texts, pcts = ax.pie(x=win_rate_all, labels=win_rate_all.index, autopct='%.1f', wedgeprops={'linewidth': 3.0, 'edgecolor': 'white'}, startangle=90, counterclock=False)
# Format the pie chart
for i, patch in enumerate(patches):
    texts[i].set_color(patch.get_facecolor())
plt.setp(pcts, color='white')
plt.setp(texts, fontweight=600)
ax.set_title('General win rate', fontsize=18)
plt.tight_layout()
plt.show()
opening_win_rate = chosen_games.groupby('opening_name').winner.value_counts(normalize=True) * 100
top_appearances = appearances.sort_values(ascending=False)[:10].to_frame()
top_appearances = top_appearances.merge(opening_win_rate.unstack(), how='inner', on='opening_name')
# Top appearances
fig, ax = plt.subplots(figsize=(15, 9))
ax2 = ax.twinx()
# Plot charts
sns.barplot(data=top_appearances, x=top_appearances.index, y='appearances', ax=ax, palette="Greys")
sns.lineplot(data=top_appearances, x=top_appearances.index, y='white', ax=ax2, label='white_win_rate', color='#e4e4a1')
sns.lineplot(data=top_appearances, x=top_appearances.index, y='black', ax=ax2, label='black_win_rate', color='#484848')
# Add title, labels
ax.set_title("Top 10 Most Played Openings at All Level", fontsize=20)
ax.set_xlabel("Opening", fontsize=14)
ax.set_ylabel("Appearances", fontsize=14)
ax2.set_ylabel("Win rate (%)", fontsize=14)
ax.set_xticklabels(labels=top_appearances.index, rotation=60, ha='right')  # rotate xticks
fig.tight_layout()
fig.savefig("most_played.png", bbox_inches="tight")  # save the barplot
opening_win_rate_white = opening_win_rate[:, 'white']
opening_win_rate_white.name = 'white_win_rate'
opening_win_rate_white = opening_win_rate_white.to_frame()
opening_win_rate_white.insert(loc=len(opening_win_rate_white.columns), column='appearances', value=chosen_games.index.value_counts()[opening_win_rate_white.index].values)
plt.subplots(figsize=(12, 4))
sns.scatterplot(data=opening_win_rate_white, x='white_win_rate', y='appearances', hue='white_win_rate', size='appearances', palette='crest', legend=False)
plt.title('Appearances by White Win Rate at All Level', fontsize=14)
plt.show()
code
105210284/cell_24
[ "text_html_output_1.png" ]
import pandas as pd
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
games = pd.read_csv('../input/chess-games/games.csv')
games.columns
games.loc[:, 'first_move'] = games.moves.map(lambda x: x.split(' ')[0])
chosen_cols = ['winner', 'increment_code', 'white_rating', 'black_rating', 'opening_name', 'first_move']
increment_code_values = ['10+0', '15+0', '15+15']
rapid_games = games[games.increment_code.isin(increment_code_values)][chosen_cols]
rapid_games.set_index('opening_name', inplace=True)
appearances = rapid_games.groupby('opening_name').winner.count().rename('appearances')
rapid_games = rapid_games.merge(appearances, how='left', on='opening_name')
chosen_openings = rapid_games[rapid_games.appearances >= 20].index.unique()
chosen_games = rapid_games[rapid_games.appearances >= 20]
chosen_games
code
105210284/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
games = pd.read_csv('../input/chess-games/games.csv')
games.head()
code
105210284/cell_22
[ "text_html_output_1.png" ]
import pandas as pd
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
games = pd.read_csv('../input/chess-games/games.csv')
games.columns
games.loc[:, 'first_move'] = games.moves.map(lambda x: x.split(' ')[0])
chosen_cols = ['winner', 'increment_code', 'white_rating', 'black_rating', 'opening_name', 'first_move']
increment_code_values = ['10+0', '15+0', '15+15']
rapid_games = games[games.increment_code.isin(increment_code_values)][chosen_cols]
rapid_games.set_index('opening_name', inplace=True)
appearances = rapid_games.groupby('opening_name').winner.count().rename('appearances')
rapid_games = rapid_games.merge(appearances, how='left', on='opening_name')
rapid_games
code
105210284/cell_37
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
games = pd.read_csv('../input/chess-games/games.csv')
games.columns
games.loc[:, 'first_move'] = games.moves.map(lambda x: x.split(' ')[0])
chosen_cols = ['winner', 'increment_code', 'white_rating', 'black_rating', 'opening_name', 'first_move']
increment_code_values = ['10+0', '15+0', '15+15']
rapid_games = games[games.increment_code.isin(increment_code_values)][chosen_cols]
rapid_games.set_index('opening_name', inplace=True)
appearances = rapid_games.groupby('opening_name').winner.count().rename('appearances')
rapid_games = rapid_games.merge(appearances, how='left', on='opening_name')
chosen_openings = rapid_games[rapid_games.appearances >= 20].index.unique()
chosen_games = rapid_games[rapid_games.appearances >= 20]
chosen_games
win_rate_all = rapid_games.winner.value_counts(normalize=True) * 100
fig, ax = plt.subplots(figsize=(10, 6))
patches, texts, pcts = ax.pie(x=win_rate_all, labels=win_rate_all.index, autopct='%.1f', wedgeprops={'linewidth': 3.0, 'edgecolor': 'white'}, startangle=90, counterclock=False)
# Format the pie chart
for i, patch in enumerate(patches):
    texts[i].set_color(patch.get_facecolor())
plt.setp(pcts, color='white')
plt.setp(texts, fontweight=600)
ax.set_title('General win rate', fontsize=18)
plt.tight_layout()
plt.show()
opening_win_rate = chosen_games.groupby('opening_name').winner.value_counts(normalize=True) * 100
top_appearances = appearances.sort_values(ascending=False)[:10].to_frame()
top_appearances = top_appearances.merge(opening_win_rate.unstack(), how='inner', on='opening_name')
fig, ax = plt.subplots(figsize=(15, 9))
ax2 = ax.twinx()
sns.barplot(data=top_appearances, x=top_appearances.index, y='appearances', ax=ax, palette='Greys')
sns.lineplot(data=top_appearances, x=top_appearances.index, y='white', ax=ax2, label='white_win_rate', color='#e4e4a1')
sns.lineplot(data=top_appearances, x=top_appearances.index, y='black', ax=ax2, label='black_win_rate', color='#484848')
ax.set_title('Top 10 Most Played Openings at All Level', fontsize=20)
ax.set_xlabel('Opening', fontsize=14)
ax.set_ylabel('Appearances', fontsize=14)
ax2.set_ylabel('Win rate (%)', fontsize=14)
ax.set_xticklabels(labels=top_appearances.index, rotation=60, ha='right')
fig.tight_layout()
fig.savefig('most_played.png', bbox_inches='tight')
code
33111160/cell_4
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import numpy as np
import pandas as pd
import glob
import os

def list_columns_in_folder(file_path):
    """List out every column for every file in a folder"""
    for dirname, _, filenames in os.walk(file_path):
        for filename in filenames:
            df = pd.read_csv(os.path.join(dirname, filename))
            columns = df.columns.values[1:]
            print(filename, columns)  # assumed output step: emit the columns so the function actually lists them

list_columns_in_folder('/kaggle/input/aipowered-literature-review-csvs/kaggle/working/Key Scientific Questions/')
code
33111160/cell_3
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import numpy as np
import pandas as pd
import glob
import os

def list_columns_in_folder(file_path):
    """List out every column for every file in a folder"""
    for dirname, _, filenames in os.walk(file_path):
        for filename in filenames:
            df = pd.read_csv(os.path.join(dirname, filename))
            columns = df.columns.values[1:]
            print(filename, columns)  # assumed output step: emit the columns so the function actually lists them

list_columns_in_folder('/kaggle/input/aipowered-literature-review-csvs/kaggle/working/Risk Factors/')
code
329777/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/StateNames.csv')
df2 = pd.read_csv('../input/NationalNames.csv')
df[df['Name'] == 'Mary']
code
329777/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
import seaborn as sns
import matplotlib.pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
17132170/cell_9
[ "text_plain_output_1.png" ]
from clustergrammer2 import net
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ccle.txt/CCLE.txt', index_col=0)
from ast import literal_eval as make_tuple
cols = df.columns.tolist()
new_cols = [make_tuple(x) for x in cols]
df.columns = new_cols
df.shape
net.load_df(df.round(2))
net.filter_N_top(inst_rc='row', N_top=1000, rank_type='var')
net.widget()
code
17132170/cell_6
[ "text_plain_output_1.png" ]
from clustergrammer2 import net
show_widget = False
from clustergrammer2 import net
if show_widget == False:
    print('\n-----------------------------------------------------')
    print('>>> <<<')
    print('>>> Please set show_widget to True to see widgets <<<')
    print('>>> <<<')
    print('-----------------------------------------------------\n')
    delattr(net, 'widget_class')
code
17132170/cell_2
[ "text_plain_output_1.png" ]
from IPython.display import HTML
import warnings
from IPython.display import HTML
import warnings
warnings.filterwarnings('ignore')
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/9vqLO6McFwQ?rel=0&amp;controls=0&amp;showinfo=0" frameborder="0" allowfullscreen></iframe>')
code
17132170/cell_11
[ "text_html_output_1.png" ]
from clustergrammer2 import net
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ccle.txt/CCLE.txt', index_col=0)
from ast import literal_eval as make_tuple
cols = df.columns.tolist()
new_cols = [make_tuple(x) for x in cols]
df.columns = new_cols
df.shape
net.load_df(df.round(2))
net.filter_N_top(inst_rc='row', N_top=1000, rank_type='var')
net.widget()
net.load_df(df)
net.filter_N_top(inst_rc='row', N_top=1000, rank_type='var')
net.normalize(axis='row', norm_type='zscore')
df = net.export_df().round(2)
net.load_df(df)
net.widget()
code
17132170/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ccle.txt/CCLE.txt', index_col=0)
from ast import literal_eval as make_tuple
cols = df.columns.tolist()
new_cols = [make_tuple(x) for x in cols]
df.columns = new_cols
df.shape
code
17132170/cell_3
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
90148768/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
from ast import increment_lineno
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
dataset = np.loadtxt('../input/ex1data1/ex1data1.txt', delimiter=',')
X = dataset[:, 0]
Y = dataset[:, 1]
m = Y.size
print(m)
code
90148768/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
from ast import increment_lineno
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
dataset = np.loadtxt('../input/ex1data1/ex1data1.txt', delimiter=',')
X = dataset[:, 0]
Y = dataset[:, 1]
m = Y.size
X = dataset[:, 0]
Y = dataset[:, 1]
plt.scatter(X, Y, marker='x', color='red')
plt.ylabel('profit in $10,000s')
plt.xlabel('population of city in 10,000s')
code
18149936/cell_13
[ "image_output_1.png" ]
from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df['year'] = [d.year for d in df.date]
df['month'] = [d.month for d in df.date]
years = df['year'].unique()
np.random.seed(100)
mycolors = np.random.choice(list(mpl.colors.XKCD_COLORS.keys()), len(years.tolist()), replace=False)
for i, y in enumerate(years):
    if i > 0:
        plt.text(df.loc[df.year == y, :].shape[0] - 0.9, df.loc[df.year == y, 'value'][-1:].values[0], y, fontsize=12, color=mycolors[i])
plt.gca().set(xlim=(-0.3, 11), ylim=(2, 30), ylabel='$Drug Sales$', xlabel='$Month$')
plt.yticks(fontsize=12, alpha=0.7)
# Draw Plot
# plt.subplot(nrows, ncols)
fig, axes = plt.subplots(1, 2, figsize=(20, 7), dpi=80)
sns.boxplot(x='year', y='value', data=df, ax=axes[0])
sns.boxplot(x='month', y='value', data=df.loc[~df.year.isin([1991, 2008]), :])
# Set Title
axes[0].set_title('Year-wise Box Plot\n(The Trend)', fontsize=18)
axes[1].set_title('Month-wise Box Plot\n(The Seasonality)', fontsize=18)
plt.show()
fig, axes = plt.subplots(1, 3, figsize=(20, 4), dpi=100)
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/guinearice.csv', parse_dates=['date'], index_col='date').plot(title='Trend Only', legend=False, ax=axes[0])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/sunspotarea.csv', parse_dates=['date'], index_col='date').plot(title='Seasonality Only', legend=False, ax=axes[1])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/AirPassengers.csv', parse_dates=['date'], index_col='date').plot(title='Trend and Seasonality', legend=False, ax=axes[2])
from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
result_add = seasonal_decompose(df['value'], model='additive', extrapolate_trend='freq')
df_reconstructed = pd.concat([result_mul.seasonal, result_mul.trend, result_mul.resid, result_mul.observed], axis=1)
df_reconstructed.columns = ['seas', 'trend', 'resid', 'actual_values']
df_reconstructed.head()
code
18149936/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df['year'] = [d.year for d in df.date]
df['month'] = [d.month for d in df.date]
years = df['year'].unique()
np.random.seed(100)
mycolors = np.random.choice(list(mpl.colors.XKCD_COLORS.keys()), len(years.tolist()), replace=False)
for i, y in enumerate(years):
    if i > 0:
        plt.text(df.loc[df.year == y, :].shape[0] - 0.9, df.loc[df.year == y, 'value'][-1:].values[0], y, fontsize=12, color=mycolors[i])
plt.gca().set(xlim=(-0.3, 11), ylim=(2, 30), ylabel='$Drug Sales$', xlabel='$Month$')
plt.yticks(fontsize=12, alpha=0.7)
# Draw Plot
# plt.subplot(nrows, ncols)
fig, axes = plt.subplots(1, 2, figsize=(20, 7), dpi=80)
sns.boxplot(x='year', y='value', data=df, ax=axes[0])
sns.boxplot(x='month', y='value', data=df.loc[~df.year.isin([1991, 2008]), :])
# Set Title
axes[0].set_title('Year-wise Box Plot\n(The Trend)', fontsize=18)
axes[1].set_title('Month-wise Box Plot\n(The Seasonality)', fontsize=18)
plt.show()
fig, axes = plt.subplots(1, 3, figsize=(20, 4), dpi=100)
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/guinearice.csv', parse_dates=['date'], index_col='date').plot(title='Trend Only', legend=False, ax=axes[0])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/sunspotarea.csv', parse_dates=['date'], index_col='date').plot(title='Seasonality Only', legend=False, ax=axes[1])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/AirPassengers.csv', parse_dates=['date'], index_col='date').plot(title='Trend and Seasonality', legend=False, ax=axes[2])
code
18149936/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df.head()
code
18149936/cell_20
[ "text_html_output_1.png" ]
from pandas.plotting import autocorrelation_plot
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller, kpss
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df['year'] = [d.year for d in df.date]
df['month'] = [d.month for d in df.date]
years = df['year'].unique()
np.random.seed(100)
mycolors = np.random.choice(list(mpl.colors.XKCD_COLORS.keys()), len(years.tolist()), replace=False)
for i, y in enumerate(years):
    if i > 0:
        plt.text(df.loc[df.year == y, :].shape[0] - 0.9, df.loc[df.year == y, 'value'][-1:].values[0], y, fontsize=12, color=mycolors[i])
plt.gca().set(xlim=(-0.3, 11), ylim=(2, 30), ylabel='$Drug Sales$', xlabel='$Month$')
plt.yticks(fontsize=12, alpha=0.7)
# Draw Plot
# plt.subplot(nrows, ncols)
fig, axes = plt.subplots(1, 2, figsize=(20, 7), dpi=80)
sns.boxplot(x='year', y='value', data=df, ax=axes[0])
sns.boxplot(x='month', y='value', data=df.loc[~df.year.isin([1991, 2008]), :])
# Set Title
axes[0].set_title('Year-wise Box Plot\n(The Trend)', fontsize=18)
axes[1].set_title('Month-wise Box Plot\n(The Seasonality)', fontsize=18)
plt.show()
fig, axes = plt.subplots(1, 3, figsize=(20, 4), dpi=100)
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/guinearice.csv', parse_dates=['date'], index_col='date').plot(title='Trend Only', legend=False, ax=axes[0])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/sunspotarea.csv', parse_dates=['date'], index_col='date').plot(title='Seasonality Only', legend=False, ax=axes[1])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/AirPassengers.csv', parse_dates=['date'], index_col='date').plot(title='Trend and Seasonality', legend=False, ax=axes[2])
from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
result_add = seasonal_decompose(df['value'], model='additive', extrapolate_trend='freq')
df_reconstructed = pd.concat([result_mul.seasonal, result_mul.trend, result_mul.resid, result_mul.observed], axis=1)
df_reconstructed.columns = ['seas', 'trend', 'resid', 'actual_values']
from statsmodels.tsa.stattools import adfuller, kpss
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
result = adfuller(df.value.values, autolag='AIC')
result = kpss(df.value.values, regression='c')
from statsmodels.tsa.seasonal import seasonal_decompose
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
detrended = df.value.values - result_mul.trend
from pandas.plotting import autocorrelation_plot
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv')
plt.rcParams.update({'figure.figsize': (9, 5), 'figure.dpi': 120})
autocorrelation_plot(df.value.tolist())
code
18149936/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df['year'] = [d.year for d in df.date]
df['month'] = [d.month for d in df.date]
years = df['year'].unique()
np.random.seed(100)
mycolors = np.random.choice(list(mpl.colors.XKCD_COLORS.keys()), len(years.tolist()), replace=False)
for i, y in enumerate(years):
    if i > 0:
        plt.text(df.loc[df.year == y, :].shape[0] - 0.9, df.loc[df.year == y, 'value'][-1:].values[0], y, fontsize=12, color=mycolors[i])
plt.gca().set(xlim=(-0.3, 11), ylim=(2, 30), ylabel='$Drug Sales$', xlabel='$Month$')
plt.yticks(fontsize=12, alpha=0.7)
fig, axes = plt.subplots(1, 2, figsize=(20, 7), dpi=80)
sns.boxplot(x='year', y='value', data=df, ax=axes[0])
sns.boxplot(x='month', y='value', data=df.loc[~df.year.isin([1991, 2008]), :])
axes[0].set_title('Year-wise Box Plot\n(The Trend)', fontsize=18)
axes[1].set_title('Month-wise Box Plot\n(The Seasonality)', fontsize=18)
plt.show()
code
18149936/cell_11
[ "text_html_output_1.png" ]
from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df['year'] = [d.year for d in df.date]
df['month'] = [d.month for d in df.date]
years = df['year'].unique()
np.random.seed(100)
mycolors = np.random.choice(list(mpl.colors.XKCD_COLORS.keys()), len(years.tolist()), replace=False)
for i, y in enumerate(years):
    if i > 0:
        plt.text(df.loc[df.year == y, :].shape[0] - 0.9, df.loc[df.year == y, 'value'][-1:].values[0], y, fontsize=12, color=mycolors[i])
plt.gca().set(xlim=(-0.3, 11), ylim=(2, 30), ylabel='$Drug Sales$', xlabel='$Month$')
plt.yticks(fontsize=12, alpha=0.7)
# Draw Plot
# plt.subplot(nrows, ncols)
fig, axes = plt.subplots(1, 2, figsize=(20, 7), dpi=80)
sns.boxplot(x='year', y='value', data=df, ax=axes[0])
sns.boxplot(x='month', y='value', data=df.loc[~df.year.isin([1991, 2008]), :])
# Set Title
axes[0].set_title('Year-wise Box Plot\n(The Trend)', fontsize=18)
axes[1].set_title('Month-wise Box Plot\n(The Seasonality)', fontsize=18)
plt.show()
fig, axes = plt.subplots(1, 3, figsize=(20, 4), dpi=100)
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/guinearice.csv', parse_dates=['date'], index_col='date').plot(title='Trend Only', legend=False, ax=axes[0])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/sunspotarea.csv', parse_dates=['date'], index_col='date').plot(title='Seasonality Only', legend=False, ax=axes[1])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/AirPassengers.csv', parse_dates=['date'], index_col='date').plot(title='Trend and Seasonality', legend=False, ax=axes[2])
from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
result_add = seasonal_decompose(df['value'], model='additive', extrapolate_trend='freq')
result_mul.plot().suptitle('Multiplicative Decompose', fontsize=22)
result_add.plot().suptitle('Additive Decompose', fontsize=22)
plt.show()
code
18149936/cell_18
[ "image_output_2.png", "image_output_1.png" ]
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller, kpss
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df['year'] = [d.year for d in df.date]
df['month'] = [d.month for d in df.date]
years = df['year'].unique()
np.random.seed(100)
mycolors = np.random.choice(list(mpl.colors.XKCD_COLORS.keys()), len(years.tolist()), replace=False)
for i, y in enumerate(years):
    if i > 0:
        plt.text(df.loc[df.year == y, :].shape[0] - 0.9, df.loc[df.year == y, 'value'][-1:].values[0], y, fontsize=12, color=mycolors[i])
plt.gca().set(xlim=(-0.3, 11), ylim=(2, 30), ylabel='$Drug Sales$', xlabel='$Month$')
plt.yticks(fontsize=12, alpha=0.7)
# Draw Plot
# plt.subplot(nrows, ncols)
fig, axes = plt.subplots(1, 2, figsize=(20, 7), dpi=80)
sns.boxplot(x='year', y='value', data=df, ax=axes[0])
sns.boxplot(x='month', y='value', data=df.loc[~df.year.isin([1991, 2008]), :])
# Set Title
axes[0].set_title('Year-wise Box Plot\n(The Trend)', fontsize=18)
axes[1].set_title('Month-wise Box Plot\n(The Seasonality)', fontsize=18)
plt.show()
fig, axes = plt.subplots(1, 3, figsize=(20, 4), dpi=100)
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/guinearice.csv', parse_dates=['date'], index_col='date').plot(title='Trend Only', legend=False, ax=axes[0])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/sunspotarea.csv', parse_dates=['date'], index_col='date').plot(title='Seasonality Only', legend=False, ax=axes[1])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/AirPassengers.csv', parse_dates=['date'], index_col='date').plot(title='Trend and Seasonality', legend=False, ax=axes[2])
from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
result_add = seasonal_decompose(df['value'], model='additive', extrapolate_trend='freq')
df_reconstructed = pd.concat([result_mul.seasonal, result_mul.trend, result_mul.resid, result_mul.observed], axis=1)
df_reconstructed.columns = ['seas', 'trend', 'resid', 'actual_values']
from statsmodels.tsa.stattools import adfuller, kpss
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
result = adfuller(df.value.values, autolag='AIC')
result = kpss(df.value.values, regression='c')
from statsmodels.tsa.seasonal import seasonal_decompose
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
detrended = df.value.values - result_mul.trend
print(df.value.values, result_mul.trend)
plt.plot(detrended)
plt.title('Drug Sales detrended by subtracting the trend component', fontsize=16)
code
18149936/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller, kpss
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df['year'] = [d.year for d in df.date]
df['month'] = [d.month for d in df.date]
years = df['year'].unique()
np.random.seed(100)
mycolors = np.random.choice(list(mpl.colors.XKCD_COLORS.keys()), len(years.tolist()), replace=False)
for i, y in enumerate(years):
    if i > 0:
        plt.text(df.loc[df.year == y, :].shape[0] - 0.9, df.loc[df.year == y, 'value'][-1:].values[0], y, fontsize=12, color=mycolors[i])
plt.gca().set(xlim=(-0.3, 11), ylim=(2, 30), ylabel='$Drug Sales$', xlabel='$Month$')
plt.yticks(fontsize=12, alpha=0.7)
# Draw Plot
# plt.subplot(nrows, ncols)
fig, axes = plt.subplots(1, 2, figsize=(20, 7), dpi=80)
sns.boxplot(x='year', y='value', data=df, ax=axes[0])
sns.boxplot(x='month', y='value', data=df.loc[~df.year.isin([1991, 2008]), :])
# Set Title
axes[0].set_title('Year-wise Box Plot\n(The Trend)', fontsize=18)
axes[1].set_title('Month-wise Box Plot\n(The Seasonality)', fontsize=18)
plt.show()
fig, axes = plt.subplots(1, 3, figsize=(20, 4), dpi=100)
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/guinearice.csv', parse_dates=['date'], index_col='date').plot(title='Trend Only', legend=False, ax=axes[0])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/sunspotarea.csv', parse_dates=['date'], index_col='date').plot(title='Seasonality Only', legend=False, ax=axes[1])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/AirPassengers.csv', parse_dates=['date'], index_col='date').plot(title='Trend and Seasonality', legend=False, ax=axes[2])
from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
result_add = seasonal_decompose(df['value'], model='additive', extrapolate_trend='freq')
df_reconstructed = pd.concat([result_mul.seasonal, result_mul.trend, result_mul.resid, result_mul.observed], axis=1)
df_reconstructed.columns = ['seas', 'trend', 'resid', 'actual_values']
from statsmodels.tsa.stattools import adfuller, kpss
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
result = adfuller(df.value.values, autolag='AIC')
print(f'ADF Statistic: {result[0]}')
print(f'p-value: {result[1]}')
for key, value in result[4].items():
    print('Critical Values:')
    print(f' {key}, {value}')
result = kpss(df.value.values, regression='c')
print('\nKPSS Statistic: %f' % result[0])
print('p-value: %f' % result[1])
for key, value in result[3].items():
    print('Critical Values:')
    print(f' {key}, {value}')
code
18149936/cell_14
[ "image_output_1.png" ]
from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df['year'] = [d.year for d in df.date]
df['month'] = [d.month for d in df.date]
years = df['year'].unique()
np.random.seed(100)
mycolors = np.random.choice(list(mpl.colors.XKCD_COLORS.keys()), len(years.tolist()), replace=False)
for i, y in enumerate(years):
    if i > 0:
        plt.text(df.loc[df.year == y, :].shape[0] - 0.9, df.loc[df.year == y, 'value'][-1:].values[0], y, fontsize=12, color=mycolors[i])
plt.gca().set(xlim=(-0.3, 11), ylim=(2, 30), ylabel='$Drug Sales$', xlabel='$Month$')
plt.yticks(fontsize=12, alpha=0.7)
# Draw Plot
# plt.subplot(nrows, ncols)
fig, axes = plt.subplots(1, 2, figsize=(20, 7), dpi=80)
sns.boxplot(x='year', y='value', data=df, ax=axes[0])
sns.boxplot(x='month', y='value', data=df.loc[~df.year.isin([1991, 2008]), :])
# Set Title
axes[0].set_title('Year-wise Box Plot\n(The Trend)', fontsize=18)
axes[1].set_title('Month-wise Box Plot\n(The Seasonality)', fontsize=18)
plt.show()
fig, axes = plt.subplots(1, 3, figsize=(20, 4), dpi=100)
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/guinearice.csv', parse_dates=['date'], index_col='date').plot(title='Trend Only', legend=False, ax=axes[0])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/sunspotarea.csv', parse_dates=['date'], index_col='date').plot(title='Seasonality Only', legend=False, ax=axes[1])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/AirPassengers.csv', parse_dates=['date'], index_col='date').plot(title='Trend and Seasonality', legend=False, ax=axes[2])
from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
result_add = seasonal_decompose(df['value'], model='additive', extrapolate_trend='freq')
df_reconstructed = pd.concat([result_mul.seasonal, result_mul.trend, result_mul.resid, result_mul.observed], axis=1)
df_reconstructed.columns = ['seas', 'trend', 'resid', 'actual_values']
df_reconstructed['seas'].iloc[0] * df_reconstructed['trend'].iloc[0] * df_reconstructed['resid'].iloc[0]
code
18149936/cell_22
[ "text_plain_output_1.png" ]
from pandas.plotting import autocorrelation_plot
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller, kpss
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df['year'] = [d.year for d in df.date]
df['month'] = [d.month for d in df.date]
years = df['year'].unique()
np.random.seed(100)
mycolors = np.random.choice(list(mpl.colors.XKCD_COLORS.keys()), len(years.tolist()), replace=False)
for i, y in enumerate(years):
    if i > 0:
        plt.text(df.loc[df.year == y, :].shape[0] - 0.9, df.loc[df.year == y, 'value'][-1:].values[0], y, fontsize=12, color=mycolors[i])
plt.gca().set(xlim=(-0.3, 11), ylim=(2, 30), ylabel='$Drug Sales$', xlabel='$Month$')
plt.yticks(fontsize=12, alpha=0.7)
# Draw Plot
# plt.subplot(nrows, ncols)
fig, axes = plt.subplots(1, 2, figsize=(20, 7), dpi=80)
sns.boxplot(x='year', y='value', data=df, ax=axes[0])
sns.boxplot(x='month', y='value', data=df.loc[~df.year.isin([1991, 2008]), :])
# Set Title
axes[0].set_title('Year-wise Box Plot\n(The Trend)', fontsize=18)
axes[1].set_title('Month-wise Box Plot\n(The Seasonality)', fontsize=18)
plt.show()
fig, axes = plt.subplots(1, 3, figsize=(20, 4), dpi=100)
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/guinearice.csv', parse_dates=['date'], index_col='date').plot(title='Trend Only', legend=False, ax=axes[0])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/sunspotarea.csv', parse_dates=['date'], index_col='date').plot(title='Seasonality Only', legend=False, ax=axes[1])
pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/AirPassengers.csv', parse_dates=['date'], index_col='date').plot(title='Trend and Seasonality', legend=False, ax=axes[2])
from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
result_add = seasonal_decompose(df['value'], model='additive', extrapolate_trend='freq')
df_reconstructed = pd.concat([result_mul.seasonal, result_mul.trend, result_mul.resid, result_mul.observed], axis=1)
df_reconstructed.columns = ['seas', 'trend', 'resid', 'actual_values']
from statsmodels.tsa.stattools import adfuller, kpss
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
result = adfuller(df.value.values, autolag='AIC')
result = kpss(df.value.values, regression='c')
from statsmodels.tsa.seasonal import seasonal_decompose
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
detrended = df.value.values - result_mul.trend
from pandas.plotting import autocorrelation_plot
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv')
plt.rcParams.update({'figure.figsize': (9, 5), 'figure.dpi': 120})
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'], index_col='date')
result_mul = seasonal_decompose(df['value'], model='multiplicative', extrapolate_trend='freq')
deseasonalized = df.value.values / result_mul.seasonal
plt.plot(deseasonalized)
plt.title('Drug Sales Deseasonalized', fontsize=16)
plt.plot()
code
18149936/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/a10.csv', parse_dates=['date'])
df['year'] = [d.year for d in df.date]
df['month'] = [d.month for d in df.date]
years = df['year'].unique()
np.random.seed(100)
mycolors = np.random.choice(list(mpl.colors.XKCD_COLORS.keys()), len(years.tolist()), replace=False)
plt.figure(figsize=(16, 12), dpi=80)
for i, y in enumerate(years):
    if i > 0:
        plt.plot('month', 'value', data=df.loc[df.year == y, :], color=mycolors[i], label=y)
        plt.text(df.loc[df.year == y, :].shape[0] - 0.9, df.loc[df.year == y, 'value'][-1:].values[0], y, fontsize=12, color=mycolors[i])
plt.gca().set(xlim=(-0.3, 11), ylim=(2, 30), ylabel='$Drug Sales$', xlabel='$Month$')
plt.yticks(fontsize=12, alpha=0.7)
plt.title('Seasonal Plot of Drug Sales Time Series', fontsize=20)
plt.show()
code
16147938/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.shape
df.isna().sum()
df.dtypes
# convert_objects was removed from pandas; pd.to_numeric is the supported equivalent
df['TotalCharges'] = pd.to_numeric(df.TotalCharges, errors='coerce')
code
16147938/cell_9
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.shape
df.isna().sum()
code
16147938/cell_4
[ "image_output_1.png" ]
import os
import os
print(os.listdir('../input/churn-prediction'))
code
16147938/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.ticker as mtick
import pandas as pd
df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.shape
df.isna().sum()
df.dtypes
# convert_objects was removed from pandas; pd.to_numeric is the supported equivalent
df['TotalCharges'] = pd.to_numeric(df.TotalCharges, errors='coerce')
df['tenure_range'] = pd.cut(df.tenure, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75], right=True)
df.dropna(inplace=True)
df = df.drop('tenure', axis=1)
gender_dis = (df['gender'].value_counts() * 100 / len(df)).plot(kind='bar', stacked=True)
gender_dis.set_ylabel('% Customers')
gender_dis.set_xlabel('Gender')
gender_dis.yaxis.set_major_formatter(mtick.PercentFormatter())
totals = []
for i in gender_dis.patches:
    totals.append(i.get_width())
total = sum(totals)
for i in gender_dis.patches:
    gender_dis.text(i.get_x() + 0.15, i.get_height() + 3.9, str(round(i.get_height() / total, 1)) + '%', fontsize=12, color='black')
code
16147938/cell_6
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.head()
code
16147938/cell_29
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.ticker as mtick
import pandas as pd
df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.shape
df.isna().sum()
df.dtypes
# convert_objects was removed from pandas; pd.to_numeric is the supported equivalent
df['TotalCharges'] = pd.to_numeric(df.TotalCharges, errors='coerce')
df['tenure_range'] = pd.cut(df.tenure, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75], right=True)
df.dropna(inplace=True)
df = df.drop('tenure', axis=1)
gender_dis = (df['gender'].value_counts()*100/len(df)).plot(kind='bar', stacked=True)
gender_dis.set_ylabel('% Customers')
gender_dis.set_xlabel('Gender')
gender_dis.yaxis.set_major_formatter(mtick.PercentFormatter())
totals = []
for i in gender_dis.patches:
    totals.append(i.get_width())
total = sum(totals)
# to print the values on top of the bars
for i in gender_dis.patches:
    gender_dis.text(i.get_x()+.15, i.get_height()+3.9, str(round((i.get_height()/total), 1))+'%', fontsize=12, color='black')
sc_dis = (df['SeniorCitizen'].value_counts()*100/len(df)).plot(kind='bar', stacked=True)
sc_dis.set_ylabel('% Customers')
sc_dis.set_xlabel('Senior Citizen')
sc_dis.yaxis.set_major_formatter(mtick.PercentFormatter())
totals = []
for i in sc_dis.patches:
    totals.append(i.get_width())
total = sum(totals)
for i in sc_dis.patches:
    sc_dis.text(i.get_x()+.15, i.get_height()+5, str(round((i.get_height()/total), 1))+'%', fontsize=12, color='black')
sc_dependents = df.groupby(['SeniorCitizen', 'Dependents']).size().unstack()
sc_dependents = (sc_dependents.T * 100 / sc_dependents.T.sum()).T.plot(kind='bar', stacked=True)
sc_dependents.set_ylabel('% Customers')
sc_dependents.set_xlabel('Senior Citizens')
sc_dependents.yaxis.set_major_formatter(mtick.PercentFormatter())
for i in sc_dependents.patches:
    width, height = (i.get_width(), i.get_height())
    x, y = i.get_xy()
    sc_dependents.annotate('{:.0f}%'.format(height), (i.get_x() + 0.4 * width, i.get_y() + 0.3 * height), color='black')
code
16147938/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.ticker as mtick
import pandas as pd
df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.shape
df.isna().sum()
df.dtypes
# convert_objects was removed from pandas; pd.to_numeric is the supported equivalent
df['TotalCharges'] = pd.to_numeric(df.TotalCharges, errors='coerce')
df['tenure_range'] = pd.cut(df.tenure, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75], right=True)
df.dropna(inplace=True)
df = df.drop('tenure', axis=1)
gender_dis = (df['gender'].value_counts()*100/len(df)).plot(kind='bar', stacked=True)
gender_dis.set_ylabel('% Customers')
gender_dis.set_xlabel('Gender')
gender_dis.yaxis.set_major_formatter(mtick.PercentFormatter())
totals = []
for i in gender_dis.patches:
    totals.append(i.get_width())
total = sum(totals)
# to print the values on top of the bars
for i in gender_dis.patches:
    gender_dis.text(i.get_x()+.15, i.get_height()+3.9, str(round((i.get_height()/total), 1))+'%', fontsize=12, color='black')
sc_dis = (df['SeniorCitizen'].value_counts() * 100 / len(df)).plot(kind='bar', stacked=True)
sc_dis.set_ylabel('% Customers')
sc_dis.set_xlabel('Senior Citizen')
sc_dis.yaxis.set_major_formatter(mtick.PercentFormatter())
totals = []
for i in sc_dis.patches:
    totals.append(i.get_width())
total = sum(totals)
for i in sc_dis.patches:
    sc_dis.text(i.get_x() + 0.15, i.get_height() + 5, str(round(i.get_height() / total, 1)) + '%', fontsize=12, color='black')
code
16147938/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.shape
df.isna().sum()
df.dtypes
df['Churn'].value_counts()
code
16147938/cell_32
[ "text_plain_output_1.png" ]
import matplotlib.ticker as mtick
import pandas as pd

df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.shape
df.isna().sum()
df.dtypes
# convert_objects was removed from modern pandas; pd.to_numeric with errors='coerce'
# reproduces convert_numeric=True (unparseable values become NaN)
df['TotalCharges'] = pd.to_numeric(df.TotalCharges, errors='coerce')
df['tenure_range'] = pd.cut(df.tenure, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75], right=True)
df.dropna(inplace=True)
df = df.drop('tenure', axis=1)

# gender distribution as a percentage of all customers
gender_dis = (df['gender'].value_counts() * 100 / len(df)).plot(kind='bar', stacked=True)
gender_dis.set_ylabel('% Customers')
gender_dis.set_xlabel('Gender')
gender_dis.yaxis.set_major_formatter(mtick.PercentFormatter())
totals = []
for i in gender_dis.patches:
    totals.append(i.get_width())
total = sum(totals)
# print the percentage values on top of the bars
for i in gender_dis.patches:
    gender_dis.text(i.get_x() + 0.15, i.get_height() + 3.9,
                    str(round(i.get_height() / total, 1)) + '%', fontsize=12, color='black')

# senior-citizen distribution as a percentage of all customers
sc_dis = (df['SeniorCitizen'].value_counts() * 100 / len(df)).plot(kind='bar', stacked=True)
sc_dis.set_ylabel('% Customers')
sc_dis.set_xlabel('Senior Citizen')
sc_dis.yaxis.set_major_formatter(mtick.PercentFormatter())
totals = []
for i in sc_dis.patches:
    totals.append(i.get_width())
total = sum(totals)
for i in sc_dis.patches:
    sc_dis.text(i.get_x() + 0.15, i.get_height() + 5,
                str(round(i.get_height() / total, 1)) + '%', fontsize=12, color='black')

# stacked bars: share of customers with/without dependents within each senior-citizen group
sc_dependents = df.groupby(['SeniorCitizen', 'Dependents']).size().unstack()
sc_dependents = (sc_dependents.T * 100 / sc_dependents.T.sum()).T.plot(kind='bar', stacked=True)
sc_dependents.set_ylabel('% Customers')
sc_dependents.set_xlabel('Senior Citizens')
sc_dependents.yaxis.set_major_formatter(mtick.PercentFormatter())
for i in sc_dependents.patches:
    width, height = i.get_width(), i.get_height()
    x, y = i.get_xy()
    sc_dependents.annotate('{:.0f}%'.format(height),
                           (i.get_x() + 0.4 * width, i.get_y() + 0.3 * height), color='black')

# stacked bars: share of customers with/without dependents within each partner group
sc_dependents = df.groupby(['Partner', 'Dependents']).size().unstack()
sc_dependents = (sc_dependents.T * 100 / sc_dependents.T.sum()).T.plot(kind='bar', stacked=True)
sc_dependents.set_ylabel('% Customers')
sc_dependents.set_xlabel('Partners')
sc_dependents.yaxis.set_major_formatter(mtick.PercentFormatter())
for i in sc_dependents.patches:
    width, height = i.get_width(), i.get_height()
    x, y = i.get_xy()
    sc_dependents.annotate('{:.0f}%'.format(height),
                           (i.get_x() + 0.4 * width, i.get_y() + 0.3 * height), color='black')
code
16147938/cell_8
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.shape
code
16147938/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.shape
df.isna().sum()
df.dtypes
# convert_objects was removed from modern pandas; pd.to_numeric with errors='coerce'
# reproduces convert_numeric=True (unparseable values become NaN)
df['TotalCharges'] = pd.to_numeric(df.TotalCharges, errors='coerce')
df['tenure_range'] = pd.cut(df.tenure, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75], right=True)
df['tenure_range'].value_counts()
code
16147938/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/churn-prediction/Churn.csv')
df.shape
df.isna().sum()
df.dtypes
code
88091003/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import StratifiedKFold, GroupKFold
import numpy as np
import os
import pandas as pd

SEED = 42
DATA_PATH = '../input/birdclef-2022/'
AUDIO_PATH = '../input/birdclef-2022/train_audio'
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
NUM_WORKERS = 4
# each subdirectory of train_audio is one bird species (class)
CLASSES = sorted(os.listdir(AUDIO_PATH))
NUM_CLASSES = len(CLASSES)

class AudioParams:
    """
    Parameters used for the audio data
    """
    sr = 32000
    duration = 5
    n_mels = 224
    fmin = 20
    fmax = 16000

train = pd.read_csv('../input/birdclef-2022/train_metadata.csv')
train['file_path'] = AUDIO_PATH + '/' + train['filename']
paths = train['file_path'].values

# stratified 5-fold split on the primary label; each row records its validation fold index
Fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED)
for n, (trn_index, val_index) in enumerate(Fold.split(train, train['primary_label'])):
    train.loc[val_index, 'kfold'] = int(n)
train['kfold'] = train['kfold'].astype(int)
train.to_csv('train_folds.csv', index=False)
print(train.shape)
train.head()
code