Dataset schema (one row per notebook file):

column                                      dtype           min       max
hexsha                                      stringlengths   40        40
size                                        int64           6         14.9M
ext                                         stringclasses   1 value
lang                                        stringclasses   1 value
max_stars_repo_path                         stringlengths   6         260
max_stars_repo_name                         stringlengths   6         119
max_stars_repo_head_hexsha                  stringlengths   40        41
max_stars_repo_licenses                     list
max_stars_count                             int64           1         191k
max_stars_repo_stars_event_min_datetime     stringlengths   24        24
max_stars_repo_stars_event_max_datetime     stringlengths   24        24
max_issues_repo_path                        stringlengths   6         260
max_issues_repo_name                        stringlengths   6         119
max_issues_repo_head_hexsha                 stringlengths   40        41
max_issues_repo_licenses                    list
max_issues_count                            int64           1         67k
max_issues_repo_issues_event_min_datetime   stringlengths   24        24
max_issues_repo_issues_event_max_datetime   stringlengths   24        24
max_forks_repo_path                         stringlengths   6         260
max_forks_repo_name                         stringlengths   6         119
max_forks_repo_head_hexsha                  stringlengths   40        41
max_forks_repo_licenses                     list
max_forks_count                             int64           1         105k
max_forks_repo_forks_event_min_datetime     stringlengths   24        24
max_forks_repo_forks_event_max_datetime     stringlengths   24        24
avg_line_length                             float64         2         1.04M
max_line_length                             int64           2         11.2M
alphanum_fraction                           float64         0         1
cells                                       list
cell_types                                  list
cell_type_groups                            list
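The records below follow this schema, one value per column. As a rough, non-authoritative sketch of how such records might be consumed, the snippet assumes the rows are stored as JSON Lines with exactly these column names; the file name stack_notebooks.jsonl and the filter thresholds are illustrative assumptions, not part of the dump.

```python
# Minimal sketch, assuming one JSON object per line with the columns listed in
# the schema above. The file name is hypothetical.
import json

def iter_notebook_rows(path="stack_notebooks.jsonl"):
    """Yield a trimmed view of each record: repo, path, size and content stats."""
    with open(path) as f:
        for line in f:
            row = json.loads(line)
            yield {
                "hexsha": row["hexsha"],
                "repo": row["max_stars_repo_name"],
                "path": row["max_stars_repo_path"],
                "size": row["size"],
                "avg_line_length": row["avg_line_length"],
                "alphanum_fraction": row["alphanum_fraction"],
            }

# Example filter (thresholds are arbitrary): notebooks under 1 MB with a
# reasonable share of alphanumeric text.
small_texty = [
    r for r in iter_notebook_rows()
    if r["size"] < 1_000_000 and r["alphanum_fraction"] > 0.5
]
```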

hexsha: 4af41cfef9d8b16f0a5c3847faf9bec55b3fae64
size: 216,612
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: notebooks/xgb-lgb-cb-blending.ipynb
max_stars_repo_name: feeeper/zindi-expresso-churn-prediction-challenge
max_stars_repo_head_hexsha: 39f14a47976984e45963727aad05c79e11dfd0a4
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: notebooks/xgb-lgb-cb-blending.ipynb
max_issues_repo_name: feeeper/zindi-expresso-churn-prediction-challenge
max_issues_repo_head_hexsha: 39f14a47976984e45963727aad05c79e11dfd0a4
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: notebooks/xgb-lgb-cb-blending.ipynb
max_forks_repo_name: feeeper/zindi-expresso-churn-prediction-challenge
max_forks_repo_head_hexsha: 39f14a47976984e45963727aad05c79e11dfd0a4
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 43.982132
max_line_length: 1,686
alphanum_fraction: 0.524001
cells:
[ [ [ "import re\nfrom typing import List\n\nimport pandas as pd\nimport numpy as np\nfrom tqdm.notebook import tqdm\n\nimport optuna\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler, PolynomialFeatures\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom xgboost import XGBClassifier\nfrom lightgbm import LGBMClassifier\nfrom catboost import CatBoostClassifier\n\ntqdm.pandas()", "_____no_output_____" ], [ "# XGB\n# fillna числовых колонок как средние значения по соотв колонке,\n# TENURE & REGION OneHotEncoded \n# StScaler on whole dataset \n# target endocding by region and tenure\n\n#import data\ntrain = pd.read_csv('./data/Train_folds.zip')\ntest= pd.read_csv('./data/Test.zip')\nsubmission = pd.read_csv('./data/SampleSubmission.csv')\n\ncat_cols = [\n 'REGION',\n 'TENURE',\n 'TOP_PACK'\n]\n\nnum_cols = [\n 'MONTANT',\n 'FREQUENCE_RECH',\n 'REVENUE',\n 'ARPU_SEGMENT',\n 'FREQUENCE',\n 'DATA_VOLUME',\n 'ON_NET', \n 'ORANGE',\n 'TIGO',\n 'ZONE1',\n 'ZONE2',\n 'REGULARITY',\n 'FREQ_TOP_PACK',\n]\n\ntarget = 'CHURN'\n\nmapping = {\n 'D 3-6 month': 1,\n 'E 6-9 month': 2,\n 'F 9-12 month': 3,\n 'G 12-15 month': 4,\n 'H 15-18 month': 5,\n 'I 18-21 month': 6,\n 'J 21-24 month': 7,\n 'K > 24 month': 8,\n 'OTHER': 9\n}\n\ntrain['TOP_PACK'] = train['TOP_PACK'].fillna('OTHER')\ntest['TOP_PACK'] = test['TOP_PACK'].fillna('OTHER')\n\ntrain['TENURE'] = train['TENURE'].fillna('OTHER')\ntest['TENURE'] = test['TENURE'].fillna('OTHER')\ntrain['TENURE'] = train['TENURE'].map(mapping)\ntest['TENURE'] = test['TENURE'].map(mapping)\n\ntrain['REGION'] = train['REGION'].fillna('OTHER')\ntest['REGION'] = test['REGION'].fillna('OTHER')\n\nfor nc in tqdm(num_cols):\n mean = train[nc].mean()\n train[nc] = train[nc].fillna(mean)\n test[nc] = test[nc].fillna(mean)\n \ntrain.shape, test.shape", "_____no_output_____" ], [ "churn_by_tenure = pd.read_csv('./data/agg_by_tenure_churn.csv')\nchurn_by_tenure = churn_by_tenure.append(pd.DataFrame({'TENURE': [9], 'CHURN_mean': 0, 'CHURN_median': 0}))\n\ntrain = pd.merge(train, churn_by_tenure[['TENURE', 'CHURN_mean']], left_on='TENURE', right_on='TENURE', how='left')\ntrain = train.rename({'CHURN_mean': 'MEAN_CHURN_BY_TENURE'}, axis='columns')\n\ntest = pd.merge(test, churn_by_tenure[['TENURE', 'CHURN_mean']], left_on='TENURE', right_on='TENURE', how='left')\ntest = test.rename({'CHURN_mean': 'MEAN_CHURN_BY_TENURE'}, axis='columns')\n\ntrain.shape, test.shape", "_____no_output_____" ], [ "churn_by_region = pd.read_csv('./data/agg_by_region_churn.csv')\n\nvc = train[train['REGION'] == 'OTHER']['CHURN'].value_counts()\nchurn_by_region_mean = vc[1]/(vc[0]+vc[1])\nchurn_by_region = churn_by_region.append(pd.DataFrame({'REGION': ['OTHER'], 'CHURN_mean': churn_by_region_mean, 'CHURN_median': 0}))\n\ntrain = pd.merge(train, churn_by_region[['REGION', 'CHURN_mean']], left_on='REGION', right_on='REGION', how='left')\ntrain = train.rename({'CHURN_mean': 'MEAN_CHURN_BY_REGION'}, axis='columns')\n\ntest = pd.merge(test, churn_by_region[['REGION', 'CHURN_mean']], left_on='REGION', right_on='REGION', how='left')\ntest = test.rename({'CHURN_mean': 'MEAN_CHURN_BY_REGION'}, axis='columns')\n\ntrain.shape, test.shape", "_____no_output_____" ], [ "# churn_by_top_pack = train[['TOP_PACK', 'CHURN']].groupby('TOP_PACK').agg({'CHURN': ['mean', 
'median']})\n# churn_by_top_pack.columns = ['_'.join(col).strip() for col in churn_by_top_pack.columns.values]\n# churn_by_top_pack_mean = np.mean(train[train['TOP_PACK'] == 'OTHER']['CHURN'])\n# churn_by_top_pack = churn_by_top_pack.reset_index()\n\n# d = {\n# 'TOP_PACK': ['OTHER'],\n# 'CHURN_mean': [churn_by_top_pack_mean],\n# 'CHURN_median': [0]\n# }\n\n# for tp in test['TOP_PACK'].unique():\n# if tp not in churn_by_top_pack.index:\n# d['TOP_PACK'].append(tp)\n# d['CHURN_mean'].append(churn_by_top_pack_mean)\n# d['CHURN_median'].append(0)\n \n# churn_by_top_pack = churn_by_top_pack.append(pd.DataFrame(d))\n# churn_by_top_pack.index = range(len(churn_by_top_pack))\n\n# train = pd.merge(train, churn_by_top_pack[['TOP_PACK', 'CHURN_mean']], left_on='TOP_PACK', right_on='TOP_PACK', how='left')\n# train = train.rename({'CHURN_mean': 'MEAN_CHURN_BY_TOP_PACK'}, axis='columns')\n\n# test = pd.merge(test, churn_by_top_pack[['TOP_PACK', 'CHURN_mean']], left_on='TOP_PACK', right_on='TOP_PACK', how='left')\n# test = test.rename({'CHURN_mean': 'MEAN_CHURN_BY_TOP_PACK'}, axis='columns')\n\n# train.shape, test.shape", "_____no_output_____" ], [ "# train['TOP_PACK'] = train['TOP_PACK'].fillna('OTHER')\n# test['TOP_PACK'] = test['TOP_PACK'].fillna('OTHER')\n\nchurn_by_top_pack = train[['TOP_PACK', 'CHURN']].groupby('TOP_PACK').agg({'CHURN': ['mean', 'median']})\nchurn_by_top_pack.columns = ['_'.join(col).strip() for col in churn_by_top_pack.columns.values]\nchurn_by_top_pack_mean = np.mean(train[train['TOP_PACK'] == 'OTHER']['CHURN'])\nchurn_by_top_pack = churn_by_top_pack.reset_index()\n\nd = {\n 'TOP_PACK': [],\n 'CHURN_mean': [],\n 'CHURN_median': []\n}\n\nfor tp in test['TOP_PACK'].unique():\n if tp not in churn_by_top_pack['TOP_PACK'].unique():\n d['TOP_PACK'].append(tp)\n d['CHURN_mean'].append(churn_by_top_pack_mean)\n d['CHURN_median'].append(0)\n \nchurn_by_top_pack = churn_by_top_pack.append(pd.DataFrame(d))\n\ntrain = pd.merge(train, churn_by_top_pack[['TOP_PACK', 'CHURN_mean']], left_on='TOP_PACK', right_on='TOP_PACK', how='left')\ntrain = train.rename({'CHURN_mean': 'MEAN_CHURN_BY_TOP_PACK'}, axis='columns')\n\ntest = pd.merge(test, churn_by_top_pack[['TOP_PACK', 'CHURN_mean']], left_on='TOP_PACK', right_on='TOP_PACK', how='left')\ntest = test.rename({'CHURN_mean': 'MEAN_CHURN_BY_TOP_PACK'}, axis='columns')\n\ntrain.shape, test.shape", "_____no_output_____" ], [ "train.head()", "_____no_output_____" ], [ "useful_cols = [\n 'REGION',\n 'TENURE',\n # 'MRG', # constant\n 'TOP_PACK', # wtf column\n 'MONTANT',\n 'FREQUENCE_RECH',\n 'REVENUE',\n 'ARPU_SEGMENT',\n 'FREQUENCE',\n 'DATA_VOLUME',\n 'ON_NET', \n 'ORANGE',\n 'TIGO',\n 'ZONE1',\n 'ZONE2',\n 'REGULARITY',\n 'FREQ_TOP_PACK',\n 'MEAN_CHURN_BY_TENURE',\n 'MEAN_CHURN_BY_REGION',\n 'MEAN_CHURN_BY_TOP_PACK'\n]\n\nfor cat_col in cat_cols:\n encoder = OneHotEncoder(handle_unknown='ignore')\n unique_values = train[cat_col].unique()\n\n one_hot_encoded_cols = [f'{cat_col}_{i}' for i in range(len(unique_values))]\n \n ohe_df = pd.DataFrame(encoder.fit_transform(train[[cat_col]]).toarray(), columns=one_hot_encoded_cols)\n ohe_df.index = train.index\n train = train.drop(cat_col, axis=1)\n train = pd.concat([train, ohe_df], axis=1) \n print(f'[{cat_col}] xtrain transformed')\n\n ohe_df = pd.DataFrame(encoder.transform(test[[cat_col]]).toarray(), columns=one_hot_encoded_cols)\n ohe_df.index = test.index\n test = test.drop(cat_col, axis=1)\n test = pd.concat([test, ohe_df], axis=1)\n print(f'[{cat_col}] xtest transformed')\n \n useful_cols += 
one_hot_encoded_cols\n useful_cols.remove(cat_col)\n \nscaler = StandardScaler()\ntrain[num_cols] = scaler.fit_transform(train[num_cols])\ntest[num_cols] = scaler.transform(test[num_cols])", "[REGION] xtrain transformed\n[REGION] xtest transformed\n[TENURE] xtrain transformed\n[TENURE] xtest transformed\n[TOP_PACK] xtrain transformed\n[TOP_PACK] xtest transformed\n" ], [ "poly = PolynomialFeatures(degree=3, interaction_only=True, include_bias=False)\ntrain_poly = poly.fit_transform(train[num_cols])\ntest_poly = poly.fit_transform(test[num_cols])\n\npoly_columns = [f'poly_{x.replace(\" \", \"__\")}' for x in poly.get_feature_names(num_cols)] # [f\"poly_{i}\" for i in range(train_poly.shape[1])]\ndf_poly = pd.DataFrame(train_poly, columns=poly_columns, dtype=np.float32)\ndf_test_poly = pd.DataFrame(test_poly, columns=poly_columns, dtype=np.float32)\n\ntrain = pd.concat([train, df_poly], axis=1)\ntest = pd.concat([test, df_test_poly], axis=1)\n\nuseful_cols += poly_columns\n\ntrain.head()", "h:\\projects\\open-data-battle-2021\\venv\\lib\\site-packages\\sklearn\\utils\\deprecation.py:87: FutureWarning: Function get_feature_names is deprecated; get_feature_names is deprecated in 1.0 and will be removed in 1.2. Please use get_feature_names_out instead.\n warnings.warn(msg, category=FutureWarning)\n" ], [ "sum(train.memory_usage())/1024/1024", "_____no_output_____" ], [ "def optimize_floats(df: pd.DataFrame) -> pd.DataFrame:\n floats = df.select_dtypes(include=['float64']).columns.tolist()\n df[floats] = df[floats].apply(pd.to_numeric, downcast='float')\n return df\n\n\ndef optimize_ints(df: pd.DataFrame) -> pd.DataFrame:\n ints = df.select_dtypes(include=['int64']).columns.tolist()\n df[ints] = df[ints].apply(pd.to_numeric, downcast='integer')\n return df\n\n\ndef optimize_objects(df: pd.DataFrame, datetime_features: List[str]) -> pd.DataFrame:\n for col in df.select_dtypes(include=['object']):\n if col not in datetime_features:\n num_unique_values = len(df[col].unique())\n num_total_values = len(df[col])\n if float(num_unique_values) / num_total_values < 0.5:\n df[col] = df[col].astype('category')\n else:\n df[col] = pd.to_datetime(df[col])\n return df\n\n\n\ndef optimize(df: pd.DataFrame, datetime_features: List[str] = []):\n return optimize_floats(optimize_ints(optimize_objects(df, datetime_features)))\n\ntrain = optimize(train, [])", "_____no_output_____" ], [ "sum(train.memory_usage())/1024/1024", "_____no_output_____" ], [ "train.to_csv('./data/train.full.csv', index=None)", "_____no_output_____" ], [ "final_test_predictions = []\nfinal_valid_predictions = {}\n\nscores = []\n\nfor fold in tqdm(range(5), 'folds'):\n xtrain = train[train['kfold'] != fold][useful_cols]\n ytrain = train[train['kfold'] != fold][target]\n \n xvalid = train[train['kfold'] == fold][useful_cols]\n yvalid = train[train['kfold'] == fold][target]\n \n valid_ids = train[train['kfold'] == fold]['user_id'].values.tolist()\n \n xtest = test[useful_cols]\n\n model = XGBClassifier(\n n_estimators=7000,\n n_jobs=-1,\n random_state=42,\n tree_method='gpu_hist',\n gpu_id=0,\n predictor=\"gpu_predictor\",\n# **{\n# 'learning_rate': 0.021655316351235455,\n# 'reg_lambda': 1.0883078718317323e-07,\n# 'reg_alpha': 0.00015120241798978777,\n# 'subsample': 0.7179552032665535,\n# 'colsample_bytree': 0.7408152702492675,\n# 'max_depth': 7\n# }\n **{\n 'learning_rate': 0.014461849398074727,\n 'reg_lambda': 0.08185850904776007,\n 'reg_alpha': 0.0001173486815850512,\n 'subsample': 0.7675905290878289,\n 'colsample_bytree': 
0.2708299922996371,\n 'max_depth': 7\n }\n ) \n model.fit(xtrain, ytrain, early_stopping_rounds=300, eval_set=[(xvalid, yvalid)], verbose=1000)\n \n preds_valid = model.predict_proba(xvalid)[:, 1]\n test_preds = model.predict_proba(xtest)[:, 1]\n final_test_predictions.append(test_preds)\n final_valid_predictions.update(dict(zip(valid_ids, preds_valid)))\n score = roc_auc_score(yvalid, preds_valid)\n scores.append(score)\n print(fold, score) \n\nprint(np.mean(scores), np.std(scores))\n\nfinal_valid_predictions = pd.DataFrame.from_dict(final_valid_predictions, orient=\"index\").reset_index()\nfinal_valid_predictions.columns = [\"id\", \"pred_1\"]\nfinal_valid_predictions.to_csv(\"./data/train_pred_1.csv\", index=False)\n\nsample_submission = pd.read_csv('./data/SampleSubmission.csv')\nsample_submission['CHURN'] = np.mean(np.column_stack(final_test_predictions), axis=1)\nsample_submission.columns = [\"id\", \"pred_1\"]\nsample_submission.to_csv(\"./data/test_pred_1.csv\", index=False)\n\n# final_predictions = []\n# scores = []\n\n# for fold in tqdm(range(5), 'folds'):\n# xtrain = train[train['kfold'] != fold][useful_cols]\n# ytrain = train[train['kfold'] != fold][target]\n \n# xvalid = train[train['kfold'] == fold][useful_cols]\n# yvalid = train[train['kfold'] == fold][target]\n \n# xtest = test[useful_cols]\n\n# model = XGBClassifier(\n# n_estimators=7000,\n# n_jobs=-1,\n# random_state=42,\n# tree_method='gpu_hist',\n# gpu_id=0,\n# predictor=\"gpu_predictor\",\n# # **{'learning_rate': 0.02981286840846979,\n# # 'reg_lambda': 2.1119486166373553e-06,\n# # 'reg_alpha': 0.09652271602187434,\n# # 'subsample': 0.2972622031653025,\n# # 'colsample_bytree': 0.3291720075373176,\n# # 'max_depth': 2}\n# # **{'learning_rate': 0.03359830446697092,\n# # 'reg_lambda': 0.0013493600461741606,\n# # 'reg_alpha': 0.0002728448162129134,\n# # 'subsample': 0.13373120583933554,\n# # 'colsample_bytree': 0.1386996438938067,\n# # 'max_depth': 7},\n# **{\n# 'learning_rate': 0.021655316351235455,\n# 'reg_lambda': 1.0883078718317323e-07,\n# 'reg_alpha': 0.00015120241798978777,\n# 'subsample': 0.7179552032665535,\n# 'colsample_bytree': 0.7408152702492675,\n# 'max_depth': 7\n# }\n# ) \n# model.fit(xtrain, ytrain, early_stopping_rounds=300, eval_set=[(xvalid, yvalid)], verbose=1000)\n \n# preds_valid = model.predict_proba(xvalid)[:, 1]\n# test_preds = model.predict_proba(xtest)[:, 1]\n# final_predictions.append(test_preds)\n# score = roc_auc_score(yvalid, preds_valid)\n# scores.append(score)\n# print(fold, score)\n\n# print(np.mean(scores), np.std(scores))\n\n\n# 0.9314604358446612 0.000506497423655064", "_____no_output_____" ], [ "# xtrain = train[train['kfold'] != 1][useful_cols]\n# print(len(xtrain.columns), len(set(xtrain.columns)))\n# xtrain.columns.to_series()[np.isinf(xtrain).any()]", "557 557\n" ], [ "# xtrain[np.isinf(xtrain['poly_MONTANT__FREQUENCE_RECH__ZONE1'])][['MONTANT', 'FREQUENCE_RECH', 'ZONE1', 'poly_MONTANT__FREQUENCE_RECH__ZONE1']]\n\n\nxtrain[np.isinf(xtrain['poly_MONTANT__REVENUE__ARPU_SEGMENT'])][['MONTANT', 'REVENUE', 'ARPU_SEGMENT', 'poly_MONTANT__REVENUE__ARPU_SEGMENT']]", "_____no_output_____" ], [ "# train[train['MEAN_CHURN_BY_TOP_PACK'].isna()][['MEAN_CHURN_BY_TOP_PACK', 'CHURN']]\ntrain[[col for col in train.columns if not col.startswith('poly') and not col.startswith('TOP_PACK_') and not col.startswith('REGION_') and not col.startswith('TENURE_')]]", "_____no_output_____" ], [ "sample_submission.sample(7)", "_____no_output_____" ], [ "preds = np.mean(np.column_stack(final_predictions), 
axis=1)\n\nsubmission = pd.read_csv('./data/SampleSubmission.csv')\nsubmission.CHURN = preds\nsubmission.to_csv('./data/submission-xgb-proba-poly-features.csv', index=False)", "_____no_output_____" ], [ "final_test_predictions = []\nfinal_valid_predictions = {}\n\nscores = []\n\nfor fold in tqdm(range(5), 'folds'):\n xtrain = train[train['kfold'] != fold][useful_cols]\n ytrain = train[train['kfold'] != fold][target]\n \n xvalid = train[train['kfold'] == fold][useful_cols]\n yvalid = train[train['kfold'] == fold][target]\n\n valid_ids = train[train['kfold'] == fold]['user_id'].values.tolist()\n\n xtest = test[useful_cols]\n\n lgb_model = LGBMClassifier(\n n_estimators=7000,\n n_jobs=-1,\n random_state=42,\n# **{\n# 'learning_rate': 0.03881855209002591,\n# 'reg_lambda': 0.009591673857338072,\n# 'reg_alpha': 0.5065599259874649,\n# 'subsample': 0.4016863186957058,\n# 'colsample_bytree': 0.9360889506340332,\n# 'max_depth': 4\n# }\n **{\n 'learning_rate': 0.029253877255476443,\n 'reg_lambda': 16.09426889606859,\n 'reg_alpha': 0.014354120473120952,\n 'subsample': 0.43289663848783977,\n 'colsample_bytree': 0.5268279718406376,\n 'max_depth': 6}\n ) \n lgb_model.fit(xtrain, ytrain, early_stopping_rounds=300, eval_set=[(xvalid, yvalid)], verbose=1000)\n \n preds_valid = lgb_model.predict_proba(xvalid)[:, 1]\n test_preds = lgb_model.predict_proba(xtest)[:, 1]\n final_test_predictions.append(test_preds)\n final_valid_predictions.update(dict(zip(valid_ids, preds_valid)))\n score = roc_auc_score(yvalid, preds_valid)\n scores.append(score)\n print(fold, score) \n\nprint(np.mean(scores), np.std(scores))\n\nfinal_valid_predictions = pd.DataFrame.from_dict(final_valid_predictions, orient=\"index\").reset_index()\nfinal_valid_predictions.columns = [\"id\", \"pred_2\"]\nfinal_valid_predictions.to_csv(\"./data/train_pred_2.csv\", index=False)\n\nsample_submission = pd.read_csv('./data/SampleSubmission.csv')\nsample_submission['CHURN'] = np.mean(np.column_stack(final_test_predictions), axis=1)\nsample_submission.columns = [\"id\", \"pred_2\"]\nsample_submission.to_csv(\"./data/test_pred_2.csv\", index=False)\n\nsample_submission.sample(7)", "_____no_output_____" ], [ "final_test_predictions = []\nfinal_valid_predictions = {}\n\nscores = []\n\nfor fold in tqdm(range(5), 'folds'):\n xtrain = train[train['kfold'] != fold][useful_cols]\n ytrain = train[train['kfold'] != fold][target]\n \n xvalid = train[train['kfold'] == fold][useful_cols]\n yvalid = train[train['kfold'] == fold][target]\n\n valid_ids = train[train['kfold'] == fold]['user_id'].values.tolist()\n\n xtest = test[useful_cols]\n\n cb_model = CatBoostClassifier(\n n_estimators=1000,\n random_state=42,\n **{\n 'objective': 'CrossEntropy',\n 'colsample_bylevel': 0.054208119366927966,\n 'depth': 12,\n 'boosting_type': 'Ordered',\n 'bootstrap_type': 'Bernoulli',\n 'subsample': 0.9494580379034286\n }\n )\n cb_model.fit(xtrain, ytrain, early_stopping_rounds=300, eval_set=[(xvalid, yvalid)], verbose=1000)\n \n preds_valid = cb_model.predict_proba(xvalid)[:, 1]\n test_preds = cb_model.predict_proba(xtest)[:, 1]\n final_test_predictions.append(test_preds)\n final_valid_predictions.update(dict(zip(valid_ids, preds_valid)))\n score = roc_auc_score(yvalid, preds_valid)\n scores.append(score)\n print(fold, score) \n\nprint(np.mean(scores), np.std(scores))\n\nfinal_valid_predictions = pd.DataFrame.from_dict(final_valid_predictions, orient=\"index\").reset_index()\nfinal_valid_predictions.columns = [\"id\", 
\"pred_3\"]\nfinal_valid_predictions.to_csv(\"./data/train_pred_3.csv\", index=False)\n\nsample_submission = pd.read_csv('./data/SampleSubmission.csv')\nsample_submission['CHURN'] = np.mean(np.column_stack(final_test_predictions), axis=1)\nsample_submission.columns = [\"id\", \"pred_3\"]\nsample_submission.to_csv(\"./data/test_pred_3.csv\", index=False)\n\nsample_submission.sample(7)", "_____no_output_____" ], [ "final_test_predictions = []\nfinal_valid_predictions = {}\n\nscores = []\n\n# del scgb_model\n\nfor fold in tqdm(range(5), 'folds'):\n xtrain = train[train['kfold'] != fold][useful_cols]\n ytrain = train[train['kfold'] != fold][target]\n \n xvalid = train[train['kfold'] == fold][useful_cols]\n yvalid = train[train['kfold'] == fold][target]\n\n valid_ids = train[train['kfold'] == fold]['user_id'].values.tolist()\n\n xtest = test[useful_cols]\n\n scgb_model = GradientBoostingClassifier(\n n_estimators=100,\n random_state=42, \n verbose=1,\n max_features=0.1\n# **{\n# 'objective': 'CrossEntropy',\n# 'colsample_bylevel': 0.054208119366927966,\n# 'depth': 12,\n# 'boosting_type': 'Ordered',\n# 'bootstrap_type': 'Bernoulli',\n# 'subsample': 0.9494580379034286\n# }\n )\n scgb_model.fit(xtrain, ytrain)\n \n preds_valid = scgb_model.predict_proba(xvalid)[:, 1]\n test_preds = scgb_model.predict_proba(xtest)[:, 1]\n final_test_predictions.append(test_preds)\n final_valid_predictions.update(dict(zip(valid_ids, preds_valid)))\n score = roc_auc_score(yvalid, preds_valid)\n scores.append(score)\n print(fold, score) \n\nprint(np.mean(scores), np.std(scores))\n\nfinal_valid_predictions = pd.DataFrame.from_dict(final_valid_predictions, orient=\"index\").reset_index()\nfinal_valid_predictions.columns = [\"id\", \"pred_4\"]\nfinal_valid_predictions.to_csv(\"./data/train_pred_4.csv\", index=False)\n\nsample_submission = pd.read_csv('./data/SampleSubmission.csv')\nsample_submission['CHURN'] = np.mean(np.column_stack(final_test_predictions), axis=1)\nsample_submission.columns = [\"id\", \"pred_4\"]\nsample_submission.to_csv(\"./data/test_pred_4.csv\", index=False)\n\nsample_submission.sample(7)", "_____no_output_____" ], [ "final_test_predictions = []\nfinal_valid_predictions = {}\n\nscores = []\n\nfor fold in tqdm(range(5), 'folds'):\n xtrain = train[train['kfold'] != fold][useful_cols]\n ytrain = train[train['kfold'] != fold][target]\n \n xvalid = train[train['kfold'] == fold][useful_cols]\n yvalid = train[train['kfold'] == fold][target]\n\n valid_ids = train[train['kfold'] == fold]['user_id'].values.tolist()\n\n xtest = test[useful_cols]\n\n rf_model = RandomForestClassifier(\n random_state=42,\n n_jobs=-1,\n verbose=1,\n **{\n 'max_depth': 15,\n 'max_features': 'auto',\n 'class_weight': 'balanced_subsample'\n }\n )\n rf_model.fit(xtrain, ytrain)\n\n preds_valid = rf_model.predict_proba(xvalid)[:, 1]\n test_preds = rf_model.predict_proba(xtest)[:, 1]\n final_test_predictions.append(test_preds)\n final_valid_predictions.update(dict(zip(valid_ids, preds_valid)))\n score = roc_auc_score(yvalid, preds_valid)\n scores.append(score)\n print(fold, score) \n\nprint(np.mean(scores), np.std(scores))\n\nfinal_valid_predictions = pd.DataFrame.from_dict(final_valid_predictions, orient=\"index\").reset_index()\nfinal_valid_predictions.columns = [\"id\", \"pred_5\"]\nfinal_valid_predictions.to_csv(\"./data/train_pred_5.csv\", index=False)\n\nsample_submission = pd.read_csv('./data/SampleSubmission.csv')\nsample_submission['CHURN'] = np.mean(np.column_stack(final_test_predictions), axis=1)\nsample_submission.columns 
= [\"id\", \"pred_5\"]\nsample_submission.to_csv(\"./data/test_pred_5.csv\", index=False)\n\nsample_submission.sample(7)", "_____no_output_____" ], [ "df = train.copy() # pd.read_csv('./data/Train_folds.zip')\ndf_test = test.copy() # pd.read_csv('./data/Test.zip')\nsample_submission = pd.read_csv('./data/SampleSubmission.csv')\n\ndf1 = pd.read_csv(\"./data/train_pred_1.csv\")\ndf2 = pd.read_csv(\"./data/train_pred_2.csv\")\ndf3 = pd.read_csv(\"./data/train_pred_3.csv\")\ndf4 = pd.read_csv(\"./data/train_pred_4.csv\")\ndf5 = pd.read_csv(\"./data/train_pred_5.csv\")\ndf6 = pd.read_csv(\"./data/train_pred_6.csv\")\n\ndf_test1 = pd.read_csv(\"./data/test_pred_1.csv\")\ndf_test2 = pd.read_csv(\"./data/test_pred_2.csv\")\ndf_test3 = pd.read_csv(\"./data/test_pred_3.csv\")\ndf_test4 = pd.read_csv(\"./data/test_pred_4.csv\")\ndf_test5 = pd.read_csv(\"./data/test_pred_5.csv\")\ndf_test6 = pd.read_csv(\"./data/test_pred_6.csv\")\n\ndf = df.merge(df1, left_on='user_id', right_on=\"id\", how=\"left\")\ndf = df.merge(df2, left_on='user_id', right_on=\"id\", how=\"left\")\ndf = df.merge(df3, left_on='user_id', right_on=\"id\", how=\"left\")\ndf = df.merge(df4, left_on='user_id', right_on=\"id\", how=\"left\")\ndf = df.merge(df5, left_on='user_id', right_on=\"id\", how=\"left\")\ndf = df.merge(df6, left_on='user_id', right_on=\"id\", how=\"left\")\n\ndf_test = df_test.merge(df_test1, left_on='user_id', right_on=\"id\", how=\"left\")\ndf_test = df_test.merge(df_test2, left_on='user_id', right_on=\"id\", how=\"left\")\ndf_test = df_test.merge(df_test3, left_on='user_id', right_on=\"id\", how=\"left\")\ndf_test = df_test.merge(df_test4, left_on='user_id', right_on=\"id\", how=\"left\")\ndf_test = df_test.merge(df_test5, left_on='user_id', right_on=\"id\", how=\"left\")\ndf_test = df_test.merge(df_test6, left_on='user_id', right_on=\"id\", how=\"left\")\n\ndf.head()", "h:\\projects\\zindi-expresso-churn-prediction-challenge\\venv\\lib\\site-packages\\pandas\\core\\frame.py:9186: FutureWarning: Passing 'suffixes' which cause duplicate columns {'id_x'} in the result is deprecated and will raise a MergeError in a future version.\n return merge(\n" ], [ "df_test.head()", "_____no_output_____" ], [ "df[[\"pred_1\", \"pred_2\", \"pred_3\", \"pred_4\", \"pred_5\", \"pred_6\", 'CHURN']]", "_____no_output_____" ], [ "sorted(dict(zip(lgb_model.feature_name_, lgb_model.feature_importances_)).items(), key=lambda x: -x[1])", "_____no_output_____" ], [ "useful_features = ['pred_1', 'pred_2', 'pred_3', 'pred_4', 'pred_5', 'pred_6']\ndf_test = df_test[useful_features]\n\nfinal_predictions = []\nscores = []\n\nfor fold in range(5):\n xtrain = df[df.kfold != fold].reset_index(drop=True)\n xvalid = df[df.kfold == fold].reset_index(drop=True)\n xtest = df_test.copy()\n\n ytrain = xtrain['CHURN']\n yvalid = xvalid['CHURN']\n \n xtrain = xtrain[useful_features]\n xvalid = xvalid[useful_features]\n \n model = LogisticRegression()\n# model = SGDClassifier(random_state=42, loss='modified_huber')\n model.fit(xtrain, ytrain)\n \n preds_valid = model.predict_proba(xvalid)[:, 1]\n test_preds = model.predict_proba(xtest)[:, 1]\n final_predictions.append(test_preds)\n score = roc_auc_score(yvalid, preds_valid)\n print(fold, score)\n scores.append(score)\n\nprint(np.mean(scores), np.std(scores))\n\n# 0 0.9315283221655729\n# 1 0.9322252323181413\n# 2 0.9313247129395837\n# 3 0.9318919085786139\n# 4 0.9307662596698618\n# 0.9315472871343549 0.0004976497673210968\n# 0.9303098065651516 0.0005268336328890778\n# 0.9301957664933731 
0.0004690483101817313", "0 0.9303435598903239\n1 0.9308821147690043\n2 0.9299188150996149\n3 0.930353629843727\n4 0.9294754849669176\n0.9301947209139175 0.00047187705320450883\n" ], [ "sample_submission = pd.read_csv('./data/SampleSubmission.csv')\nsample_submission['CHURN'] = np.mean(np.column_stack(final_predictions), axis=1)\nsample_submission.to_csv(\"./data/submission-blending-7-predict-proba-logreg-poly-with-randforest-balanced-and-scgbd-and-nn.csv\", index=False)\n\nsample_submission.sample(7)", "_____no_output_____" ], [ "df_test.to_csv('./data/test_stack.csv', index=None)\ndf.to_csv('./data/train_stack.csv', index=None)", "_____no_output_____" ], [ "df[useful_features].corrwith(df['CHURN'])", "_____no_output_____" ], [ "import optuna\n\ndef run(trial):\n fold = 0\n learning_rate = trial.suggest_float(\"learning_rate\", 1e-2, 0.25, log=True)\n reg_lambda = trial.suggest_loguniform(\"reg_lambda\", 1e-8, 100.0)\n reg_alpha = trial.suggest_loguniform(\"reg_alpha\", 1e-8, 100.0)\n subsample = trial.suggest_float(\"subsample\", 0.1, 1.0)\n colsample_bytree = trial.suggest_float(\"colsample_bytree\", 0.1, 1.0)\n max_depth = trial.suggest_int(\"max_depth\", 1, 7)\n\n xtrain = train[train.kfold != fold].reset_index(drop=True)\n xvalid = train[train.kfold == fold].reset_index(drop=True)\n\n ytrain = xtrain['CHURN']\n yvalid = xvalid['CHURN']\n\n xtrain = xtrain[useful_cols]\n xvalid = xvalid[useful_cols]\n\n model = XGBClassifier(\n random_state=42,\n n_estimators=7000,\n tree_method='gpu_hist',\n gpu_id=0,\n predictor=\"gpu_predictor\",\n learning_rate=learning_rate,\n reg_lambda=reg_lambda,\n reg_alpha=reg_alpha,\n subsample=subsample,\n colsample_bytree=colsample_bytree,\n max_depth=max_depth,\n )\n model.fit(xtrain, ytrain, early_stopping_rounds=300, eval_set=[(xvalid, yvalid)], verbose=1000)\n preds_valid = model.predict_proba(xvalid)[:, 1]\n score = roc_auc_score(yvalid, preds_valid)\n return score\n\nmax_study = optuna.create_study(direction=\"maximize\")\nmax_study.optimize(run, n_trials=10)\nmax_study.best_params", "\u001b[32m[I 2021-09-17 08:48:15,928]\u001b[0m A new study created in memory with name: no-name-d85b481e-ec2d-41a6-a53f-bf28125d09e1\u001b[0m\nh:\\projects\\zindi-expresso-churn-prediction-challenge\\venv\\lib\\site-packages\\xgboost\\sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 
0, 1, 2, ..., [num_class - 1].\n warnings.warn(label_encoder_deprecation_msg, UserWarning)\n" ], [ "import optuna\n\ndef run(trial):\n fold = 0\n learning_rate = trial.suggest_float(\"learning_rate\", 1e-2, 0.25, log=True)\n reg_lambda = trial.suggest_loguniform(\"reg_lambda\", 1e-8, 100.0)\n reg_alpha = trial.suggest_loguniform(\"reg_alpha\", 1e-8, 100.0)\n subsample = trial.suggest_float(\"subsample\", 0.1, 1.0)\n colsample_bytree = trial.suggest_float(\"colsample_bytree\", 0.1, 1.0)\n max_depth = trial.suggest_int(\"max_depth\", 1, 7)\n\n xtrain = train[train.kfold != fold].reset_index(drop=True)\n xvalid = train[train.kfold == fold].reset_index(drop=True)\n\n ytrain = xtrain['CHURN']\n yvalid = xvalid['CHURN']\n\n xtrain = xtrain[useful_cols]\n xvalid = xvalid[useful_cols]\n\n model = LGBMClassifier(\n random_state=42,\n n_estimators=7000,\n learning_rate=learning_rate,\n reg_lambda=reg_lambda,\n reg_alpha=reg_alpha,\n subsample=subsample,\n colsample_bytree=colsample_bytree,\n max_depth=max_depth,\n )\n model.fit(xtrain, ytrain, early_stopping_rounds=300, eval_set=[(xvalid, yvalid)], verbose=1000)\n preds_valid = model.predict_proba(xvalid)[:, 1]\n score = roc_auc_score(yvalid, preds_valid)\n return score\n\nlgb_study = optuna.create_study(direction=\"maximize\")\nlgb_study.optimize(run, n_trials=10)\nlgb_study.best_params", "\u001b[32m[I 2021-09-17 05:25:57,446]\u001b[0m A new study created in memory with name: no-name-66770598-610f-4ccb-b9fa-9be952104e81\u001b[0m\n" ], [ "import optuna\n\ndef run_cb(trial):\n fold = 0\n param = {\n \"objective\": trial.suggest_categorical(\"objective\", [\"Logloss\", \"CrossEntropy\"]),\n \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.01, 0.1),\n \"depth\": trial.suggest_int(\"depth\", 1, 12),\n \"boosting_type\": trial.suggest_categorical(\"boosting_type\", [\"Ordered\", \"Plain\"]),\n \"bootstrap_type\": trial.suggest_categorical(\n \"bootstrap_type\", [\"Bayesian\", \"Bernoulli\", \"MVS\"]\n ),\n# \"used_ram_limit\": \"3gb\",\n }\n \n if param[\"bootstrap_type\"] == \"Bayesian\":\n param[\"bagging_temperature\"] = trial.suggest_float(\"bagging_temperature\", 0, 10)\n elif param[\"bootstrap_type\"] == \"Bernoulli\":\n param[\"subsample\"] = trial.suggest_float(\"subsample\", 0.1, 1)\n\n xtrain = train[train.kfold != fold].reset_index(drop=True)\n xvalid = train[train.kfold == fold].reset_index(drop=True)\n\n ytrain = xtrain['CHURN']\n yvalid = xvalid['CHURN']\n\n xtrain = xtrain[useful_cols]\n xvalid = xvalid[useful_cols]\n\n cb_model = CatBoostClassifier(**param)\n \n cb_model.fit(xtrain, ytrain, early_stopping_rounds=100, eval_set=[(xvalid, yvalid)], verbose=1000)\n \n preds_valid = cb_model.predict_proba(xvalid)[:, 1]\n score = roc_auc_score(yvalid, preds_valid)\n return score\n\ncb_study = optuna.create_study(direction=\"maximize\")\ncb_study.optimize(run_cb, n_trials=100, timeout=600)\n\nprint(\"Number of finished trials: {}\".format(len(cb_study.trials)))\n\nprint(\"Best trial:\")\ntrial = cb_study.best_trial\n\nprint(\" Value: {}\".format(trial.value))\n\nprint(\" Params: \")\nfor key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))\n \ncb_study.best_params", "\u001b[32m[I 2021-09-17 14:37:49,878]\u001b[0m A new study created in memory with name: no-name-fa2aa7b9-367d-407d-9412-7e8ea0147125\u001b[0m\n" ], [ "import optuna\n\ndef run_rf(trial: optuna.Trial):\n fold = 0\n params = {\n 'max_depth': trial.suggest_int('rf_max_depth', 2, 32, log=True),\n 'max_features': 
trial.suggest_categorical('rf_max_features', [\"auto\", \"sqrt\", \"log2\"]),\n 'class_weight': trial.suggest_categorical('rf_class_weight', ['balanced', 'balanced_subsample', None])\n }\n \n\n xtrain = train[train.kfold != fold].reset_index(drop=True)\n xvalid = train[train.kfold == fold].reset_index(drop=True)\n\n ytrain = xtrain['CHURN']\n yvalid = xvalid['CHURN']\n\n xtrain = xtrain[useful_cols]\n xvalid = xvalid[useful_cols]\n\n rf_model = RandomForestClassifier(\n n_estimators=100,\n n_jobs=-1,\n random_state=42,\n verbose=1,\n **params)\n \n rf_model.fit(xtrain, ytrain)\n \n preds_valid = rf_model.predict_proba(xvalid)[:, 1]\n score = roc_auc_score(yvalid, preds_valid)\n return score\n\nrf_study = optuna.create_study(direction=\"maximize\")\nrf_study.optimize(run_rf, n_trials=100, timeout=600)\n\nprint(\"Number of finished trials: {}\".format(len(rf_study.trials)))\n\nprint(\"Best trial:\")\ntrial = rf_study.best_trial\n\nprint(\" Value: {}\".format(trial.value))\n\nprint(\" Params: \")\nfor key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))\n \nrf_study.best_params", "\u001b[32m[I 2021-09-18 21:35:17,788]\u001b[0m A new study created in memory with name: no-name-58fbb78c-6066-4439-a36c-02ecbed6c38b\u001b[0m\n[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 27.8s\n[Parallel(n_jobs=-1)]: Done 100 out of 100 | elapsed: 1.2min finished\n[Parallel(n_jobs=8)]: Using backend ThreadingBackend with 8 concurrent workers.\n[Parallel(n_jobs=8)]: Done 34 tasks | elapsed: 0.2s\n[Parallel(n_jobs=8)]: Done 100 out of 100 | elapsed: 0.6s finished\n\u001b[32m[I 2021-09-18 21:36:42,148]\u001b[0m Trial 0 finished with value: 0.9058284139202586 and parameters: {'rf_max_depth': 4, 'rf_max_features': 'log2', 'rf_class_weight': 'balanced'}. Best is trial 0 with value: 0.9058284139202586.\u001b[0m\n[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 1.0min\n[Parallel(n_jobs=-1)]: Done 100 out of 100 | elapsed: 2.6min finished\n[Parallel(n_jobs=8)]: Using backend ThreadingBackend with 8 concurrent workers.\n[Parallel(n_jobs=8)]: Done 34 tasks | elapsed: 0.5s\n[Parallel(n_jobs=8)]: Done 100 out of 100 | elapsed: 1.5s finished\n\u001b[32m[I 2021-09-18 21:39:29,363]\u001b[0m Trial 1 finished with value: 0.9184464029144264 and parameters: {'rf_max_depth': 10, 'rf_max_features': 'log2', 'rf_class_weight': 'balanced'}. Best is trial 1 with value: 0.9184464029144264.\u001b[0m\n[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 46.3s\n[Parallel(n_jobs=-1)]: Done 100 out of 100 | elapsed: 2.0min finished\n[Parallel(n_jobs=8)]: Using backend ThreadingBackend with 8 concurrent workers.\n[Parallel(n_jobs=8)]: Done 34 tasks | elapsed: 0.1s\n[Parallel(n_jobs=8)]: Done 100 out of 100 | elapsed: 0.5s finished\n\u001b[32m[I 2021-09-18 21:41:39,330]\u001b[0m Trial 2 finished with value: 0.9065188942356315 and parameters: {'rf_max_depth': 3, 'rf_max_features': 'sqrt', 'rf_class_weight': 'balanced'}. 
Best is trial 1 with value: 0.9184464029144264.\u001b[0m\n[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 34 tasks | elapsed: 3.3min\n[Parallel(n_jobs=-1)]: Done 100 out of 100 | elapsed: 8.6min finished\n[Parallel(n_jobs=8)]: Using backend ThreadingBackend with 8 concurrent workers.\n[Parallel(n_jobs=8)]: Done 34 tasks | elapsed: 0.9s\n[Parallel(n_jobs=8)]: Done 100 out of 100 | elapsed: 2.5s finished\n\u001b[32m[I 2021-09-18 21:50:24,301]\u001b[0m Trial 3 finished with value: 0.9289267479994578 and parameters: {'rf_max_depth': 15, 'rf_max_features': 'auto', 'rf_class_weight': 'balanced_subsample'}. Best is trial 3 with value: 0.9289267479994578.\u001b[0m\n" ], [ "# vc = train['CHURN'].value_counts()\n# vc[0], vc[1]*4.33", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]

hexsha: 4af41d0f99115134fd8b29e62b87eebf75a35014
size: 3,658
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Notebooks/big_linear_set.ipynb
max_stars_repo_name: dougsweetser/AIG
max_stars_repo_head_hexsha: ce23119bbde41671438fb805dfba4b04b42d84d6
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Notebooks/big_linear_set.ipynb
max_issues_repo_name: dougsweetser/AIG
max_issues_repo_head_hexsha: ce23119bbde41671438fb805dfba4b04b42d84d6
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Notebooks/big_linear_set.ipynb
max_forks_repo_name: dougsweetser/AIG
max_forks_repo_head_hexsha: ce23119bbde41671438fb805dfba4b04b42d84d6
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 24.065789
max_line_length: 116
alphanum_fraction: 0.532531
cells:
[ [ [ "# Big Linear Set of Quaternions", "_____no_output_____" ], [ "Generate a few quaternions using a class found in Q_tool_devo.", "_____no_output_____" ] ], [ [ "%%capture\n%matplotlib inline\nimport numpy as np\nimport sympy as sp\nimport matplotlib.pyplot as plt\n\n# To get equations the look like, well, equations, use the following.\nfrom sympy.interactive import printing\nprinting.init_printing(use_latex=True)\nfrom IPython.display import display\n\n# Tools for manipulating quaternions.\nimport Q_tools as qt;", "_____no_output_____" ] ], [ [ "The class is call QHArray().", "_____no_output_____" ] ], [ [ "qha = qt.QHArray()\n\nfor q_step in qha.range(q_start=qt.QH([0, 0, 0, 0]), q_delta=qt.QH([1, 0.1, 0.2, 0.3]), n_steps=10):\n print(q_step)", "(0, 0, 0, 0) Q\n(1, 0.1, 0.2, 0.3) Q+1dQ\n(2, 0.2, 0.4, 0.6) Q+2dQ\n(3, 0.30000000000000004, 0.6000000000000001, 0.8999999999999999) Q+3dQ\n(4, 0.4, 0.8, 1.2) Q+4dQ\n(5, 0.5, 1.0, 1.5) Q+5dQ\n(6, 0.6, 1.2, 1.8) Q+6dQ\n(7, 0.7, 1.4, 2.1) Q+7dQ\n(8, 0.7999999999999999, 1.5999999999999999, 2.4) Q+8dQ\n(9, 0.8999999999999999, 1.7999999999999998, 2.6999999999999997) Q+9dQ\n(10, 0.9999999999999999, 1.9999999999999998, 2.9999999999999996) Q+10dQ\n" ] ], [ [ "Write out 10k to disk.", "_____no_output_____" ] ], [ [ "with open('/tmp/10k.data', 'w') as datafile:\n for q_step in qha.range(q_start=qt.QH([0, 0, 0, 0]), q_delta=qt.QH([1, 0.1, 0.2, 0.3]), n_steps=10000):\n datafile.write(\"{}, {}, {}, {}\\n\".format(q_step.t, q_step.x, q_step.y, q_step.z))", "_____no_output_____" ], [ "! wc -l /tmp/10k.data\n! tail -4 /tmp/10k.data", " 10001 /tmp/10k.data\n9997, 999.7000000001588, 1999.4000000003175, 2999.100000000358\n9998, 999.8000000001588, 1999.6000000003176, 2999.400000000358\n9999, 999.9000000001588, 1999.8000000003176, 2999.700000000358\n10000, 1000.0000000001588, 2000.0000000003176, 3000.0000000003583\n" ] ], [ [ "Bingo, bingo, we can make a large number of quaternions.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]

hexsha: 4af41def367148ef1371a5f9676e91018b4b89f3
size: 189,611
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: AS_MNIST.ipynb
max_stars_repo_name: cedias/NNPy
max_stars_repo_head_hexsha: e101ae7631387fb6a59c0a045b5f02cb261516d7
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: AS_MNIST.ipynb
max_issues_repo_name: cedias/NNPy
max_issues_repo_head_hexsha: e101ae7631387fb6a59c0a045b5f02cb261516d7
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: AS_MNIST.ipynb
max_forks_repo_name: cedias/NNPy
max_forks_repo_head_hexsha: e101ae7631387fb6a59c0a045b5f02cb261516d7
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 52.742976
max_line_length: 79,209
alphanum_fraction: 0.711198
cells: [ [ [ "empty" ] ] ]
cell_types: [ "empty" ]
cell_type_groups: [ [ "empty" ] ]

hexsha: 4af4226e7f63ace7731dfc48bb3b6f9b51213e96
size: 117,657
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Heat-transfer-pipes.ipynb
max_stars_repo_name: CarlGriffinsteed/UVM-ME144-Heat-Transfer
max_stars_repo_head_hexsha: 9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca
max_stars_repo_licenses: [ "CC-BY-3.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Heat-transfer-pipes.ipynb
max_issues_repo_name: CarlGriffinsteed/UVM-ME144-Heat-Transfer
max_issues_repo_head_hexsha: 9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca
max_issues_repo_licenses: [ "CC-BY-3.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Heat-transfer-pipes.ipynb
max_forks_repo_name: CarlGriffinsteed/UVM-ME144-Heat-Transfer
max_forks_repo_head_hexsha: 9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca
max_forks_repo_licenses: [ "CC-BY-3.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 59.906823
max_line_length: 20,292
alphanum_fraction: 0.699219
cells:
[ [ [ "# Heat transfer for pipes", "_____no_output_____" ] ], [ [ "\"\"\"\nimporting the necessary libraries, do not modify\n\"\"\"\n%matplotlib inline \n\n\nfrom IPython.display import clear_output\n\nimport schemdraw as schem\nimport schemdraw.elements as e\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport scipy.constants as sc\n\nimport sympy as sym\n\n", "_____no_output_____" ] ], [ [ "<img src=\"figures/fig_08_08.jpg\" alt=\"my awesome sketch\" width=75% >\n<i>Fig. 1: Illustration of internal convection.</i>", "_____no_output_____" ], [ "The above sketch illustrates the focus of this notebook: How to quantify the heat transfer between a pipe, in which a fluid flows, and its surroundings. The heat transfer from the outer surface of the pipe to the outer flow is to defined in the previous chapter, external convection. In the following, this notebook establishes the tools necessary to solve the internal convection problem.", "_____no_output_____" ], [ "## Entry flow and fully developed internal flow", "_____no_output_____" ], [ "<img src=\"figures/fig_08_01.jpg\" alt=\"my awesome sketch\" width=100% >\n<i>Fig. 2: Pipe flow nomenclature.</i>", "_____no_output_____" ], [ "### Python module\n\nFor internal flow, the module is loaded as:", "_____no_output_____" ] ], [ [ "from Libraries import HT_internal_convection as intconv\n", "_____no_output_____" ] ], [ [ "As an example, consider the flow of water in a pipe of diameter $D=10$ cm, length $L=10$m. The water thermodynamic properties are estimated at $T_f=50^\\circ$C. The bulk velocity is $U_m=2$m/s.\n", "_____no_output_____" ] ], [ [ "from Libraries import thermodynamics as thermo\nT_f = 50 #C\nwaterflow = thermo.Fluid('water',T_f,\"C\")\n\nL_pipe = 10. #m\nD_pipe = 0.1 #m\nUm_pipe = 2 #m/s", "_____no_output_____" ], [ "?intconv.PipeFlow", "_____no_output_____" ], [ "pipe = intconv.PipeFlow(D= D_pipe, L=L_pipe,\n rho=waterflow.rho, nu=waterflow.nu, Um=Um_pipe)", "_____no_output_____" ] ], [ [ "<img src=\"figures/fig_08_03.jpg\" alt=\"my awesome sketch\" width=100% >\n<i> Fig. 3. Friction factor in pipe flow as a function of Re and relative surface roughness.</i>", "_____no_output_____" ], [ "A uniform flow entering a pipe (Fig. 2) first experiences streamwise variation of velocity to accommodate the wall boundary conditions. A boundary layer, of thickness $\\delta$, forms on the wall and grows until its edge reaches the pipe centerline. This region is the hydrodynamic entrance region. Beyond that point, the flow becomes fully developed, which means that\n<ul>\n<li> In the laminar regime, the velocity profile is only a function of $r$,</li>\n<li> In the turbulent regime, the <b>mean</b> velocity profile is only a function of $r$.</li>\n</ul>\nFriction drag or the force exerted by the flow onto the pipe wall governs the pressure gradient necessary to generate a desired flowrate. Calculation of the friction drag leads to the design of the mechanical force creating the pressure gradient. In fully developed (laminar or turbulent) regimes, the pressure gradient may be determined by\n<p class='alert alert-danger'>\n$$\n-\\frac{\\Delta\\overline{P}}{L}=f\\,\\frac{1}{D}\\,\\frac{\\rho U_m^2}{2}\n$$\n</p>\nwhere $D=2R$ and $L$ are the diameter and length of the pipe, respectively, and $f$ is the <b>friction factor</b>. 
The bulk velocity or average velocity is\n<p class='alert alert-info'>\n$$\nU_m=\\frac{\\dot{m}}{\\rho A_c}\n$$\n</p>\nwhere $\\dot{m}$ is the mass flux\n$$\n\\dot{m}=\\int_0^{2\\pi}\\int_0^R\\rho \\overline{u}(r)\\,r\\,dr d\\theta=2\\pi\\int_0^R\\rho \\overline{u}(r)\\,r\\,dr\n$$\nand $A_c=\\pi R^2$\nThe Reynolds number of the flow is based on the bulk velocity and pipe diameter:\n<p class='alert alert-danger'>\n$$\nRe_D=\\frac{\\rho U_mD}{\\mu}=\\frac{4\\dot{m}}{\\pi D\\mu}\n$$\n</p>\nThe friction factor in the laminar regime is rigorously derived:\n$$\nf = \\frac{64}{Re_D}\n$$\n</p>\nand is valid up to the critical Reynolds number $Re_{D,c}$, which in most pipe is around 2,000. Be aware that in certain research facilities, the flow can remain laminar for Reynolds numbers up to 10,000. The Reynolds 2,000 is not absolute, universal property, but is the best guess from most engineering applications.\n\nBeyond the critical Reynolds number, $f$ is a function of the roughness to diameter ratio $\\varepsilon=e/D$ (e is typically the standard deviation of the roughness height) and the Reynolds number. A trustworthy empirical correlation is the Colebrook formula:\n<p class='alert alert-danger'>\n$$\n\\frac{1}{\\sqrt{f}}=-2\\log_{10}\\left[\\frac{\\varepsilon}{3.7}+\\frac{2.51}{Re_D\\sqrt{f}}\\right]\n$$\n</p>\nwhich is solved below for a range of relative roughness $\\varepsilon$.\n\nOften there is a need to determine the pump or blower power $P$ necessary to move the flow at a prescribed pressure drop:\n<p class='alert alert-danger'>\n$$\nP=\\frac{\\dot{m}}{\\rho}\\Delta p= \\underbrace{(\\Delta p)A_c}_\\text{force}\\cdot U_m\n$$\n</p>\n\n### Example of functions\nGoing back to our library, let's explore how to determine some of the properties defined above:", "_____no_output_____" ], [ "Reynolds number:", "_____no_output_____" ] ], [ [ "print(\"Re= %1.2e\" %pipe.Re)", "Re= 3.61e+05\n" ] ], [ [ "Mass flow rate:", "_____no_output_____" ] ], [ [ "print(\"mass flowrate= %1.1f kg/s\" %pipe.mdot)", "mass flowrate= 15.5 kg/s\n" ] ], [ [ "Compute the friction factor:", "_____no_output_____" ] ], [ [ "# pipe.f_turbulent()\npipe.f_laminar()", "_____no_output_____" ], [ "print(\"f= %1.5f\" %pipe.f)", "f= 0.00018\n" ] ], [ [ "The mean pressure gradient is:", "_____no_output_____" ] ], [ [ "print(\"-dP/dx= %1.0f Pa/m\" %pipe.dPdx)", "-dP/dx= 35 Pa/m\n" ] ], [ [ "## Heat transfer by internal convection", "_____no_output_____" ], [ "The temperature is expected to vary both in the streamwise direction and in the radial direction. To reduce the complexity of the problem, we define the mean temperature as:\n$$\nT_m=\\frac{1}{\\dot{m}C_p}\\int_{A_c}\\rho\\,u\\,C_p\\, T\\,dA_c\n$$\nwhere $\\dot{m}$ is the mass flow rate, $rho$ and $C_p$ are the density and specific heat of the fluid and $A_c$ is the cross-sectional area of the pipe.\nThe local heat flux may be now expressed as:\n$$\nq_s''=h(T_s-T_m)\n$$\nwhere $h$ is the <b>local</b> convection heat transfer coefficient and $T_s$ is the surface temperature on the inner wall of the pipe. 
The variation of temperature in the <b>fully developed</b> flow can be shown to be governed by the following ODE:\n<p class='alert alert-info'>\n$$\n\\frac{dT_m}{dx}=\\frac{P}{\\dot{m}C_p}h(T_s-T_m)\n$$\n</p>\nwhere $P$ is the perimeter of the pipe.\nIf the local heat flux is maintained constant over the length of the pipe $L$, the total heat rate is \n<p class='alert alert-danger'>\n$$\nq_\\text{conv}=(PL)q_s''\\, \\text{$q_s''=$constant}\n$$\n</p>\nand the streamwise distribution of the mean temperature is linear:\n$$\nT_m(x)=T_{m,i}+\\frac{q_s''P}{\\dot{m}C_p}x,\\, \\text{$q_s''=$constant}\n$$\nFor the case of constant wall temperature $T_s$, the temperature distribution is the solution of the above ODE, thus of exponential nature. For practical applications, you most always need to compute the overall heat transfer and the outlet mean temperature $T_{m,o}$. The integration of the above ODE for $x=0$ to $x=L$ yields\n<p class='alert alert-danger'>\n$$\n\\frac{T_s-T_{m,o}}{T_s-T_{m,i}}=\\exp\\left(-\\frac{PL}{\\dot{m}C_p}\\overline{h}\\right),\\, \\text{$T_s=$constant}\n$$\n</p>\nwhere \n$$\n\\overline{h}=\\frac{1}{L}\\int_0^L h(x)dx\n$$\nIf you must compute the mean temperature at $x$ an integration from $0$ to $x$ yields\n<FONT FACE=\"courier\" style=\"color:blue\">T_mx_Ts_constant(T_s,T_mi,P,L,mdot,Cp,hbar,x)</FONT>\n<p class='alert alert-danger'>\n$$\n\\frac{T_s-T_{m}(x)}{T_s-T_{m,i}}=\\exp\\left(-\\frac{PL}{\\dot{m}C_p}\\overline{h}_x\\right),\\, \\text{$T_s=$constant}\n$$\n</p>\nwhere \n$$\n\\overline{h}_x=\\frac{1}{L}\\int_0^x h(x')dx'\n$$\nThe computation of the total heat transfer rate can be shown to write:\n<p class='alert alert-danger'>\n$$\nq_\\text{conv}=\\overline{h}(PL)\\Delta T_\\text{lm},\\, \\text{$T_s=$constant}\n$$\n</p>\nwith the log mean temperature\n<FONT FACE=\"courier\" style=\"color:blue\">log_mean_temperature(T_s,T_o,T_i)</FONT>\n<p class='alert alert-danger'>\n$$\n\\Delta T_\\text{lm}=\\cfrac{T_{m,i}-T_{m,o}}{\\ln\\left(\\cfrac{T_s-T_{m,o}}{T_s-T_{m,i}}\\right)}\n$$\n</p>\nIn many problem, $T_s$ is not defined but the outside ambient temperature $T_\\infty$, the thermal conductivity of the pipe is known. One needs to determine the total resistance of the system $R_\\text{tot}$, which requires calculating the heat transfer coefficient of the forced or natural convection, occuring on the outside of the pipe, the radiation coefficient if needed, the thermal resistance due by conduction within the pipe, which may include multiple components in the presence of insulation for example, and the internal convection heat transfer coefficient (to be defined below). 
In such cases, the variation of temperature between inlet and outlet becomes:\n<FONT FACE=\"courier\" style=\"color:blue\">T_mo_T_infty(T_infty,T_mi,P,L,mdot,Cp,R_tot)</FONT>\n<p class='alert alert-danger'>\n$$\n\\frac{T_\\infty-T_{m,o}}{T_\\infty-T_{m,i}}=\\exp\\left(-\\frac{1}{\\dot{m}C_pR_\\text{tot}}\\right)\n$$\n</p>\nand the total heat transfer rate is\n<p class='alert alert-danger'>\n$$\nq=\\frac{\\Delta T_\\text{lm}}{R_\\text{tot}}\n$$\n</p>\nThe equations derived in this cell enable:\n<ul>\n<li> The computation of the internal convection heat transfer coefficient if $T_{m,i}$ and $T_{m,o}$ are known.</li>\n<li> The computation of $T_{m,i}$ or $T_{m,o}$ if one is known and $\\overline{h}$ is known </li>\n<li> The computation of the required mass flux to achieve given $T_{m,i}$ and $T_{m,o}$, albeit through an iterative process</li>\n</ul>", "_____no_output_____" ], [ "## Correlations for convection heat transfer coefficients in internal pipe flows", "_____no_output_____" ], [ "Here we detailed only the correlations for fully developed flows. For laminar flows, the nusselt numbers are constant, thus the library <FONT FACE=\"courier\" style=\"color:blue\">HT_internal_convection</FONT> provides directly $\\overline{h}$:\n<FONT FACE=\"courier\" style=\"color:blue\">laminar_isoflux() </FONT>\n<p class='alert alert-danger'>\n$$\nNu=\\frac{hD}{k}=4.36,\\, \\text{$q_s''=$constant}\n$$\n</p>\n<FONT FACE=\"courier\" style=\"color:blue\">laminar_isothermal() </FONT>\n<p class='alert alert-danger'>\n$$\nNu=\\frac{hD}{k}=4.36,\\, \\text{$q_s''=$constant}\n$$\n</p>\n", "_____no_output_____" ] ], [ [ "pipe.laminar_isoflux()\nprint(\"Nu= %1.2f for laminar isoflux\" %pipe.Nu)\npipe.laminar_isothermal()\nprint(\"Nu= %1.2f for laminar isothermal\" %pipe.Nu)", "Nu= 4.36 for laminar isoflux\nNu= 3.66 for laminar isothermal\n" ] ], [ [ "In turbulent flows, there is a choice of correlations:\n<FONT FACE=\"courier\" style=\"color:blue\">Dittus_Boelter(Re,Pr,mode) </FONT>\n<p class='alert alert-danger'>\n$$\nNu=\\frac{hD}{k}=0.023Re^{4/5}Pr^n\n$$\n</p>\nwith mode being either <FONT FACE=\"courier\" style=\"color:blue\">'cooling'</FONT> or <FONT FACE=\"courier\" style=\"color:blue\">'heating'</FONT>", "_____no_output_____" ] ], [ [ "pipe.Dittus_Boelter(mode='cooling',Pr=waterflow.Pr)\nprint(\"Nu= %1.0f for cooling\" %pipe.Nu)\npipe.Dittus_Boelter(mode='heating',Pr=waterflow.Pr)\nprint(\"Nu= %1.0f for heating\" %pipe.Nu)", "Nu= 940 for cooling\nNu= 1067 for heating\n" ] ], [ [ "<FONT FACE=\"courier\" style=\"color:blue\">Sieder_Tate(Re,Pr,mu,mu_s) </FONT>\n<p class='alert alert-danger'>\n$$\nNu=\\frac{hD}{k}=0.027Re^{4/5}Pr^{1/3}\\left(\\cfrac{\\mu}{\\mu_s}\\right)^{0.14}\n$$", "_____no_output_____" ] ], [ [ "T_s = 75 #C\nwatersurface = thermo.Fluid('water',thermo.C2K(T_s))\npipe.Sieder_Tate(mu=waterflow.mu,mu_s=watersurface.mu,Pr=waterflow.Pr)\nprint(\"Nu= %1.0f\" %pipe.Nu)", "Nu= 1213\n" ] ], [ [ "<FONT FACE=\"courier\" style=\"color:blue\">Gnielinski(Re,Pr,f) </FONT>\n<p class='alert alert-danger'>\n$$\nNu=\\frac{hD}{k}=\\frac{(f/8)(Re-1000)Pr}{1+12.7(f/8)^{1/2}(Pr^{2/3}-1)}\n$$\n</p>", "_____no_output_____" ] ], [ [ "pipe.Gnielinski(f=pipe.f, Pr=waterflow.Pr)\nprint(\"Nu= %1.0f\" %pipe.Nu)", "Nu= 26\n" ] ], [ [ "<FONT FACE=\"courier\" style=\"color:blue\">Skupinski(Re,Pr) </FONT>\n<p class='alert alert-danger'>\n$$\nNu=\\frac{hD}{k}=4.82+0.0185\\left(Re\\,Pr\\right)^{0.827},\\, \\text{$q_s''=$constant}\n$$\n</p>", "_____no_output_____" ] ], [ [ "pipe.Skupinski(Pr=waterflow.Pr)\nprint(\"Nu= %1.0f\" 
%pipe.Nu)", "Nu= 2089\n" ] ], [ [ "<FONT FACE=\"courier\" style=\"color:blue\">Seban(Re,Pr) </FONT>\n<p class='alert alert-danger'>\n$$\nNu=\\frac{hD}{k}=5.0+0.025\\left(Re\\,Pr\\right)^{0.8},\\, \\text{$T_s=$constant}\n$$\n</p>", "_____no_output_____" ] ], [ [ "pipe.Seban(Pr=waterflow.Pr)\nprint(\"Nu= %1.0f\" %pipe.Nu)", "Nu= 1931\n" ] ], [ [ "## Natural convection around cylinder", "_____no_output_____" ], [ "<img src=\"figures/fig_09_08.jpg\" alt=\"my awesome sketch\" width=75% >\n<i>Fig. 4: Illustration of the flow induced by natural convection around a cylinder. Insert shows the angular distribution of the local Nu.</i>", "_____no_output_____" ], [ "In a fluid entirely at rest, a heated surface transfers its heat via pure conduction. Natural convection is the enhanced heat transfer between a body of fluid at rest (at infinity) and a heated surface through the creation of a convective flow driven by buoyancy forces. Fig. 4 illustrates a natural convection flow occuring around a cylinder. The fluid at the bottom of the cylinder $\\theta=0$ becomes buoyant through heat transfer between the cylinder and the fluid and rises along the surface of the cylinder. This process creates two boundary layers that merge at $\\theta = \\pi$ to create a vertical jet-like flow, also called a plume. Plumes are characteristic flows of natural convection, i.e. they are found irrespective of the geometry of the heated object.\n\nThe library is called in the following way:", "_____no_output_____" ] ], [ [ "from Libraries import HT_natural_convection as natconv", "_____no_output_____" ] ], [ [ "The non-dimensional numbers relevant to natural convection are:\nthe Grashof number\n<FONT FACE=\"courier\" style=\"color:blue\">Grashof(g,beta,DT,D,nu) </FONT>\n<p class='alert alert-danger'>\n$$\nGr = \\frac{g\\beta(\\Delta T)D^3}{\\nu^2}\n$$\n</p>\nand the Rayleigh number \n<FONT FACE=\"courier\" style=\"color:blue\">Rayleigh(g,beta,DT,D,nu,alpha) </FONT>\n<p class='alert alert-danger'>\n$$\nRa = Gr.Pr= \\frac{g\\beta(\\Delta T)D^3}{\\nu\\alpha}\n$$\n</p>\nwhere $g$ is the gravity magnitude, $\\beta$ is the volumetric thermal expansion coefficient at a given pressure $p$\n$$\n\\beta = -\\frac{1}{\\rho}\\left(\\frac{\\partial\\rho}{\\partial T}\\right)_p\n$$\n$\\Delta T$ is the absolute temperature difference between the heated surface temperature $T_s$ and the fluid temperature at infinity $T_\\infty$, $\\Delta T= \\vert T_s-T_\\infty\\vert$, $D$ is the characteristic length of the system (here the diameter) and $\\nu$ and $\\alpha$ are the kinematic viscosity and the thermal diffusivity, both of dimensions $\\text{m$^2$/s}$. \nNote that for the ideal gas law\n$$\np =\\rho \\frac{R}{M}T\\text{ or } \\rho = \\frac{p}{\\frac{R}{M}T}\n$$\nthus the expansion coefficient is \n<p class='alert alert-info'>\n$$\n\\beta = \\frac{1}{T}\\text{ for an ideal gas, $T$ in K}\n$$\n</p>\nFor a liquid, $\\beta$ must be interpolated from a table. 
All thermodynamics quantities involved are to be defined at the film temperature which is the arithmetic mean\n<p class='alert alert-info'>\n$$\nT_f=\\frac{T_s+T_\\infty}{2} \n$$\n</p>", "_____no_output_____" ] ], [ [ "#air\nT_infty = 10#C\nT_s = 50#C\nD = 0.1#m\nT_f = (T_s+T_infty)/2\nairflow = thermo.Fluid('air',T_f,\"C\")\nGr= natconv.Gr(beta=airflow.beta,D=D,DT=T_s-T_infty,nu=airflow.nu)\nprint('Natural convection Gr= %1.2e'%Gr)\nRa= natconv.Ra(alpha=airflow.alpha,beta=airflow.beta,D=D,DT=T_s-T_infty,nu=airflow.nu)\nprint('Natural convection Ra= %1.2e'%Ra)", "Natural convection Gr= 5.03e+06\nNatural convection Ra= 3.57e+06\n" ] ], [ [ "The Grashof and Rayleigh number quantify the ratio of buoyancy to viscous forces. When they are large enough, a convective flow sets in and the heat transfer increases in comparison to pure conduction. The Nusselt number, ratio of convective to conduction heat transfer (i.e. $>1$ in the presence of a convection flow) is typically a power law of the Rayleigh number. In the case of the flow around a cylinder with isothermal surface temperature, there are two correlations:\n<FONT FACE=\"courier\" style=\"color:blue\">Morgan(Ra) </FONT>\n<p class='alert alert-danger'>\n$$\n\\overline{Nu}=\\frac{\\overline{h}D}{k}=C\\,Ra^n\n$$\n</p>\n<FONT FACE=\"courier\" style=\"color:blue\">Churchill-Chu(Ra,Pr) </FONT>\n<p class='alert alert-danger'>\n$$\n\\overline{Nu}=\\frac{\\overline{h}D}{k}=\\left[0.60+\\frac{0.387Ra^{1/6}}{\\left[1+\\left(\\frac{0.559}\n{Pr}\\right)^{9/16}\\right]^{8/27}}\n\\right]^2 \n$$\n</p>\nBoth are valid for $Ra\\leq10^{12}$. The Nusselt is averaged over the perimeter of the cylinder to account for the angular variation of heat transfer discussed earlier. The heat transfer from natural convection from a heated cylinder of diameter $D$ and length $L$ is\n<p class='alert alert-info'>\n$$\nq=\\overline{h}(\\pi DL)(T_s-T_\\infty)=\\frac{1}{R_\\text{th,conv}}(T_s-T_\\infty)\n$$\n</p>\nwhere $R_\\text{th,conv}$ may computed with <FONT FACE=\"courier\" style=\"color:blue\">R_th_convection(h,A)</FONT>\n", "_____no_output_____" ] ], [ [ "airnatconv = natconv.HorizontalCylinder(correlation='Morgan',Ra=Ra)\nprint(\"Morgan correlation: Nu= %1.2f\" %airnatconv.Nu)\nairnatconv = natconv.HorizontalCylinder(correlation='Churchill-Chu',Ra=Ra,Pr=airflow.Pr)\nprint(\"Churchill-Chu correlation: Nu= %1.2f\" %airnatconv.Nu)", "Morgan correlation: Nu= 20.87\nChurchill-Chu correlation: Nu= 20.91\n" ], [ "font = {'family' : 'serif',\n #'color' : 'black',\n 'weight' : 'normal',\n 'size' : 14,\n }\n\nfrom matplotlib.ticker import FormatStrFormatter\nplt.rc('font', **font)\nN = 100\nRa = np.logspace(5,12,N)\nNu_Morgan = np.zeros(N)\nNu_ChurchillChu = np.zeros(N)\nPr = 1.0\nfor i in range(N):\n flow = natconv.HorizontalCylinder(correlation='Morgan',Ra=Ra[i])\n Nu_Morgan[i] = flow.Nu\n flow = natconv.HorizontalCylinder(correlation='Churchill-Chu',Ra=Ra[i],Pr=Pr)\n Nu_ChurchillChu[i] = flow.Nu\n\nplt.loglog(Ra,Nu_Morgan, label = r\"Morgan\",lw = 2)\nplt.loglog(Ra,Nu_ChurchillChu, label = r\"Churchill-Chu\", lw= 2)\nplt.xlabel(r\"$Ra$\")\nplt.ylabel(r\"$Nu$\")\nplt.legend(loc=3, bbox_to_anchor=[0., 1.01], ncol=2, shadow=False, fancybox=True)\nplt.show()", "_____no_output_____" ], [ "plt.plot(Ra,np.abs(Nu_Morgan-Nu_ChurchillChu)/Nu_ChurchillChu,lw = 2)\nplt.xlabel(r\"$Ra$\")\nplt.ylabel(r\"$\\vert Nu_{M}-Nu_{CC}\\vert/Nu_{CC}$\")\nplt.show()", "_____no_output_____" ] ], [ [ "## Assignment", "_____no_output_____" ], [ "<ol>\n<li> Read this entire notebook. 
Using the textbook, add restrictions and range of validity for the above correlations when applicable. Add the entry length Nu correlation for laminar flow</li>\n<li> Add a section on entrance flow</li>\n<li> How should the entrance flow region be treated in turbulent flows?</li>\n<li>Solve 8.31, 8.36, 8.43</li>\n</ol>", "_____no_output_____" ], [ "### 8.31", "_____no_output_____" ], [ "<img src=\"figures/probun_08_07.jpg\" alt=\"my awesome sketch\" width=50% >\nTo cool a summer home without using a vapor-compression refrigeration cycle, air is routed through a plastic pipe ($k=0.15\\text{ W/m.K}$, $D_i=0.15\\text{ m}$, $D_o=0.17\\text{ m}$) that is submerged in an adjoini\nng body of water. The water temperature is nominally at $T_\\infty= 17^\\circ\\text{C}$, and a convection coefficient of $h_o\\approx 1500\\text{ W/m$^2$. K}$ is maintained at the outer surface of the pipe.\n\nIf air from the home enters the pipe at a temperature of $T_{m,i}= 29^\\circ\\text{C}$ and a volumetric flow rate of $\\dot{\\forall}_i= 0.025\\text{ m$^3$/s}$, what pipe length $L$ is needed to provide a discharge temperature of $T_{m,o}=21^\\circ\\text{C}$? What is the fan power required\nto move the air through this length of pipe if its inner surface is smooth?\n\n#### Solution\n\nThe length of the pipe is the given by solving \n$$\n\\frac{T_\\infty-T_{m,o}}{T_\\infty-T_{m,i}}=\\exp\\left(-\\frac{1}{\\dot{m}C_pR_\\text{tot}}\\right)\n$$\nfor the target outlet temperature $T_{m,o}$. First, assuming 1D, steady convection on the outside of the pipe, we must solve for $R'_{tot}$. Since\n$$\nR_{tot}=\\frac{R'_{tot}}{L}\n$$\nthe pipe length is\n$$\nL=-\\dot{m}C_pR'_\\text{tot}\\ln\\frac{T_\\infty-T_{m,o}}{T_\\infty-T_{m,i}}\n$$", "_____no_output_____" ] ], [ [ "from Libraries import HT_thermal_resistance as res\n\nRp = []\nRp.append(res.Resistance(\"$R'_{conv,i}$\",\"W/m\"))\nRp.append(res.Resistance(\"$R'_{cond,pipe}$\",\"W/m\"))\nRp.append(res.Resistance(\"$R'_{conv,o}$\",\"W/m\"))\n\nd = schem.Drawing()\nd.add(e.DOT, label = r\"$T_{m,i}$\")\nd.add(e.RES, d = 'right', label = Rp[0].name)\nd.add(e.DOT, label = r\"$T_{s,i}$\")\nR1 = d.add(e.RES, d = 'right', label = Rp[1].name)\nd.add(e.DOT, label = r\"$T_{s,o}$\")\nd.add(e.RES, d='right', label = Rp[2].name)\nd.add(e.DOT, label=\"$T_\\infty$\")\nL1 = d.add(e.LINE, toplabel = \"$q'$\", endpts = [[-2.25, 0], [-0.25, 0]])\nd.labelI(L1, arrowofst = 0)\nd.draw()", "_____no_output_____" ], [ "from Libraries import thermodynamics as thermo\nfrom Libraries import HT_internal_convection as intconv\n\nk_pipe = 0.15 #W/m.K\nDi = 0.15 #m\nDo = 0.17 #m\nT_infty = 17. 
#C\nh_o = 1500 #W/m^2.K\nT_mi = 29 #C\nT_mo = 21 #C\n\nQdot = 0.025 #m^3/s\nT_m = (T_mi + T_mo)/2\n\nairi = thermo.Fluid('air',T_mi,\"C\")\nairm = thermo.Fluid('air', T_m,\"C\")\nairflow = intconv.PipeFlow(D=Di, L = 1., mdot = airi.rho*Qdot, nu = airm.nu, rho = airi.rho)\nairflow.Dittus_Boelter(mode='cooling',Pr=airm.Pr)\nprint(\"Re=%.0f\" %airflow.Re)\nprint(\"Nu=%.0f\" %airflow.Nu)\nhbar_i = airflow.Nu*airm.k/Di\nprint(\"hbar,i=%.2f W/m^2.K\" %hbar_i)\nRp[0].convection(hbar_i,np.pi*Di)\nRp[1].cond_cylinder(k = k_pipe,ra=Di,rb=Do,L=1)\nRp[2].convection(h_o,A=np.pi*Do)\n\nRptot = 0\nfor i in range(3):\n Rptot += Rp[i].R\n \n# def L_given_other_params(T_infty,T_mo,T_mi,mdot,Cp,Rptot):\n# return -mdot*Cp*Rptot*np.log((T_infty -T_mo)/(T_infty - T_mi))\n\nL = intconv.L_given_other_params(T_infty,T_mo,T_mi,airi.rho*Qdot,airm.Cp,Rptot)\nprint(\"Length needed to achieve T_mo=%.0f C is %.1f m\" %(T_mo,L))", "Re=13625\nNu=42\nhbar,i=7.32 W/m^2.K\nLength needed to achieve T_mo=21 C is 13.7 m\n" ], [ "from Libraries import HT_natural_convection as natconv\nT_f = (T_infty + T_m)/2\nwater = thermo.Fluid(\"water\",T_f,\"C\")\nRa = natconv.Ra(beta=water.beta,DT=T_m - T_infty, D=Do,nu=water.nu,alpha = water.alpha)\nprint(\"Ra=%.2e\" %(Ra))\nwaterconv = natconv.HorizontalCylinder(\"Churchill-Chu\",Ra,water.Pr)\nprint(\"Nu=%.0f\" %waterconv.Nu)\nprint(\"For natural convection, h_o=%.0f W/m^2.K\" %(waterconv.Nu*water.k/Do))\n# waterforced = extconv.CircularCylinder()", "Ra=5.94e+08\nNu=124\nFor natural convection, h_o=436 W/m^2.K\n" ] ], [ [ "This little exercise demonstrates that natural convection does not achieve the cooling capacity assumed in the problem ($h_o=1500\\mathrm{W}/\\mathrm{m}^2.K$)", "_____no_output_____" ] ], [ [ "from Libraries import HT_natural_convection as natconv\n?natconv.HorizontalCylinder", "_____no_output_____" ] ], [ [ "### 8.36", "_____no_output_____" ], [ "\nHot water at mean temperature $T_m=50\\text{$^\\circ$C}$ is routed from one building in which it is generated to an adjoining building in which it is used for space heating. Transfer between the buildings occurs in a steel pipe ($k=60\\text{ W/m.K}$) of $100 \\text{ mm}$ outside diameter and 8-mm wall thickness. 
During the winter, representative environmental conditions involve air at $T_\\infty= -5^\\circ \\mathrm{C}$ and $V_\\infty=3\\text{ m/s}$ in cross flow over the pipe.\nUsing the Churchill Bernstein and Dittus Boehler correlations, calculate the total heat transfer rate <b>per unit length</b> $q'$, the daily energy cost $Q'=q'\\times 24\\text{ h/d}$ per meter and the cost per day and per meter assuming an electricity cost of $\\text{\\$}0.05\\text{/kW.h}$.\n\n**FYI:** This is the Churchill-Bernstein correlation which you can call with the `from Libraries import HT_external_convection as extconv` `airflow=extconv.CircularCylinder('Churchill-Bernstein',Re,Pr)`\n$$\nNu_D = \\frac{hD}{k_f}=0.3+\\frac{0.62Re_D^{1/2}Pr^{1/3}}{\\left[1+\\left(\\frac{0.4}{Pr}\\right)^{2/3}\\right]^{1/4}}\\left[1+\\left(\\frac{Re_D}{282,000}\\right)^{5/8}\\right]^{4/5}\n$$\n\n<img src=\"figures/PB8.36-sketch.png\" alt=\"my awesome sketch\" width=100% >", "_____no_output_____" ], [ "The heat transfer problem in any cross sectional area of the pipe is \n$$\nq' = \\frac{T_m - T _\\infty}{R'_{tot}}\n$$\n\nwith\n\n$$\nR'_{tot}= R'_{conv,int} + R'_{cond,p}+R'_{conv,ext}\n$$\n\nWe must find the convection coefficients $h_{int}$ and $h_{ext}$, using the appropriate correlations.", "_____no_output_____" ] ], [ [ "Tm = 50 #C\nUm = 0.5 #m/s\n\nDi = 0.084 #m\nDo = 0.1 #m\nkp = 60 #W/m.K\n\nT_infty = -5 #C\nU_infty = 3 #m/s\n\nfrom Libraries import HT_thermal_resistance as res\nRp = []\nRp.append(res.Resistance(\"$R'_{conv,int}\",\"W/m\"))\nRp.append(res.Resistance(\"$R'_{cond,p}\",\"W/m\"))\nRp.append(res.Resistance(\"$R'_{conv,ext}\",\"W/m\"))\n\n# internal convection\nfrom Libraries import thermodynamics as thermo\nfrom Libraries import HT_internal_convection as intconv\n\nwater = thermo.Fluid('water',Tm,\"C\")\npipeflow = intconv.PipeFlow(D=Di,L=1,Um=Um,nu=water.nu)\nprint(\"Re_D_pipe= %.0f\" %pipeflow.Re)\npipeflow.Dittus_Boelter(mode='cooling',Pr=water.Pr)\nhint = pipeflow.Nu*water.k/Di\nprint(\"hint=%.1f W/m^2.K\" %hint)\nRp[0].convection(h=hint,A=np.pi*Di)\n\n# conduction\nRp[1].cond_cylinder(k=kp,ra=Di,rb=Do,L=1.)\n\n# external convection\n\n#guess for surface temperature at D=Do\nT_so = 49.21 #C\nT_f = (T_infty + T_so)/2\nair = thermo.Fluid('air',T_f,\"C\")\nRe_air = U_infty * Do/air.nu\n# print(Re_air)\nfrom Libraries import HT_external_convection as extconv\nairflow = extconv.CircularCylinder('Churchill-Bernstein',Re_air,air.Pr)\nhext = airflow.Nu*air.k/Do\nprint(\"hext=%.1f W/m^2.K\" %hext)\nRp[2].convection(h=hext,A=np.pi*Do)\n\n# total thermal resistance\nRptot = 0.\nfor i in range(3):\n Rptot += Rp[i].R\n \nqp = (Tm - T_infty)/Rptot\n\nprint(\"Heat rate per unit length: %.0f W/m\" %qp)\n\n#New estimate of T_so\nT_so = T_infty + qp*Rp[2].R\nprint(\"New T_so = %.2f C\" %T_so)", "Re_D_pipe= 75882\nhint=2067.0 W/m^2.K\nhext=20.3 W/m^2.K\nHeat rate per unit length: 345 W/m\nNew T_so = 49.21 C\n" ], [ "Tm = 50 #C\nUm = 0.5 #m/s\n\nDi = 0.084 #m\nDo = 0.1 #m\nkp = 60 #W/m.K\n\nT_infty = -5 #C\nU_infty = 3 #m/s\n\nfrom Libraries import HT_thermal_resistance as res\nRp = []\nRp.append(res.Resistance(\"$R'_{conv,int}\",\"W/m\"))\nRp.append(res.Resistance(\"$R'_{cond,p}\",\"W/m\"))\nRp.append(res.Resistance(\"$R'_{conv,ext}\",\"W/m\"))\n\n# internal convection\nfrom Libraries import thermodynamics as thermo\nfrom Libraries import HT_internal_convection as intconv\n\nwater = thermo.Fluid('water',Tm,\"C\")\npipeflow = intconv.PipeFlow(D=Di,L=1,Um=Um,nu=water.nu)\nprint(\"Re_D_pipe= %.0f\" 
%pipeflow.Re)\npipeflow.Dittus_Boelter(mode='cooling',Pr=water.Pr)\nhint = pipeflow.Nu*water.k/Di\nprint(\"hint=%.1f W/m^2.K\" %hint)\nRp[0].convection(h=hint,A=np.pi*Di)\n\n# conduction\nRp[1].cond_cylinder(k=kp,ra=Di,rb=Do,L=1.)\n\n# external convection\n\n# initial guess for surface temperature at D=Do\nT_so = 0. #C\nerrT = np.inf\niteration = 0\nwhile (errT > 1.0) and (iteration < 10):\n iteration += 1\n T_so_old = T_so\n T_f = (T_infty + T_so)/2\n air = thermo.Fluid('air',T_f,\"C\")\n Re_air = U_infty * Do/air.nu\n # print(Re_air)\n from Libraries import HT_external_convection as extconv\n airflow = extconv.CircularCylinder('Churchill-Bernstein',Re_air,air.Pr)\n hext = airflow.Nu*air.k/Do\n print(\"hext=%.1f W/m^2.K\" %hext)\n Rp[2].convection(h=hext,A=np.pi*Do)\n\n # total thermal resistance\n Rptot = 0.\n for i in range(3):\n Rptot += Rp[i].R\n\n qp = (Tm - T_infty)/Rptot\n\n print(\"Heat rate per unit length: %.0f W/m\" %qp)\n\n #New estimate of T_so\n T_so = T_infty + qp*Rp[2].R\n print(\"New T_so = %.2f C\" %T_so)\n errT = abs(T_so - T_so_old)\n print(\"errT=%.3e\" %errT)", "Re_D_pipe= 75882\nhint=2067.0 W/m^2.K\nhext=20.7 W/m^2.K\nHeat rate per unit length: 352 W/m\nNew T_so = 49.19 C\nerrT=4.919e+01\nhext=20.3 W/m^2.K\nHeat rate per unit length: 345 W/m\nNew T_so = 49.21 C\nerrT=1.580e-02\n" ], [ "Qp = qp*1e-3*24\nprint(\"Daily energy loss: %.3f kW.h/d/m\" %Qp)\nCp = Qp * 0.05\nprint(\"Cost: $%.3f /m.d \" %Cp)", "Daily energy loss: 8.289 kW.h/d/m\nCost: $0.414 /m.d \n" ] ], [ [ "### 8.42", "_____no_output_____" ], [ "Atmospheric air enters a $10\\text{ m}$-long, $150\\text{ mm}$-diameter uninsulated heating duct at $60\\text{$^\\circ$C}$ and $0.04\\text{ kg/s}$. The duct surface temperature is approximately constant at $Ts=15\\text{$^\\circ$C}$.\n\n(a) What are the outlet air temperature, the heat rate q, and pressure drop $\\Delta p$ for these conditions?\n\n(b) To illustrate the tradeoff between heat transfer rate and pressure drop considerations, calculate $q$ and $\\Delta p$ for diameters in the range from $0.1$ to $0.2\\text{ m}$. In your analysis, maintain the total surface area,\n$A_s=\\pi DL$, at the value computed for part (a). 
Plot $q$, $\\Delta p$, and $L$ as a function of the duct diameter.", "_____no_output_____" ] ], [ [ "Tm = 50 #C\nUm = 0.5 #m/s\n\nDi = 0.084 #m\nDo = 0.1 #m\nkp = 60 #W/m.K\n\nT_infty = -5 #C\nU_infty = 3 #m/s\n\nfrom Libraries import HT_thermal_resistance as res\nRp = []\nRp.append(res.Resistance(\"$R'_{conv,int}\",\"W/m\"))\nRp.append(res.Resistance(\"$R'_{cond,p}\",\"W/m\"))\nRp.append(res.Resistance(\"$R'_{conv,ext}\",\"W/m\"))\n\n# internal conduction\nfrom Libraries import HT_internal_convection as intconv\nwater = thermo.Fluid('water',Tm,\"C\")\n\npipeflow = intconv.PipeFlow(D=Di,L=1,Um=Um,nu=water.nu)\nprint(pipeflow.Re,water.Pr)\npipeflow.Dittus_Boelter(mode='cooling',Pr=water.Pr)\nprint(pipeflow.Nu*water.k/Di)\nRp[0].convection(h=pipeflow.Nu*water.k/Di,A=np.pi*Di)\n\n\n#conduction\nRp[1].cond_cylinder(k=kp,ra=Di,rb=Do)\n\n# external convection\nfrom Libraries import HT_external_convection as extconv\nT_so = 49.2\nT_fo = (T_infty + T_so)/2\nair = thermo.Fluid('air',T_fo,\"C\")\nRe_air = U_infty*Do/air.nu\nairflow = extconv.CircularCylinder('Churchill-Bernstein',Re_air,air.Pr)\nRp[2].convection(airflow.Nu*air.k/Do,np.pi*Do)\nprint(airflow.Nu*air.k/Do)\nRptot = 0\nfor i in range(3):\n Rptot += Rp[i].R\n print(Rp[i].R)\nqp = (Tm - T_infty)/Rptot\nprint(qp)\nT_so_1 = T_infty + qp*Rp[2].R\nprint(T_so_1)", "75882.1466843155 3.552917388218273\n2067.0442822873297\n20.281563814080407\n0.0018332473278011479\n0.0004624867234817717\n0.15694543532328858\n345.38806902772404\n49.2070808490262\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
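The natural-convection discussion in the record above evaluates the Morgan and Churchill–Chu correlations through the notebook's `thermo` and `HT_natural_convection` helper modules. As a rough standalone cross-check of the Churchill–Chu formula quoted in its markdown, the same number can be reproduced with plain NumPy; the air properties below (taken at the film temperature of 30 °C) are approximate textbook values, not outputs of those modules.

```python
import numpy as np

# Illustrative air properties at T_f = (50 + 10)/2 = 30 C (~303 K); rough textbook values.
g, D, dT = 9.81, 0.1, 40.0                 # m/s^2, m, K
T_f = 303.15                               # film temperature, K
beta = 1.0 / T_f                           # ideal-gas expansion coefficient, 1/K
nu, alpha, k, Pr = 1.6e-5, 2.25e-5, 0.0265, 0.71

Ra = g * beta * dT * D**3 / (nu * alpha)   # Rayleigh number

# Churchill-Chu correlation for a horizontal cylinder (valid for Ra <= 1e12)
Nu = (0.60 + 0.387 * Ra**(1/6) / (1 + (0.559 / Pr)**(9/16))**(8/27))**2
h = Nu * k / D                             # average convection coefficient, W/m^2.K

print(f"Ra = {Ra:.2e}, Nu = {Nu:.1f}, h = {h:.1f} W/m^2.K")
```

With these assumed property values the result lands near Ra ≈ 3.6e6 and Nu ≈ 21, consistent with the figures printed in the record.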
4af4256c29c4cb3c8224dc591092b0b0856b5410
10,111
ipynb
Jupyter Notebook
algorithms/ConjugateGrad_BBGrad.ipynb
shenlong95/OptimizationDemos
008dcb1b846acf8aae2c52c7795a522519c40457
[ "MIT" ]
1
2021-08-13T09:45:40.000Z
2021-08-13T09:45:40.000Z
algorithms/ConjugateGrad_BBGrad.ipynb
shenlong95/OptimizationDemos
008dcb1b846acf8aae2c52c7795a522519c40457
[ "MIT" ]
null
null
null
algorithms/ConjugateGrad_BBGrad.ipynb
shenlong95/OptimizationDemos
008dcb1b846acf8aae2c52c7795a522519c40457
[ "MIT" ]
null
null
null
33.703333
118
0.546929
[ [ [ "### Dataset\nLets Load the dataset. We shall use the following datasets:\nFeatures are in: \"sido0_train.mat\"\nLabels are in: \"sido0_train.targets\"", "_____no_output_____" ] ], [ [ "from scipy.io import loadmat\nimport numpy as np\n\nX = loadmat(r\"/Users/rkiyer/Desktop/teaching/CS6301/jupyter/data/sido0_matlab/sido0_train.mat\")\ny = np.loadtxt(r\"/Users/rkiyer/Desktop/teaching/CS6301/jupyter/data/sido0_matlab/sido0_train.targets\")\n\n# Statistics of the Dense Format of X\nX = X['X'].todense()\nprint(X.shape)", "(12678, 4932)\n" ] ], [ [ "### Logistic Regression Definition\nLets use the Logistic Regression definition we previously used\n", "_____no_output_____" ] ], [ [ "def LogisticLoss(w, X, y, lam):\n # Computes the cost function for all the training samples\n m = X.shape[0]\n Xw = np.dot(X,w)\n yT = y.reshape(-1,1)\n yXw = np.multiply(yT,Xw)\n f = np.sum(np.logaddexp(0,-yXw)) + 0.5*lam*np.sum(np.multiply(w,w))\n gMul = 1/(1 + np.exp(yXw))\n ymul = -1*np.multiply(yT, gMul)\n g = np.dot(ymul.reshape(1,-1),X) + lam*w.reshape(1,-1)\n g = g.reshape(-1,1)\n return [f, g] ", "_____no_output_____" ] ], [ [ "### Barzelia Borwein step length\nLets invoke BB Step Length Gradient Descent", "_____no_output_____" ] ], [ [ "from numpy import linalg as LA\n\ndef gdBB(funObj,w,maxEvals,alpha,gamma,X,y,lam, verbosity, freq):\n [f,g] = funObj(w,X,y,lam)\n funEvals = 1\n funVals = []\n f_old = f\n g_old = g\n funVals.append(f)\n numBackTrack = 0\n while(1):\n wp = w - alpha*g\n [fp,gp] = funObj(wp,X,y,lam)\n funVals.append(f)\n funEvals = funEvals+1\n backtrack = 0\n if funEvals > 2:\n g_diff = g - g_old\n alpha = -alpha*np.dot(g_old.T, g_diff)[0,0]/np.dot(g_diff.T, g_diff)[0,0]\n while fp > f - gamma*alpha*np.dot(g.T, g):\n alpha = alpha*alpha*np.dot(g.T, g)[0,0]/(2*(fp + np.dot(g.T, g)[0,0]*alpha - f))\n wp = w - alpha*g\n [fp,gp] = funObj(wp,X,y,lam)\n funVals.append(f)\n funEvals = funEvals+1\n numBackTrack = numBackTrack + 1\n f_old = f\n g_old = g\n w = wp\n f = fp\n g = gp\n optCond = LA.norm(g, np.inf)\n if ((verbosity > 0) and (funEvals % freq == 0)):\n print(funEvals,alpha,f,optCond)\n if (optCond < 1e-2):\n break\n if (funEvals >= maxEvals):\n break\n return (funVals,numBackTrack)", "_____no_output_____" ], [ "[nSamples,nVars] = X.shape\nw = np.zeros((nVars,1))\n(funV1,numBackTrack) = gdBB(LogisticLoss,w,250,1,1e-4,X,y,1,1,10)\nprint(len(funV1))\nprint(\"Number of Backtrackings = \" + str(numBackTrack))", "/usr/local/lib/python3.7/site-packages/ipykernel_launcher.py:8: RuntimeWarning: overflow encountered in exp\n \n" ] ], [ [ "### Conjugate Gradient Descent\nNonlinear Conjugate Gradient Descent", "_____no_output_____" ] ], [ [ "from numpy import linalg as LA\n\ndef gdCG(funObj,w,maxEvals,alpha,gamma,X,y,lam, verbosity, freq):\n [f,g] = funObj(w,X,y,lam)\n funEvals = 1\n funVals = []\n f_old = f\n g_old = g\n funVals.append(f)\n numBackTrack = 0\n d = g\n while(1):\n wp = w - alpha*d\n [fp,gp] = funObj(wp,X,y,lam)\n funVals.append(f)\n funEvals = funEvals+1\n backtrack = 0\n if funEvals > 2:\n alpha = min(1,2*(f_old - f)/np.dot(g.T, g)[0,0])\n beta = np.dot(g.T, g)[0,0]/np.dot(g_old.T, g_old)[0,0]\n d = g + beta*d\n else:\n d = g\n while fp > f - gamma*alpha*np.dot(g.T, d)[0,0]:\n alpha = alpha*alpha*np.dot(g.T, d)[0,0]/(2*(fp + np.dot(g.T, d)[0,0]*alpha - f))\n wp = w - alpha*d\n [fp,gp] = funObj(wp,X,y,lam)\n funVals.append(f)\n funEvals = funEvals+1\n numBackTrack = numBackTrack + 1\n f_old = f\n g_old = g\n w = wp\n f = fp\n g = gp\n optCond = LA.norm(g, np.inf)\n if ((verbosity 
> 0) and (funEvals % freq == 0)):\n print(funEvals,alpha,f,optCond)\n if (optCond < 1e-2):\n break\n if (funEvals >= maxEvals):\n break\n return (funVals,numBackTrack)", "_____no_output_____" ], [ "[nSamples,nVars] = X.shape\nw = np.zeros((nVars,1))\n(funV1,numBackTrack) = gdCG(LogisticLoss,w,250,1,1e-4,X,y,1,1,10)\nprint(len(funV1))\nprint(\"Number of Backtrackings = \" + str(numBackTrack))", "/usr/local/lib/python3.7/site-packages/ipykernel_launcher.py:8: RuntimeWarning: overflow encountered in exp\n \n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
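The optimizers in the record above wrap a Barzilai–Borwein step length and a Fletcher–Reeves-style β inside backtracking line searches for logistic regression on the SIDO data. Stripped of the line search and the dataset, the BB update itself is only a few lines; the quadratic below is an invented toy problem used purely to illustrate the step-length rule, not part of the original notebook.

```python
import numpy as np

# Toy quadratic f(w) = 0.5 w^T A w - b^T w with gradient A w - b (A and b invented for illustration).
A = np.array([[3.0, 0.5], [0.5, 1.0]])
b = np.array([1.0, -2.0])
grad = lambda w: A @ w - b

w, g, alpha = np.zeros(2), grad(np.zeros(2)), 1.0
for k in range(100):
    w_new = w - alpha * g
    g_new = grad(w_new)
    s, y = w_new - w, g_new - g
    alpha = (s @ y) / (y @ y)          # Barzilai-Borwein step from the last two iterates
    w, g = w_new, g_new
    if np.linalg.norm(g, np.inf) < 1e-10:
        break

print(k, w, np.linalg.solve(A, b))     # iterate vs. exact minimizer A^{-1} b
```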
4af42b5459e0e61b66ff3b1156d4d53e9088a1f6
19,733
ipynb
Jupyter Notebook
Notebooks/Obsolete_Maybe/singleton_networks.ipynb
alreich/qualreas
e1f94fe79a9043cfc6ae83e04ff03aaa608f373f
[ "MIT" ]
11
2021-03-07T12:20:59.000Z
2021-12-02T06:15:17.000Z
Notebooks/Obsolete_Maybe/singleton_networks.ipynb
alreich/qualreas
e1f94fe79a9043cfc6ae83e04ff03aaa608f373f
[ "MIT" ]
null
null
null
Notebooks/Obsolete_Maybe/singleton_networks.ipynb
alreich/qualreas
e1f94fe79a9043cfc6ae83e04ff03aaa608f373f
[ "MIT" ]
3
2021-11-22T08:50:50.000Z
2022-01-18T09:18:38.000Z
22.998834
286
0.451477
[ [ [ "# Singleton Networks", "_____no_output_____" ] ], [ [ "import qualreas as qr\nimport os\nimport copy", "_____no_output_____" ], [ "qr_path = os.path.join(os.getenv('PYPROJ'), 'qualreas')\nalg_dir = os.path.join(qr_path, \"Algebras\")", "_____no_output_____" ] ], [ [ "## Make a Test Network", "_____no_output_____" ] ], [ [ "test1_net_dict = {\n 'name': 'Network Copy Test #1',\n 'algebra': 'Extended_Linear_Interval_Algebra',\n 'description': 'Testing/Developing network copy functionality',\n 'nodes': [\n ['U', ['ProperInterval', 'Point']],\n ['V', ['ProperInterval', 'Point']],\n ['W', ['ProperInterval']],\n ['X', ['Point']]\n ],\n 'edges': [\n ['U', 'V', 'B'],\n ['U', 'W', 'M'],\n ['W', 'V', 'O'],\n ['X', 'W', 'D']\n ]\n}", "_____no_output_____" ], [ "test2_net_dict = {\n 'name': 'Network Copy Test #2',\n 'algebra': 'Extended_Linear_Interval_Algebra',\n 'description': 'Testing/Developing network copy functionality',\n 'nodes': [\n ['X', ['ProperInterval']],\n ['Y', ['ProperInterval']],\n ['Z', ['ProperInterval']]\n ],\n 'edges': [\n ['X', 'Y', 'B'],\n ['Y', 'Z', 'B']\n ]\n}", "_____no_output_____" ], [ "test1_net = qr.Network(algebra_path=alg_dir, network_dict=test1_net_dict)", "_____no_output_____" ], [ "test2_net = qr.Network(algebra_path=alg_dir, network_dict=test2_net_dict)", "_____no_output_____" ], [ "test1_net.propagate()\ntest1_net.summary(show_all=False)", "\nNetwork Copy Test #1: 4 nodes, 16 edges\n Algebra: Extended_Linear_Interval_Algebra\n U:['ProperInterval']\n => U: E\n => V: B\n => W: M\n => X: B\n V:['ProperInterval']\n => V: E\n => W: OI\n => X: BI|DI|PSI\n W:['ProperInterval']\n => W: E\n => X: DI\n X:['Point']\n => X: PE\n" ], [ "test2_net.propagate()\ntest2_net.summary(show_all=False)", "\nNetwork Copy Test #2: 3 nodes, 9 edges\n Algebra: Extended_Linear_Interval_Algebra\n X:['ProperInterval']\n => X: E\n => Y: B\n => Z: B\n Y:['ProperInterval']\n => Y: E\n => Z: B\n Z:['ProperInterval']\n => Z: E\n" ] ], [ [ "## Test Changing Constraint on an Edge", "_____no_output_____" ], [ "Look at all the edge contraints", "_____no_output_____" ] ], [ [ "for eg in test1_net.edges:\n print(test1_net.edges[eg[0], eg[1]]['constraint'])", "E\nB\nM\nB\nE\nBI\nOI\nBI|DI|PSI\nE\nMI\nO\nDI\nPE\nD\nBI\nB|D|PS\n" ] ], [ [ "Grab the Head (src) and Tail (tgt) of the 3rd edge, above.", "_____no_output_____" ] ], [ [ "src, tgt = list(test1_net.edges)[2]\ntest1_net.edges[src,tgt]['constraint']", "_____no_output_____" ] ], [ [ "Change the constraint and look at the result on the edge & its converse.", "_____no_output_____" ] ], [ [ "test1_net.set_constraint(src, tgt, test1_net.algebra.relset('D|M|FI'))", "_____no_output_____" ], [ "test1_net.edges[src,tgt]['constraint']", "_____no_output_____" ], [ "test1_net.edges[tgt,src]['constraint']", "_____no_output_____" ] ], [ [ "## Test Copy Network", "_____no_output_____" ] ], [ [ "test1_net_copy = test1_net.copy()\n#test1_net_copy = qr.copy(test1_net)", "_____no_output_____" ], [ "test1_net_copy.summary()", "\nNetwork Copy Test #1: 4 nodes, 16 edges\n Algebra: Extended_Linear_Interval_Algebra\n U:['ProperInterval']\n => U: E\n => V: B\n => W: D|FI|M\n => X: B\n V:['ProperInterval']\n => V: E\n => U: BI\n => W: OI\n => X: BI|DI|PSI\n W:['ProperInterval']\n => W: E\n => U: DI|F|MI\n => V: O\n => X: DI\n X:['Point']\n => X: PE\n => U: BI\n => V: B|D|PS\n => W: D\n" ], [ "test1_net_copy.propagate()\ntest1_net_copy.summary(show_all=False)", "\nNetwork Copy Test #1: 4 nodes, 16 edges\n Algebra: Extended_Linear_Interval_Algebra\n U:['ProperInterval']\n 
=> U: E\n => V: B\n => W: D|M\n => X: B\n V:['ProperInterval']\n => V: E\n => W: OI\n => X: BI|DI|PSI\n W:['ProperInterval']\n => W: E\n => X: DI\n X:['Point']\n => X: PE\n" ], [ "done = []\nresult = []\nfor eg in test1_net_copy.edges:\n src = eg[0]; tgt = eg[1]\n srcID = src.name; tgtID = tgt.name\n if not (src, tgt) in done:\n cons = test1_net_copy.edges[src, tgt]['constraint']\n print(srcID, tgtID, cons)\n if len(cons) > 1:\n result.append((srcID, tgtID, cons))\n done.append((tgt, src))", "U U E\nU V B\nU W D|M\nU X B\nV V E\nV W OI\nV X BI|DI|PSI\nW W E\nW X DI\nX X PE\n" ], [ "rels = []\nfor rel in result[0][2]:\n rels.append(rel)", "_____no_output_____" ], [ "rels", "_____no_output_____" ], [ "foo = [1, 2, 3]\na = foo.pop()", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "foo", "_____no_output_____" ], [ "def _all_realizations_aux(in_work, result):\n if len(in_work) == 0:\n print(\"DONE\")\n return result\n else:\n print(\"Get next net in work\")\n next_net = in_work.pop()\n if finished(next_net):\n print(\" This one's finished\")\n result.append(next_net)\n _all_realizations_aux(in_work, result)\n else:\n print(\" Expanding net\")\n _all_realizations_aux(in_work + expand(next_net), result)\n\ndef expand(net):\n expansion = []\n for src, tgt in net.edges:\n edge_constraint = net.edges[src, tgt]['constraint']\n if len(edge_constraint) > 1:\n print(\"--------\")\n print(f\"Edge Constraint: {edge_constraint}\")\n for rel in edge_constraint:\n print(f\" Relation: {rel}\")\n net_copy = net.copy()\n src_node, tgt_node, _ = net_copy.get_edge(src.name, tgt.name, return_names=False)\n net_copy.set_constraint(src_node, tgt_node, net_copy.algebra.relset(rel))\n expansion.append(net_copy)\n print(f\" Expansion: {expansion}\")\n break\n return expansion\n\ndef finished(net):\n \"\"\"Returns True if all constraints are singletons.\"\"\"\n answer = True\n for src, tgt in net.edges:\n edge_constraint = net.edges[src, tgt]['constraint']\n if len(edge_constraint) > 1:\n answer = False\n break\n return answer", "_____no_output_____" ], [ "x = _all_realizations_aux([test1_net_copy], list())", "Get next net in work\n Expanding net\n--------\nEdge Constraint: D|M\n Relation: D\n Expansion: [<qualreas.Network object at 0x7fd72089f910>]\n Relation: M\n Expansion: [<qualreas.Network object at 0x7fd72089f910>, <qualreas.Network object at 0x7fd72089f610>]\nGet next net in work\n Expanding net\n--------\nEdge Constraint: BI|DI|PSI\n Relation: BI\n Expansion: [<qualreas.Network object at 0x7fd7208aae10>]\n Relation: DI\n Expansion: [<qualreas.Network object at 0x7fd7208aae10>, <qualreas.Network object at 0x7fd72089cd10>]\n Relation: PSI\n Expansion: [<qualreas.Network object at 0x7fd7208aae10>, <qualreas.Network object at 0x7fd72089cd10>, <qualreas.Network object at 0x7fd7208cd3d0>]\nGet next net in work\n This one's finished\nGet next net in work\n This one's finished\nGet next net in work\n This one's finished\nGet next net in work\n Expanding net\n--------\nEdge Constraint: BI|DI|PSI\n Relation: BI\n Expansion: [<qualreas.Network object at 0x7fd72089c250>]\n Relation: DI\n Expansion: [<qualreas.Network object at 0x7fd72089c250>, <qualreas.Network object at 0x7fd7207bd890>]\n Relation: PSI\n Expansion: [<qualreas.Network object at 0x7fd72089c250>, <qualreas.Network object at 0x7fd7207bd890>, <qualreas.Network object at 0x7fd7207bd310>]\nGet next net in work\n This one's finished\nGet next net in work\n This one's finished\nGet next net in work\n This one's finished\nDONE\n" ], [ "len(x)", 
"_____no_output_____" ], [ "foo = expand(test1_net)\nfoo", "--------\nD|FI|M\nD\n[<qualreas.Network object at 0x7fc318a5fed0>]\nFI\n[<qualreas.Network object at 0x7fc318a5fed0>, <qualreas.Network object at 0x7fc2e93ccd50>]\nM\n[<qualreas.Network object at 0x7fc318a5fed0>, <qualreas.Network object at 0x7fc2e93ccd50>, <qualreas.Network object at 0x7fc2e8fe74d0>]\n" ], [ "foo[0].summary(show_all=False)", "\nNetwork Copy Test #1: 4 nodes, 16 edges\n Algebra: Extended_Linear_Interval_Algebra\n U:['ProperInterval']\n => U: E\n => V: B\n => W: D\n => X: B\n V:['ProperInterval']\n => V: E\n => W: OI\n => X: BI|DI|PSI\n W:['ProperInterval']\n => W: E\n => X: DI\n X:['Point']\n => X: PE\n" ], [ "foo[1].summary(show_all=False)", "\nNetwork Copy Test #1: 4 nodes, 16 edges\n Algebra: Extended_Linear_Interval_Algebra\n U:['ProperInterval']\n => U: E\n => V: B\n => W: FI\n => X: B\n V:['ProperInterval']\n => V: E\n => W: OI\n => X: BI|DI|PSI\n W:['ProperInterval']\n => W: E\n => X: DI\n X:['Point']\n => X: PE\n" ], [ "foo[2].summary(show_all=False)", "\nNetwork Copy Test #1: 4 nodes, 16 edges\n Algebra: Extended_Linear_Interval_Algebra\n U:['ProperInterval']\n => U: E\n => V: B\n => W: M\n => X: B\n V:['ProperInterval']\n => V: E\n => W: OI\n => X: BI|DI|PSI\n W:['ProperInterval']\n => W: E\n => X: DI\n X:['Point']\n => X: PE\n" ], [ "finished(test1_net)", "_____no_output_____" ], [ "finished(test2_net)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
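The `expand`/`finished` pair in the record above enumerates singleton networks one multi-valued edge at a time by copying `qualreas` Network objects. Viewed abstractly, that enumeration is a Cartesian product over the non-singleton edge constraints; the sketch below uses invented edge labels on a plain dict and omits the consistency check (`propagate`) that each candidate network would still need.

```python
from itertools import product

# Edge constraints as plain sets; the labels are illustrative, not a real qualreas algebra.
constraints = {("U", "W"): {"D", "M"}, ("V", "X"): {"BI", "DI", "PSI"}, ("U", "V"): {"B"}}

edges = list(constraints)
singleton_networks = [
    dict(zip(edges, combo))
    for combo in product(*(sorted(constraints[e]) for e in edges))
]

print(len(singleton_networks))  # 2 * 3 * 1 = 6 candidate singleton assignments
print(singleton_networks[0])    # each candidate fixes exactly one relation per edge
```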
4af431fc1ab3bfbfc3ba0ea1c42aa3cc3bb1ef23
107,654
ipynb
Jupyter Notebook
RandomForstClassifier on SE Team Assessment database.ipynb
renowator/My_ML
c0687f9b2833268727d4a3a557a98ccf8d5be9b2
[ "MIT" ]
1
2018-08-05T00:30:09.000Z
2018-08-05T00:30:09.000Z
RandomForstClassifier on SE Team Assessment database.ipynb
renowator/My_ML
c0687f9b2833268727d4a3a557a98ccf8d5be9b2
[ "MIT" ]
null
null
null
RandomForstClassifier on SE Team Assessment database.ipynb
renowator/My_ML
c0687f9b2833268727d4a3a557a98ccf8d5be9b2
[ "MIT" ]
null
null
null
141.278215
81,184
0.845654
[ [ [ "import pandas\nimport numpy as np\nimport sklearn\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nimport glob", "_____no_output_____" ] ], [ [ "# San Francisco State University\n## Software Engineering Team Assessment and Prediction (SETAP) Project Machine Learning Training Data File Version 0.7\n ====================================================================\n\n# Copyright 2000-2017 by San Francisco State University, Dragutin Petkovic, and Marc Sosnick-Perez.\n\n# CONTACT\n -------\n## Professor Dragutin Petkovic: [email protected]\n\n# LICENSE\n -------\n This data is released under the Creative Commons Attribution-\n NonCommercial 4.0 International license. For more information,\n please see\n http://creativecommons.org/licenses/by-nc/4.0/legalcode.\n The research that has made this data possible has been funded in\n part by NSF grant NSF-TUES1140172.\n\n YOUR FEEDBACK IS WELCOME\n ------------------------\n We are interested in how this data is being used. If you use it in\n a research project, we would like to know how you are using the\n data. Please contact us at [email protected].\n\n\n# FILES INCLUDED IN DISTRIBUTION PACKAGE\n ==================================\n More data about the SETAP project, data collection, and description\n and use of machine learning to analyze the data can be found in the\n following paper:\n\n D. Petkovic, M. Sosnick-Perez, K. Okada, R. Todtenhoefer, S. Huang,\n N. Miglani, A. Vigil: \"Using the Random Forest Classifier to Assess\n and Predict Student Learning of Software Engineering Teamwork\".\n Frontiers in Education FIE 2016, Erie, PA, 2016\n\n\n See DATA DESCRIPTION below for more information about the data. The\n README file (which you are reading) contains project information\n such as data collection techniques, data organization and field\n naming convention. In addition to the README file, the archive\n contains a number of .csv files. Each of these CSV files contains\n data aggregated by team from the project (see below), paired with\n that team's outcome for either the process or product component of\n the team's evaluation. The files are named using the following\n convention:\n \n setap[Process|Product]T[1-11].csv\n\n For example, the file setapProcessT5.csv contains the data for all\n teams for time interval 5, paired with the outcome data for the\n Process component of the team's evaluation.\n\n Detailed information about the exact format of the .csv file may be\n found in the csv files themselves.\n\n\n# DATA DESCRIPTION\n ====================================================================\n The following is a detailed description of the data contained in the\n accompanying files.\n\n### INTRODUCTION\n ------------\n\n The data contained in these files were collected over a period of\n several semesters from students engaged in software engineering\n classes at San Francisco State University (class sections of CSC\n 640, CSC 648 and CSC 848). All students consented to this data\n being shared for research purposes provided no uniquely identifiable\n information was contained in the distributed files. The information\n was collected through various means, with emphasis being placed on\n the collection of objective, quantifiable information. 
For more\n information on the data collection procedures, please see the paper\n referenced above.\n\n\n### PRIVACY\n -------\n The data contained in this file does not contain any information\n which may be individually traced to a particular student who\n participated in the study.\n\n# BRIEF DESCRIPTION OF DATA SOURCES AND DERIVATIONS\n -------------------------------------------------\n SAMs (Student Activity Measure) are collected for each student team\n member during their participation in a software engineering class.\n Student teams work together on a final class project, and comprise\n 5-6 students. Teams that are made up of students from only one\n school are labeled local teams. Teams made up of students from more\n than one school are labeled global teams. SAMs are collected from:\n weekly timecards, instructor observations, and software engineering\n tool usage logs. SAMs are then aggregated by team and time interval\n (see next section) into TAMs (Team Activity Measure). Outcomes are\n determined at the end of the semester through evaluation of student\n team work in two categories: software engineering process (how well\n the team applied best software engineering practices), and software\n engineering product (the quality of the finished product the team\n produced). Thus for each team, two outcomes are determined, process\n and product, respectively. Outcomes are classified into two class\n grades, A or F. A represents teams that are at or above\n expectations, F represents teams that are below expectations or need\n attention. For more information, please see the paper referenced\n above.\n\n The SE process and SE product outcomes represent ML training classes# and are to be considered separately, e.g. one should train ML for SE\n process separately from training for SE product.\n\n", "_____no_output_____" ] ], [ [ "path ='data/SETAP PRODUCT DATA' \nallFiles = glob.glob(path + \"/*.csv\")\nframe = pandas.DataFrame()\nlist_ = []\nfor file_ in allFiles:\n df = pandas.read_csv(file_,index_col=None, header=0)\n list_.append(df)\nframe = pandas.concat(list_)\n\ndata = pandas.DataFrame.from_csv(\"data/SETAP PRODUCT DATA/setapProductT1.csv\")\n# full_data=True will let explore the whole dataset (T1-T11)\nfull_data = True\nif (full_data):\n data = frame", "_____no_output_____" ], [ "labels = data['productLetterGrade']\nfeatures = data.drop('productLetterGrade', axis=1)\n#Drop certain features\nif (full_data):\n features = features.drop([col for col in features.columns if 'Total' in col], axis=1)\n features = features.drop([col for col in features.columns if 'Count' in col], axis=1)\n features = features.drop([col for col in features.columns if 'Student' in col], axis=1)\n #features = features.drop('femaleTeamMembersPercent', axis=1)", "_____no_output_____" ], [ "# Rename strings in data to appropriate integers, labels to booleans\nmapping = {'F': False, 'A': True}\nfeatures_mapping = {'M': 0, 'F' : 1, 'Global': 0, 'Local': 1}\nfeatures = pandas.DataFrame(features)\nlabels = pandas.DataFrame(labels)\nlabels = labels.applymap(lambda s: mapping.get(s) if s in mapping else s)\n#features.dropna(axis='columns', how='any', inplace=True)\nfeatures.fillna(1, inplace=True)\nfeatures = features.applymap(lambda s: features_mapping.get(s) if s in features_mapping else s)\nX_train, X_test, y_train, y_test = train_test_split(features, labels, random_state=1, train_size=0.4)", "/Users/nickstepanov/miniconda3/envs/RoboND/lib/python3.5/site-packages/sklearn/model_selection/_split.py:2026: 
FutureWarning: From version 0.21, test_size will always complement train_size unless both are specified.\n FutureWarning)\n" ], [ "rfc = RandomForestClassifier(n_estimators= 1000, max_features=0.25, max_depth=50, oob_score=True, n_jobs=-1)", "_____no_output_____" ], [ "rfc.fit(X_train, y_train.values.ravel())\nprint ('Accuracy score: ' + str(round(rfc.score(X_test, y_test.values.ravel()),3)*100) + '%')", "Accuracy score: 84.89999999999999%\n" ], [ "import matplotlib.pyplot as plt\nn_features = len(features.columns)\nplt.figure(figsize=(5,n_features/5))\nplt.barh(range(n_features), rfc.feature_importances_, align='center')\nplt.yticks(np.arange(n_features), features.columns)\nplt.xlabel('Full Feature Importance of ' + str(n_features) + ' features')\nplt.ylabel('Feature')\nplt.show()", "_____no_output_____" ], [ "features.columns[np.argmax(rfc.feature_importances_)] \nprint ( \"Top important features:\")\ncount = 1\nfor string in features.columns[rfc.feature_importances_.argsort()[-6:][::-1]] : \n print(str(count) + '. ' + string )\n count+=1", "Top important features:\n1. averageResponsesByWeek\n2. femaleTeamMembersPercent\n3. standardDeviationResponsesByWeek\n4. averageInPersonMeetingHoursAverageByWeek\n5. leadAdminHoursAverage\n6. inPersonMeetingHoursAverage\n" ], [ "print(\"Full dataset test set accuracy\")\npandas.crosstab(y_test['productLetterGrade'], rfc.predict(X_test), rownames=['Actual'], colnames=['Predicted'])\n#pandas.crosstab(labels['productLetterGrade'], rfc.predict(features), rownames=['Actual'], colnames=['Predicted'])", "Full dataset test set accuracy\n" ], [ "#Drop certain features\nif (full_data): \n data = pandas.DataFrame.from_csv(\"data/SETAP PRODUCT DATA/setapProductT1.csv\")\n labels = data['productLetterGrade']\n features = data.drop('productLetterGrade', axis=1)\n features = features.drop([col for col in features.columns if 'Total' in col], axis=1)\n features = features.drop([col for col in features.columns if 'Count' in col], axis=1)\n features = features.drop([col for col in features.columns if 'Student' in col], axis=1)\n #features = features.drop('femaleTeamMembersPercent', axis=1)\n # Rename strings in data to appropriate integers, labels to booleans\n mapping = {'F': False, 'A': True}\n features_mapping = {'M': 0, 'F' : 1, 'Global': 0, 'Local': 1}\n features = pandas.DataFrame(features)\n labels = pandas.DataFrame(labels)\n labels = labels.applymap(lambda s: mapping.get(s) if s in mapping else s)\n #features.dropna(axis='columns', how='any', inplace=True)\n features.fillna(1, inplace=True)\n features = features.applymap(lambda s: features_mapping.get(s) if s in features_mapping else s)\n print ('T1 Accuracy score: ' + str(round(rfc.score(features, labels.values.ravel()),3)*100) + '%')\npandas.crosstab(labels['productLetterGrade'], rfc.predict(features), rownames=['Actual'], colnames=['Predicted'])", "T1 Accuracy score: 77.0%\n" ], [ "data = pandas.DataFrame.from_csv(\"data/SETAP PRODUCT DATA/setapProductT2.csv\")\nlabels = data['productLetterGrade']\nfeatures = data.drop('productLetterGrade', axis=1)\n#Drop certain features\nif (full_data):\n features = features.drop([col for col in features.columns if 'Total' in col], axis=1)\n features = features.drop([col for col in features.columns if 'Count' in col], axis=1)\n features = features.drop([col for col in features.columns if 'Student' in col], axis=1)\n #features = features.drop('femaleTeamMembersPercent', axis=1)\n # Rename strings in data to appropriate integers, labels to booleans\n mapping = {'F': False, 
'A': True}\n features_mapping = {'M': 0, 'F' : 1, 'Global': 0, 'Local': 1}\n features = pandas.DataFrame(features)\n labels = pandas.DataFrame(labels)\n labels = labels.applymap(lambda s: mapping.get(s) if s in mapping else s)\n #features.dropna(axis='columns', how='any', inplace=True)\n features.fillna(1, inplace=True)\n features = features.applymap(lambda s: features_mapping.get(s) if s in features_mapping else s)\n print ('T2 Accuracy score: ' + str(round(rfc.score(features, labels.values.ravel()),3)*100) + '%')\npandas.crosstab(labels['productLetterGrade'], rfc.predict(features), rownames=['Actual'], colnames=['Predicted'])", "T2 Accuracy score: 93.2%\n" ], [ "data = pandas.DataFrame.from_csv(\"data/SETAP PRODUCT DATA/setapProductT3.csv\")\nlabels = data['productLetterGrade']\nfeatures = data.drop('productLetterGrade', axis=1)\n#Drop certain features\nif (full_data):\n features = features.drop([col for col in features.columns if 'Total' in col], axis=1)\n features = features.drop([col for col in features.columns if 'Count' in col], axis=1)\n features = features.drop([col for col in features.columns if 'Student' in col], axis=1)\n #features = features.drop('femaleTeamMembersPercent', axis=1)\n # Rename strings in data to appropriate integers, labels to booleans\n mapping = {'F': False, 'A': True}\n features_mapping = {'M': 0, 'F' : 1, 'Global': 0, 'Local': 1}\n features = pandas.DataFrame(features)\n labels = pandas.DataFrame(labels)\n labels = labels.applymap(lambda s: mapping.get(s) if s in mapping else s)\n #features.dropna(axis='columns', how='any', inplace=True)\n features.fillna(1, inplace=True)\n features = features.applymap(lambda s: features_mapping.get(s) if s in features_mapping else s)\n print ('T3 Accuracy score: ' + str(round(rfc.score(features, labels.values.ravel()),3)*100) + '%')\npandas.crosstab(labels['productLetterGrade'], rfc.predict(features), rownames=['Actual'], colnames=['Predicted'])", "T3 Accuracy score: 89.2%\n" ], [ "data = pandas.DataFrame.from_csv(\"data/SETAP PRODUCT DATA/setapProductT6.csv\")\nlabels = data['productLetterGrade']\nfeatures = data.drop('productLetterGrade', axis=1)\n#Drop certain features\nif (full_data):\n features = features.drop([col for col in features.columns if 'Total' in col], axis=1)\n features = features.drop([col for col in features.columns if 'Count' in col], axis=1)\n features = features.drop([col for col in features.columns if 'Student' in col], axis=1)\n #features = features.drop('femaleTeamMembersPercent', axis=1)\n # Rename strings in data to appropriate integers, labels to booleans\n mapping = {'F': False, 'A': True}\n features_mapping = {'M': 0, 'F' : 1, 'Global': 0, 'Local': 1}\n features = pandas.DataFrame(features)\n labels = pandas.DataFrame(labels)\n labels = labels.applymap(lambda s: mapping.get(s) if s in mapping else s)\n #features.dropna(axis='columns', how='any', inplace=True)\n features.fillna(1, inplace=True)\n features = features.applymap(lambda s: features_mapping.get(s) if s in features_mapping else s)\n print ('T3 Accuracy score: ' + str(round(rfc.score(features, labels.values.ravel()),3)*100) + '%')\npandas.crosstab(labels['productLetterGrade'], rfc.predict(features), rownames=['Actual'], colnames=['Predicted'])", "T3 Accuracy score: 93.2%\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
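The four per-interval cells near the end of the record above repeat the same load/clean/score steps with only the file name changing. One possible refactor is to move that preparation into a helper and loop over the intervals; `pd.read_csv(..., index_col=0)` stands in here for the deprecated `DataFrame.from_csv`, and the paths and column names are the ones used in the notebook.

```python
import pandas as pd

LABEL_MAP = {'F': False, 'A': True}
FEATURE_MAP = {'M': 0, 'F': 1, 'Global': 0, 'Local': 1}

def prepare(path):
    """Load one setapProduct*.csv and clean it the same way as the cells above."""
    data = pd.read_csv(path, index_col=0)
    labels = data['productLetterGrade'].map(LABEL_MAP)
    features = data.drop('productLetterGrade', axis=1)
    drop_cols = [c for c in features.columns
                 if 'Total' in c or 'Count' in c or 'Student' in c]
    features = features.drop(drop_cols, axis=1).fillna(1)
    features = features.applymap(lambda s: FEATURE_MAP.get(s, s))
    return features, labels

# With `rfc` already fitted as in the notebook:
# for t in (1, 2, 3, 6):
#     X, y = prepare(f"data/SETAP PRODUCT DATA/setapProductT{t}.csv")
#     print(f"T{t} accuracy: {rfc.score(X, y):.3f}")
```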
4af43d345cbc7845b35b12d263703729742f9a10
4,351
ipynb
Jupyter Notebook
lectures/1_TeoriaProbabilidades/parte3.ipynb
magister-informatica-uach/INFO337
45d7faabbd4ed5b25a575ee065551b87b097f92e
[ "Unlicense" ]
4
2021-06-12T04:07:26.000Z
2022-03-27T23:22:59.000Z
lectures/1_TeoriaProbabilidades/parte3.ipynb
magister-informatica-uach/INFO337
45d7faabbd4ed5b25a575ee065551b87b097f92e
[ "Unlicense" ]
null
null
null
lectures/1_TeoriaProbabilidades/parte3.ipynb
magister-informatica-uach/INFO337
45d7faabbd4ed5b25a575ee065551b87b097f92e
[ "Unlicense" ]
1
2019-11-07T14:49:09.000Z
2019-11-07T14:49:09.000Z
29.201342
140
0.450241
[ [ [ "# Main statistics\n\n- Expectation, variance and the weak law of large numbers\n- Special random variables\n\n\n## Expectation\nThe expectation or expected value of a r.v. $X$ is denoted $E[X]$ and is computed as:\n\n$\\begin{array}{ll} \nE[X] = \n\\left\\{\\begin{array}{ll} \\sum_i x_i P(X=x_i) & \\text{if } X \\text{ discrete}\\\\\n \\int x f_X(x)dx & \\text{if } X \\text{ continuous}\\\\\n\\end{array} \\right .\\\\\n\\end{array}$\n\n\nLet $g$ be a real-valued function; then:\n\n$\\begin{array}{lll}\nE[g(X)] & = & \n\\left\\{\\begin{array}{ll} \\sum_i g(x_i) P(X=x_i) & \\text{if } X \\text{ discrete}\\\\\n \\int g(x) f_X(x)dx & \\text{if } X \\text{ continuous}\\\\\n\\end{array}\\right .\\\\\n\\end{array}$\n\nFor the special case $g(x) = x^n$, the n-th moment of X is defined as:\n\n$\\begin{array}{lll} \nE[X^n] & = & \n\\left\\{\\begin{array}{ll} \\sum_i x_i^n P(X=x_i) & \\text{if } X \\text{ discrete}\\\\\n \\int x^n f_X(x)dx & \\text{if } X \\text{ continuous}\\\\\n\\end{array}\\right .\\\\\n\\end{array}$\n\nThe expectation is the first moment and is denoted $\\mu$.\n\n**Properties**\n\nLet $a,b \\in \\cal{R}$; then:\n\n$\\begin{array}{lll} \nE[aX+b] & = & aE[X] + b \\\\\nE[X + Y] & = & E[X] + E[Y]\\\\\n\\end{array}$\n\n", "_____no_output_____" ], [ "## Variance and covariance\nThe variance measures the spread of the r.v. around the expectation or mean $\\mu$, and is defined as\n\n$\\begin{equation}\n\\begin{array}{ll} \nVar(X) = E[(X-\\mu)^2] = E[X^2] - \\mu^2\n\\end{array}\n\\end{equation}$\n\nIt satisfies:\n\n$\\begin{equation}\n\\begin{array}{ll} \nVar(aX+b) = a^2 Var(X)\n\\end{array}\n\\end{equation}$\n\nThe standard deviation is also defined as $\\sigma = \\sqrt{Var(X)}$ \n\n\nThe covariance measures the (linear) relationship between two r.v. $X$ and $Y$. If we denote $\\mu_X = E[X]$ and $\\mu_Y = E[Y]$, then:\n\n$\\begin{equation}\n\\begin{array}{lll}\nCov(X,Y) & = & E[(X-\\mu_X)(Y-\\mu_Y)]\n\\end{array}\n\\end{equation}$\n\nThe correlation is a normalized measure:\n\n$\\begin{equation}\n\\begin{array}{lll}\nCorr(X,Y) & = & \\frac{Cov(X,Y)}{\\sigma_X \\sigma_Y}\n\\end{array}\n\\end{equation}$\n\n\n\n**Properties**\n\n$\\begin{array}{lll} \nCov(X,Y) & = & Cov(Y,X) \\\\\nCov(X,X) & = & Var(X)\\\\\nCov(X+Z,Y) & = & Cov(X,Y) + Cov(Z,Y)\\\\\nCov(\\sum\\limits_i X_i,Y) & = & \\sum\\limits_i Cov(X_i,Y)\\\\\nVar(X+Y) & = & Var(X) + Var(Y) + 2Cov(X,Y)\\\\\nVar(\\sum\\limits_i X_i) & = & \n\\sum\\limits_i Var(X_i) + \\sum\\limits_i \\sum\\limits_{j\\neq i} Cov(X_i,X_j)\n\\end{array}$", "_____no_output_____" ], [ "## Other statistics\n$\\begin{array}{lll}\n\\text{ Skewness (asymmetry) } & = & \\frac{E[(X-\\mu)^3]}{\\sigma^3} = \\frac{E[X^3]-3\\mu\\sigma^2 - \\mu^3}{\\sigma^3}\\\\\n&&\\\\\n\\text{ Kurtosis }& = &\\frac{E[(X-\\mu)^4]}{\\sigma^4} = \\frac{E[X^4] - 4\\mu E[X^3] + 6\\mu^2\\sigma^2 + 3\\mu^4}{\\sigma^4}\\\\\n\\end{array}$", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
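The moment definitions in the lecture record above can be checked numerically. The snippet draws from an exponential distribution — a made-up example, not part of the lecture — whose theoretical values are known in closed form: with scale 2 the mean is 2, the variance 4, the skewness 2 and the (non-excess) kurtosis 9.

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.exponential(scale=2.0, size=200_000)    # toy sample with E[X]=2, Var(X)=4

mu, sigma = x.mean(), x.std()
skewness = np.mean((x - mu)**3) / sigma**3      # E[(X-mu)^3] / sigma^3
kurtosis = np.mean((x - mu)**4) / sigma**4      # E[(X-mu)^4] / sigma^4, not excess kurtosis

print(round(mu, 2), round(sigma**2, 2))         # close to 2.0 and 4.0
print(round(skewness, 2), round(kurtosis, 2))   # close to 2.0 and 9.0
```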
4af453376b60b5b9696350efe5d2bc92d6240234
123,766
ipynb
Jupyter Notebook
Case Study 2/Local Outlier Factor (LOF)/Local Outlier Factor.ipynb
thirasit/Practical-ML-in-Petrol-with-Python
42188ea57c583e81ee9e55a307e5e4a20d49773f
[ "MIT" ]
null
null
null
Case Study 2/Local Outlier Factor (LOF)/Local Outlier Factor.ipynb
thirasit/Practical-ML-in-Petrol-with-Python
42188ea57c583e81ee9e55a307e5e4a20d49773f
[ "MIT" ]
null
null
null
Case Study 2/Local Outlier Factor (LOF)/Local Outlier Factor.ipynb
thirasit/Practical-ML-in-Petrol-with-Python
42188ea57c583e81ee9e55a307e5e4a20d49773f
[ "MIT" ]
null
null
null
123,766
123,766
0.760346
[ [ [ "# **Local Outlier Factor (LOF)**", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "from google.colab import files\nuploaded = files.upload()", "_____no_output_____" ], [ "df=pd.read_csv('PE_Income_Spending_DataSet.csv')", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "df=pd.get_dummies(df,drop_first=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "sns.boxplot(df['Petroleum_Engineer_Age'], color='orange')", "/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "sns.boxplot(df['Petroleum_Engineer_Income (K$)'], color='orange')", "/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "sns.boxplot(df['Spending_Habits (From 1 to 100)'], color='orange')", "/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "sns.boxplot(df['Petroleum_Engineer_Gender_Male'], color='orange')", "/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. 
From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "df.columns", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler\nscaler=StandardScaler()\ndf_scaled=scaler.fit(df)\ndf_scaled=scaler.transform(df)\n# df_scaled", "_____no_output_____" ], [ "df_scaled=pd.DataFrame(df_scaled, columns=['Petroleum_Engineer_Age', 'Petroleum_Engineer_Income (K$)',\n 'Spending_Habits (From 1 to 100)', 'Petroleum_Engineer_Gender_Male'])\ndf_scaled", "_____no_output_____" ], [ "from sklearn.neighbors import LocalOutlierFactor\nclf = LocalOutlierFactor(n_neighbors=40, contamination=.1, metric='euclidean')\nclf.fit(df_scaled)", "_____no_output_____" ], [ "df_scaled.iloc[:,:4]", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "df_scaled['Scores']=clf.negative_outlier_factor_\nplt.figure(figsize=(12,8))\nplt.hist(df_scaled['Scores'])\nplt.title('Histogram of Negative Outlier Factor')", "_____no_output_____" ], [ "df_scaled", "_____no_output_____" ], [ "df_scaled['Anomaly']=clf.fit_predict(df_scaled.iloc[:,:4])\nAnomaly=df_scaled.loc[df_scaled['Anomaly']==-1]\nAnomaly_index=list(Anomaly.index)\nAnomaly", "_____no_output_____" ], [ "plt.figure(figsize=(12,8))\ngroups = df_scaled.groupby(\"Anomaly\")\nfor name, group in groups:\n plt.plot(group['Petroleum_Engineer_Income (K$)'], group['Spending_Habits (From 1 to 100)'], marker=\"o\", linestyle=\"\", label=name)\nplt.xlabel('Petroleum Engineer Income')\nplt.ylabel('Petroleum Engineer Spending Habits')\nplt.title('Local Outlier Factor Anomalies')", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
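The record above calls `fit_predict`, which only labels the points the model was trained on. If new petroleum-engineer records had to be scored later, the same scikit-learn estimator can instead be fitted in novelty mode; the arrays below are random placeholders standing in for `df_scaled`, while `n_neighbors=40` and the euclidean metric mirror the notebook's settings.

```python
import numpy as np
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.RandomState(0)
X_train = rng.normal(size=(200, 4))        # placeholder for the scaled training features
X_new = np.array([[0.1, 0.0, -0.2, 0.3],   # an ordinary-looking point
                  [6.0, 6.0, 6.0, 6.0]])   # an obvious outlier

lof = LocalOutlierFactor(n_neighbors=40, metric='euclidean', novelty=True)
lof.fit(X_train)

print(lof.predict(X_new))            # 1 = inlier, -1 = outlier
print(lof.decision_function(X_new))  # larger values = more normal
```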
4af45b7e06e86580ae808d56cce91c0d1d2fac36
285,363
ipynb
Jupyter Notebook
11_visualize_embedding.ipynb
ast0414/semit
c221222ba06f14611e3d030969cdb9f7c17ff98f
[ "MIT" ]
null
null
null
11_visualize_embedding.ipynb
ast0414/semit
c221222ba06f14611e3d030969cdb9f7c17ff98f
[ "MIT" ]
1
2022-02-26T07:21:04.000Z
2022-02-26T07:21:04.000Z
11_visualize_embedding.ipynb
ast0414/semit
c221222ba06f14611e3d030969cdb9f7c17ff98f
[ "MIT" ]
null
null
null
1,327.269767
279,724
0.957707
[ [ [ "import os\nimport pickle\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.offsetbox import OffsetImage, AnnotationBbox\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "plt.style.use(\"dark_background\")", "_____no_output_____" ], [ "%matplotlib inline", "_____no_output_____" ], [ "DATA_PATH = \"/nethome/san37/Workspace/semit/data\"\nUMAP_PATH = \"/localscratch/san37/semit/SUIT/UMAP\"", "_____no_output_____" ] ], [ [ "# Loading Data", "_____no_output_____" ] ], [ [ "with open(os.path.join(DATA_PATH, 'mnist_full.pkl'), 'rb') as f:\n mnist_full = pickle.load(f)\nmnist_x_train = mnist_full[\"x_train\"] \nmnist_y_train = mnist_full[\"y_train\"]", "_____no_output_____" ], [ "with open(os.path.join(DATA_PATH, 'kannada_semi_1pct.pkl'), 'rb') as f:\n kannada_semi = pickle.load(f)\nkannada_x_train_labeled = kannada_semi[\"x_train_labeled\"]\nkannada_y_train_labeled = kannada_semi[\"y_train_labeled\"]\nkannada_x_train_unlabeled = kannada_semi[\"x_train_unlabeled\"]\nkannada_y_train_unlabeled = kannada_semi[\"y_train_unlabeled\"]\nkannada_x_train = np.concatenate((kannada_x_train_labeled, kannada_x_train_unlabeled), axis=0)\nkannada_y_train = np.concatenate((kannada_y_train_labeled, kannada_y_train_unlabeled), axis=0)", "_____no_output_____" ] ], [ [ "# Loading pre-generated UMAP/tSNE embedding", "_____no_output_____" ] ], [ [ "mnist_umap = np.load(os.path.join(UMAP_PATH, 'mnist_tsne.npy'))\nkannada_umap = np.load(os.path.join(UMAP_PATH, 'kannada_tsne.npy'))", "_____no_output_____" ] ], [ [ "# Visualize Embedding", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(12, 12))\ncm = plt.cm.get_cmap('tab20')\nfor i in range(10):\n indexing = (mnist_y_train == i)\n vis_points = mnist_umap[indexing]\n ax.scatter(vis_points[:, 0], vis_points[:, 1], s=5, alpha=0.3, c=[cm.colors[i]], label='M{}'.format(i))\n \n mean_loc = np.mean(vis_points, axis=0)\n mean_class_img = np.squeeze(np.mean(mnist_x_train[indexing], axis=0))\n \n offset_img = OffsetImage(mean_class_img, zoom=1, cmap='gray')\n ab = AnnotationBbox(offset_img, mean_loc, xycoords='data', frameon=True, pad=0.0)\n ax.add_artist(ab)\n\nfor i in range(10):\n indexing = (kannada_y_train == i)\n vis_points = kannada_umap[indexing]\n ax.scatter(vis_points[:, 0], vis_points[:, 1], s=5, alpha=0.3, c=[cm.colors[10 + i]], label='K{}'.format(i))\n \n mean_class_img = np.squeeze(np.mean(kannada_x_train[indexing], axis=0))\n mean_loc = np.mean(vis_points, axis=0)\n \n offset_img = OffsetImage(mean_class_img, zoom=1, cmap='gray')\n ab = AnnotationBbox(offset_img, mean_loc, xycoords='data', frameon=True, pad=0.0)\n ax.add_artist(ab)\n\nlgnd = ax.legend(loc=\"upper left\", ncol=2, scatterpoints=1, fontsize=12, title=\"MNIST (M) / Kannada (K)\")\nfor handle in lgnd.legendHandles:\n #handle.set_title('ABD')\n handle.set_sizes([40])\n handle.set_alpha(1)\n\nax.set_xlim(left=-47)\nax.axis('off')", "_____no_output_____" ], [ "plt.tight_layout()\nfig.savefig(os.path.join(UMAP_PATH, \"tsne_plot.png\"), format='png', bbox_inches='tight')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
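The embedding record above only loads precomputed arrays such as `mnist_tsne.npy`, so the projection step itself is not shown. One plausible way a 2-D array of that shape could be produced is a t-SNE fit on the flattened images; the random array below is a stand-in for `mnist_x_train`, and the parameters are illustrative rather than the ones actually used.

```python
import numpy as np
from sklearn.manifold import TSNE

x_train = np.random.rand(1000, 28, 28)      # placeholder for the (N, 28, 28) MNIST images
flat = x_train.reshape(len(x_train), -1)    # one flattened row per image

embedding = TSNE(n_components=2, init='pca', random_state=0).fit_transform(flat)
np.save('mnist_tsne.npy', embedding)        # same shape and role as the precomputed file
print(embedding.shape)                      # (1000, 2)
```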
4af46aa4890979049922cf02cf7fd74c4d2c54be
71,475
ipynb
Jupyter Notebook
text-processing.ipynb
vikram-patil1289/text-pre-processing
f34c111ec8171bd14fe5d3836e6ed9520d1e02a8
[ "MIT" ]
null
null
null
text-processing.ipynb
vikram-patil1289/text-pre-processing
f34c111ec8171bd14fe5d3836e6ed9520d1e02a8
[ "MIT" ]
null
null
null
text-processing.ipynb
vikram-patil1289/text-pre-processing
f34c111ec8171bd14fe5d3836e6ed9520d1e02a8
[ "MIT" ]
1
2022-03-18T13:01:29.000Z
2022-03-18T13:01:29.000Z
67.877493
32,856
0.745407
[ [ [ "Date: 2/09/2018\n\nVersion: 1.0\n\nEnvironment: Python 3.6.1 and Jupyter notebook\n\nLibraries used: Main libraries used for assignment:\n* re (for regular expression, included in Anaconda Python 3.6) \n* sys (to display system version, included in Anaconda Python 3.6) \n* nltk (for text processing, included in Anaconda Python 3.6) \n* pathlib (to set the document directory in order to read files, included in Anaconda Python 3.6) \n* nltk.tokenize (for tokenize and mwetokenize process, included in Anaconda Python 3.6) \n* os (for changing file directory, included in Anaconda Python 3.6) \n* nltk.util (for bigrams, included in Anaconda Python 3.6) \n* nltk.probability (for calculating term frequency of tokens, included in Anaconda Python 3.6) \n* warnings (to ignore any warnings thrown whiel execution, included in Anaconda Python 3.6) \n* nltk.probability (for calculating term frequency of tokens, included in Anaconda Python 3.6) \n* nltk.stem (for stemming of tokens, included in Anaconda Python 3.6) \n* pandas(for creating dataframes, included in Anaconda Python 3.6) \n* matplotlib(for plotting dataframes, included in Anaconda Python 3.6) \n", "_____no_output_____" ], [ "## Introduction:\n\nThis analysis consists of parsing of 219/250 resumes, cleaning the resumes' files by removing stopwords, characters less than 3length, most frequent and less frequent words. \nOnce the tokens are generated, bigrams and unigrams are merged to form final vocab list.\nThen count of each tokens present in each resume is printed to files to form the vector matrix, which further helps for text processing.\n\n", "_____no_output_____" ], [ "## Import libraries ", "_____no_output_____" ] ], [ [ "# Importing libraries for assessment 1 - Task 2\n\nimport re\nimport sys\nimport nltk\nfrom pathlib import Path\nfrom nltk.tokenize import RegexpTokenizer\nimport os\nfrom nltk.util import ngrams\nfrom nltk.probability import *\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom nltk.tokenize import MWETokenizer\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import sent_tokenize\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n\n#Printing system version\nprint (sys.version_info)", "sys.version_info(major=3, minor=6, micro=1, releaselevel='final', serial=0)\n" ] ], [ [ "### 1) Identify the 250 resumes file numbers assigned to me. 
", "_____no_output_____" ] ], [ [ "#Function used to clean the data while reading the files\n\ndef clean_data_fun(data):\n data=re.sub('[^\\s!-~]', '', data) #removes extra unwanted characters\n data = re.sub('[%s]' % re.escape(\"\\n\\|\\/\"), '', data) # removes punctuations\n data = re.sub('\\s+', ' ', data)# removes extra whitespace\n return data #returns cleaned data", "_____no_output_____" ], [ "#This step extracts the required 250 resumes' names assigned to me from the folder.\n\n#From the directory fetch the resumes numbers assigned to me\nos.chdir('C:/Users/vikra/Desktop/Python/wrangling/Assignment/A-1/Task2')\nwith open('student_dataset.txt','r') as input_file:\n resume_student_dataset=input_file.read()\ninput_file.close()\n\n# Once the file is opened, read the file inorder to find the resume's file numbers\n\nresume_student_dataset=clean_data_fun(resume_student_dataset) #clean the data set if any unwanted characters present\nresume_student_dataset=resume_student_dataset.split(\" \") #extract individual file string using str.split()\nresume_student_dataset=list(set(resume_student_dataset)) #fetch unique file numbers by applying set.\nprint(\"Result :Total resumes to be extracted-\",len(resume_student_dataset),\"out of 250 resumes\") #print result\nresume_student_dataset.sort() #The final list which contains the 219 file numbers to be extracted.\n\nprint(\"\\n\",resume_student_dataset)", "Result :Total resumes to be extracted- 219 out of 250 resumes\n\n ['1', '101', '102', '107', '108', '118', '119', '12', '120', '126', '128', '133', '135', '137', '14', '140', '142', '143', '145', '147', '150', '153', '156', '167', '169', '170', '173', '176', '177', '181', '186', '188', '19', '20', '207', '208', '227', '229', '231', '233', '243', '246', '252', '253', '257', '260', '262', '264', '271', '272', '275', '277', '278', '289', '290', '292', '297', '301', '303', '308', '314', '317', '32', '321', '324', '329', '335', '336', '340', '343', '344', '345', '347', '359', '365', '367', '371', '372', '381', '382', '385', '386', '39', '390', '395', '398', '405', '409', '410', '42', '421', '425', '429', '437', '439', '446', '448', '452', '459', '467', '472', '475', '477', '48', '492', '495', '496', '50', '500', '502', '503', '504', '508', '509', '51', '516', '517', '524', '525', '529', '535', '543', '545', '556', '563', '565', '569', '570', '572', '575', '579', '58', '580', '581', '583', '587', '590', '595', '596', '6', '603', '604', '609', '61', '610', '627', '638', '64', '640', '651', '654', '658', '659', '666', '677', '680', '682', '685', '686', '688', '689', '691', '695', '700', '703', '71', '712', '714', '718', '719', '722', '725', '729', '73', '730', '734', '738', '74', '745', '746', '748', '754', '758', '760', '766', '769', '772', '773', '774', '782', '784', '788', '791', '794', '797', '805', '806', '807', '809', '811', '818', '821', '824', '826', '827', '828', '830', '836', '837', '841', '842', '851', '852', '859', '861', '865', '88', '9', '98']\n" ], [ "#Next step: from previous step I got the numbers, now mapping these numbers with the resume file number inside directory\n\npath = \"C:/Users/vikra/Desktop/Python/wrangling/Assignment/A-1/Task2/resumeTxt\"\n\nFiles=list(sorted(os.listdir(path)))\nprint(\"Total files in the directory:\",len(Files))\n\nrequired_text_files=[] #To store only the required files. 
This will store 219 files' names out of 866\npattern =re.compile('(?<=resume_\\()[0-9]+(?=\\).txt)') #using regex, matching the files\nfor each in Files:\n for j in (pattern.findall(each)):\n if j in resume_student_dataset:\n required_text_files.append(each)\n \nprint(\"\\nThe required file names are:\",(required_text_files))\n", "Total files in the directory: 866\n\nThe required file names are: ['resume_(1).txt', 'resume_(101).txt', 'resume_(102).txt', 'resume_(107).txt', 'resume_(108).txt', 'resume_(118).txt', 'resume_(119).txt', 'resume_(12).txt', 'resume_(120).txt', 'resume_(126).txt', 'resume_(128).txt', 'resume_(133).txt', 'resume_(135).txt', 'resume_(137).txt', 'resume_(14).txt', 'resume_(140).txt', 'resume_(142).txt', 'resume_(143).txt', 'resume_(145).txt', 'resume_(147).txt', 'resume_(150).txt', 'resume_(153).txt', 'resume_(156).txt', 'resume_(167).txt', 'resume_(169).txt', 'resume_(170).txt', 'resume_(173).txt', 'resume_(176).txt', 'resume_(177).txt', 'resume_(181).txt', 'resume_(186).txt', 'resume_(188).txt', 'resume_(19).txt', 'resume_(20).txt', 'resume_(207).txt', 'resume_(208).txt', 'resume_(227).txt', 'resume_(229).txt', 'resume_(231).txt', 'resume_(233).txt', 'resume_(243).txt', 'resume_(246).txt', 'resume_(252).txt', 'resume_(253).txt', 'resume_(257).txt', 'resume_(260).txt', 'resume_(262).txt', 'resume_(264).txt', 'resume_(271).txt', 'resume_(272).txt', 'resume_(275).txt', 'resume_(277).txt', 'resume_(278).txt', 'resume_(289).txt', 'resume_(290).txt', 'resume_(292).txt', 'resume_(297).txt', 'resume_(301).txt', 'resume_(303).txt', 'resume_(308).txt', 'resume_(314).txt', 'resume_(317).txt', 'resume_(32).txt', 'resume_(321).txt', 'resume_(324).txt', 'resume_(329).txt', 'resume_(335).txt', 'resume_(336).txt', 'resume_(340).txt', 'resume_(343).txt', 'resume_(344).txt', 'resume_(345).txt', 'resume_(347).txt', 'resume_(359).txt', 'resume_(365).txt', 'resume_(367).txt', 'resume_(371).txt', 'resume_(372).txt', 'resume_(381).txt', 'resume_(382).txt', 'resume_(385).txt', 'resume_(386).txt', 'resume_(39).txt', 'resume_(390).txt', 'resume_(395).txt', 'resume_(398).txt', 'resume_(405).txt', 'resume_(409).txt', 'resume_(410).txt', 'resume_(42).txt', 'resume_(421).txt', 'resume_(425).txt', 'resume_(429).txt', 'resume_(437).txt', 'resume_(439).txt', 'resume_(446).txt', 'resume_(448).txt', 'resume_(452).txt', 'resume_(459).txt', 'resume_(467).txt', 'resume_(472).txt', 'resume_(475).txt', 'resume_(477).txt', 'resume_(48).txt', 'resume_(492).txt', 'resume_(495).txt', 'resume_(496).txt', 'resume_(50).txt', 'resume_(500).txt', 'resume_(502).txt', 'resume_(503).txt', 'resume_(504).txt', 'resume_(508).txt', 'resume_(509).txt', 'resume_(51).txt', 'resume_(516).txt', 'resume_(517).txt', 'resume_(524).txt', 'resume_(525).txt', 'resume_(529).txt', 'resume_(535).txt', 'resume_(543).txt', 'resume_(545).txt', 'resume_(556).txt', 'resume_(563).txt', 'resume_(565).txt', 'resume_(569).txt', 'resume_(570).txt', 'resume_(572).txt', 'resume_(575).txt', 'resume_(579).txt', 'resume_(58).txt', 'resume_(580).txt', 'resume_(581).txt', 'resume_(583).txt', 'resume_(587).txt', 'resume_(590).txt', 'resume_(595).txt', 'resume_(596).txt', 'resume_(6).txt', 'resume_(603).txt', 'resume_(604).txt', 'resume_(609).txt', 'resume_(61).txt', 'resume_(610).txt', 'resume_(627).txt', 'resume_(638).txt', 'resume_(64).txt', 'resume_(640).txt', 'resume_(651).txt', 'resume_(654).txt', 'resume_(658).txt', 'resume_(659).txt', 'resume_(666).txt', 'resume_(677).txt', 'resume_(680).txt', 'resume_(682).txt', 'resume_(685).txt', 
'resume_(686).txt', 'resume_(688).txt', 'resume_(689).txt', 'resume_(691).txt', 'resume_(695).txt', 'resume_(700).txt', 'resume_(703).txt', 'resume_(71).txt', 'resume_(712).txt', 'resume_(714).txt', 'resume_(718).txt', 'resume_(719).txt', 'resume_(722).txt', 'resume_(725).txt', 'resume_(729).txt', 'resume_(73).txt', 'resume_(730).txt', 'resume_(734).txt', 'resume_(738).txt', 'resume_(74).txt', 'resume_(745).txt', 'resume_(746).txt', 'resume_(748).txt', 'resume_(754).txt', 'resume_(758).txt', 'resume_(760).txt', 'resume_(766).txt', 'resume_(769).txt', 'resume_(772).txt', 'resume_(773).txt', 'resume_(774).txt', 'resume_(782).txt', 'resume_(784).txt', 'resume_(788).txt', 'resume_(791).txt', 'resume_(794).txt', 'resume_(797).txt', 'resume_(805).txt', 'resume_(806).txt', 'resume_(807).txt', 'resume_(809).txt', 'resume_(811).txt', 'resume_(818).txt', 'resume_(821).txt', 'resume_(824).txt', 'resume_(826).txt', 'resume_(827).txt', 'resume_(828).txt', 'resume_(830).txt', 'resume_(836).txt', 'resume_(837).txt', 'resume_(841).txt', 'resume_(842).txt', 'resume_(851).txt', 'resume_(852).txt', 'resume_(859).txt', 'resume_(861).txt', 'resume_(865).txt', 'resume_(88).txt', 'resume_(9).txt', 'resume_(98).txt']\n" ] ], [ [ "--------------\n### 2) Read 219 resumes' data from the directory. Additionally, sentence segmentation and case normalization is done.", "_____no_output_____" ] ], [ [ "# Case normalization function, this function will lower case the first word of every sentence. \n\ndef normalize_fun(data):\n new_list=''\n new_list=''.join(re.sub(r'(^\\s?\\w+)',lambda m: m.group().lower(),data)) # lower case the first word of sentence\n return new_list\n", "_____no_output_____" ], [ "# Read all 219 files and store in a dictionary format.\n\nos.chdir('C:/Users/vikra/Desktop/Python/wrangling/Assignment/A-1/Task2/resumeTxt')\n\n#This method opens and reads the file\ndef read_file_contents(file_name):\n with open(file_name,'r',encoding='UTF-8') as f:\n data = f.read()\n return data\n f.close()\n\n# This step performs sentence segmentation and case normalization\ndef perform_seg_norm(data):\n new_str=''\n sent_tokenize_list = sent_tokenize(data)\n sent_tokenize_list=[normalize_fun(each) for each in sent_tokenize_list]\n new_str = ''.join(str(e) for e in sent_tokenize_list)\n return new_str\n\n\n#Read all 219 texts, clean and store them\nall_text=[read_file_contents(name) for name in required_text_files] #Read all 219 data into one list\nall_text=[clean_data_fun(each) for each in all_text] #remove unwanted characters for each file\n\n# Applying sentence segmentation + case normalization, the result is final 219 data stored in a list\nfinal_data=[]\nfinal_data+=[perform_seg_norm(each) for each in all_text]\n \nfinal_all_text=''.join(each for each in final_data) #Convert whole 219 data into one string. This is required for further steps\n\n\n#Storing all resumes and respective data into a dictionary format\nresume_dict = dict(zip(resume_student_dataset, final_data))\n#resume_dict\n\nprint(\"'final_data' contains normalised 219 resumes' data in a list format\")\nprint(\"'final_all_text' is a string, which contains all 219 data in one file\")", "'final_data' contains normalised 219 resumes' data in a list format\n'final_all_text' is a string, which contains all 219 data in one file\n" ] ], [ [ "-----------\n### 3) Perform tokenisation. 
\n", "_____no_output_____" ] ], [ [ "#Below function tokenises based on the given regular expression.\n\ndef f_tokenise(data):\n tokenizer = RegexpTokenizer(r\"\\w+(?:[-']\\w+)?\") # Regular expression used for tokenization\n tokens = tokenizer.tokenize(data)\n return tokens\n\ntokenised_data=f_tokenise(final_all_text)\nprint(\"Total tokens present after reading all 219 resumes:\",len(tokenised_data))\n", "Total tokens present after reading all 219 resumes: 137702\n" ] ], [ [ "------------\n### 4) Removal of stop words from tokens", "_____no_output_____" ] ], [ [ "#Fetching stop_words from the directory\n\nos.chdir('C://Users//vikra//Desktop//Python//wrangling//Assignment//A-1//Task2')\n\nwith open('stopwords_en.txt','r') as input_file:\n stop_words=input_file.read()\ninput_file.close()\n\nstop_words=stop_words.split()\nstop_words=set(stop_words) #this list contains the stop words given for the assessment.\n\n#Filter stop words from tokens\n\nstopped_stopwords=[w for w in tokenised_data if w not in stop_words]\nprint(\"Tokens filtered from stop words:\",len(stopped_stopwords))\n", "Tokens filtered from stop words: 105116\n" ] ], [ [ "-----------------\n### 5) Filtering tokens with less than 3 characters.", "_____no_output_____" ] ], [ [ "#Removes data with less than 3character\n\n#A function which identifies and removes tokens with less than 3 characters\ndef filter_word(data):\n filter_list=[]\n for each in data:\n if len(each)>3:\n filter_list.append(each)\n return filter_list\n\n#A function which identifies 3char\ndef find_3charword(data):\n filter_list=[]\n for each in data:\n if len(each)<3:\n filter_list.append(each)\n return filter_list\n\nfiltered_tokens=filter_word(stopped_stopwords) # calling the function to remove less than 3 characters \ntokens_3char=find_3charword(stopped_stopwords)\n\nprint(\"Tokens less than 3 character removed and new filtered tokens=\",len(filtered_tokens))\n", "Tokens less than 3 character removed and new filtered tokens= 91722\n" ] ], [ [ "-----------------------------\n### 6) Filtering context-dependent (with the threshold set to %98) and rare tokens (< than 2%)", "_____no_output_____" ], [ "#### This section finds the 98% and 2% tokens and removes them. 
Process is divided into 3 steps.\n---------------------------------------------------------\n#### 6-1: Function(count_tokens_sen) accepts the tokens and all the resume files data and returns the count of each tokens presence against each file.", "_____no_output_____" ] ], [ [ "def count_tokens_sen(word_set,phrase_set):\n word_set=list(set(word_set))\n matches=[]\n for sen in phrase_set:\n words=sen.split()\n words=list(set(words))\n matches+=[x for x in word_set if x in words]\n \n counts={}\n for each in matches:\n if each in counts:counts[each] += 1\n else:counts[each] = 1\n return counts\n\n\ntokens_count=count_tokens_sen(filtered_tokens,final_data) # stores the count of presence of each token in all files\n# filtered_tokens contains the previous filtered tokens\n# final_data here is the list which contains 219 resumes' data", "_____no_output_____" ] ], [ [ "#### Output of 6-1: The count of each token against presenece of 219 is calculated\n---------------------------------------------------------------------------------\n#### 6-2: This section finds the 98% and 2% tokens", "_____no_output_____" ] ], [ [ "# Defining a function which calculates the percentage of presenece of each token\ndef filter_contextdep(data):\n tokens_to_be_filtered=[]\n for key,value in data.items():\n value=round((value/219)*100,2)\n if value >= 98 or value <= 2:\n #print(key,value,\"%\")\n tokens_to_be_filtered.append(key)\n return tokens_to_be_filtered\n\ntokens_to_be_filtered_contextdep=filter_contextdep(tokens_count)", "_____no_output_____" ] ], [ [ "#### Output of 6-2: calculates the tokens which are context dependent\n--------------------------------------------------------------------\n#### 6-3: Filter tokens which are context dependent", "_____no_output_____" ] ], [ [ "filt_tokens=[w for w in filtered_tokens if w not in tokens_to_be_filtered_contextdep]\n\n#-------------------------------------------------------------------------------\nprint(\"Output of step-3: tokens greater than 98% and less than 2% are removed.\")\n#-------------------------------------------------------------------------------\n\n\nprint(\"\\nTotal words to be removed which are occuring more than 98% or less than 2%=\",len(tokens_to_be_filtered_contextdep))\n\nprint(\"\\nFiltered tokens after removing context dependent=\",len(filt_tokens))\n", "Output of step-3: tokens greater than 98% and less than 2% are removed.\n\nTotal words to be removed which are occuring more than 98% or less than 2%= 10321\n\nFiltered tokens after removing context dependent= 71962\n" ] ], [ [ "--------------------------------------\n### 7) Stemming process. 
Since stemmer works only for lower-case tokens, hence process is to filter the lowercase and apply stemming", "_____no_output_____" ], [ "#### 7-1: Finding all Uppercase and lowercase tokens", "_____no_output_____" ] ], [ [ "uppercase_pattern=re.compile(r'^[A-Z].*\\b') # A Regex pattern to identify uppercase tokens\nlowercase_pattern=re.compile(r'^[a-z].*\\b') # A Regex pattern to identify lowercase tokens\n\nuppercase_list=[]\nlowercase_list=[]\n\nfor each in filt_tokens:\n uppercase_list+=uppercase_pattern.findall(each)\n\nfor each in filt_tokens:\n lowercase_list+=lowercase_pattern.findall(each)\n \nprint(\"Total upper case tokens:\",len(uppercase_list))\nprint(\"Total lower case tokens:\",len(lowercase_list))", "Total upper case tokens: 27742\nTotal lower case tokens: 41260\n" ] ], [ [ "#### 7-2: Stemming using the Porter stemmer for lower case tokens", "_____no_output_____" ] ], [ [ "stemmer = PorterStemmer()\n#print(['{0} -> {1}'.format(w, stemmer.stem(w)) for w in lowercase_list])\n\n\nlowercase_tokens = [stemmer.stem(word) for word in lowercase_list]\nprint(\"Stemmed lower-case tokens:\",len(lowercase_tokens))", "Stemmed lower-case tokens: 41260\n" ] ], [ [ "#### 7-3: Combine both uppercase and stemmed lowercase to form the unigram", "_____no_output_____" ] ], [ [ "#combining lowercase and uppercase\n\nfinal_unigram_tokens=list(lowercase_tokens+uppercase_list)\nprint(\"Total unigrams:\",len(final_unigram_tokens))\n#final_unigram_tokens", "Total unigrams: 69002\n" ] ], [ [ "----------------------------------\n### 8) Finding Bigrams\n\n#### From previous step(7-3), top 200 bigrams are calculated from final uni-grams", "_____no_output_____" ] ], [ [ "bigram_measures = nltk.collocations.BigramAssocMeasures()\nbigram_finder = nltk.collocations.BigramCollocationFinder.from_words(final_unigram_tokens)\nbigram_finder.apply_freq_filter(1)\nbigram_finder.apply_word_filter(lambda w: len(w) < 3)# or w.lower() in ignored_words)\ntop_200_bigrams = bigram_finder.nbest(bigram_measures.pmi, 200) # Top-200 bigrams\n#top_200_bigrams", "_____no_output_____" ] ], [ [ "------------------------\n### 9) Re-tokenization using MWETokenizer.\n\n#### 9-1) Combine unigrams and bi-grams and make one vocab", "_____no_output_____" ] ], [ [ "mwe_tokenizer = MWETokenizer(top_200_bigrams)\nmwe_tokens = mwe_tokenizer.tokenize(final_unigram_tokens)\nmwe_tokens.sort()\nmwe_tokens=set(mwe_tokens)\nlen(mwe_tokens)\n#mwe_tokens contains the final tokens(unigrams and bi-grams)", "_____no_output_____" ] ], [ [ "#### 9-2) Convert mwe tokens to formated list", "_____no_output_____" ] ], [ [ "#This step is required to search the bi-grams in resume data set.\n#Convert mwe tokens to formated list\n\nnew_list=[]\nfor each in mwe_tokens:\n if re.match(\".*\\w+_\\w+\",each):\n new_list.append(re.sub(r'_',' ',each))\n else:\n new_list.append(each)", "_____no_output_____" ] ], [ [ "#### 9-3) From the filtered tokens,checking for any context-dependent words and filtering them.", "_____no_output_____" ] ], [ [ "#From the filtered tokens,checking for any context-dependent words and filtering them.\n\ntokens_tobefilt=count_tokens_sen(new_list,final_data) #from the tokens finding the count of presence in each resume file.\ntokens_tobefilt=filter_contextdep(tokens_tobefilt) #finding the 98% and 2% tokens.\nfinal_vocab=[w for w in new_list if w not in tokens_tobefilt] # cleaned tokens\nfinal_vocab=[w for w in final_vocab if w not in stop_words]#removing any stopwords if present\nfinal_vocab=filter_word(final_vocab) # removing any 3charc 
tokens\nfinal_vocab.sort()\nprint(\"Final vocab:\",len(final_vocab))\n", "Final vocab: 3296\n" ] ], [ [ "#### 9-4) Creating a final vocab index dictionary, which is output to file", "_____no_output_____" ] ], [ [ "# storing final vocab with dictionary with index\nfinal_vocab_dict=dict(enumerate(final_vocab))\n#print(final_vocab_dict)", "_____no_output_____" ] ], [ [ "--------------------------------------\n### 10) Clean individual resume files and find the ocunt of each token in resume files", "_____no_output_____" ], [ "#### 10-1) This step is required to clean indivual resume files, in order to capture the count of tokens in each file", "_____no_output_____" ] ], [ [ "#The purpose of this step is to clean individual resume from stopwords, context dependent, context independent \n#and less than 3 characters length\n\n#Update the stop words with tokens with context dependent and tokens with less than 3 characters.\nstop_words.update(tokens_to_be_filtered_contextdep+tokens_tobefilt+tokens_3char)\n\n#Fucntion to remove stop words\ndef final_clean(data):\n new_list=[]\n new_list+=[w for w in data if w not in stop_words]\n return new_list\n\n#Access each resume and clean them.\nfinal_cleaned_data=[]\nfor each in final_data:\n new_str=''\n token=f_tokenise(each)\n new_str=final_clean(token)\n final_cleaned_data.append(' '.join(new_str))\n\nprint(\"'final_cleaned_data' contains a list of cleaned individual resumes\")", "'final_cleaned_data' contains a list of cleaned individual resumes\n" ] ], [ [ "#### 10-2) Find the count of each tokens in each resume files", "_____no_output_____" ] ], [ [ "#Next find the count of each tokens in each resume files\n\n#This function tokenises and calculates the count of each token appearing in the resume file\ndef word_count(data):\n counts = dict()\n words = f_tokenise(data)\n\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts\n\ncount_data_tokens=[]\nfor each_sen in final_cleaned_data:\n count_data_tokens.append(word_count(each_sen))\n\nprint(\"'count_data_tokens' contains count of each token for each resume file\")", "'count_data_tokens' contains count of each token for each resume file\n" ] ], [ [ "#### 10-3) Now map the count of each token with the index of the final_vocab, which is the desired output\n\nHere, 'resume_final_dict' contains the output in the format= token_index:count", "_____no_output_____" ] ], [ [ "final_dict=[]\nfor each_tokenised_file in count_data_tokens:\n new_dict = dict((k, each_tokenised_file.get(v)) for k, v in final_vocab_dict.items())\n new_dict={k:v for k,v in new_dict.items() if v is not None}\n final_dict.append(new_dict)\n \nresume_final_dict = dict(zip(resume_student_dataset, final_dict))\n", "_____no_output_____" ] ], [ [ "-----------------\n### 11) Writing final vocab to a file", "_____no_output_____" ], [ "#### 11-1) Printing final vocab to a text file", "_____no_output_____" ] ], [ [ "import json \n\nwith open('29389690_vocab.txt','w') as output_file:\n output_file.write(\"Vocab of Unigrams & Bigrams:\\n\") \n output_file.write(json.dumps(final_vocab_dict))\noutput_file.close()", "_____no_output_____" ] ], [ [ "#### 11-2) Printing count vector to a text file", "_____no_output_____" ] ], [ [ "with open('29389690_countVec.txt','w') as output_file:\n output_file.write(\"Count Vector:\\n\\n\") \n for k, v in resume_final_dict.items():\n output_file.write('resume_'+str(k) + ','+ str(v).replace(\"{\",\"\").replace(\"}\", \"\") + '\\n\\n')\noutput_file.close()\n", 
"_____no_output_____" ] ], [ [ "### 12) References\n\n10-2) word_count referred from this link https://www.w3resource.com/python-exercises/string/python-data-type-string-exercise-12.php", "_____no_output_____" ], [ "### 13) Summary\n\n#### Logic used for assessment:\n\nSection 1) Identify the 250 resumes file numbers assigned to me. \n ##### Result: Out of 250, 219 were unqiue file numbers. \n--------------\nSection 2) Then out of total resumes(867) available, read only the 219 resumes' data from the directory. Additionally, sentence segmentation and case normalization is done. \n ##### Result: Out of 250, 219 were read, all sentences were segmented and case normalization was performed. \n----------\nSection 3) Perform tokenisation. \n ##### Result: Total tokens present after reading all 219 resumes: 137702 \n--------\nSection 4) Tokens filtering from stop words. \n ##### Result: Tokens filtered from stop words: 105116 \n-------------\nSection 5) Filtering tokens with less than 3 characters. \n ##### Result: Tokens less than 3 character removed and new filtered tokens= 91722 \n-----------\nSection 6) Filtering context-dependent (with the threshold set to %98) and rare tokens (less than 2%). \n ##### Result: Filtered tokens after removing context dependent= 71962 \n-------------\nSection 7) Stemming process only for lower-case tokens. \n ##### Result: After stemming, combining of uppercase and lowercase tokens, we get total tokens: 69002. \n-------------\nSection 8) Finding Bi-grams. \n ##### Result: Top 200 bi-grams is found. \n-----------\nSection 9) Re-tokenization using MWETokenizer. \n ##### Result: Using mwe tokeniser, bi-grams and unigrams are mixed and final vocab: 3296 is found. \n--------------\nSection 10) Calculating the term frequency and creating a vector for desired output. \n ##### Result: For each resume, count of each token is calculated. \n---------------\nSection 11) Writing output to files.\n ##### Result: Result is printed in two files\n\n--------\n#### The wrangling process illustrated above, shows how a text starting with bellow statistics, ended up reduced to more sparse text while conserving the main text feature.\n\n######################### Text Statistics Before Wrangling ##################################\n\nTotal number of vocabs: 137702\n\n\n######################### Final Text Statistics After Wranling ##################################\n\nTotal number of vocabs: 3296\n", "_____no_output_____" ], [ "### Creating Dataframe and plotting the values", "_____no_output_____" ] ], [ [ "df= pd.DataFrame({\"count\":[137702,105116,91722,71962,69002,3296]},\n index=['Total tokens present after reading all 219 resumes','Tokens filtered from stop words','Tokens less than 3 character','Tokens removed context dependent','Count of tokens post stemming','Final vocab count post mwe tokenise'])", "_____no_output_____" ], [ "df.plot.bar()\nplt.xlabel('Wrangling stage')\nplt.ylabel(\"Frequency\")\nplt.title(\"Token reduction post each wrangling stage\")\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
4af46e7a60070b92248a8ef55f04a2b092947dc0
21,879
ipynb
Jupyter Notebook
Exploration/RL/RandSearch/Notebooks/MountainCar.ipynb
svaisakh/aiprojects
cfa17a56066c77519cc0785053ec1828c463f46f
[ "MIT" ]
1
2018-10-31T09:59:06.000Z
2018-10-31T09:59:06.000Z
Exploration/RL/RandSearch/Notebooks/MountainCar.ipynb
svaisakh/aiprojects
cfa17a56066c77519cc0785053ec1828c463f46f
[ "MIT" ]
null
null
null
Exploration/RL/RandSearch/Notebooks/MountainCar.ipynb
svaisakh/aiprojects
cfa17a56066c77519cc0785053ec1828c463f46f
[ "MIT" ]
2
2018-03-20T16:09:05.000Z
2020-03-28T17:43:59.000Z
76.5
2,508
0.648978
[ [ [ "# Setup", "_____no_output_____" ], [ "## Imports", "_____no_output_____" ] ], [ [ "from tqdm import tqdm_notebook", "_____no_output_____" ], [ "import gym\nimport numpy as np", "_____no_output_____" ] ], [ [ "## Define Useful Features", "_____no_output_____" ] ], [ [ "env = gym.make('MountainCar-v0')", "_____no_output_____" ], [ "n = env.observation_space.shape[0]\nh = 10\na = env.action_space.n\nmax_reward = -110", "_____no_output_____" ], [ "param_length = (n + 1) * h + (h + 1) * a\nrefresh_params = lambda: np.random.randn(param_length) * 100\nparams = refresh_params()", "_____no_output_____" ], [ "best_params = params\nbest_reward = -np.inf", "_____no_output_____" ], [ "def relu(x):\n mask = x < 0\n x[mask] = 0\n return x", "_____no_output_____" ], [ "def get_action(obs):\n Woh = params[:n * h].reshape((n, h))\n bh = params[n * h:(n + 1) * h]\n xh = relu(obs.dot(Woh) + bh)\n Why = params[(n + 1) * h:(n + 1) * h + h * a].reshape((h, a))\n by = params[(n + 1) * h + h * a:(n + 1) * h + (h + 1) * a]\n s = xh.dot(Why) + by\n return s.argmax()", "_____no_output_____" ], [ "def sample(episodes=1, observe=True):\n epoch_reward = 0\n for episode in range(episodes):\n env.reset()\n done = False\n obs = env.observation_space.sample()\n episode_reward = 0\n while not done:\n if observe: env.render()\n obs, r, done, _ = env.step(get_action(obs)) # take the current policy's action\n episode_reward += r\n \n epoch_reward += episode_reward\n return epoch_reward", "_____no_output_____" ], [ "def train(epochs=0, episodes=100, show_improvements=True):\n global params, best_params, best_reward\n \n def train_epoch():\n global params, best_params, best_reward\n params = refresh_params()\n epoch_reward = sample(episodes, False)\n\n if epoch_reward > best_reward:\n best_reward = epoch_reward\n best_params = params\n sample()\n return True\n return False\n \n if epochs <= 0:\n while best_reward / episodes < max_reward:\n if train_epoch():\n print('Average reward:', int(best_reward / episodes), end='\\r', flush=True)\n else:\n for _ in tqdm_notebook(range(epochs)):\n train_epoch()\n\n params = best_params\n print('Average reward:', int(best_reward / episodes), end='\\r', flush=True)", "_____no_output_____" ] ], [ [ "# Train", "_____no_output_____" ] ], [ [ "train()", "_____no_output_____" ] ], [ [ "# Sample", "_____no_output_____" ] ], [ [ "sample()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4af475adb174cf31072f139c06131b9661097833
19,579
ipynb
Jupyter Notebook
Deep-Learning-Book-Review/Linear_Algebra_with_Python_and_NumPy.ipynb
IshrakHasin/AI-DL-Enthusiasts-Meetup
293bf6cae1dbb1a3f5b787fea4253bd89f659279
[ "MIT" ]
99
2017-08-06T18:42:55.000Z
2021-08-30T14:54:28.000Z
Deep-Learning-Book-Review/Linear_Algebra_with_Python_and_NumPy.ipynb
IshrakHasin/AI-DL-Enthusiasts-Meetup
293bf6cae1dbb1a3f5b787fea4253bd89f659279
[ "MIT" ]
null
null
null
Deep-Learning-Book-Review/Linear_Algebra_with_Python_and_NumPy.ipynb
IshrakHasin/AI-DL-Enthusiasts-Meetup
293bf6cae1dbb1a3f5b787fea4253bd89f659279
[ "MIT" ]
35
2017-07-30T19:53:28.000Z
2021-08-30T14:54:28.000Z
26.894231
507
0.486082
[ [ [ "# Linear Algebra with Python and NumPy", "_____no_output_____" ] ], [ [ "# First, we need to import the package NumPy, which is the library enabling all the fun with algebraic structures.\nfrom numpy import *", "_____no_output_____" ] ], [ [ "## Complex Numbers", "_____no_output_____" ], [ "A complex number is a number of the form $z = x + jy$, where $x$ and $y$ are real numbers and $j$ is the **_imaginary unit_**, satisfying $j^2 = −1$. Note that the imaginary unit, often denoted as $i$, is denoted as $j$ in Python.\n\nThe set $\\mathbb{C}$ of all complex numbers can be actually defined as the set of ordered pairs of real numbers $\\{(x,y) \\mid x,y\\in\\mathbb{R} \\}$ that satisfies the following operations\n\n<img src=\"https://betterexplained.com/wp-content/uploads/complex/complex_conjugates.png\" style=\"float:right\"/>\n\n- *addition:* $(a,b)+(c,d) = (a+c,b+d)$\n- *multiplication:* $(a,b)\\cdot(c,d) = (ac-bd,ad+bc)$\n\nThen, it is just a matter of notation to express a complex number as $(x, y)$ or as $x + jy$.\n\nWhen we have a complex number $z\\in\\mathbb{C}$, we can denote its real and imaginary part as\n\n$$ x = \\Re(z), \\quad y = \\Im(z). $$\n\nThe **_complex conjugate_** of the complex number $z = x + jy$ is denoted by either $\\bar{z}$ or $z^*$ and defined as\n\n$$\\bar{z} = x − jy .$$\n\nThe **_absolute value_** (or modulus or magnitude) of a complex number $z = x + jy$ is\n\n$$ | z | = \\sqrt{x^2+y^2} = \\sqrt{z \\bar{z}} .$$", "_____no_output_____" ] ], [ [ "z = 3 + 4j # Define complex number z\nprint('z =', z) \nprint('Re(z) =', real(z)) # Get real part of z\nprint('Im(z) =', imag(z)) # Get imaginary part of z\nprint('|z| =', abs(z)) # Get absolute value of z", "('z =', (3+4j))\n('Re(z) =', 3.0)\n('Im(z) =', 4.0)\n('|z| =', 5.0)\n" ] ], [ [ "Note that to obtain $j=\\sqrt{-1}$ we must write the argument of `sqrt` function as a complex number (even if has zero imaginary part), otherwise Python tries to compute sqrt on real numbers and throws an error.", "_____no_output_____" ] ], [ [ "z = sqrt(-1+0j)\nprint('sqrt(-1) =', z)", "('sqrt(-1) =', 1j)\n" ] ], [ [ "## Vectors and Matrices", "_____no_output_____" ], [ "Using NumPy we can define vectors and matrices with both real or complex elements. Although, in contrast to Matlab, where matrix is the default type, in Python we need to define vectors and matrices as `array` or `matrix` type from NumPy package.\n\n<img src=\"http://www.math.cornell.edu/~mec/Winter2009/RalucaRemus/Lecture1/Images/matrix.gif\"/>", "_____no_output_____" ] ], [ [ "a = array([10,20,30]) # Define a vector of size 3 using type 'array'\nprint(a)\nprint(a.shape) # Size/shape of vector", "[10 20 30]\n(3,)\n" ], [ "b = matrix('10 20 30') # Define a vector of size 3 using type 'matrix'\nprint(b)\nprint(b.shape) # Size/shape of vector", "[[10 20 30]]\n(1, 3)\n" ], [ "c = linspace(10,20,6) # Define vector as 6 values evenly spaced from 10 to 20\nprint(c)", "[ 10. 12. 14. 16. 18. 20.]\n" ] ], [ [ "Note that matrix and array elements in Python are indexed from 0, in contrast to Matlab where indexing starts from 1.", "_____no_output_____" ] ], [ [ "print(c[:]) # Get all elements\nprint(c[0]) # The first element\nprint(c[-1]) # The last element\nprint(c[:3]) # The first 3 elements\nprint(c[-3:]) # The last 3 elemnets\nprint(c[2:4]) # 2:4 selects elements of indexes 2 and 3", "[ 10. 12. 14. 16. 18. 20.]\n10.0\n20.0\n[ 10. 12. 14.]\n[ 16. 18. 20.]\n[ 14. 
16.]\n" ] ], [ [ "**_Euclidean norm_** of vector is returned by method `numpy.linalg.norm`", "_____no_output_____" ] ], [ [ "norm = linalg.norm(a) # Euclidean norm of vector a\nprint('a =', a)\nprint('norm(a) =', norm)\n\nx = a/linalg.norm(a) # Make normalized/unit vector from a\nprint('x =', x)\nprint('norm(x) =', linalg.norm(x))", "('a =', array([10, 20, 30]))\n('norm(a) =', 37.416573867739416)\n('x =', array([ 0.26726124, 0.53452248, 0.80178373]))\n('norm(x) =', 0.99999999999999989)\n" ] ], [ [ "**_Transposition_** of vectors is not so intuitive as in Matlab, especially if a vector is defined as 1D `array` and you cannot distinguish between row and column vector. However, using the keyword `newaxis` it's possible to shape the vector into 2D array (as matrix of size $1 \\times n$ or $n \\times 1$), where transposition makes sense and can be obtained by attribute `.T`.", "_____no_output_____" ] ], [ [ "x = a[:,newaxis] # Make column vector from vector a (defined as array)\nprint(x)\nprint(x.shape) # Now size of column vector is 3x1\nprint(x.T) # Make row vector by transpostion of column vector", "[[10]\n [20]\n [30]]\n(3, 1)\n[[10 20 30]]\n" ] ], [ [ "If a vector was defined as 2D array of type `matrix`, transportation is not a problem.", "_____no_output_____" ] ], [ [ "x = b.T # Make column vector from vector b (defined as matrix)\nprint(x)\nprint(x.shape) # Now size of column vector is 3x1\nprint(x.T) # Make row vector by transpostion of column vector", "[[10]\n [20]\n [30]]\n(3, 1)\n[[10 20 30]]\n" ] ], [ [ "**_Matrices_** can be defined as 2D arrays of type `array` or `matrix` (there is no problem with transposition with any type).", "_____no_output_____" ] ], [ [ "A = array([[11,12,13], [21,22,23], [31,32,33]]) # Define matrix of size 3x3 as 2D 'array-type'\nprint(A)\nprint(A.shape)", "[[11 12 13]\n [21 22 23]\n [31 32 33]]\n(3, 3)\n" ], [ "B = matrix('11 12 13; 21 22 23; 31 32 33') # Define matrix of size 3x3 as 'matrix-type'\nprint(B)\nprint(B.shape)", "[[11 12 13]\n [21 22 23]\n [31 32 33]]\n(3, 3)\n" ], [ "print(B[0,1]) # Get matrix element at row 0, column 1\nprint(B[0,:]) # Get 1st row of matrix (A[0] returns also 1st row)\nprint(B[:,0]) # Get 1st column of matrix", "12\n[[11 12 13]]\n[[11]\n [21]\n [31]]\n" ], [ "print(A[:,0]) # Note that column from 'array-type' matrix is returned as 1D array\nprint(B[:,0]) # Column from 'matrix-type' matrix is returned as true column as expected", "[11 21 31]\n[[11]\n [21]\n [31]]\n" ] ], [ [ "NumPy can generate some essential matrices exactly like Matlab.", "_____no_output_____" ] ], [ [ "print('3x3 Matrix full of zeros:')\nprint(zeros([3,3]))\n\nprint('\\n3x3 Matrix full of ones:')\nprint(ones([3,3]))\n\nprint('\\n3x3 identity matrix:')\nprint(eye(3))\n\nprint('\\n3x3 diagonal matrix:')\nx = array([1.,2.,3.])\nprint(diag(x))\n\nprint('\\n3x3 random matrix:')\nprint(random.rand(3,3))", "3x3 Matrix full of zeros:\n[[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]]\n\n3x3 Matrix full of ones:\n[[ 1. 1. 1.]\n [ 1. 1. 1.]\n [ 1. 1. 1.]]\n\n3x3 identity matrix:\n[[ 1. 0. 0.]\n [ 0. 1. 0.]\n [ 0. 0. 1.]]\n\n3x3 diagonal matrix:\n[[ 1. 0. 0.]\n [ 0. 2. 0.]\n [ 0. 0. 
3.]]\n\n3x3 random matrix:\n[[ 0.57488073 0.37926603 0.50942703]\n [ 0.30616018 0.31027854 0.95497125]\n [ 0.94306375 0.12918588 0.50445312]]\n" ] ], [ [ "For merging matrices or vectors methods `numpy.hstack` and `numpy.vstack` can be used.", "_____no_output_____" ] ], [ [ "print(vstack([ A, ones([1,3]) ])) # Add row vector to matrix\nprint(hstack([ A, ones([3,1]) ])) # Add column vector to matrix\nprint(hstack([ A, eye(3) ])) # Merge two matrices horizontally", "[[ 11. 12. 13.]\n [ 21. 22. 23.]\n [ 31. 32. 33.]\n [ 1. 1. 1.]]\n[[ 11. 12. 13. 1.]\n [ 21. 22. 23. 1.]\n [ 31. 32. 33. 1.]]\n[[ 11. 12. 13. 1. 0. 0.]\n [ 21. 22. 23. 0. 1. 0.]\n [ 31. 32. 33. 0. 0. 1.]]\n" ] ], [ [ "## Operations with Matrices\n\n**_Matrix transposition_** is obtained by attribute `.T`", "_____no_output_____" ] ], [ [ "X = ones([2,5]) # Generate 2x5 matrix full of ones\nY = X.T # Obtain transpose of matrix X\n\nprint('Matrix X of size', X.shape, ':\\n', X)\nprint('\\nMatrix Y=X.T of size', Y.shape, ':\\n', Y)", "('Matrix X of size', (2, 5), ':\\n', array([[ 1., 1., 1., 1., 1.],\n [ 1., 1., 1., 1., 1.]]))\n('\\nMatrix Y=X.T of size', (5, 2), ':\\n', array([[ 1., 1.],\n [ 1., 1.],\n [ 1., 1.],\n [ 1., 1.],\n [ 1., 1.]]))\n" ] ], [ [ "**_Hermitian transpose_** (or conjugate transpose) of complex matrix $\\mathbf{A}\\in\\mathbb{C}^{m\\times n}$ is obtained by taking the transpose of $\\mathbf{A}$ and then taking the complex conjugate of each element. Note that for real matrices Hermitian transpose and plain transpose does not differ. In NumPy this kind of transposition is obtained by attribute `.H` (exists only for matrix type).", "_____no_output_____" ] ], [ [ "X = matrix((3+4j)*ones([2,5])) # Generate matrix full of complex elements 3+4j\nY = X.H # Obtain Hermitian transpose of matrix X\n\nprint('Matrix X of size', X.shape, ':\\n', X)\nprint('\\nMatrix Y=X.H of size', Y.shape, ':\\n', Y)", "('Matrix X of size', (2, 5), ':\\n', matrix([[ 3.+4.j, 3.+4.j, 3.+4.j, 3.+4.j, 3.+4.j],\n [ 3.+4.j, 3.+4.j, 3.+4.j, 3.+4.j, 3.+4.j]]))\n('\\nMatrix Y=X.H of size', (5, 2), ':\\n', matrix([[ 3.-4.j, 3.-4.j],\n [ 3.-4.j, 3.-4.j],\n [ 3.-4.j, 3.-4.j],\n [ 3.-4.j, 3.-4.j],\n [ 3.-4.j, 3.-4.j]]))\n" ] ], [ [ "**_Matrix multiplication_** must be executed by method for dot product `numpy.dot`. Operator `*` produces only element-wise multiplication in Python.", "_____no_output_____" ] ], [ [ "print('Matrix A:')\nprint(A)\n\nprint('\\nMatrix B:')\nB = ones([3,3])\nprint(B)\n\nprint('\\nElement-wise multiplication A*B:')\nprint(A*B)\n\nprint('\\nMatrix multiplication A by B:')\nprint(dot(A,B))\n\nprint('\\nMatrix multiplication B by A:')\nprint(dot(B,A))", "Matrix A:\n[[11 12 13]\n [21 22 23]\n [31 32 33]]\n\nMatrix B:\n[[ 1. 1. 1.]\n [ 1. 1. 1.]\n [ 1. 1. 1.]]\n\nElement-wise multiplication A*B:\n[[ 11. 12. 13.]\n [ 21. 22. 23.]\n [ 31. 32. 33.]]\n\nMatrix multiplication A by B:\n[[ 36. 36. 36.]\n [ 66. 66. 66.]\n [ 96. 96. 96.]]\n\nMatrix multiplication B by A:\n[[ 63. 66. 69.]\n [ 63. 66. 69.]\n [ 63. 66. 
69.]]\n" ] ], [ [ "There are also methods for essential matrix features like **_Frobenius norm_**, **_rank_** or **_determinant_**.", "_____no_output_____" ] ], [ [ "print('Matrix A of size', A.shape, ':\\n', A)\n\n# Frobenius norm of matrix\nprint('\\nFrobenius norm: ||A|| =', linalg.norm(A))\n\n# Rank of matrix\nprint('rank(A) =', linalg.matrix_rank(A))\n\n# Determinant of matrix\nprint('det(A) =', linalg.det(A))", "('Matrix A of size', (3, 3), ':\\n', array([[11, 12, 13],\n [21, 22, 23],\n [31, 32, 33]]))\n('\\nFrobenius norm: ||A|| =', 70.441465061425291)\n('rank(A) =', 2)\n('det(A) =', -3.3879709074045058e-14)\n" ] ], [ [ "In example above, note that the matrix $\\mathbf{A}$ is a singular matrix, because its rank is lower than number of its rows, thus also its detemninat is zero.", "_____no_output_____" ], [ "## Conclusion\n\nAs we can see from this article, Python and NumPy package can be used to perform all the usual matrix manipulations. There are only few annoying things one need to keep in mind when writing Python code. For example, operator `*` applied to matrices doesn't produce matrix product, but only element-wise multiplication. Or vectors, many methods return them just as 1D `array`, so we need to convert them into 2D `array` or `matrix` type first, to be able to distinguish between row and column vector.\n", "_____no_output_____" ], [ "### References:\n\n- [Complex numbers](https://en.wikipedia.org/wiki/Complex_number)\n- [Vectors](https://en.wikipedia.org/wiki/Coordinate_vector)\n- [Matrix][1]\n- [Hermitian transpose](https://en.wikipedia.org/wiki/Conjugate_transpose)\n- [Linear algebra](https://en.wikipedia.org/wiki/Linear_algebra)\n- [Vector space](https://en.wikipedia.org/wiki/Vector_space)\n\n\n- [NumPy documentation](http://docs.scipy.org/doc/numpy/)\n- [NumPy for Matlab users](https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html)\n- [Matplotlib documentation](http://matplotlib.org/)\n\n\n[1]:https://en.wikipedia.org/wiki/Matrix_(mathematics)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
4af47f0814de7fa6c89509d26f3ff9b65ccdef78
179,264
ipynb
Jupyter Notebook
notebooks/papers/liquidity/Two Market Makers - via Pontryagin.ipynb
vegaprotocol/research
59c73254b658b82c55bd9bcb2c4b44240fbe45ad
[ "MIT" ]
4
2021-04-01T03:42:07.000Z
2021-10-03T02:00:58.000Z
notebooks/papers/liquidity/Two Market Makers - via Pontryagin.ipynb
vegaprotocol/research
59c73254b658b82c55bd9bcb2c4b44240fbe45ad
[ "MIT" ]
1
2021-09-22T12:00:50.000Z
2021-10-05T20:53:55.000Z
notebooks/papers/liquidity/Two Market Makers - via Pontryagin.ipynb
vegaprotocol/research
59c73254b658b82c55bd9bcb2c4b44240fbe45ad
[ "MIT" ]
1
2021-03-08T11:10:48.000Z
2021-03-08T11:10:48.000Z
299.27212
67,080
0.921038
[ [ [ "# Two Market Makers - via Pontryagin\n\nThis notebook corresponds to section 4 (**Agent based models**) of \"Market Based Mechanisms for Incentivising Exchange Liquidity Provision\" available [here](https://vega.xyz/papers/liquidity.pdf). It models two market makers and solves the resulting game by an iterative scheme based on the Pontryagin optimality principle.", "_____no_output_____" ] ], [ [ "import math, sys \nimport numpy as np\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom os import path\ncount = 0\n\nfrom matplotlib.backends.backend_pdf import PdfPages", "_____no_output_____" ], [ "T = 0.4;\nsigma0 = 3\nsigma1 = 0.5\nlambd = 0.1\nr = 0.0\nrRisk0 = 0.3\nrRisk1 = 0.1\n\ndelta_a = 1e-4\nfee_scaling = 0.1\n\n\n# This is key; how does instantaneous trading volume react \n# to market making stake\n# and to fees. You could specify different beliefs for the two different agents.\n\ndef fee_volume_response(f):\n f = np.maximum(f, np.zeros(np.size(f)))\n f = np.minimum(f, np.ones(np.size(f)))\n return 1.0/(f+0.01) - f\n\ndef stake_volume_response(S):\n return 1.0 / (1+np.exp(-0.05*S+2)) - 1.0 / (1+np.exp(2))", "_____no_output_____" ], [ "# Check that the shape below is concave (i.e. there is a single maximum) we need \n# this if we want the optimization procedure to converge\nx_span = np.linspace(0,1, 1000) \ny = fee_scaling * fee_volume_response(x_span) * x_span\nprint('Max %f' % max(y))\nmax_idx=np.argmax(y)\nplt.xlabel('fee in %')\nplt.ylabel('volume in %')\nplt.title('Fee response times fee')\nplt.plot(x_span,y)", "Max 0.091563\n" ], [ "# Check that the shape below is concave (i.e. there is a single maximum) we need \n# this if we want the optimization procedure to converge.\n# Of course you may be lucky and things will work even in the case when it's not exactly convex...
\nx_span = np.linspace(0,200, 200) \ny = stake_volume_response(x_span)\nplt.xlabel('stake')\nplt.ylabel('volume in %')\nplt.title('Stake response')\nplt.plot(x_span,y)", "_____no_output_____" ], [ "# As things are set-up at moment the agents only differ in their belief about\n# the maximum trading volume they'd expect to see\ndef trading_volume0(f,S):\n N_max = 10000\n return N_max * fee_volume_response(f) * stake_volume_response(S)\n\ndef trading_volume1(f,S):\n N_max = 50000\n return N_max * fee_volume_response(f) * stake_volume_response(S)\n\ndef running_gain0(t,f,S0,S1,a0):\n frac = S0/(S0+S1)\n stake = S0+S1\n return np.exp(-r*t) * (frac * fee_scaling * f * trading_volume0(f,stake) - max(lambd * sigma0 * S0,0)) - max(np.exp(rRisk0*t)*S0, 0) \\\n - delta_a * a0*a0\n\ndef running_gain1(t,f,S0,S1,a1):\n frac = S1/(S0+S1)\n stake = S0+S1\n return np.exp(-r*t) * (frac * fee_scaling * f * trading_volume1(f,stake) - max(lambd * sigma1 * S1,0)) - max(np.exp(rRisk1*t)*S1, 0) \\\n - delta_a * a1*a1\n\ndef running_gain_x_0(t,x,S_1, a0):\n f = x[0]\n S_0 = x[1]\n return running_gain0(t,f,S_0,S_1, a0)\n\ndef running_gain_x_1(t,x,S_0, a1):\n f = x[0]\n S_1 = x[1]\n return running_gain1(t,f,S_0,S_1, a1)", "_____no_output_____" ], [ "# Below we define the gradients (using finite difference)\n# of the running gain specified above - this is just a technicality\n# used in the subsequent optimization.\n\ndef grad_x_of_running_gain_0(t,x,S1,a):\n delta = 1e-8\n grad = np.zeros(2)\n\n #print(x)\n x_plus = x + np.array([delta, 0])\n x_minus = x - np.array([delta, 0])\n rg_plus = running_gain_x_0(t,x_plus,S1,a)\n rg_minus = running_gain_x_0(t,x_minus,S1,a)\n #print(x_plus)\n grad[0] = (rg_plus - rg_minus)/(2*delta)\n \n x_plus = x + np.array([0, delta])\n x_minus = x - np.array([0, delta])\n rg_plus = running_gain_x_0(t,x_plus,S1,a)\n rg_minus = running_gain_x_0(t,x_minus,S1,a)\n grad[1] = (rg_plus - rg_minus)/(2*delta)\n \n return grad\n\ndef grad_x_of_running_gain_1(t,x,S0,a):\n delta = 1e-8\n grad = np.zeros(2)\n\n x_plus = x + np.array([delta, 0])\n x_minus = x - np.array([delta, 0])\n rg_plus = running_gain_x_1(t,x_plus,S0,a)\n rg_minus = running_gain_x_1(t,x_minus,S0,a)\n grad[0] = (rg_plus - rg_minus)/(2*delta)\n \n x_plus = x + np.array([0, delta])\n x_minus = x - np.array([0, delta])\n rg_plus = running_gain_x_1(t,x_plus,S0,a)\n rg_minus = running_gain_x_1(t,x_minus,S0,a)\n grad[1] = (rg_plus - rg_minus)/(2*delta)\n \n return grad", "_____no_output_____" ], [ "# Initialization \nL_S = 150;\nL_f = 1;\n\nN_T = 200; delta_t = T / (N_T-1);\nN_S = 45; \nN_f = 45; \n\nt_span = np.linspace(0, T, N_T)\nf_span = np.linspace(0, L_f, N_f)\nS_span = np.linspace(0, L_S, N_S)\n\ndef grid_idx_from(S,S_span):\n min_S = S_span[0]\n N_S = np.size(S_span)\n max_S = S_span[N_S-1]\n delta_S = (max_S-min_S)/(N_S-1)\n return max(min(int(round(S/delta_S)), N_S-1),0)", "_____no_output_____" ], [ "F_vals = np.zeros([np.size(f_span), np.size(S_span)])\nf_times_V_vals = np.zeros([np.size(f_span), np.size(S_span)])\ngrad_F_vals = np.zeros([np.size(f_span), np.size(S_span), 2])\nfor f_idx in range(0, np.size(f_span)):\n for S_idx in range(0, np.size(S_span)):\n f = f_span[f_idx]\n S = S_span[S_idx]\n F_vals[f_idx,S_idx] = running_gain0(T, f, S, 10, 0)\n f_times_V_vals[f_idx,S_idx] = f*trading_volume0(f,S)\n grad_F_vals[f_idx,S_idx,:] = grad_x_of_running_gain_0(T, np.array([f, S]), 10, 0)\n \nmax_idx = np.unravel_index(np.argmax(F_vals, axis=None),F_vals.shape)\nprint(f_span[max_idx[0]])\nprint(S_span[max_idx[1]])", 
"0.1590909090909091\n105.68181818181819\n" ], [ "plotGridX, plotGridY = np.meshgrid(S_span, f_span)\nfig = plt.figure()\n#ax1 = fig.add_subplot(111,projection='3d')\nax1 = fig.gca(projection='3d')\nsurf = ax1.plot_surface(plotGridX, plotGridY, f_times_V_vals[:,:], cmap=cm.autumn, antialiased=True)\nax1.set_xlabel('stake')\nax1.set_ylabel('fee')\nax1.set_zlabel('V')\nax1.set_zlim(0, 40000)\nax1.view_init(30, 20)\nax1.set_title('Agent 1')\nplt.savefig('response1.pdf')\n", "_____no_output_____" ], [ "gamma_f = -0.02\ngamma_S = 5\nm = 1\n\ndef drift_0(a0,a1):\n b = np.zeros(2)\n b[0] = gamma_f*(a0+a1)\n b[1] = gamma_S*a0\n return b\n\ndef drift_1(a0,a1):\n b = np.zeros(2)\n b[0] = gamma_f*(a0+a1)\n b[1] = gamma_S*a1\n return b \n\ndef grad_a0_H0(y,a0,a1):\n val = gamma_f*y[0] + gamma_S*y[1] - 2*delta_a*a0 \n return val\n\ndef grad_a1_H1(y,a0,a1):\n val = gamma_f*y[0] + gamma_S*y[1] - 2*delta_a*a1\n return val", "_____no_output_____" ], [ "# Fix initial fee & and stake of two players\nfee_init = 0.5 # has to be between 0 and 1\nplayer0_stake = 250\nplayer1_stake = 10\n\n# Learning params:\n# higher value means faster convergence but less stability i.e.: \n# if you see stupid output (explosion, negative fees etc.) set this lower. \nrho = 0.05 \n\n# learning takes a long time and if it says \"failed at the end it might just means that it's still updating a bit.\"\nmax_iter = 6000 \n\n#stopping criteria: once the updates are smaller than this in l-infinity then stop \nmax_error = 0.1 \n\n# fees are the 0th component, stake is the 1st component\n# first player, index 0\nactions0 = np.zeros([1,N_T+1])\n\nx_vals0 = np.zeros([2,N_T+1])\nx_vals0[:,0] = np.array([fee_init, player0_stake])\ny_vals0 = np.zeros([2,N_T+1])\n# second player, index 1\nactions1 = np.zeros([1,N_T+1])\n\nx_vals1 = np.zeros([2,N_T+1])\nx_vals1[:,0] = np.array([fee_init, player1_stake])\ny_vals1 = np.zeros([2,N_T+1])\n\n\n\ndef run_iterative_system(max_iter,max_error):\n actions_old0 = np.zeros([1,N_T+1])\n actions_old1 = np.zeros([1,N_T+1])\n diff = 0; failed_to_converge=True\n \n for iter_idx in range(0,max_iter):\n # Run x0, x1 forwards\n for i in range(0,N_T):\n x_vals0[:,i+1] = x_vals0[:,i] + drift_0(actions0[0,i], actions1[0,i]) * delta_t\n # second guy only updates the stake\n # but the fee evolution is copied from first\n x_vals1[0,i+1] = x_vals0[0,i+1]\n x_vals1[1,i+1] = x_vals1[1,i] + drift_1(actions0[0,i], actions1[0,i])[1] * delta_t\n\n \n # Run y0, y1 backwards\n y_vals0[:,N_T] = np.zeros(2)\n y_vals1[:,N_T] = np.zeros(2)\n for i in reversed(range(0,N_T)):\n S0 = x_vals0[1,i]\n S1 = x_vals1[1,i]\n grad_x_F_0 = grad_x_of_running_gain_0(t_span[i], x_vals0[:,i], S1, actions0[0,i])\n grad_x_F_1 = grad_x_of_running_gain_1(t_span[i], x_vals1[:,i], S0, actions1[0,i])\n y_vals0[:,i] = y_vals0[:,i+1] + grad_x_F_0 * delta_t \n y_vals1[:,i] = y_vals1[:,i+1] + grad_x_F_1 * delta_t \n \n for i in range(0,N_T):\n # Do one gradient ascent step (we are maximizing) \n actions0[0,i] = actions0[0,i] + rho*grad_a0_H0(y_vals0[:,i],actions0[0,i],actions1[0,i])\n actions1[0,i] = actions1[0,i] + rho*grad_a1_H1(y_vals1[:,i],actions0[0,i],actions1[0,i])\n \n diff0 = np.max(np.abs(actions0 - actions_old0))\n diff1 = np.max(np.abs(actions1 - actions_old1))\n if (diff0 < max_error) and (diff1 < max_error) :\n print('Converged; iteration %d, diff0 is %f, diff1 is %f' % (iter_idx, diff0, diff1))\n failed_to_converge = False\n break \n actions_old0 = np.copy(actions0)\n actions_old1 = np.copy(actions1) \n\n if failed_to_converge:\n print('Failed 
after %d iteration, diff0 is %f, diff1 is %f' % (max_iter, diff0,diff1))\n \n \n%timeit -n1 -r1 run_iterative_system(max_iter, max_error)", "Converged; iteration 3595, diff0 is 0.021513, diff1 is 0.099986\n3min 4s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)\n" ], [ "plt.plot(t_span, 1000 * fee_scaling * x_vals0[0,0:N_T].T,label='f0 in 10 x %')\nplt.plot(t_span, 1000 * fee_scaling * x_vals1[0,0:N_T].T,color='green',label='f1 in 10 x %')\nplt.xlabel('time')\nplt.plot(t_span, x_vals0[1,0:N_T].T,color='red',label='stake 0')\nplt.plot(t_span, x_vals1[1,0:N_T].T,color='pink',label='stake 1')\nplt.title('State evolution - fees and stake')\nplt.xlabel('time')\nplt.ylabel('level')\nplt.legend()\nplt.savefig('state.pdf')\n\nfig = plt.figure()\nplt.plot(t_span, actions0[0,0:N_T].T,label='a - 0')\nplt.plot(t_span, actions1[0,0:N_T].T, color='green',label='a - 1')\nplt.title('Actions evolution')\nplt.xlabel('time')\nplt.ylabel('actions fees')\nplt.xlabel('time')\nplt.ylabel('level')\nplt.legend()\nplt.savefig('actions.pdf')", "_____no_output_____" ], [ "print('Minimum fee %.2f%%. Final fee %.2f%%.' % (fee_scaling * 100*min(x_vals1[0,0:N_T]),fee_scaling * 100*x_vals1[0,N_T-1]))\nprint('Minimum stake %.0f. Maximum stake %.0f. Final stake %.0f.' % (min(x_vals0[1,0:N_T]+x_vals1[1,0:N_T]),max(x_vals0[1,0:N_T]+x_vals1[1,0:N_T]),x_vals0[1,N_T-1]+x_vals1[1,N_T-1]))", "Minimum fee 0.96%. Final fee 1.43%.\nMinimum stake 260. Maximum stake 361. Final stake 349.\n" ], [ "# Adjoint process plot: this is a 'dummy' process used in the optimization\n# and you can ignore it if all goes well\n\nfig = plt.figure()\nplt.plot(t_span, 0.1*y_vals0[0,0:N_T].T, label='adj. fees 0')\nplt.plot(t_span, 0.1*y_vals1[0,0:N_T].T, color='green', label='adj. fees 1')\nplt.xlabel('time')\nplt.plot(t_span, y_vals0[1,0:N_T].T, color = 'red', label='adj. stake 0')\nplt.plot(t_span, y_vals1[1,0:N_T].T, color = 'pink', label='adj. stake 0')\nplt.title('Adjoint evolution - fees and stake')\nplt.xlabel('time')\nplt.legend()\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af4b37b99da62eeff28333ec1248bd32b873ebf
8,573
ipynb
Jupyter Notebook
notebooks/DatasetVisualizationTool.ipynb
samuelesabella/Detecting-network-anomalies-using-the-feature-space-latent-representation
4b94a75e370ba7789229644f61390768ea881595
[ "MIT" ]
1
2021-03-13T17:40:05.000Z
2021-03-13T17:40:05.000Z
notebooks/DatasetVisualizationTool.ipynb
samuelesabella/Detecting-network-anomalies-using-the-feature-space-latent-representation
4b94a75e370ba7789229644f61390768ea881595
[ "MIT" ]
null
null
null
notebooks/DatasetVisualizationTool.ipynb
samuelesabella/Detecting-network-anomalies-using-the-feature-space-latent-representation
4b94a75e370ba7789229644f61390768ea881595
[ "MIT" ]
1
2021-09-04T12:39:23.000Z
2021-09-04T12:39:23.000Z
33.357977
120
0.491893
[ [ [ "import sys\nimport os\nsys.path.insert(0, os.path.abspath('../src/'))", "_____no_output_____" ] ], [ [ "# Plotting", "_____no_output_____" ] ], [ [ "from pathlib import Path\nimport SimplePreprocessor as sp\n\nDATASETPATH = Path(\"../dataset/\")\n\npr = sp.SimplePreprocessor(deltas=True, discretize=False, flevel=\"MAGIK\")\nnetdata = pr.load_path(DATASETPATH)\nnetdata[\"_date\"] = netdata.index.get_level_values(\"_time\").strftime('%a %d %b %y')", "../dataset/Scenario_22-Phillips_HUE.pkl\n" ], [ "import numpy as np\nimport ipywidgets as widgets\nfrom IPython.display import display, Markdown\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom ipywidgets import HBox, VBox, interactive, Layout\n\n\ndevices_idxs = netdata.index.droplevel(2).unique()\ndevices = [f\"{host} ({cat})\" for cat, host in devices_idxs]\ndevices.sort()\n\navailable_channels = [c for c in netdata.columns if ((\"time\" not in c) and (c[0] != \"_\"))]\navailable_channels.sort()\n\navailable_days = np.unique(netdata[\"_date\"])\n\n\n# ----- ----- WIDGETS ----- ----- #\n# ----- ----- ------- ----- ----- #\ndevice_w_list = widgets.Dropdown(options=devices)\ndays_w_list = widgets.Dropdown(options=available_days)\nselectedc_w_list = widgets.SelectMultiple(options=available_channels,\n description='Channel',\n layout=Layout(width='400px'))\ntimerange_slider = widgets.FloatSlider(min=.005, max=1., step=.005)\nsmoothing_slider = widgets.FloatSlider(min=0, max=79, step=4,\n description=\"Smoothing (aggregate x minutes)\")\noffset_slider = widgets.FloatSlider(min=.0, max=1., step=.01)\nts_selector = HBox([device_w_list, days_w_list])\ncol_selector = HBox([selectedc_w_list])\nts_shifting = HBox([timerange_slider, offset_slider])\nwlist = VBox([ts_selector, col_selector, ts_shifting, smoothing_slider])\n\n\n# ----- ----- PLOTTER ----- ----- #\n# ----- ----- ------- ----- ----- #\ndef mprint(s):\n display(Markdown(s))\n \ndef randcolors(n):\n hexl = list('0123456789ABCDEF')\n hexc = np.random.choice(hexl, size=(n, 6))\n return ['#' + ''.join(x) for x in hexc]\n \ndef remove_empty(data):\n empty_cols = [ c for c in data.columns if (data[c]==0).all() ]\n for c in empty_cols:\n mprint(f\"**<span style='color: red'>Empty series:</span> {c}**\")\n return data.drop(empty_cols, axis=1)\n \ndef datetime2xaxis(dtseries, smoothing):\n if len(dtseries) <= 50:\n return \"%a - %H:%M:%S\"\n elif len(dtseries) <= 100:\n return \"%a - %H:%M\"\n else:\n return \"%a - %H\"\n \ndef describe_mtimeseries(plotname, data, smoothing=1):\n # Data description ..... #\n mprint(f\"### {plotname}\")\n start = min(data.index)\n end = max(data.index)\n mprint(f\"**Time range**: {start} **/** {end}\")\n mprint(f\"**Total data range:** {end-start}\")\n mprint(f\"**Samples shown**: {len(data)}\")\n mprint(f\"**Smoothing**: {int(smoothing / 4)} minutes\")\n\n if len(data) <= 50:\n xaxis_format = \"%a - %H:%M:%S\"\n elif len(data) <= 100:\n xaxis_format = \"%a - %H:%M\"\n else:\n xaxis_format = \"%a - %H\"\n \n # Plotting clean data ..... #\n empty_cols = []\n legend = []\n data = remove_empty(data)\n \n # Smoothing ..... 
#\n channels = data.drop([\"_isanomaly\"], axis=1).columns\n data[channels] = data[channels].rolling(smoothing, center=True).sum() / smoothing\n data = data.dropna()\n \n anomaly_mask = (data[\"_isanomaly\"] != \"none\")\n for idx, c in enumerate(channels):\n legend.append(c)\n fig, ax = plt.subplots(figsize=(12, 6))\n ax.format_xdata = mdates.DateFormatter(xaxis_format)\n \n ax.plot(data.index, data[c])\n fig.autofmt_xdate()\n \n if anomaly_mask.any():\n attack_data = data[anomaly_mask]\n for anomalyname, anomalydata in attack_data.groupby(\"_isanomaly\"):\n legend.append(anomalyname)\n anomalydata = anomalydata.drop(\"_isanomaly\", axis=1)\n ax.plot(anomalydata.index, anomalydata.values)\n fig.autofmt_xdate()\n \n fig.suptitle(f\"{c}\", fontweight=\"bold\")\n plt.legend(legend)\n plt.show()\n\n \n# ----- ----- INTERACTOR ----- ----- #\n# ----- ----- ---------- ----- ----- #\ndef whandler(device, day, channel, timerange, offset, smoothing):\n split = device.split(\" \")\n host = split[0].strip()\n category = \" \".join(split[1:]).replace(\"(\", \"\").replace(\")\", \"\").strip()\n \n data = netdata[netdata[\"_date\"]==day]\n chs = set(channel)\n chs.add(\"_isanomaly\")\n chs = list(chs)\n data = data.loc[category, host][chs]\n \n # Filtering time range\n full_length = len(data)\n start_idx = int(full_length * offset)\n end_idx = min(start_idx + int(full_length * timerange), full_length)\n data = data.iloc[start_idx:end_idx]\n \n describe_mtimeseries(device, data, int(smoothing+1))\n\n%matplotlib inline\noutput = widgets.interactive(whandler,\n device=device_w_list, day=days_w_list, \n channel=selectedc_w_list, \n timerange=timerange_slider, \n offset=offset_slider,\n smoothing=smoothing_slider).children[-1]\ndisplay(wlist)\ndisplay(output)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4af4b662824c15ce0faf72a6e75dc83a1cda70b9
16,701
ipynb
Jupyter Notebook
notebooks/zaverecny_test.ipynb
cedeerwe/slobodna-akademia
10ea3ac5935da19419af5600934cc5df8b45a4f6
[ "MIT" ]
3
2020-04-11T18:53:55.000Z
2020-04-20T13:48:19.000Z
notebooks/zaverecny_test.ipynb
cedeerwe/slobodna-akademia
10ea3ac5935da19419af5600934cc5df8b45a4f6
[ "MIT" ]
7
2019-07-21T17:54:16.000Z
2020-02-24T20:37:25.000Z
notebooks/zaverecny_test.ipynb
cedeerwe/brutalna-akademia
10ea3ac5935da19419af5600934cc5df8b45a4f6
[ "MIT" ]
null
null
null
31.100559
285
0.455063
[ [ [ "<a href=\"https://colab.research.google.com/github/cedeerwe/brutalna-akademia/blob/master/notebooks/zaverecny_test.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Inštrukcie\n\nTest pozostáva zo 7 príkladov, dokopy za 50 bodov. Na test máš 3 hodiny času, ktoré si musíš odsledovať sám/sama. Časovač si spusti vtedy, keď si začneš čítať zadanie prvej úlohy.\n\nKaždá úloha má v názve uvedený počet bodov, ktoré môžeš získať za kompletné riešenie. Čiastočné riešenia budú tiež bodované. Ak si to úloha vyžaduje, k riešeniu patrí aj vysvetlenie, prečo je vaše riešenie riešením.\n\nÚlohy riešte priamo v kópii tohto colabu. Po dokončení nám svoj colab pošlite mailom. O teste prosím žiadným spôsobom nekomunikovať s ostatnými, kým k tomu nebudete vyzvaní (všetci ho odovzdajú).\n\nDržíme palce!", "_____no_output_____" ], [ "# Úlohy", "_____no_output_____" ], [ "## Šípky (6 bodov)\n", "_____no_output_____" ], [ "### Zadanie\n\nIdete sa zúčastniť súťaže v hádzaní šípok ([referencia](https://en.wikipedia.org/wiki/Darts#/media/File:Dartboard_diagram.svg)). Zlepšiť sa už síce nestihnete, ale môžte sa aspoň zamyslieť nad svojou stratégiou, prípadne si hodiť pár skúšobných hodov.\n\nKam by ste mali mieriť so svojími schopnosťami, aby ste maximalizovali svoj bodový zisk?", "_____no_output_____" ], [ "### Riešenie", "_____no_output_____" ], [ "## Poker (5 bodov)\n", "_____no_output_____" ], [ "### Zadanie", "_____no_output_____" ], [ "Prihlásili ste sa do súťaže v hraní matematického pokru. Pravidlá sú nasledovné:\n1. hru hrajú dvaja hráči,\n1. obaja hráči na začiatku do hry vložia 1€\n1. každý hráč si vytiahne rovnomerne náhodné číslo od 0 po 1, predstavujúce silu jeho kariet, \n1. náhodne sa určí začínajúci hráč,\n1. začínajúci hráč môže: a) *fold* a prehrať, b) *raise* a zvýšiť hru o 1€, c) *check* a nechať ísť druhého hráča,\n1. druhý hráč môže: a) *fold* a prehrať, b) *call* a dorovnať prvého hráča, ak zvýšil, c) *check* a pokračovať v hre, ak prvý hráč nezvýšil,\n1. ak žiadny z hráčov nezložil, porovnajú sa čísla a víťaz si berie všetky stávky\n\nAká by mala byť vaša optimálna stratégia v tejto hre?\n\nAko by sa zmenila vaša stratégia, keby prvý hráč mohol zvýšiť o ľubovoľnú sumu, nie iba 1€?", "_____no_output_____" ], [ "### Riešenie", "_____no_output_____" ], [ "## Random playlist (10 bodov)", "_____no_output_____" ], [ "### Zadanie", "_____no_output_____" ], [ "Vlastníte službu na streamovanie hudby a máte veľa zákazníkov, ktorí majú veľa obľubených pesničiek. Vyžiadali si od vás funkcionalitu, aby si mohli púšťať svoje pesničky ako \"random shuffle\", teda v náhodnom poradí.\n\nKaždá pesnička má vo vašom katalógu niekoľko vlastností, konkrétne:\n- interpret - reprezentovaný ako \"a\", \"b\", \"c\", ...\n- žáner - reprezentovaný ako \"A\", \"B\", \"C\", ...\n- číslo pesničky - reprezentované ako 1, 2, 3, ...\n\nToto celé je reprezentované ako trojica:", "_____no_output_____" ] ], [ [ "priklad = (\"a\", \"F\", 9)", "_____no_output_____" ] ], [ [ "Dostali ste zoznam 100 obľúbených pesničiek istého uživateľa. Vygenerujte z nej postupnosť 10,000 pesničiek vyskladaných z týchto 100 pesničiek v takom poradí, ako by ste mu ich pustili za sebou, keby mal svoj \"random shuffle\" pustený fakt dlho. \n\nOhodnotení budete na základe spokojnosti zákazníka po vypočutí všetkých 10,000 pesničiek. 
Zákazník očakáva od \"random shuffle\", že keď si ho pustí, tak pesničky budú chodiť rozumne náhodne a nebude počúvať za sebou príliš veľa podobných.\n", "_____no_output_____" ] ], [ [ "zoznam_pesniciek = [\n ('f', 'D', 0),\n ('j', 'C', 1),\n ('h', 'B', 2),\n ('e', 'D', 3),\n ('c', 'A', 4),\n ('a', 'C', 5),\n ('j', 'B', 6),\n ('i', 'D', 7),\n ('a', 'C', 8),\n ('d', 'B', 9),\n ('i', 'C', 10),\n ('i', 'D', 11),\n ('g', 'D', 12),\n ('f', 'B', 13),\n ('b', 'C', 14),\n ('b', 'D', 15),\n ('g', 'A', 16),\n ('c', 'A', 17),\n ('j', 'C', 18),\n ('h', 'A', 19),\n ('f', 'B', 20),\n ('e', 'C', 21),\n ('c', 'E', 22),\n ('i', 'B', 23),\n ('b', 'A', 24),\n ('g', 'D', 25),\n ('b', 'D', 26),\n ('b', 'A', 27),\n ('i', 'C', 28),\n ('g', 'E', 29),\n ('c', 'C', 30),\n ('a', 'D', 31),\n ('g', 'B', 32),\n ('d', 'B', 33),\n ('g', 'B', 34),\n ('f', 'A', 35),\n ('g', 'C', 36),\n ('a', 'B', 37),\n ('f', 'D', 38),\n ('i', 'A', 39),\n ('g', 'C', 40),\n ('d', 'D', 41),\n ('d', 'A', 42),\n ('e', 'A', 43),\n ('g', 'E', 44),\n ('d', 'D', 45),\n ('b', 'A', 46),\n ('e', 'E', 47),\n ('f', 'B', 48),\n ('i', 'A', 49),\n ('e', 'D', 50),\n ('c', 'A', 51),\n ('i', 'E', 52),\n ('j', 'E', 53),\n ('d', 'A', 54),\n ('d', 'C', 55),\n ('e', 'C', 56),\n ('a', 'C', 57),\n ('h', 'C', 58),\n ('i', 'E', 59),\n ('h', 'B', 60),\n ('e', 'C', 61),\n ('a', 'A', 62),\n ('f', 'A', 63),\n ('d', 'A', 64),\n ('f', 'D', 65),\n ('d', 'A', 66),\n ('a', 'E', 67),\n ('e', 'E', 68),\n ('d', 'E', 69),\n ('b', 'B', 70),\n ('i', 'A', 71),\n ('j', 'D', 72),\n ('h', 'B', 73),\n ('c', 'E', 74),\n ('i', 'D', 75),\n ('j', 'B', 76),\n ('e', 'C', 77),\n ('e', 'B', 78),\n ('g', 'A', 79),\n ('d', 'E', 80),\n ('i', 'E', 81),\n ('b', 'A', 82),\n ('d', 'E', 83),\n ('b', 'C', 84),\n ('c', 'B', 85),\n ('j', 'D', 86),\n ('a', 'E', 87),\n ('h', 'E', 88),\n ('i', 'C', 89),\n ('c', 'A', 90),\n ('i', 'C', 91),\n ('e', 'D', 92),\n ('a', 'E', 93),\n ('g', 'A', 94),\n ('b', 'B', 95),\n ('h', 'D', 96),\n ('a', 'A', 97),\n ('d', 'E', 98),\n ('i', 'B', 99)\n]", "_____no_output_____" ] ], [ [ "### Riešenie", "_____no_output_____" ], [ "## Štvorsten (6 bodov)", "_____no_output_____" ], [ "### Zadanie", "_____no_output_____" ], [ "Idete sa zúčastniť skutočného turnaja v navrhovaní hracích štvorstenov, ktorého sa zúčastnia všetci účastníci, ktorí odovzdajú riešenie tejto úlohy. Počet bodov za túto úlohu bude záležať od vášho skutočného umiestnenia.\n\nHracie štvorsteny sa od hracích kociek tým, že majú iba 4 steny a počet bodiek na každej stene je v súčte iba 6. Navyše môžu byť tieto bodky ľubovoľne rozdelené po všetkých stenách. \n\nHracie štvorsteny sa porovnávajú tak, že spočítame kto má väčšiu šancu hodiť vyššie číslo - ten hráč vyhrá a získa 2 body. V prípade rovnosti je remíza za 1 bod pre každého. \n\nAko príklad, ak by sme porovnávali štvorsten [6, 0, 0, 0] a štvorsten [3, 2, 1, 0], prvý štvorsten vyhrá so šancou $\\tfrac{1}{4}$, druhý štvorsten vyhrá so šancou $\\tfrac{9}{16}$ a vo zvyšných $\\tfrac{3}{16}$ je remíza. Druhý hráč by teda získal 2 body a prvý 0 bodov. \n\nTurnaj má dve rôzne disciplíny:\n1. Navrhnite jeden štvorsten, ktorý pôjde do turnaja a bude sa porovnávať s ostatnými. *Príklad riešenia: [3, 2, 1, 0].*\n2. Navrhnite pravdepodobnostné rozdelenie cez všetky štvorsteny, aby ste v priemere získali čo najviac bodov pri porovnaní s pravdepodobnostnými rozdeleniami ostatných hráčov. 
*Príklad riešenia: 50% [3,2,1,0], 30% [3,3,0,0], 20% [6, 0, 0, 0].*", "_____no_output_____" ], [ "### Riešenie", "_____no_output_____" ], [ "## Internet (5 bodov)", "_____no_output_____" ], [ "### Zadanie", "_____no_output_____" ], [ "Dostali ste otázku \"Ako funguje internet?\" od istej osoby. Vašou úlohou je to tejto osobe vysvetliť jednou vetou a jedným odkazom na stránku, z ktorej pochopí viac, ak by ste chcela.\n\nSpomínaná osoba je jednou z nasledujúcich možností:\n1. prváčik v škole\n1. váš rovesník z akadémie\n1. učiteľ informatiky na univerzite\n1. Bill Gates\n1. babička na ulici\n\nZodpovedajte túto úlohu pre **každú** z vyššie uvedených možností.", "_____no_output_____" ], [ "### Riešenie", "_____no_output_____" ], [ "## Rosnička (9 bodov)\n", "_____no_output_____" ], [ "### Zadanie", "_____no_output_____" ], [ "V tejto úlohe máte za cieľ predpovedať isté hodnoty, pre konkrétny deň - 1. mája. Pre každú z týchto hodnôt si prosíme \n- bodový odhad,\n- 80% interval spoľahlivosti.\n\nHodnoty, ktoré nás zaujímajú:\n- počet vykonaných testov na koronavírus na Slovensku\n- počet Facebookových statusov od premiéra Igora Matoviča https://www.facebook.com/igor.matovic.7\n- maximálna denná teplota v obci Bukovina podľa [SHMÚ](http://www.shmu.sk/sk/?page=1)\n- minimálna cena za barel ropy podľa https://markets.businessinsider.com/commodities/oil-price?type=wti\n- počet návštev na stránke https://en.wikipedia.org/wiki/Education\n- počet 250g balení masla v mrazničke v domácnosti vášho lektora - Dominika Csibu - rátajú sa iba tie s obsahom tuku 82%", "_____no_output_____" ], [ "### Riešenie\n\n", "_____no_output_____" ], [ "## Ale fakt? (9 bodov)\n\n", "_____no_output_____" ], [ "### Zadanie", "_____no_output_____" ], [ "Zodpovedajte na nasledujúce otázky:\n1. najviac koľkokrát za deň by sme mohli osláviť nový rok?\n1. ktorý ostrov nazval Christopher Columbus *holy glory*?\n1. ako najrýchlejšie zvládol človek postrkať nosom pomaranč, aby ním prešiel jeden kilometer?\n1. aká je najdlhšia kosť v tele autora knihy *Life without limits*?\n1. ktorý kuchynský projekt na kickstarteri mal svoj cieľ prekonaný viac ako 5000x násobne?\n", "_____no_output_____" ], [ "### Riešenie", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4af4b958d5c1b756e5b94838129e8c0405e69809
11,981
ipynb
Jupyter Notebook
notebooks/Tutorial_04_DataTypesStructures.ipynb
python4oceanography/ocean_python_tutorial
7215b3c740ab8fb4b177a8ae764411c273cc7a06
[ "Apache-2.0" ]
12
2019-09-01T19:00:25.000Z
2021-12-01T00:45:49.000Z
notebooks/Tutorial_04_DataTypesStructures.ipynb
ognancy4life/ocean_python_tutorial
7215b3c740ab8fb4b177a8ae764411c273cc7a06
[ "Apache-2.0" ]
1
2021-07-20T02:02:58.000Z
2021-07-20T02:02:58.000Z
notebooks/Tutorial_04_DataTypesStructures.ipynb
ognancy4life/ocean_python_tutorial
7215b3c740ab8fb4b177a8ae764411c273cc7a06
[ "Apache-2.0" ]
6
2019-09-14T19:10:52.000Z
2021-06-25T20:08:47.000Z
25.276371
224
0.526667
[ [ [ "# Data types & Structures", "_____no_output_____" ], [ "### A great advantage of `Python` is the type of data it can handle & combine\nPython has been widely used to handle internet related operations, which means lots and lots of text and numbers, combined!", "_____no_output_____" ], [ "***", "_____no_output_____" ], [ "## Let's start with the basic types!\n### Like other programming languages, `Python` data types include integers, floats, complex, and strings & booleans", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>In the next cell, assign a float value to <b>x</b> and execute the cell\n</div>", "_____no_output_____" ] ], [ [ "lat = 20\nprint(lat)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>Assign an integer value to <b>y</b> and execute the cell\n</div>", "_____no_output_____" ] ], [ [ "y = \nprint(y, lat*y)", "_____no_output_____" ] ], [ [ "### Complex values are identified by a `j` at the end\n<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>Assign a complex value to <b>z</b> and execute the cell\n</div>", "_____no_output_____" ] ], [ [ "z = \nprint(z, type(z))", "_____no_output_____" ] ], [ [ "### Variables can be reassigned to other types anytime\n<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>Execute the next cell\n</div>", "_____no_output_____" ] ], [ [ "lat = 'Latitude' \nprint(lat)\nstep = True\nprint(step)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>Define your own string and boolean variables and print their type in the next cell\n</div>", "_____no_output_____" ], [ "## One of the best types: datetime!\n<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>- Execute the code in the next cell to define two datetime variables: <b>today1</b> & <b>today2</b>\n</div>", "_____no_output_____" ] ], [ [ "from datetime import date # call date function inside the datetime package\ntoday1 = date.today()\nfrom datetime import datetime # call datetime function inside the datetime package\ntoday2 = datetime.now()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br> - Now print both variables\n    <br>\n    - Try printing <b>today1.month</b>\n    <br>\n    - Try printing the following <b>today2.strftime('%c')</b>\n    <br>\n    - Now print the type of one of your date variables\n</div> ", "_____no_output_____" ], [ "<br>Note the use of <b>.</b> after a variable. 
This refers to a method of the variable or, as Python refers to it, the object.", "_____no_output_____" ], [ "We will use other functions of the datetime package later, and you can find more details about the attributes of the datetime object (variable): https://www.guru99.com/date-time-and-datetime-classes-in-python.html\n***", "_____no_output_____" ], [ "## Python has some basic data collections, we will talk about three of them:\n    List - ordered, changeable, allows duplicates\n    Tuple - ordered, unchangeable, allows duplicates\n    Dictionary - unordered, changeable, no duplicates allowed\n***", "_____no_output_____" ], [ "## Lists: ordered, changeable, allows duplicates\n<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>- Execute the code below and print the list\n    <br>\n    - Print the type of <b>mylist</b>\n</div>", "_____no_output_____" ] ], [ [ "mylist=['temperature', 'wind', 'salinity'] # note the use of [ ]", "_____no_output_____" ] ], [ [ "### To access an element of a list we use indices, which start at `0`\n<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>- Try printing: <b>mylist[0]</b>\n    <br>\n    - Now try reassigning the second element to <b>'current velocity'</b>\n</div>", "_____no_output_____" ], [ "### To add an element to the list use the method append\n<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>- Try <b>mylist.append('wind speed')</b>\n    <br>\n    - Then execute the next cell to print the entire list with a for loop\n</div>", "_____no_output_____" ] ], [ [ "for myvar in mylist:\n    print(myvar+\" has been recorded\")", "_____no_output_____" ] ], [ [ "### Copying a list (or another object) needs to be done explicitly, otherwise it is just a new name for your variable\n<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>- Try these two codes:\n    <br>\n    <b> yourlist1 = mylist.copy()</b>\n    <br>\n    <b> yourlist2 = mylist</b>\n    <br>\n    - Then modify <b>yourlist1</b> and print it along with <b>mylist</b>\n    <br>\n    - Now modify <b>yourlist2</b> and print it along with <b>mylist</b>\n</div>", "_____no_output_____" ], [ "***\n## Tuples: ordered, unchangeable, allows duplicates\n<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>- Execute the code below and print the tuple\n    <br>\n    - Print the type of <b>mytuple</b>\n    <br>\n    - Print one element of <b>mytuple</b>\n</div>", "_____no_output_____" ] ], [ [ "mytuple = ('latitude', 'longitude', 'time') # note the use of ( )", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br> - Now try reassigning an element of <b>mytuple</b>\n</div>", "_____no_output_____" ], [ "*** \n## Dictionaries: unordered, changeable, no duplicates allowed\n### Indexed pair of keys and values\n<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>- Execute the code below, and print the dictionary\n    <br>\n    - Add a new element to <b>mydict</b> with <b>mydict['units']='C'</b>\n    <br>\n    - Print one element of <b>mydict</b>. 
<i>Hint: the key is the key</i> \n</div>", "_____no_output_____" ] ], [ [ "mydict = {'instrument': 'temperature sensor', 'measurement':'SST','depth': 5}", "_____no_output_____" ] ], [ [ "Certain libraries have specific data structures - arrays, data frames, and datasets. We will see examples of each when we come back to talk about each library.", "_____no_output_____" ], [ "***\n***\n# A few words about `Objects`, `Attributes` & `Methods`\n## `Python` is an object oriented programming language. This means almost everything is an object or an instance of a class. Variables are objects. And therefore they have `attributes` & `methods`\n\n### `Properties` or `Attributes` are accessed with `.attribute` after the object\n### `Methods` are functions, & are accessed with `.method(arguments)` after the object\nWe're not going to teach you how to create classes with properties or methods, but how to access them, because we will use them extensively\n<div class=\"alert alert-block alert-info\">\n    <b>Try it out!</b>\n    <br><br>Execute the code in the next cell to access the attributes and one method of the class <b>date</b>\n    </div>", "_____no_output_____" ] ], [ [ "today = date.today()\nprint(today)\n## Date object attributes\nprint(today.year, today.month, today.day) \n## Date object method 'ctime' - does not need arguments\nprint(today.ctime())", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4af4d049bcc8d797ec5b525dbc1d0f25bbfeb279
199,134
ipynb
Jupyter Notebook
benchmarks/en-nya/jw-300-baseline/en_nya_starter_notebook.ipynb
JenaleaR/masakhane-mt
a524d267e95d7c42785cecb3249d704398fd272f
[ "MIT" ]
135
2020-05-04T18:27:22.000Z
2022-03-14T07:58:33.000Z
benchmarks/en-nya/jw-300-baseline/en_nya_starter_notebook.ipynb
JenaleaR/masakhane-mt
a524d267e95d7c42785cecb3249d704398fd272f
[ "MIT" ]
67
2020-05-03T19:14:38.000Z
2022-02-01T19:14:22.000Z
benchmarks/en-nya/jw-300-baseline/en_nya_starter_notebook.ipynb
JenaleaR/masakhane-mt
a524d267e95d7c42785cecb3249d704398fd272f
[ "MIT" ]
120
2020-05-02T20:10:34.000Z
2022-03-18T03:05:54.000Z
80.784584
12,103
0.601675
[ [ [ "# Masakhane - Machine Translation for African Languages (Using JoeyNMT)", "_____no_output_____" ], [ "## Note before beginning:\n### - The idea is that you should be able to make minimal changes to this in order to get SOME result for your own translation corpus. \n\n### - The tl;dr: Go to the **\"TODO\"** comments which will tell you what to update to get up and running\n\n### - If you actually want to have a clue what you're doing, read the text and peek at the links\n\n### - With 100 epochs, it should take around 7 hours to run in Google Colab\n\n### - Once you've gotten a result for your language, please attach and email your notebook that generated it to [email protected]\n\n### - If you care enough and get a chance, doing a brief background on your language would be amazing. See examples in [(Martinus, 2019)](https://arxiv.org/abs/1906.05685)", "_____no_output_____" ], [ "## Retrieve your data & make a parallel corpus\n\nIf you are wanting to use the JW300 data referenced on the Masakhane website or in our GitHub repo, you can use `opus-tools` to convert the data into a convenient format. `opus_read` from that package provides a convenient tool for reading the native aligned XML files and to convert them to TMX format. The tool can also be used to fetch relevant files from OPUS on the fly and to filter the data as necessary. [Read the documentation](https://pypi.org/project/opustools-pkg/) for more details.\n\nOnce you have your corpus files in TMX format (an xml structure which will include the sentences in your target language and your source language in a single file), we recommend reading them into a pandas dataframe. Thankfully, Jade wrote a silly `tmx2dataframe` package which converts your tmx file to a pandas dataframe. ", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "# TODO: Set your source and target languages. Keep in mind, these traditionally use language codes as found here:\n# These will also become the suffix's of all vocab and corpus files used throughout\nimport os\nsource_language = \"en\"\ntarget_language = \"nya\" \nlc = False # If True, lowercase the data.\nseed = 42 # Random seed for shuffling.\ntag = \"baseline\" # Give a unique name to your folder - this is to ensure you don't rewrite any models you've already submitted\n\nos.environ[\"src\"] = source_language # Sets them in bash as well, since we often use bash scripts\nos.environ[\"tgt\"] = target_language\nos.environ[\"tag\"] = tag\n\n# This will save it to a folder in our gdrive instead! 
\n!mkdir -p \"/content/drive/My Drive/masakhane/$src-$tgt-$tag\"\ng_drive_path = \"/content/drive/My Drive/masakhane/%s-%s-%s\" % (source_language, target_language, tag)\nos.environ[\"gdrive_path\"] = g_drive_path\nmodels_path = '%s/models/%s%s_transformer'% (g_drive_path, source_language, target_language)\n# model temporary directory for training\nmodel_temp_dir = \"/content/drive/My Drive/masakhane/model-temp\"\n# model permanent storage on the drive\n!mkdir -p \"$gdrive_path/models/${src}${tgt}_transformer/\"", "_____no_output_____" ], [ "!echo $gdrive_path", "/content/drive/My Drive/masakhane/en-nya-baseline\n" ], [ "#TODO: Skip for retrain\n# Install opus-tools\n! pip install opustools-pkg ", "Collecting opustools-pkg\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/6c/9f/e829a0cceccc603450cd18e1ff80807b6237a88d9a8df2c0bb320796e900/opustools_pkg-0.0.52-py3-none-any.whl (80kB)\n\r\u001b[K |████ | 10kB 29.9MB/s eta 0:00:01\r\u001b[K |████████ | 20kB 6.3MB/s eta 0:00:01\r\u001b[K |████████████▏ | 30kB 7.6MB/s eta 0:00:01\r\u001b[K |████████████████▏ | 40kB 8.1MB/s eta 0:00:01\r\u001b[K |████████████████████▎ | 51kB 7.3MB/s eta 0:00:01\r\u001b[K |████████████████████████▎ | 61kB 8.3MB/s eta 0:00:01\r\u001b[K |████████████████████████████▎ | 71kB 8.5MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 81kB 5.6MB/s \n\u001b[?25hInstalling collected packages: opustools-pkg\nSuccessfully installed opustools-pkg-0.0.52\n" ], [ "#TODO: Skip for retrain\n# Downloading our corpus\n! opus_read -d JW300 -s $src -t $tgt -wm moses -w jw300.$src jw300.$tgt -q\n\n# extract the corpus file\n! gunzip JW300_latest_xml_$src-$tgt.xml.gz", "\nAlignment file /proj/nlpl/data/OPUS/JW300/latest/xml/en-nya.xml.gz not found. The following files are available for downloading:\n\n ./JW300_latest_xml_en.zip already exists\n ./JW300_latest_xml_nya.zip already exists\n 572 KB https://object.pouta.csc.fi/OPUS-JW300/v1/xml/en-nya.xml.gz\n\n 572 KB Total size\n./JW300_latest_xml_en-nya.xml.gz ... 100% of 572 KB\ngzip: JW300_latest_xml_en-nya.xml already exists; do you wish to overwrite (y or n)? n\n\tnot overwritten\n" ], [ "# extract the corpus file\n! gunzip JW300_latest_xml_$tgt-$src.xml.gz", "gzip: JW300_latest_xml_nya-en.xml.gz: No such file or directory\n" ], [ "#TODO: Skip for retrain\n# Download the global test set.\n! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-any.en\n \n# And the specific test set for this language pair.\nos.environ[\"trg\"] = target_language \nos.environ[\"src\"] = source_language \n\n! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.en \n! mv test.en-$trg.en test.en\n! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.$trg \n! mv test.en-$trg.$trg test.$trg", "--2020-07-12 20:08:28-- https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-any.en\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 277791 (271K) [text/plain]\nSaving to: ‘test.en-any.en.1’\n\n\rtest.en-any.en.1 0%[ ] 0 --.-KB/s \rtest.en-any.en.1 100%[===================>] 271.28K --.-KB/s in 0.02s \n\n2020-07-12 20:08:28 (11.8 MB/s) - ‘test.en-any.en.1’ saved [277791/277791]\n\n--2020-07-12 20:08:30-- https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-nya.en\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 203330 (199K) [text/plain]\nSaving to: ‘test.en-nya.en’\n\ntest.en-nya.en 100%[===================>] 198.56K --.-KB/s in 0.01s \n\n2020-07-12 20:08:30 (13.0 MB/s) - ‘test.en-nya.en’ saved [203330/203330]\n\n--2020-07-12 20:08:32-- https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-nya.nya\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 226404 (221K) [text/plain]\nSaving to: ‘test.en-nya.nya’\n\ntest.en-nya.nya 100%[===================>] 221.10K --.-KB/s in 0.02s \n\n2020-07-12 20:08:33 (11.1 MB/s) - ‘test.en-nya.nya’ saved [226404/226404]\n\n" ], [ "#TODO: Skip for retrain\n# Read the test data to filter from train and dev splits.\n# Store english portion in set for quick filtering checks.\nen_test_sents = set()\nfilter_test_sents = \"test.en-any.en\"\nj = 0\nwith open(filter_test_sents) as f:\n for line in f:\n en_test_sents.add(line.strip())\n j += 1\nprint('Loaded {} global test sentences to filter from the training/dev data.'.format(j))", "Loaded 3571 global test sentences to filter from the training/dev data.\n" ], [ "#TODO: Skip for retrain\nimport pandas as pd\n\n# TMX file to dataframe\nsource_file = 'jw300.' + source_language\ntarget_file = 'jw300.' + target_language\n\nsource = []\ntarget = []\nskip_lines = [] # Collect the line numbers of the source portion to skip the same lines for the target portion.\nwith open(source_file) as f:\n for i, line in enumerate(f):\n # Skip sentences that are contained in the test set.\n if line.strip() not in en_test_sents:\n source.append(line.strip())\n else:\n skip_lines.append(i) \nwith open(target_file) as f:\n for j, line in enumerate(f):\n # Only add to corpus if corresponding source was not skipped.\n if j not in skip_lines:\n target.append(line.strip())\n \nprint('Loaded data and skipped {}/{} lines since contained in test set.'.format(len(skip_lines), i))\n \ndf = pd.DataFrame(zip(source, target), columns=['source_sentence', 'target_sentence'])\n# if you get TypeError: data argument can't be an iterator is because of your zip version run this below\n#df = pd.DataFrame(list(zip(source, target)), columns=['source_sentence', 'target_sentence'])\ndf.head(10)", "Loaded data and skipped 4429/60566 lines since contained in test set.\n" ] ], [ [ "## Pre-processing and export\n\nIt is generally a good idea to remove duplicate translations and conflicting translations from the corpus. 
In practice, these public corpora include some number of these that need to be cleaned.\n\nIn addition we will split our data into dev/test/train and export to the filesystem.", "_____no_output_____" ] ], [ [ "#TODO: Skip for retrain\n# drop duplicate translations\ndf_pp = df.drop_duplicates()\n\n# drop conflicting translations\n# (this is optional and something that you might want to comment out \n# depending on the size of your corpus)\ndf_pp.drop_duplicates(subset='source_sentence', inplace=True)\ndf_pp.drop_duplicates(subset='target_sentence', inplace=True)\n\n# Shuffle the data to remove bias in dev set selection.\ndf_pp = df_pp.sample(frac=1, random_state=seed).reset_index(drop=True)", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:8: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \n/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:9: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n if __name__ == '__main__':\n" ], [ "#TODO: Skip for retrain\n# Install fuzzy wuzzy to remove \"almost duplicate\" sentences in the\n# test and training sets.\n! pip install fuzzywuzzy\n! pip install python-Levenshtein\nimport time\nfrom fuzzywuzzy import process\nimport numpy as np\n\n# reset the index of the training set after previous filtering\ndf_pp.reset_index(drop=False, inplace=True)\n\n# Remove samples from the training data set if they \"almost overlap\" with the\n# samples in the test set.\n\n# Filtering function. Adjust pad to narrow down the candidate matches to\n# within a certain length of characters of the given sample.\ndef fuzzfilter(sample, candidates, pad):\n candidates = [x for x in candidates if len(x) <= len(sample)+pad and len(x) >= len(sample)-pad] \n if len(candidates) > 0:\n return process.extractOne(sample, candidates)[1]\n else:\n return np.nan\n\n# NOTE - This might run slow depending on the size of your training set. We are\n# printing some information to help you track how long it would take. 
\nscores = []\nstart_time = time.time()\nfor idx, row in df_pp.iterrows():\n scores.append(fuzzfilter(row['source_sentence'], list(en_test_sents), 5))\n if idx % 1000 == 0:\n hours, rem = divmod(time.time() - start_time, 3600)\n minutes, seconds = divmod(rem, 60)\n print(\"{:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds), \"%0.2f percent complete\" % (100.0*float(idx)/float(len(df_pp))))\n\n# Filter out \"almost overlapping samples\"\ndf_pp['scores'] = scores\ndf_pp = df_pp[df_pp['scores'] < 95]", "Collecting fuzzywuzzy\n Downloading https://files.pythonhosted.org/packages/43/ff/74f23998ad2f93b945c0309f825be92e04e0348e062026998b5eefef4c33/fuzzywuzzy-0.18.0-py2.py3-none-any.whl\nInstalling collected packages: fuzzywuzzy\nSuccessfully installed fuzzywuzzy-0.18.0\nCollecting python-Levenshtein\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/42/a9/d1785c85ebf9b7dfacd08938dd028209c34a0ea3b1bcdb895208bd40a67d/python-Levenshtein-0.12.0.tar.gz (48kB)\n\u001b[K |████████████████████████████████| 51kB 4.1MB/s \n\u001b[?25hRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from python-Levenshtein) (47.3.1)\nBuilding wheels for collected packages: python-Levenshtein\n Building wheel for python-Levenshtein (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for python-Levenshtein: filename=python_Levenshtein-0.12.0-cp36-cp36m-linux_x86_64.whl size=144805 sha256=e7a3cd1d3b3a56457aa3239d36cb86516c41ead306b3503e9238370e3113acb6\n Stored in directory: /root/.cache/pip/wheels/de/c2/93/660fd5f7559049268ad2dc6d81c4e39e9e36518766eaf7e342\nSuccessfully built python-Levenshtein\nInstalling collected packages: python-Levenshtein\nSuccessfully installed python-Levenshtein-0.12.0\n00:00:00.02 0.00 percent complete\n00:00:19.79 1.96 percent complete\n00:00:39.92 3.92 percent complete\n00:00:59.16 5.88 percent complete\n00:01:18.63 7.84 percent complete\n00:01:38.33 9.79 percent complete\n00:01:57.66 11.75 percent complete\n00:02:17.39 13.71 percent complete\n00:02:36.66 15.67 percent complete\n" ], [ "#TODO: Skip for retrain\n# This section does the split between train/dev for the parallel corpora then saves them as separate files\n# We use 1000 dev test and the given test set.\nimport csv\n\n# Do the split between dev/train and create parallel corpora\nnum_dev_patterns = 1000\n\n# Optional: lower case the corpora - this will make it easier to generalize, but without proper casing.\nif lc: # Julia: making lowercasing optional\n df_pp[\"source_sentence\"] = df_pp[\"source_sentence\"].str.lower()\n df_pp[\"target_sentence\"] = df_pp[\"target_sentence\"].str.lower()\n\n# Julia: test sets are already generated\ndev = df_pp.tail(num_dev_patterns) # Herman: Error in original\nstripped = df_pp.drop(df_pp.tail(num_dev_patterns).index)\n\nwith open(\"train.\"+source_language, \"w\") as src_file, open(\"train.\"+target_language, \"w\") as trg_file:\n for index, row in stripped.iterrows():\n src_file.write(row[\"source_sentence\"]+\"\\n\")\n trg_file.write(row[\"target_sentence\"]+\"\\n\")\n \nwith open(\"dev.\"+source_language, \"w\") as src_file, open(\"dev.\"+target_language, \"w\") as trg_file:\n for index, row in dev.iterrows():\n src_file.write(row[\"source_sentence\"]+\"\\n\")\n trg_file.write(row[\"target_sentence\"]+\"\\n\")\n\n#stripped[[\"source_sentence\"]].to_csv(\"train.\"+source_language, header=False, index=False) # Herman: Added `header=False` everywhere\n#stripped[[\"target_sentence\"]].to_csv(\"train.\"+target_language, header=False, 
index=False) # Julia: Problematic handling of quotation marks.\n\n#dev[[\"source_sentence\"]].to_csv(\"dev.\"+source_language, header=False, index=False)\n#dev[[\"target_sentence\"]].to_csv(\"dev.\"+target_language, header=False, index=False)\n\n# Doublecheck the format below. There should be no extra quotation marks or weird characters.\n! head train.*\n! head dev.*", "==> train.en <==\nHow Job came to know Jehovah .\nImagine Jacob gently wiping the tears from Joseph’s eyes , comforting him with the same hope that had once comforted Jacob’s grandfather Abraham .\nWhat gathering work is referred to at Matthew 24 : 31 ?\n( b ) Why is that ancient event significant for us ?\nConsider what is involved in making that amazing feat possible .\nNeither are we required to adopt a special posture .\nOr what if you lose your job , and you are having difficulty finding another ?\nThe Bible explains : “ By the trespass of the one man [ Adam ] death ruled as king ” over Adam’s descendants .\nThe next article will address this concern .\nHowever , more is involved than simply telling your children what is right and what is wrong .\n\n==> train.nya <==\nMmene Yobu anadziŵila Yehova .\nYelekezani kuti mukuona Yakobo atate ake a Yosefe akumupukuta misozi mokoma mtima , kenako akumutonthoza pomulimbikitsa kukhala ndi ciyembekezo cimene cinatonthoza agogo ake a Abulahamu .\nNdi nchito yosonkhanitsa iti imene ikuchulidwa pa Mateyu 24 : 31 ?\n( b ) N’cifukwa ciani cocitika cakale cimeneci ndi nkhani yaikulu kwa ife ?\nOnani nchito imene imakhalapo kuti Baibulo lizipezeka m’zinenelo zambili .\nSafunanso kuti popemphela tizitsatila kakhalidwe ka thupi kapadela .\nNanga bwanji ngati nchito imene munali kugwila inatha ndipo simukupezanso ina ?\nBaibulo imati : “ Cifukwa ca ucimo wa munthu mmodziyo [ Adamu ] imfa inalamulila monga mfumu ” kwa mbadwa za Adamu .\nNkhani yotsatila idzafotokoza mmene tingacitile zimenezi .\nKomabe , kuuza ana anu kuti ici n’cabwino ici n’coipa , pakokha si kokwanila .\n==> dev.en <==\nOur professional ballet careers took us around the world to dance\nHe may be plagued with guilt about something he did in the past , even many years ago .\nWhat events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\nSpread the Good News of Undeserved Kindness , July\nNow , do you think that it bothers Satan that he and his invisible cohorts have been relegated to the world of folklore ?\nTo some people , it is nothing less than divine approval of revenge .\nYou could add , “ Actually , that passage says much more . ”\nWhen we first arrived in Pine Bluff , we moved in with the brother who was the congregation servant at the time .\nWithout their loving support , I could never have served where the need is greater . ” Simon\nBut , instead , my employer thanked me for my good work .\n\n==> dev.nya <==\nCifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\nTaona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\nNdi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\nLalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\nKoma kodi muganiza kuti Satana ndi ziwanda zake amakhumudwa ndi zimene anthu amanena zoti iwo ndi ongopeka ?\n( Ekisodo 21 : 24 ) Anthu ena amakhulupilila kuti mau amenewo akuonetsa kuti Mulungu amavomeleza kubwezela .\nMwina mungakambenso kuti , “ Lembali lili ndi zambili . 
”\nPamene tinafika ku Pine Bluff , tinayamba kukhala ku nyumba kwa m’bale amene anali mtumiki wa mpingo pa nthawiyo .\nPopanda thandizo lao la cikondi , sindikanakwanitsa kutumikila kumalo osoŵa . ”\nKoma iwo ananiyamikila cifukwa cogwila bwino nchito .\n" ] ], [ [ "\n\n---\n\n\n## Installation of JoeyNMT\n\nJoeyNMT is a simple, minimalist NMT package which is useful for learning and teaching. Check out the documentation for JoeyNMT [here](https://joeynmt.readthedocs.io) ", "_____no_output_____" ] ], [ [ "# Install JoeyNMT\n! git clone https://github.com/joeynmt/joeynmt.git\n! cd joeynmt; pip3 install .", "Cloning into 'joeynmt'...\nremote: Enumerating objects: 2467, done.\u001b[K\nremote: Total 2467 (delta 0), reused 0 (delta 0), pack-reused 2467\u001b[K\nReceiving objects: 100% (2467/2467), 2.64 MiB | 4.34 MiB/s, done.\nResolving deltas: 100% (1725/1725), done.\nProcessing /content/joeynmt\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.16.0)\nRequirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (7.0.0)\nRequirement already satisfied: numpy<2.0,>=1.14.5 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.18.5)\nRequirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (47.3.1)\nRequirement already satisfied: torch>=1.1 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.5.1+cu101)\nRequirement already satisfied: tensorflow>=1.14 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (2.2.0)\nRequirement already satisfied: torchtext in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.3.1)\nCollecting sacrebleu>=1.3.6\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/66/5b/cf661da8e9b0229f5d98c2961b072a5728fd11a0758957f8c0fd36081c06/sacrebleu-1.4.12-py3-none-any.whl (54kB)\n\u001b[K |████████████████████████████████| 61kB 4.5MB/s \n\u001b[?25hCollecting subword-nmt\n Downloading https://files.pythonhosted.org/packages/74/60/6600a7bc09e7ab38bc53a48a20d8cae49b837f93f5842a41fe513a694912/subword_nmt-0.3.7-py2.py3-none-any.whl\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (3.2.2)\nRequirement already satisfied: seaborn in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.10.1)\nCollecting pyyaml>=5.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/64/c2/b80047c7ac2478f9501676c988a5411ed5572f35d1beff9cae07d321512c/PyYAML-5.3.1.tar.gz (269kB)\n\u001b[K |████████████████████████████████| 276kB 9.7MB/s \n\u001b[?25hCollecting pylint\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/e8/fb/734960c55474c8f74e6ad4c8588fc44073fb9d69e223269d26a3c2435d16/pylint-2.5.3-py3-none-any.whl (324kB)\n\u001b[K |████████████████████████████████| 327kB 19.6MB/s \n\u001b[?25hRequirement already satisfied: six==1.12 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.12.0)\nCollecting wrapt==1.11.1\n Downloading https://files.pythonhosted.org/packages/67/b2/0f71ca90b0ade7fad27e3d20327c996c6252a2ffe88f50a95bba7434eda9/wrapt-1.11.1.tar.gz\nRequirement already satisfied: protobuf>=3.8.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (3.10.0)\nRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.30.0)\nRequirement already satisfied: google-pasta>=0.1.8 in 
/usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.2.0)\nRequirement already satisfied: scipy==1.4.1; python_version >= \"3\" in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.4.1)\nRequirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.9.0)\nRequirement already satisfied: keras-preprocessing>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.1.2)\nRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (3.2.1)\nRequirement already satisfied: tensorflow-estimator<2.3.0,>=2.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (2.2.0)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.1.0)\nRequirement already satisfied: wheel>=0.26; python_version >= \"3\" in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.34.2)\nRequirement already satisfied: astunparse==1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.6.3)\nRequirement already satisfied: gast==0.3.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.3.3)\nRequirement already satisfied: tensorboard<2.3.0,>=2.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (2.2.2)\nRequirement already satisfied: h5py<2.11.0,>=2.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (2.10.0)\nRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from torchtext->joeynmt==0.0.1) (2.23.0)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from torchtext->joeynmt==0.0.1) (4.41.1)\nCollecting mecab-python3==0.996.5\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/18/49/b55a839a77189042960bf96490640c44816073f917d489acbc5d79fa5cc3/mecab_python3-0.996.5-cp36-cp36m-manylinux2010_x86_64.whl (17.1MB)\n\u001b[K |████████████████████████████████| 17.1MB 205kB/s \n\u001b[?25hCollecting portalocker\n Downloading https://files.pythonhosted.org/packages/53/84/7b3146ec6378d28abc73ab484f09f47dfa008ad6f03f33d90a369f880e25/portalocker-1.7.0-py2.py3-none-any.whl\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (1.2.0)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (2.8.1)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (2.4.7)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (0.10.0)\nRequirement already satisfied: pandas>=0.22.0 in /usr/local/lib/python3.6/dist-packages (from seaborn->joeynmt==0.0.1) (1.0.5)\nCollecting astroid<=2.5,>=2.4.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/24/a8/5133f51967fb21e46ee50831c3f5dda49e976b7f915408d670b1603d41d6/astroid-2.4.2-py3-none-any.whl (213kB)\n\u001b[K |████████████████████████████████| 215kB 57.1MB/s \n\u001b[?25hCollecting toml>=0.7.1\n Downloading 
https://files.pythonhosted.org/packages/9f/e1/1b40b80f2e1663a6b9f497123c11d7d988c0919abbf3c3f2688e448c5363/toml-0.10.1-py2.py3-none-any.whl\nCollecting isort<5,>=4.2.5\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/e5/b0/c121fd1fa3419ea9bfd55c7f9c4fedfec5143208d8c7ad3ce3db6c623c21/isort-4.3.21-py2.py3-none-any.whl (42kB)\n\u001b[K |████████████████████████████████| 51kB 8.6MB/s \n\u001b[?25hCollecting mccabe<0.7,>=0.6\n Downloading https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl\nRequirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (1.17.2)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (1.0.1)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (3.2.2)\nRequirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (1.6.0.post3)\nRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (0.4.1)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (2.9)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (2020.6.20)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.22.0->seaborn->joeynmt==0.0.1) (2018.9)\nCollecting lazy-object-proxy==1.4.*\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/0b/dd/b1e3407e9e6913cf178e506cd0dee818e58694d9a5cd1984e3f6a8b9a10f/lazy_object_proxy-1.4.3-cp36-cp36m-manylinux1_x86_64.whl (55kB)\n\u001b[K |████████████████████████████████| 61kB 10.3MB/s \n\u001b[?25hCollecting typed-ast<1.5,>=1.4.0; implementation_name == \"cpython\" and python_version < \"3.8\"\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/90/ed/5459080d95eb87a02fe860d447197be63b6e2b5e9ff73c2b0a85622994f4/typed_ast-1.4.1-cp36-cp36m-manylinux1_x86_64.whl (737kB)\n\u001b[K |████████████████████████████████| 747kB 57.5MB/s \n\u001b[?25hRequirement already satisfied: rsa<5,>=3.1.4; python_version >= \"3\" in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (4.6)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (0.2.8)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (4.1.0)\nRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in 
/usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (1.6.1)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (1.3.0)\nRequirement already satisfied: pyasn1>=0.1.3 in /usr/local/lib/python3.6/dist-packages (from rsa<5,>=3.1.4; python_version >= \"3\"->google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (0.4.8)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (3.1.0)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (3.1.0)\nBuilding wheels for collected packages: joeynmt, pyyaml, wrapt\n Building wheel for joeynmt (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for joeynmt: filename=joeynmt-0.0.1-cp36-none-any.whl size=77293 sha256=3c8fae0f346f99b8904b109c79858a5b46d70c5f4a22b0f98cafbd4f266a220e\n Stored in directory: /tmp/pip-ephem-wheel-cache-tty7q62f/wheels/db/01/db/751cc9f3e7f6faec127c43644ba250a3ea7ad200594aeda70a\n Building wheel for pyyaml (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for pyyaml: filename=PyYAML-5.3.1-cp36-cp36m-linux_x86_64.whl size=44621 sha256=dbad71b916be69235a8fee7be2bbb6b3bd66580496180dc64d349a9d2a140ca7\n Stored in directory: /root/.cache/pip/wheels/a7/c1/ea/cf5bd31012e735dc1dfea3131a2d5eae7978b251083d6247bd\n Building wheel for wrapt (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for wrapt: filename=wrapt-1.11.1-cp36-cp36m-linux_x86_64.whl size=67430 sha256=f843d38d6a7a3933324cc1b0272e730a5b15ac748347d4cf7fc5eb5660fb1139\n Stored in directory: /root/.cache/pip/wheels/89/67/41/63cbf0f6ac0a6156588b9587be4db5565f8c6d8ccef98202fc\nSuccessfully built joeynmt pyyaml wrapt\nInstalling collected packages: mecab-python3, portalocker, sacrebleu, subword-nmt, pyyaml, lazy-object-proxy, wrapt, typed-ast, astroid, toml, isort, mccabe, pylint, joeynmt\n Found existing installation: PyYAML 3.13\n Uninstalling PyYAML-3.13:\n Successfully uninstalled PyYAML-3.13\n Found existing installation: wrapt 1.12.1\n Uninstalling wrapt-1.12.1:\n Successfully uninstalled wrapt-1.12.1\nSuccessfully installed astroid-2.4.2 isort-4.3.21 joeynmt-0.0.1 lazy-object-proxy-1.4.3 mccabe-0.6.1 mecab-python3-0.996.5 portalocker-1.7.0 pylint-2.5.3 pyyaml-5.3.1 sacrebleu-1.4.12 subword-nmt-0.3.7 toml-0.10.1 typed-ast-1.4.1 wrapt-1.11.1\n" ] ], [ [ "# Preprocessing the Data into Subword BPE Tokens\n\n- One of the most powerful improvements for agglutinative languages (a feature of most Bantu languages) is using BPE tokenization [ (Sennrich, 2015) ](https://arxiv.org/abs/1508.07909).\n\n- It was also shown that by optimizing the umber of BPE codes we significantly improve results for low-resourced languages [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021) [(Martinus, 2019)](https://arxiv.org/abs/1906.05685)\n\n- Below we have the scripts for doing BPE tokenization of our data. We use 4000 tokens as recommended by [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021). You do not need to change anything. Simply running the below will be suitable. 
", "_____no_output_____" ] ], [ [ "#TODO: Skip for retrain\n# One of the huge boosts in NMT performance was to use a different method of tokenizing. \n# Usually, NMT would tokenize by words. However, using a method called BPE gave amazing boosts to performance\n\n# Do subword NMT\nfrom os import path\nos.environ[\"src\"] = source_language # Sets them in bash as well, since we often use bash scripts\nos.environ[\"tgt\"] = target_language\n\n# Learn BPEs on the training data.\nos.environ[\"data_path\"] = path.join(\"joeynmt\", \"data\", source_language + target_language) # Herman! \n! subword-nmt learn-joint-bpe-and-vocab --input train.$src train.$tgt -s 4000 -o bpe.codes.4000 --write-vocabulary vocab.$src vocab.$tgt\n\n# Apply BPE splits to the development and test data.\n! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < train.$src > train.bpe.$src\n! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < train.$tgt > train.bpe.$tgt\n\n! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < dev.$src > dev.bpe.$src\n! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < dev.$tgt > dev.bpe.$tgt\n! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < test.$src > test.bpe.$src\n! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < test.$tgt > test.bpe.$tgt\n\n# Create directory, move everyone we care about to the correct location\n! mkdir -p $data_path\n! cp train.* $data_path\n! cp test.* $data_path\n! cp dev.* $data_path\n! cp bpe.codes.4000 $data_path\n! ls $data_path\n\n# Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path\n! cp train.* \"$gdrive_path\"\n! cp test.* \"$gdrive_path\"\n! cp dev.* \"$gdrive_path\"\n! cp bpe.codes.4000 \"$gdrive_path\"\n! ls \"$gdrive_path\"\n\n# Create that vocab using build_vocab\n! sudo chmod 777 joeynmt/scripts/build_vocab.py\n! joeynmt/scripts/build_vocab.py joeynmt/data/$src$tgt/train.bpe.$src joeynmt/data/$src$tgt/train.bpe.$tgt --output_path \"$gdrive_path/vocab.txt\"\n\n# Some output\n! echo \"BPE Nyanja Sentences\"\n! tail -n 5 test.bpe.$tgt\n! echo \"Combined BPE Vocab\"\n! tail -n 10 \"$gdrive_path/vocab.txt\" # Herman", "bpe.codes.4000\tdev.en\t test.bpe.nya test.en-any.en.1 train.bpe.nya\ndev.bpe.en\tdev.nya test.en\t test.nya\t train.en\ndev.bpe.nya\ttest.bpe.en test.en-any.en train.bpe.en train.nya\nbpe.codes.4000\tdev.nya test.en\t\ttrain.bpe.en\ndev.bpe.en\tmodels\t test.en-any.en\ttrain.bpe.nya\ndev.bpe.nya\ttest.bpe.en test.en-any.en.1\ttrain.en\ndev.en\t\ttest.bpe.nya test.nya\t\ttrain.nya\nBPE Xhosa Sentences\nIzi zin@@ acititsa kuti nd@@ iz@@ idz@@ iŵika monga munthu wos@@ aona mtima .\nN’@@ t@@ aphunzila coonadi , ndin@@ al@@ eka n@@ chit@@ oyo ngakhale kuti n’nali kulandila ndalama zambili .\nN@@ apeleka citsanzo cabwino kwa ana anga aŵili a@@ amuna , ndipo tsopano n’n@@ a@@ ikidwa pau@@ d@@ indo mumpingo .\nCifukwa co@@ khala wo@@ ona mtima , n@@ ili ndi mbili yabwino kwa anthu amene amat@@ enga mis@@ onkh@@ o ndi ena amene nim@@ acita nawo b@@ iz@@ in@@ esi . ”\nKum@@ en@@ eko anayamba kulambila Mulungu wo@@ ona .\nCombined BPE Vocab\nʺ\nrighte@@\nambuyo\namvu\ndif@@\nSONG@@\ncogn@@\nʼ@@\nÓ@@\nYosef@@\n" ] ], [ [ "# Creating the JoeyNMT Config\n\nJoeyNMT requires a yaml config. We provide a template below. 
We've also set a number of defaults with it, that you may play with!\n\n- We used Transformer architecture \n- We set our dropout to reasonably high: 0.3 (recommended in [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021))\n\nThings worth playing with:\n- The batch size (also recommended to change for low-resourced languages)\n- The number of epochs (we've set it at 30 just so it runs in about an hour, for testing purposes)\n- The decoder options (beam_size, alpha)\n- Evaluation metrics (BLEU versus Crhf4)", "_____no_output_____" ] ], [ [ "def get_last_checkpoint(directory):\n last_checkpoint = ''\n try:\n for filename in os.listdir(directory):\n if 'best' in filename and filename.endswith(\".ckpt\"):\n return filename\n if not 'best' in filename and filename.endswith(\".ckpt\"):\n if not last_checkpoint or int(filename.split('.')[0]) > int(last_checkpoint.split('.')[0]):\n last_checkpoint = filename\n except FileNotFoundError as e:\n print('Error Occur ', e)\n return last_checkpoint", "_____no_output_____" ], [ "# Copy the created models from the temporary storage to main storage on google drive for persistant storage \n# the content of te folder will be overwrite when you start trainin\n!cp -r \"/content/drive/My Drive/masakhane/model-temp/\"* \"$gdrive_path/models/${src}${tgt}_transformer/\"\nlast_checkpoint = get_last_checkpoint(models_path)\nprint('Last checkpoint :',last_checkpoint)", "cp: cannot stat '/content/drive/My Drive/masakhane/model-temp/*': No such file or directory\nLast checkpoint : \n" ], [ "# This creates the config file for our JoeyNMT system. It might seem overwhelming so we've provided a couple of useful parameters you'll need to update\n# (You can of course play with all the parameters if you'd like!)\n\nname = '%s%s' % (source_language, target_language)\ngdrive_path = os.environ[\"gdrive_path\"]\n\n# Create the config\nconfig = \"\"\"\nname: \"{name}_transformer\"\n\ndata:\n src: \"{source_language}\"\n trg: \"{target_language}\"\n train: \"{gdrive_path}/train.bpe\"\n dev: \"{gdrive_path}/dev.bpe\"\n test: \"{gdrive_path}/test.bpe\"\n level: \"bpe\"\n lowercase: False\n max_sent_length: 100\n src_vocab: \"{gdrive_path}/vocab.txt\"\n trg_vocab: \"{gdrive_path}/vocab.txt\"\n\ntesting:\n beam_size: 5\n alpha: 1.0\n\ntraining:\n #load_model: \"{gdrive_path}/models/{name}_transformer/{last_checkpoint}\" # TODO: uncommented to load a pre-trained model from last checkpoint\n random_seed: 42\n optimizer: \"adam\"\n normalization: \"tokens\"\n adam_betas: [0.9, 0.999] \n scheduling: \"plateau\" # TODO: try switching from plateau to Noam scheduling\n patience: 5 # For plateau: decrease learning rate by decrease_factor if validation score has not improved for this many validation rounds.\n learning_rate_factor: 0.5 # factor for Noam scheduler (used with Transformer)\n learning_rate_warmup: 1000 # warmup steps for Noam scheduler (used with Transformer)\n decrease_factor: 0.7\n loss: \"crossentropy\"\n learning_rate: 0.0003\n learning_rate_min: 0.00000001\n weight_decay: 0.0\n label_smoothing: 0.1\n batch_size: 4096\n batch_type: \"token\"\n eval_batch_size: 3600\n eval_batch_type: \"token\"\n batch_multiplier: 1\n early_stopping_metric: \"ppl\"\n epochs: 50 # TODO: Decrease for when playing around and checking of working. 
Around 30 is sufficient to check if its working at all\n validation_freq: 1000 # TODO: Set to at least once per epoch.\n logging_freq: 100\n eval_metric: \"bleu\"\n model_dir: \"{model_temp_dir}\"\n overwrite: True # TODO: Set to True if you want to overwrite possibly existing models. \n shuffle: True\n use_cuda: True\n max_output_length: 100\n print_valid_sents: [0, 1, 2, 3]\n keep_last_ckpts: 3\n\nmodel:\n initializer: \"xavier\"\n bias_initializer: \"zeros\"\n init_gain: 1.0\n embed_initializer: \"xavier\"\n embed_init_gain: 1.0\n tied_embeddings: True\n tied_softmax: True\n encoder:\n type: \"transformer\"\n num_layers: 6\n num_heads: 4 # TODO: Increase to 8 for larger data.\n embeddings:\n embedding_dim: 256 # TODO: Increase to 512 for larger data.\n scale: True\n dropout: 0.2\n # typically ff_size = 4 x hidden_size\n hidden_size: 256 # TODO: Increase to 512 for larger data.\n ff_size: 1024 # TODO: Increase to 2048 for larger data.\n dropout: 0.3\n decoder:\n type: \"transformer\"\n num_layers: 6\n num_heads: 4 # TODO: Increase to 8 for larger data.\n embeddings:\n embedding_dim: 256 # TODO: Increase to 512 for larger data.\n scale: True\n dropout: 0.2\n # typically ff_size = 4 x hidden_size\n hidden_size: 256 # TODO: Increase to 512 for larger data.\n ff_size: 1024 # TODO: Increase to 2048 for larger data.\n dropout: 0.3\n\"\"\".format(name=name, gdrive_path=os.environ[\"gdrive_path\"], source_language=source_language, target_language=target_language, model_temp_dir=model_temp_dir, last_checkpoint=last_checkpoint)\nwith open(\"joeynmt/configs/transformer_{name}.yaml\".format(name=name),'w') as f:\n f.write(config)", "_____no_output_____" ] ], [ [ "# Train the Model\n\nThis single line of joeynmt runs the training using the config we made above", "_____no_output_____" ] ], [ [ "# Train the model\n# You can press Ctrl-C to stop. And then run the next cell to save your checkpoints! \n!cd joeynmt; python3 -m joeynmt train configs/transformer_$src$tgt.yaml", "2020-07-12 20:38:52,275 Hello! 
This is Joey-NMT.\n2020-07-12 20:38:52.392375: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-07-12 20:38:53,554 Total params: 12128768\n2020-07-12 20:38:53,556 Trainable parameters: ['decoder.layer_norm.bias', 'decoder.layer_norm.weight', 'decoder.layers.0.dec_layer_norm.bias', 'decoder.layers.0.dec_layer_norm.weight', 'decoder.layers.0.feed_forward.layer_norm.bias', 'decoder.layers.0.feed_forward.layer_norm.weight', 'decoder.layers.0.feed_forward.pwff_layer.0.bias', 'decoder.layers.0.feed_forward.pwff_layer.0.weight', 'decoder.layers.0.feed_forward.pwff_layer.3.bias', 'decoder.layers.0.feed_forward.pwff_layer.3.weight', 'decoder.layers.0.src_trg_att.k_layer.bias', 'decoder.layers.0.src_trg_att.k_layer.weight', 'decoder.layers.0.src_trg_att.output_layer.bias', 'decoder.layers.0.src_trg_att.output_layer.weight', 'decoder.layers.0.src_trg_att.q_layer.bias', 'decoder.layers.0.src_trg_att.q_layer.weight', 'decoder.layers.0.src_trg_att.v_layer.bias', 'decoder.layers.0.src_trg_att.v_layer.weight', 'decoder.layers.0.trg_trg_att.k_layer.bias', 'decoder.layers.0.trg_trg_att.k_layer.weight', 'decoder.layers.0.trg_trg_att.output_layer.bias', 'decoder.layers.0.trg_trg_att.output_layer.weight', 'decoder.layers.0.trg_trg_att.q_layer.bias', 'decoder.layers.0.trg_trg_att.q_layer.weight', 'decoder.layers.0.trg_trg_att.v_layer.bias', 'decoder.layers.0.trg_trg_att.v_layer.weight', 'decoder.layers.0.x_layer_norm.bias', 'decoder.layers.0.x_layer_norm.weight', 'decoder.layers.1.dec_layer_norm.bias', 'decoder.layers.1.dec_layer_norm.weight', 'decoder.layers.1.feed_forward.layer_norm.bias', 'decoder.layers.1.feed_forward.layer_norm.weight', 'decoder.layers.1.feed_forward.pwff_layer.0.bias', 'decoder.layers.1.feed_forward.pwff_layer.0.weight', 'decoder.layers.1.feed_forward.pwff_layer.3.bias', 'decoder.layers.1.feed_forward.pwff_layer.3.weight', 'decoder.layers.1.src_trg_att.k_layer.bias', 'decoder.layers.1.src_trg_att.k_layer.weight', 'decoder.layers.1.src_trg_att.output_layer.bias', 'decoder.layers.1.src_trg_att.output_layer.weight', 'decoder.layers.1.src_trg_att.q_layer.bias', 'decoder.layers.1.src_trg_att.q_layer.weight', 'decoder.layers.1.src_trg_att.v_layer.bias', 'decoder.layers.1.src_trg_att.v_layer.weight', 'decoder.layers.1.trg_trg_att.k_layer.bias', 'decoder.layers.1.trg_trg_att.k_layer.weight', 'decoder.layers.1.trg_trg_att.output_layer.bias', 'decoder.layers.1.trg_trg_att.output_layer.weight', 'decoder.layers.1.trg_trg_att.q_layer.bias', 'decoder.layers.1.trg_trg_att.q_layer.weight', 'decoder.layers.1.trg_trg_att.v_layer.bias', 'decoder.layers.1.trg_trg_att.v_layer.weight', 'decoder.layers.1.x_layer_norm.bias', 'decoder.layers.1.x_layer_norm.weight', 'decoder.layers.2.dec_layer_norm.bias', 'decoder.layers.2.dec_layer_norm.weight', 'decoder.layers.2.feed_forward.layer_norm.bias', 'decoder.layers.2.feed_forward.layer_norm.weight', 'decoder.layers.2.feed_forward.pwff_layer.0.bias', 'decoder.layers.2.feed_forward.pwff_layer.0.weight', 'decoder.layers.2.feed_forward.pwff_layer.3.bias', 'decoder.layers.2.feed_forward.pwff_layer.3.weight', 'decoder.layers.2.src_trg_att.k_layer.bias', 'decoder.layers.2.src_trg_att.k_layer.weight', 'decoder.layers.2.src_trg_att.output_layer.bias', 'decoder.layers.2.src_trg_att.output_layer.weight', 'decoder.layers.2.src_trg_att.q_layer.bias', 'decoder.layers.2.src_trg_att.q_layer.weight', 'decoder.layers.2.src_trg_att.v_layer.bias', 'decoder.layers.2.src_trg_att.v_layer.weight', 
'decoder.layers.2.trg_trg_att.k_layer.bias', 'decoder.layers.2.trg_trg_att.k_layer.weight', 'decoder.layers.2.trg_trg_att.output_layer.bias', 'decoder.layers.2.trg_trg_att.output_layer.weight', 'decoder.layers.2.trg_trg_att.q_layer.bias', 'decoder.layers.2.trg_trg_att.q_layer.weight', 'decoder.layers.2.trg_trg_att.v_layer.bias', 'decoder.layers.2.trg_trg_att.v_layer.weight', 'decoder.layers.2.x_layer_norm.bias', 'decoder.layers.2.x_layer_norm.weight', 'decoder.layers.3.dec_layer_norm.bias', 'decoder.layers.3.dec_layer_norm.weight', 'decoder.layers.3.feed_forward.layer_norm.bias', 'decoder.layers.3.feed_forward.layer_norm.weight', 'decoder.layers.3.feed_forward.pwff_layer.0.bias', 'decoder.layers.3.feed_forward.pwff_layer.0.weight', 'decoder.layers.3.feed_forward.pwff_layer.3.bias', 'decoder.layers.3.feed_forward.pwff_layer.3.weight', 'decoder.layers.3.src_trg_att.k_layer.bias', 'decoder.layers.3.src_trg_att.k_layer.weight', 'decoder.layers.3.src_trg_att.output_layer.bias', 'decoder.layers.3.src_trg_att.output_layer.weight', 'decoder.layers.3.src_trg_att.q_layer.bias', 'decoder.layers.3.src_trg_att.q_layer.weight', 'decoder.layers.3.src_trg_att.v_layer.bias', 'decoder.layers.3.src_trg_att.v_layer.weight', 'decoder.layers.3.trg_trg_att.k_layer.bias', 'decoder.layers.3.trg_trg_att.k_layer.weight', 'decoder.layers.3.trg_trg_att.output_layer.bias', 'decoder.layers.3.trg_trg_att.output_layer.weight', 'decoder.layers.3.trg_trg_att.q_layer.bias', 'decoder.layers.3.trg_trg_att.q_layer.weight', 'decoder.layers.3.trg_trg_att.v_layer.bias', 'decoder.layers.3.trg_trg_att.v_layer.weight', 'decoder.layers.3.x_layer_norm.bias', 'decoder.layers.3.x_layer_norm.weight', 'decoder.layers.4.dec_layer_norm.bias', 'decoder.layers.4.dec_layer_norm.weight', 'decoder.layers.4.feed_forward.layer_norm.bias', 'decoder.layers.4.feed_forward.layer_norm.weight', 'decoder.layers.4.feed_forward.pwff_layer.0.bias', 'decoder.layers.4.feed_forward.pwff_layer.0.weight', 'decoder.layers.4.feed_forward.pwff_layer.3.bias', 'decoder.layers.4.feed_forward.pwff_layer.3.weight', 'decoder.layers.4.src_trg_att.k_layer.bias', 'decoder.layers.4.src_trg_att.k_layer.weight', 'decoder.layers.4.src_trg_att.output_layer.bias', 'decoder.layers.4.src_trg_att.output_layer.weight', 'decoder.layers.4.src_trg_att.q_layer.bias', 'decoder.layers.4.src_trg_att.q_layer.weight', 'decoder.layers.4.src_trg_att.v_layer.bias', 'decoder.layers.4.src_trg_att.v_layer.weight', 'decoder.layers.4.trg_trg_att.k_layer.bias', 'decoder.layers.4.trg_trg_att.k_layer.weight', 'decoder.layers.4.trg_trg_att.output_layer.bias', 'decoder.layers.4.trg_trg_att.output_layer.weight', 'decoder.layers.4.trg_trg_att.q_layer.bias', 'decoder.layers.4.trg_trg_att.q_layer.weight', 'decoder.layers.4.trg_trg_att.v_layer.bias', 'decoder.layers.4.trg_trg_att.v_layer.weight', 'decoder.layers.4.x_layer_norm.bias', 'decoder.layers.4.x_layer_norm.weight', 'decoder.layers.5.dec_layer_norm.bias', 'decoder.layers.5.dec_layer_norm.weight', 'decoder.layers.5.feed_forward.layer_norm.bias', 'decoder.layers.5.feed_forward.layer_norm.weight', 'decoder.layers.5.feed_forward.pwff_layer.0.bias', 'decoder.layers.5.feed_forward.pwff_layer.0.weight', 'decoder.layers.5.feed_forward.pwff_layer.3.bias', 'decoder.layers.5.feed_forward.pwff_layer.3.weight', 'decoder.layers.5.src_trg_att.k_layer.bias', 'decoder.layers.5.src_trg_att.k_layer.weight', 'decoder.layers.5.src_trg_att.output_layer.bias', 'decoder.layers.5.src_trg_att.output_layer.weight', 'decoder.layers.5.src_trg_att.q_layer.bias', 
'decoder.layers.5.src_trg_att.q_layer.weight', 'decoder.layers.5.src_trg_att.v_layer.bias', 'decoder.layers.5.src_trg_att.v_layer.weight', 'decoder.layers.5.trg_trg_att.k_layer.bias', 'decoder.layers.5.trg_trg_att.k_layer.weight', 'decoder.layers.5.trg_trg_att.output_layer.bias', 'decoder.layers.5.trg_trg_att.output_layer.weight', 'decoder.layers.5.trg_trg_att.q_layer.bias', 'decoder.layers.5.trg_trg_att.q_layer.weight', 'decoder.layers.5.trg_trg_att.v_layer.bias', 'decoder.layers.5.trg_trg_att.v_layer.weight', 'decoder.layers.5.x_layer_norm.bias', 'decoder.layers.5.x_layer_norm.weight', 'encoder.layer_norm.bias', 'encoder.layer_norm.weight', 'encoder.layers.0.feed_forward.layer_norm.bias', 'encoder.layers.0.feed_forward.layer_norm.weight', 'encoder.layers.0.feed_forward.pwff_layer.0.bias', 'encoder.layers.0.feed_forward.pwff_layer.0.weight', 'encoder.layers.0.feed_forward.pwff_layer.3.bias', 'encoder.layers.0.feed_forward.pwff_layer.3.weight', 'encoder.layers.0.layer_norm.bias', 'encoder.layers.0.layer_norm.weight', 'encoder.layers.0.src_src_att.k_layer.bias', 'encoder.layers.0.src_src_att.k_layer.weight', 'encoder.layers.0.src_src_att.output_layer.bias', 'encoder.layers.0.src_src_att.output_layer.weight', 'encoder.layers.0.src_src_att.q_layer.bias', 'encoder.layers.0.src_src_att.q_layer.weight', 'encoder.layers.0.src_src_att.v_layer.bias', 'encoder.layers.0.src_src_att.v_layer.weight', 'encoder.layers.1.feed_forward.layer_norm.bias', 'encoder.layers.1.feed_forward.layer_norm.weight', 'encoder.layers.1.feed_forward.pwff_layer.0.bias', 'encoder.layers.1.feed_forward.pwff_layer.0.weight', 'encoder.layers.1.feed_forward.pwff_layer.3.bias', 'encoder.layers.1.feed_forward.pwff_layer.3.weight', 'encoder.layers.1.layer_norm.bias', 'encoder.layers.1.layer_norm.weight', 'encoder.layers.1.src_src_att.k_layer.bias', 'encoder.layers.1.src_src_att.k_layer.weight', 'encoder.layers.1.src_src_att.output_layer.bias', 'encoder.layers.1.src_src_att.output_layer.weight', 'encoder.layers.1.src_src_att.q_layer.bias', 'encoder.layers.1.src_src_att.q_layer.weight', 'encoder.layers.1.src_src_att.v_layer.bias', 'encoder.layers.1.src_src_att.v_layer.weight', 'encoder.layers.2.feed_forward.layer_norm.bias', 'encoder.layers.2.feed_forward.layer_norm.weight', 'encoder.layers.2.feed_forward.pwff_layer.0.bias', 'encoder.layers.2.feed_forward.pwff_layer.0.weight', 'encoder.layers.2.feed_forward.pwff_layer.3.bias', 'encoder.layers.2.feed_forward.pwff_layer.3.weight', 'encoder.layers.2.layer_norm.bias', 'encoder.layers.2.layer_norm.weight', 'encoder.layers.2.src_src_att.k_layer.bias', 'encoder.layers.2.src_src_att.k_layer.weight', 'encoder.layers.2.src_src_att.output_layer.bias', 'encoder.layers.2.src_src_att.output_layer.weight', 'encoder.layers.2.src_src_att.q_layer.bias', 'encoder.layers.2.src_src_att.q_layer.weight', 'encoder.layers.2.src_src_att.v_layer.bias', 'encoder.layers.2.src_src_att.v_layer.weight', 'encoder.layers.3.feed_forward.layer_norm.bias', 'encoder.layers.3.feed_forward.layer_norm.weight', 'encoder.layers.3.feed_forward.pwff_layer.0.bias', 'encoder.layers.3.feed_forward.pwff_layer.0.weight', 'encoder.layers.3.feed_forward.pwff_layer.3.bias', 'encoder.layers.3.feed_forward.pwff_layer.3.weight', 'encoder.layers.3.layer_norm.bias', 'encoder.layers.3.layer_norm.weight', 'encoder.layers.3.src_src_att.k_layer.bias', 'encoder.layers.3.src_src_att.k_layer.weight', 'encoder.layers.3.src_src_att.output_layer.bias', 'encoder.layers.3.src_src_att.output_layer.weight', 'encoder.layers.3.src_src_att.q_layer.bias', 
'encoder.layers.3.src_src_att.q_layer.weight', 'encoder.layers.3.src_src_att.v_layer.bias', 'encoder.layers.3.src_src_att.v_layer.weight', 'encoder.layers.4.feed_forward.layer_norm.bias', 'encoder.layers.4.feed_forward.layer_norm.weight', 'encoder.layers.4.feed_forward.pwff_layer.0.bias', 'encoder.layers.4.feed_forward.pwff_layer.0.weight', 'encoder.layers.4.feed_forward.pwff_layer.3.bias', 'encoder.layers.4.feed_forward.pwff_layer.3.weight', 'encoder.layers.4.layer_norm.bias', 'encoder.layers.4.layer_norm.weight', 'encoder.layers.4.src_src_att.k_layer.bias', 'encoder.layers.4.src_src_att.k_layer.weight', 'encoder.layers.4.src_src_att.output_layer.bias', 'encoder.layers.4.src_src_att.output_layer.weight', 'encoder.layers.4.src_src_att.q_layer.bias', 'encoder.layers.4.src_src_att.q_layer.weight', 'encoder.layers.4.src_src_att.v_layer.bias', 'encoder.layers.4.src_src_att.v_layer.weight', 'encoder.layers.5.feed_forward.layer_norm.bias', 'encoder.layers.5.feed_forward.layer_norm.weight', 'encoder.layers.5.feed_forward.pwff_layer.0.bias', 'encoder.layers.5.feed_forward.pwff_layer.0.weight', 'encoder.layers.5.feed_forward.pwff_layer.3.bias', 'encoder.layers.5.feed_forward.pwff_layer.3.weight', 'encoder.layers.5.layer_norm.bias', 'encoder.layers.5.layer_norm.weight', 'encoder.layers.5.src_src_att.k_layer.bias', 'encoder.layers.5.src_src_att.k_layer.weight', 'encoder.layers.5.src_src_att.output_layer.bias', 'encoder.layers.5.src_src_att.output_layer.weight', 'encoder.layers.5.src_src_att.q_layer.bias', 'encoder.layers.5.src_src_att.q_layer.weight', 'encoder.layers.5.src_src_att.v_layer.bias', 'encoder.layers.5.src_src_att.v_layer.weight', 'src_embed.lut.weight']\n2020-07-12 20:39:09,397 cfg.name : ennya_transformer\n2020-07-12 20:39:09,398 cfg.data.src : en\n2020-07-12 20:39:09,398 cfg.data.trg : nya\n2020-07-12 20:39:09,398 cfg.data.train : /content/drive/My Drive/masakhane/en-nya-baseline/train.bpe\n2020-07-12 20:39:09,398 cfg.data.dev : /content/drive/My Drive/masakhane/en-nya-baseline/dev.bpe\n2020-07-12 20:39:09,398 cfg.data.test : /content/drive/My Drive/masakhane/en-nya-baseline/test.bpe\n2020-07-12 20:39:09,398 cfg.data.level : bpe\n2020-07-12 20:39:09,399 cfg.data.lowercase : False\n2020-07-12 20:39:09,399 cfg.data.max_sent_length : 100\n2020-07-12 20:39:09,399 cfg.data.src_vocab : /content/drive/My Drive/masakhane/en-nya-baseline/vocab.txt\n2020-07-12 20:39:09,399 cfg.data.trg_vocab : /content/drive/My Drive/masakhane/en-nya-baseline/vocab.txt\n2020-07-12 20:39:09,399 cfg.testing.beam_size : 5\n2020-07-12 20:39:09,399 cfg.testing.alpha : 1.0\n2020-07-12 20:39:09,399 cfg.training.random_seed : 42\n2020-07-12 20:39:09,400 cfg.training.optimizer : adam\n2020-07-12 20:39:09,400 cfg.training.normalization : tokens\n2020-07-12 20:39:09,400 cfg.training.adam_betas : [0.9, 0.999]\n2020-07-12 20:39:09,400 cfg.training.scheduling : plateau\n2020-07-12 20:39:09,400 cfg.training.patience : 5\n2020-07-12 20:39:09,400 cfg.training.learning_rate_factor : 0.5\n2020-07-12 20:39:09,400 cfg.training.learning_rate_warmup : 1000\n2020-07-12 20:39:09,401 cfg.training.decrease_factor : 0.7\n2020-07-12 20:39:09,401 cfg.training.loss : crossentropy\n2020-07-12 20:39:09,401 cfg.training.learning_rate : 0.0003\n2020-07-12 20:39:09,401 cfg.training.learning_rate_min : 1e-08\n2020-07-12 20:39:09,401 cfg.training.weight_decay : 0.0\n2020-07-12 20:39:09,401 cfg.training.label_smoothing : 0.1\n2020-07-12 20:39:09,401 cfg.training.batch_size : 4096\n2020-07-12 20:39:09,401 cfg.training.batch_type : token\n2020-07-12 
20:39:09,402 cfg.training.eval_batch_size : 3600\n2020-07-12 20:39:09,402 cfg.training.eval_batch_type : token\n2020-07-12 20:39:09,402 cfg.training.batch_multiplier : 1\n2020-07-12 20:39:09,402 cfg.training.early_stopping_metric : ppl\n2020-07-12 20:39:09,402 cfg.training.epochs : 50\n2020-07-12 20:39:09,402 cfg.training.validation_freq : 1000\n2020-07-12 20:39:09,402 cfg.training.logging_freq : 100\n2020-07-12 20:39:09,403 cfg.training.eval_metric : bleu\n2020-07-12 20:39:09,403 cfg.training.model_dir : /content/drive/My Drive/masakhane/model-temp\n2020-07-12 20:39:09,403 cfg.training.overwrite : True\n2020-07-12 20:39:09,403 cfg.training.shuffle : True\n2020-07-12 20:39:09,403 cfg.training.use_cuda : True\n2020-07-12 20:39:09,403 cfg.training.max_output_length : 100\n2020-07-12 20:39:09,403 cfg.training.print_valid_sents : [0, 1, 2, 3]\n2020-07-12 20:39:09,404 cfg.training.keep_last_ckpts : 3\n2020-07-12 20:39:09,404 cfg.model.initializer : xavier\n2020-07-12 20:39:09,404 cfg.model.bias_initializer : zeros\n2020-07-12 20:39:09,404 cfg.model.init_gain : 1.0\n2020-07-12 20:39:09,404 cfg.model.embed_initializer : xavier\n2020-07-12 20:39:09,404 cfg.model.embed_init_gain : 1.0\n2020-07-12 20:39:09,404 cfg.model.tied_embeddings : True\n2020-07-12 20:39:09,405 cfg.model.tied_softmax : True\n2020-07-12 20:39:09,405 cfg.model.encoder.type : transformer\n2020-07-12 20:39:09,405 cfg.model.encoder.num_layers : 6\n2020-07-12 20:39:09,405 cfg.model.encoder.num_heads : 4\n2020-07-12 20:39:09,405 cfg.model.encoder.embeddings.embedding_dim : 256\n2020-07-12 20:39:09,405 cfg.model.encoder.embeddings.scale : True\n2020-07-12 20:39:09,405 cfg.model.encoder.embeddings.dropout : 0.2\n2020-07-12 20:39:09,405 cfg.model.encoder.hidden_size : 256\n2020-07-12 20:39:09,406 cfg.model.encoder.ff_size : 1024\n2020-07-12 20:39:09,406 cfg.model.encoder.dropout : 0.3\n2020-07-12 20:39:09,406 cfg.model.decoder.type : transformer\n2020-07-12 20:39:09,406 cfg.model.decoder.num_layers : 6\n2020-07-12 20:39:09,406 cfg.model.decoder.num_heads : 4\n2020-07-12 20:39:09,406 cfg.model.decoder.embeddings.embedding_dim : 256\n2020-07-12 20:39:09,406 cfg.model.decoder.embeddings.scale : True\n2020-07-12 20:39:09,407 cfg.model.decoder.embeddings.dropout : 0.2\n2020-07-12 20:39:09,407 cfg.model.decoder.hidden_size : 256\n2020-07-12 20:39:09,407 cfg.model.decoder.ff_size : 1024\n2020-07-12 20:39:09,407 cfg.model.decoder.dropout : 0.3\n2020-07-12 20:39:09,407 Data set sizes: \n\ttrain 49851,\n\tvalid 1000,\n\ttest 2672\n2020-07-12 20:39:09,407 First training example:\n\t[SRC] How Job came to know Jehovah .\n\t[TRG] M@@ mene Yobu anadz@@ iŵ@@ ila Yehova .\n2020-07-12 20:39:09,408 First 10 words (src): (0) <unk> (1) <pad> (2) <s> (3) </s> (4) . (5) , (6) the (7) to (8) a (9) :\n2020-07-12 20:39:09,408 First 10 words (trg): (0) <unk> (1) <pad> (2) <s> (3) </s> (4) . 
(5) , (6) the (7) to (8) a (9) :\n2020-07-12 20:39:09,408 Number of Src words (types): 4174\n2020-07-12 20:39:09,409 Number of Trg words (types): 4174\n2020-07-12 20:39:09,409 Model(\n\tencoder=TransformerEncoder(num_layers=6, num_heads=4),\n\tdecoder=TransformerDecoder(num_layers=6, num_heads=4),\n\tsrc_embed=Embeddings(embedding_dim=256, vocab_size=4174),\n\ttrg_embed=Embeddings(embedding_dim=256, vocab_size=4174))\n2020-07-12 20:39:09,418 EPOCH 1\n/pytorch/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of nonzero is deprecated:\n\tnonzero(Tensor input, *, Tensor out)\nConsider using one of the following signatures instead:\n\tnonzero(Tensor input, *, bool as_tuple)\n2020-07-12 20:39:28,225 Epoch 1 Step: 100 Batch Loss: 5.851800 Tokens per Sec: 11050, Lr: 0.000300\n2020-07-12 20:39:46,905 Epoch 1 Step: 200 Batch Loss: 5.502884 Tokens per Sec: 11317, Lr: 0.000300\n2020-07-12 20:40:05,233 Epoch 1 Step: 300 Batch Loss: 5.292922 Tokens per Sec: 11160, Lr: 0.000300\n2020-07-12 20:40:23,613 Epoch 1 Step: 400 Batch Loss: 5.175645 Tokens per Sec: 11256, Lr: 0.000300\n2020-07-12 20:40:41,982 Epoch 1 Step: 500 Batch Loss: 4.725220 Tokens per Sec: 11361, Lr: 0.000300\n2020-07-12 20:40:56,727 Epoch 1: total training loss 3090.93\n2020-07-12 20:40:56,728 EPOCH 2\n2020-07-12 20:41:00,417 Epoch 2 Step: 600 Batch Loss: 4.830907 Tokens per Sec: 11316, Lr: 0.000300\n2020-07-12 20:41:18,572 Epoch 2 Step: 700 Batch Loss: 4.736766 Tokens per Sec: 11298, Lr: 0.000300\n2020-07-12 20:41:36,802 Epoch 2 Step: 800 Batch Loss: 4.635335 Tokens per Sec: 11134, Lr: 0.000300\n2020-07-12 20:41:55,103 Epoch 2 Step: 900 Batch Loss: 4.045403 Tokens per Sec: 11358, Lr: 0.000300\n2020-07-12 20:42:13,600 Epoch 2 Step: 1000 Batch Loss: 4.308612 Tokens per Sec: 11398, Lr: 0.000300\n2020-07-12 20:42:55,456 Hooray! 
New best validation result [ppl]!\n2020-07-12 20:42:55,456 Saving new checkpoint.\n2020-07-12 20:42:56,628 Example #0\n2020-07-12 20:42:56,628 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 20:42:56,629 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 20:42:56,629 \tHypothesis: ( 1 : 3 ) Kodi anthu amene anali kuthandiza kuti anali kuthandiza kuti anali kuthandiza .\n2020-07-12 20:42:56,629 Example #1\n2020-07-12 20:42:56,629 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 20:42:56,630 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 20:42:56,630 \tHypothesis: ( 1 : 1 ) Koma anthu amene anali kuthandiza kuti anali kukuthandiza .\n2020-07-12 20:42:56,630 Example #2\n2020-07-12 20:42:56,630 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 20:42:56,630 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 20:42:56,630 \tHypothesis: ( b ) Kodi anthu a Mulungu amene anali kuthandiza bwanji kuti : “ Kodi Yehova ?\n2020-07-12 20:42:56,631 Example #3\n2020-07-12 20:42:56,631 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 20:42:56,631 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 20:42:56,631 \tHypothesis: Kodi Malile\n2020-07-12 20:42:56,631 Validation result (greedy) at epoch 2, step 1000: bleu: 0.73, loss: 104985.1797, ppl: 69.0250, duration: 43.0307s\n2020-07-12 20:43:15,235 Epoch 2 Step: 1100 Batch Loss: 4.252433 Tokens per Sec: 11137, Lr: 0.000300\n2020-07-12 20:43:27,269 Epoch 2: total training loss 2596.20\n2020-07-12 20:43:27,270 EPOCH 3\n2020-07-12 20:43:33,992 Epoch 3 Step: 1200 Batch Loss: 3.654455 Tokens per Sec: 11098, Lr: 0.000300\n2020-07-12 20:43:52,339 Epoch 3 Step: 1300 Batch Loss: 3.389505 Tokens per Sec: 11076, Lr: 0.000300\n2020-07-12 20:44:11,141 Epoch 3 Step: 1400 Batch Loss: 3.965687 Tokens per Sec: 11462, Lr: 0.000300\n2020-07-12 20:44:29,325 Epoch 3 Step: 1500 Batch Loss: 3.584218 Tokens per Sec: 11146, Lr: 0.000300\n2020-07-12 20:44:47,940 Epoch 3 Step: 1600 Batch Loss: 3.427156 Tokens per Sec: 11108, Lr: 0.000300\n2020-07-12 20:45:06,608 Epoch 3 Step: 1700 Batch Loss: 4.065697 Tokens per Sec: 11517, Lr: 0.000300\n2020-07-12 20:45:14,535 Epoch 3: total training loss 2279.14\n2020-07-12 20:45:14,536 EPOCH 4\n2020-07-12 20:45:25,196 Epoch 4 Step: 1800 Batch Loss: 3.446399 Tokens per Sec: 11137, Lr: 0.000300\n2020-07-12 20:45:43,520 Epoch 4 Step: 1900 Batch Loss: 3.538134 Tokens per Sec: 11206, Lr: 0.000300\n2020-07-12 20:46:02,213 Epoch 4 Step: 2000 Batch Loss: 3.472388 Tokens per Sec: 11551, Lr: 0.000300\n2020-07-12 20:46:31,222 Hooray! 
New best validation result [ppl]!\n2020-07-12 20:46:31,223 Saving new checkpoint.\n2020-07-12 20:46:32,434 Example #0\n2020-07-12 20:46:32,434 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 20:46:32,434 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 20:46:32,434 \tHypothesis: Kodi anthu ambili ambili ambili anali kudya ku dziko lapansi ?\n2020-07-12 20:46:32,435 Example #1\n2020-07-12 20:46:32,435 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 20:46:32,435 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 20:46:32,435 \tHypothesis: Iye anali kudya , ndipo anali kudya ku Yerusalemu .\n2020-07-12 20:46:32,436 Example #2\n2020-07-12 20:46:32,436 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 20:46:32,436 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 20:46:32,436 \tHypothesis: Kodi Yesu anakamba kuti anthu a Yesu anali ndi mau a Yesu ?\n2020-07-12 20:46:32,437 Example #3\n2020-07-12 20:46:32,437 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 20:46:32,437 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 20:46:32,437 \tHypothesis: Kodi Mfundo ya Mudzi Wosiyana ndi Mtima\n2020-07-12 20:46:32,437 Validation result (greedy) at epoch 4, step 2000: bleu: 2.10, loss: 87342.0547, ppl: 33.8809, duration: 30.2243s\n2020-07-12 20:46:50,898 Epoch 4 Step: 2100 Batch Loss: 3.669708 Tokens per Sec: 11043, Lr: 0.000300\n2020-07-12 20:47:09,397 Epoch 4 Step: 2200 Batch Loss: 3.272874 Tokens per Sec: 11294, Lr: 0.000300\n2020-07-12 20:47:27,868 Epoch 4 Step: 2300 Batch Loss: 3.434987 Tokens per Sec: 11149, Lr: 0.000300\n2020-07-12 20:47:32,157 Epoch 4: total training loss 2088.17\n2020-07-12 20:47:32,158 EPOCH 5\n2020-07-12 20:47:46,655 Epoch 5 Step: 2400 Batch Loss: 2.883982 Tokens per Sec: 11188, Lr: 0.000300\n2020-07-12 20:48:05,133 Epoch 5 Step: 2500 Batch Loss: 3.808741 Tokens per Sec: 11522, Lr: 0.000300\n2020-07-12 20:48:23,395 Epoch 5 Step: 2600 Batch Loss: 3.231003 Tokens per Sec: 11131, Lr: 0.000300\n2020-07-12 20:48:41,768 Epoch 5 Step: 2700 Batch Loss: 3.547242 Tokens per Sec: 11405, Lr: 0.000300\n2020-07-12 20:49:00,056 Epoch 5 Step: 2800 Batch Loss: 3.122620 Tokens per Sec: 11173, Lr: 0.000300\n2020-07-12 20:49:18,411 Epoch 5 Step: 2900 Batch Loss: 3.458104 Tokens per Sec: 11189, Lr: 0.000300\n2020-07-12 20:49:19,182 Epoch 5: total training loss 1954.32\n2020-07-12 20:49:19,182 EPOCH 6\n2020-07-12 20:49:37,162 Epoch 6 Step: 3000 Batch Loss: 3.304617 Tokens per Sec: 11262, Lr: 0.000300\n2020-07-12 20:49:56,697 Hooray! 
New best validation result [ppl]!\n2020-07-12 20:49:56,697 Saving new checkpoint.\n2020-07-12 20:49:57,944 Example #0\n2020-07-12 20:49:57,945 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 20:49:57,945 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 20:49:57,945 \tHypothesis: Tinali kufunitsitsa kukonzekela nchito yolalikila yolalikila yolalikila yolalikila .\n2020-07-12 20:49:57,946 Example #1\n2020-07-12 20:49:57,946 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 20:49:57,947 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 20:49:57,947 \tHypothesis: Iye anali kufunitsitsa kucita zimenezi , ndipo anali kudya zaka zambili .\n2020-07-12 20:49:57,947 Example #2\n2020-07-12 20:49:57,948 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 20:49:57,948 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 20:49:57,948 \tHypothesis: Kodi Mose anacita ciani kuti ophunzila ake adzagwilitsila nchito mau a Paulo ?\n2020-07-12 20:49:57,948 Example #3\n2020-07-12 20:49:57,949 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 20:49:57,949 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 20:49:57,949 \tHypothesis: Kudziŵa kuti Mmene Mmene Mmene Mmene Mmene Mmene Mudziŵa\n2020-07-12 20:49:57,949 Validation result (greedy) at epoch 6, step 3000: bleu: 3.32, loss: 78307.3125, ppl: 23.5340, duration: 20.7865s\n2020-07-12 20:50:16,871 Epoch 6 Step: 3100 Batch Loss: 3.466974 Tokens per Sec: 11099, Lr: 0.000300\n2020-07-12 20:50:35,318 Epoch 6 Step: 3200 Batch Loss: 2.713333 Tokens per Sec: 10942, Lr: 0.000300\n2020-07-12 20:50:53,654 Epoch 6 Step: 3300 Batch Loss: 3.480570 Tokens per Sec: 11215, Lr: 0.000300\n2020-07-12 20:51:12,139 Epoch 6 Step: 3400 Batch Loss: 2.565523 Tokens per Sec: 11391, Lr: 0.000300\n2020-07-12 20:51:28,070 Epoch 6: total training loss 1860.15\n2020-07-12 20:51:28,070 EPOCH 7\n2020-07-12 20:51:30,605 Epoch 7 Step: 3500 Batch Loss: 3.121952 Tokens per Sec: 10914, Lr: 0.000300\n2020-07-12 20:51:49,109 Epoch 7 Step: 3600 Batch Loss: 2.306157 Tokens per Sec: 11148, Lr: 0.000300\n2020-07-12 20:52:07,713 Epoch 7 Step: 3700 Batch Loss: 2.988599 Tokens per Sec: 11346, Lr: 0.000300\n2020-07-12 20:52:26,172 Epoch 7 Step: 3800 Batch Loss: 3.353390 Tokens per Sec: 11405, Lr: 0.000300\n2020-07-12 20:52:44,412 Epoch 7 Step: 3900 Batch Loss: 2.968240 Tokens per Sec: 11083, Lr: 0.000300\n2020-07-12 20:53:02,675 Epoch 7 Step: 4000 Batch Loss: 2.396898 Tokens per Sec: 11183, Lr: 0.000300\n2020-07-12 20:53:26,332 Hooray! 
New best validation result [ppl]!\n2020-07-12 20:53:26,332 Saving new checkpoint.\n2020-07-12 20:53:27,500 Example #0\n2020-07-12 20:53:27,500 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 20:53:27,501 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 20:53:27,501 \tHypothesis: Tidzakhala ndi mavuto amene tinali kukumana nao padziko lonse .\n2020-07-12 20:53:27,501 Example #1\n2020-07-12 20:53:27,502 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 20:53:27,502 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 20:53:27,502 \tHypothesis: Iye angacite zimenezi , ndipo anali kudya zonsezi .\n2020-07-12 20:53:27,502 Example #2\n2020-07-12 20:53:27,502 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 20:53:27,502 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 20:53:27,503 \tHypothesis: Kodi Mose anakamba ciani ponena za anthu amene Paulo analemba kuti : “ Kodi mau a Paulo a Paulo ali ndi mau a Paulo ?\n2020-07-12 20:53:27,503 Example #3\n2020-07-12 20:53:27,503 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 20:53:27,503 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 20:53:27,503 \tHypothesis: Kuwonjezela apo , Nchito Yabwino Amene Ahabu\n2020-07-12 20:53:27,504 Validation result (greedy) at epoch 7, step 4000: bleu: 5.24, loss: 72736.8750, ppl: 18.7983, duration: 24.8278s\n2020-07-12 20:53:40,429 Epoch 7: total training loss 1776.31\n2020-07-12 20:53:40,429 EPOCH 8\n2020-07-12 20:53:46,268 Epoch 8 Step: 4100 Batch Loss: 2.750468 Tokens per Sec: 11046, Lr: 0.000300\n2020-07-12 20:54:04,739 Epoch 8 Step: 4200 Batch Loss: 3.271371 Tokens per Sec: 11177, Lr: 0.000300\n2020-07-12 20:54:23,319 Epoch 8 Step: 4300 Batch Loss: 3.047179 Tokens per Sec: 11402, Lr: 0.000300\n2020-07-12 20:54:41,779 Epoch 8 Step: 4400 Batch Loss: 2.842948 Tokens per Sec: 11260, Lr: 0.000300\n2020-07-12 20:55:00,279 Epoch 8 Step: 4500 Batch Loss: 2.115280 Tokens per Sec: 11292, Lr: 0.000300\n2020-07-12 20:55:18,684 Epoch 8 Step: 4600 Batch Loss: 1.979993 Tokens per Sec: 11151, Lr: 0.000300\n2020-07-12 20:55:27,832 Epoch 8: total training loss 1702.89\n2020-07-12 20:55:27,833 EPOCH 9\n2020-07-12 20:55:37,475 Epoch 9 Step: 4700 Batch Loss: 1.902806 Tokens per Sec: 11243, Lr: 0.000300\n2020-07-12 20:55:56,021 Epoch 9 Step: 4800 Batch Loss: 3.366707 Tokens per Sec: 11431, Lr: 0.000300\n2020-07-12 20:56:14,599 Epoch 9 Step: 4900 Batch Loss: 2.641436 Tokens per Sec: 11356, Lr: 0.000300\n2020-07-12 20:56:33,118 Epoch 9 Step: 5000 Batch Loss: 1.813956 Tokens per Sec: 11200, Lr: 0.000300\n2020-07-12 20:56:59,654 Hooray! 
New best validation result [ppl]!\n2020-07-12 20:56:59,654 Saving new checkpoint.\n2020-07-12 20:57:00,799 Example #0\n2020-07-12 20:57:00,800 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 20:57:00,800 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 20:57:00,800 \tHypothesis: Tinali kufufuza za dziko lathu limene tinalamula anthu ambili .\n2020-07-12 20:57:00,800 Example #1\n2020-07-12 20:57:00,801 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 20:57:00,801 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 20:57:00,801 \tHypothesis: Iye angacite zimenezo kuti akakhale ndi zaka zambili .\n2020-07-12 20:57:00,801 Example #2\n2020-07-12 20:57:00,802 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 20:57:00,802 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 20:57:00,802 \tHypothesis: Kodi Mose anaonetsa bwanji kuti anali ndi mau a Paulo ?\n2020-07-12 20:57:00,802 Example #3\n2020-07-12 20:57:00,802 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 20:57:00,803 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 20:57:00,803 \tHypothesis: Cikondi Cikondi Cikondi Cikondi Cikondi Cake Cikondi Cikondi Cikondi Cikondi Cikondi Cikondi Cikondi Cikondi Cikondi Cikondi Cikondi Cikondi Cikondi Cikondi Cikondi Cake\n2020-07-12 20:57:00,803 Validation result (greedy) at epoch 9, step 5000: bleu: 6.91, loss: 68773.7266, ppl: 16.0213, duration: 27.6843s\n2020-07-12 20:57:19,792 Epoch 9 Step: 5100 Batch Loss: 3.378844 Tokens per Sec: 10709, Lr: 0.000300\n2020-07-12 20:57:38,438 Epoch 9 Step: 5200 Batch Loss: 3.053119 Tokens per Sec: 11093, Lr: 0.000300\n2020-07-12 20:57:43,714 Epoch 9: total training loss 1654.79\n2020-07-12 20:57:43,715 EPOCH 10\n2020-07-12 20:57:56,691 Epoch 10 Step: 5300 Batch Loss: 2.894244 Tokens per Sec: 11050, Lr: 0.000300\n2020-07-12 20:58:14,962 Epoch 10 Step: 5400 Batch Loss: 3.201298 Tokens per Sec: 11192, Lr: 0.000300\n2020-07-12 20:58:33,363 Epoch 10 Step: 5500 Batch Loss: 3.223227 Tokens per Sec: 11257, Lr: 0.000300\n2020-07-12 20:58:51,574 Epoch 10 Step: 5600 Batch Loss: 2.737585 Tokens per Sec: 11035, Lr: 0.000300\n2020-07-12 20:59:09,738 Epoch 10 Step: 5700 Batch Loss: 2.743144 Tokens per Sec: 11207, Lr: 0.000300\n2020-07-12 20:59:28,488 Epoch 10 Step: 5800 Batch Loss: 2.704981 Tokens per Sec: 11613, Lr: 0.000300\n2020-07-12 20:59:31,146 Epoch 10: total training loss 1624.04\n2020-07-12 20:59:31,146 EPOCH 11\n2020-07-12 20:59:47,229 Epoch 11 Step: 5900 Batch Loss: 2.886471 Tokens per Sec: 11408, Lr: 0.000300\n2020-07-12 21:00:05,688 Epoch 11 Step: 6000 Batch Loss: 2.807448 Tokens per Sec: 11172, Lr: 0.000300\n2020-07-12 21:00:38,757 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:00:38,758 Saving new checkpoint.\n2020-07-12 21:00:39,876 Example #0\n2020-07-12 21:00:39,876 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:00:39,877 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:00:39,877 \tHypothesis: Mogwilizana ndi anthu amene tinali kukumana nao padziko lapansi , tinayamba kukumana ndi mavuto ambili\n2020-07-12 21:00:39,877 Example #1\n2020-07-12 21:00:39,877 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:00:39,877 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:00:39,878 \tHypothesis: Iye angacite bwino kuona mmene anali kucitila zinthu zambili , ngakhale kuti anali ndi zaka zambili .\n2020-07-12 21:00:39,878 Example #2\n2020-07-12 21:00:39,878 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:00:39,878 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:00:39,878 \tHypothesis: Kodi Mose anacita ciani pa tsiku la Paulo la 2 Timoteyo 2 : 19 ?\n2020-07-12 21:00:39,878 Example #3\n2020-07-12 21:00:39,879 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:00:39,879 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:00:39,879 \tHypothesis: Mmene Mumacita Mkulu Wabwino , July\n2020-07-12 21:00:39,879 Validation result (greedy) at epoch 11, step 6000: bleu: 8.22, loss: 66082.8594, ppl: 14.3735, duration: 34.1909s\n2020-07-12 21:00:58,498 Epoch 11 Step: 6100 Batch Loss: 2.824265 Tokens per Sec: 11075, Lr: 0.000300\n2020-07-12 21:01:17,161 Epoch 11 Step: 6200 Batch Loss: 2.993033 Tokens per Sec: 11128, Lr: 0.000300\n2020-07-12 21:01:35,768 Epoch 11 Step: 6300 Batch Loss: 2.654554 Tokens per Sec: 11077, Lr: 0.000300\n2020-07-12 21:01:53,341 Epoch 11: total training loss 1568.96\n2020-07-12 21:01:53,342 EPOCH 12\n2020-07-12 21:01:54,486 Epoch 12 Step: 6400 Batch Loss: 2.727532 Tokens per Sec: 9916, Lr: 0.000300\n2020-07-12 21:02:13,003 Epoch 12 Step: 6500 Batch Loss: 2.721638 Tokens per Sec: 11120, Lr: 0.000300\n2020-07-12 21:02:31,610 Epoch 12 Step: 6600 Batch Loss: 2.611042 Tokens per Sec: 11272, Lr: 0.000300\n2020-07-12 21:02:50,016 Epoch 12 Step: 6700 Batch Loss: 3.039977 Tokens per Sec: 11127, Lr: 0.000300\n2020-07-12 21:03:08,468 Epoch 12 Step: 6800 Batch Loss: 2.798577 Tokens per Sec: 11329, Lr: 0.000300\n2020-07-12 21:03:27,082 Epoch 12 Step: 6900 Batch Loss: 2.618927 Tokens per Sec: 11387, Lr: 0.000300\n2020-07-12 21:03:40,650 Epoch 12: total training loss 1529.47\n2020-07-12 21:03:40,651 EPOCH 13\n2020-07-12 21:03:45,869 Epoch 13 Step: 7000 Batch Loss: 2.779525 Tokens per Sec: 11528, Lr: 0.000300\n2020-07-12 21:04:08,411 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:04:08,412 Saving new checkpoint.\n2020-07-12 21:04:09,588 Example #0\n2020-07-12 21:04:09,588 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:04:09,589 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:04:09,589 \tHypothesis: Tinali kudya zakudya zocepa kuti tikwanitse anthu ambili m’dzikoli\n2020-07-12 21:04:09,589 Example #1\n2020-07-12 21:04:09,589 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:04:09,590 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:04:09,590 \tHypothesis: Iye angacite zimenezi mwa kucita zinthu zimene zinacitika m’nthawi ya atumwi .\n2020-07-12 21:04:09,590 Example #2\n2020-07-12 21:04:09,591 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:04:09,591 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:04:09,591 \tHypothesis: Kodi Mose anacita ciani pa nthawi ya atumwi ?\n2020-07-12 21:04:09,591 Example #3\n2020-07-12 21:04:09,591 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:04:09,592 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:04:09,592 \tHypothesis: Kudziŵa Coonadi Conse Conse Conse Coonadi Conse , July\n2020-07-12 21:04:09,592 Validation result (greedy) at epoch 13, step 7000: bleu: 9.08, loss: 63526.4961, ppl: 12.9653, duration: 23.7224s\n2020-07-12 21:04:28,101 Epoch 13 Step: 7100 Batch Loss: 2.530882 Tokens per Sec: 11105, Lr: 0.000300\n2020-07-12 21:04:46,627 Epoch 13 Step: 7200 Batch Loss: 2.307031 Tokens per Sec: 11074, Lr: 0.000300\n2020-07-12 21:05:04,913 Epoch 13 Step: 7300 Batch Loss: 2.874648 Tokens per Sec: 11324, Lr: 0.000300\n2020-07-12 21:05:23,442 Epoch 13 Step: 7400 Batch Loss: 2.225077 Tokens per Sec: 10968, Lr: 0.000300\n2020-07-12 21:05:41,946 Epoch 13 Step: 7500 Batch Loss: 2.328564 Tokens per Sec: 11249, Lr: 0.000300\n2020-07-12 21:05:52,458 Epoch 13: total training loss 1513.63\n2020-07-12 21:05:52,458 EPOCH 14\n2020-07-12 21:06:00,668 Epoch 14 Step: 7600 Batch Loss: 2.770868 Tokens per Sec: 11209, Lr: 0.000300\n2020-07-12 21:06:19,075 Epoch 14 Step: 7700 Batch Loss: 2.845640 Tokens per Sec: 11402, Lr: 0.000300\n2020-07-12 21:06:37,491 Epoch 14 Step: 7800 Batch Loss: 2.488195 Tokens per Sec: 11250, Lr: 0.000300\n2020-07-12 21:06:55,905 Epoch 14 Step: 7900 Batch Loss: 2.452046 Tokens per Sec: 11275, Lr: 0.000300\n2020-07-12 21:07:14,267 Epoch 14 Step: 8000 Batch Loss: 2.044750 Tokens per Sec: 11336, Lr: 0.000300\n2020-07-12 21:07:32,905 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:07:32,905 Saving new checkpoint.\n2020-07-12 21:07:34,103 Example #0\n2020-07-12 21:07:34,104 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:07:34,104 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:07:34,104 \tHypothesis: Anthu athu amene anatilamula kuti tikasokoneze dziko lonse\n2020-07-12 21:07:34,104 Example #1\n2020-07-12 21:07:34,105 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:07:34,105 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:07:34,105 \tHypothesis: Iye angacite zimenezo mwa kucita zinthu zimene zinacitika kwa zaka zambili .\n2020-07-12 21:07:34,105 Example #2\n2020-07-12 21:07:34,105 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:07:34,106 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:07:34,106 \tHypothesis: Kodi Mose anacita ciani pa lemba la 2 Timoteyo 2 : 19 ?\n2020-07-12 21:07:34,106 Example #3\n2020-07-12 21:07:34,106 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:07:34,107 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:07:34,107 \tHypothesis: Kuphunzila Cikondi Catsopano Catsopano Conse Cimalimba , July\n2020-07-12 21:07:34,107 Validation result (greedy) at epoch 14, step 8000: bleu: 9.82, loss: 61911.1172, ppl: 12.1475, duration: 19.8398s\n2020-07-12 21:07:52,888 Epoch 14 Step: 8100 Batch Loss: 1.988333 Tokens per Sec: 10812, Lr: 0.000300\n2020-07-12 21:07:59,991 Epoch 14: total training loss 1482.87\n2020-07-12 21:07:59,991 EPOCH 15\n2020-07-12 21:08:11,604 Epoch 15 Step: 8200 Batch Loss: 2.349521 Tokens per Sec: 11173, Lr: 0.000300\n2020-07-12 21:08:30,140 Epoch 15 Step: 8300 Batch Loss: 2.845519 Tokens per Sec: 11171, Lr: 0.000300\n2020-07-12 21:08:48,724 Epoch 15 Step: 8400 Batch Loss: 2.336199 Tokens per Sec: 11297, Lr: 0.000300\n2020-07-12 21:09:07,205 Epoch 15 Step: 8500 Batch Loss: 2.768113 Tokens per Sec: 11375, Lr: 0.000300\n2020-07-12 21:09:25,565 Epoch 15 Step: 8600 Batch Loss: 2.474854 Tokens per Sec: 10940, Lr: 0.000300\n2020-07-12 21:09:43,770 Epoch 15 Step: 8700 Batch Loss: 2.519118 Tokens per Sec: 11164, Lr: 0.000300\n2020-07-12 21:09:47,738 Epoch 15: total training loss 1457.46\n2020-07-12 21:09:47,739 EPOCH 16\n2020-07-12 21:10:02,372 Epoch 16 Step: 8800 Batch Loss: 1.913652 Tokens per Sec: 11166, Lr: 0.000300\n2020-07-12 21:10:20,707 Epoch 16 Step: 8900 Batch Loss: 2.514320 Tokens per Sec: 11140, Lr: 0.000300\n2020-07-12 21:10:39,333 Epoch 16 Step: 9000 Batch Loss: 1.852291 Tokens per Sec: 11105, Lr: 0.000300\n2020-07-12 21:11:06,572 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:11:06,573 Saving new checkpoint.\n2020-07-12 21:11:08,349 Example #0\n2020-07-12 21:11:08,349 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:11:08,350 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:11:08,350 \tHypothesis: Tinali kugaŵila ku mbali ya anthu amene anatilimbikitsa kuti tikapulumuke m’dziko lathu\n2020-07-12 21:11:08,350 Example #1\n2020-07-12 21:11:08,350 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:11:08,350 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:11:08,350 \tHypothesis: Iye akanakhala ndi maganizo ake pa zinthu zimene anacita , ngakhale kuti anali kucita zaka zambili .\n2020-07-12 21:11:08,351 Example #2\n2020-07-12 21:11:08,351 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:11:08,351 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:11:08,351 \tHypothesis: Kodi Mose anali kuganizila ciani ponena za mau a Paulo a m’mau a Paulo a pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:11:08,351 Example #3\n2020-07-12 21:11:08,352 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:11:08,352 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:11:08,352 \tHypothesis: Kukhulupilila Moyo Wosaoneka , July\n2020-07-12 21:11:08,352 Validation result (greedy) at epoch 16, step 9000: bleu: 10.86, loss: 60281.3047, ppl: 11.3746, duration: 29.0181s\n2020-07-12 21:11:26,775 Epoch 16 Step: 9100 Batch Loss: 2.761579 Tokens per Sec: 11098, Lr: 0.000300\n2020-07-12 21:11:45,171 Epoch 16 Step: 9200 Batch Loss: 2.247161 Tokens per Sec: 11195, Lr: 0.000300\n2020-07-12 21:12:03,743 Epoch 16 Step: 9300 Batch Loss: 2.698262 Tokens per Sec: 11143, Lr: 0.000300\n2020-07-12 21:12:05,049 Epoch 16: total training loss 1439.43\n2020-07-12 21:12:05,049 EPOCH 17\n2020-07-12 21:12:22,048 Epoch 17 Step: 9400 Batch Loss: 2.614497 Tokens per Sec: 10893, Lr: 0.000300\n2020-07-12 21:12:40,624 Epoch 17 Step: 9500 Batch Loss: 2.584436 Tokens per Sec: 11356, Lr: 0.000300\n2020-07-12 21:12:59,127 Epoch 17 Step: 9600 Batch Loss: 2.364741 Tokens per Sec: 11265, Lr: 0.000300\n2020-07-12 21:13:17,584 Epoch 17 Step: 9700 Batch Loss: 2.226366 Tokens per Sec: 11219, Lr: 0.000300\n2020-07-12 21:13:36,308 Epoch 17 Step: 9800 Batch Loss: 2.059721 Tokens per Sec: 11340, Lr: 0.000300\n2020-07-12 21:13:52,792 Epoch 17: total training loss 1409.13\n2020-07-12 21:13:52,793 EPOCH 18\n2020-07-12 21:13:54,731 Epoch 18 Step: 9900 Batch Loss: 2.092695 Tokens per Sec: 11694, Lr: 0.000300\n2020-07-12 21:14:13,348 Epoch 18 Step: 10000 Batch Loss: 2.355742 Tokens per Sec: 11207, Lr: 0.000300\n2020-07-12 21:14:46,358 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:14:46,358 Saving new checkpoint.\n2020-07-12 21:14:47,559 Example #0\n2020-07-12 21:14:47,560 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:14:47,560 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:14:47,560 \tHypothesis: Tinali kufunitsitsa kutengako mbali m’madela ambili m’dziko lathu\n2020-07-12 21:14:47,560 Example #1\n2020-07-12 21:14:47,560 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:14:47,561 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:14:47,561 \tHypothesis: Iye akanaika maganizo ake pa zinthu zina zimene anali kucita , ngakhale kuti anali kucita zinthu zambili .\n2020-07-12 21:14:47,561 Example #2\n2020-07-12 21:14:47,561 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:14:47,561 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:14:47,561 \tHypothesis: Kodi zocitika za Mose zinathandiza bwanji Timoteyo kukhala na mau a Paulo pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:14:47,562 Example #3\n2020-07-12 21:14:47,562 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:14:47,562 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:14:47,562 \tHypothesis: Kugwila Nchito Yopambana Kupilila Kupilila , July\n2020-07-12 21:14:47,562 Validation result (greedy) at epoch 18, step 10000: bleu: 11.17, loss: 58961.6133, ppl: 10.7850, duration: 34.2140s\n2020-07-12 21:15:06,506 Epoch 18 Step: 10100 Batch Loss: 2.428898 Tokens per Sec: 11052, Lr: 0.000300\n2020-07-12 21:15:25,191 Epoch 18 Step: 10200 Batch Loss: 2.221598 Tokens per Sec: 11326, Lr: 0.000300\n2020-07-12 21:15:43,883 Epoch 18 Step: 10300 Batch Loss: 2.580553 Tokens per Sec: 11107, Lr: 0.000300\n2020-07-12 21:16:02,403 Epoch 18 Step: 10400 Batch Loss: 2.540907 Tokens per Sec: 11155, Lr: 0.000300\n2020-07-12 21:16:15,084 Epoch 18: total training loss 1378.97\n2020-07-12 21:16:15,084 EPOCH 19\n2020-07-12 21:16:20,903 Epoch 19 Step: 10500 Batch Loss: 1.792645 Tokens per Sec: 10919, Lr: 0.000300\n2020-07-12 21:16:39,369 Epoch 19 Step: 10600 Batch Loss: 2.224898 Tokens per Sec: 11440, Lr: 0.000300\n2020-07-12 21:16:57,628 Epoch 19 Step: 10700 Batch Loss: 1.972942 Tokens per Sec: 11074, Lr: 0.000300\n2020-07-12 21:17:16,274 Epoch 19 Step: 10800 Batch Loss: 2.240098 Tokens per Sec: 11328, Lr: 0.000300\n2020-07-12 21:17:34,932 Epoch 19 Step: 10900 Batch Loss: 1.967205 Tokens per Sec: 11344, Lr: 0.000300\n2020-07-12 21:17:53,616 Epoch 19 Step: 11000 Batch Loss: 2.066128 Tokens per Sec: 11285, Lr: 0.000300\n2020-07-12 21:18:15,331 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:18:15,331 Saving new checkpoint.\n2020-07-12 21:18:16,431 Example #0\n2020-07-12 21:18:16,432 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:18:16,432 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:18:16,432 \tHypothesis: Pali pano , tinayamba kusamalila dziko lonse\n2020-07-12 21:18:16,432 Example #1\n2020-07-12 21:18:16,433 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:18:16,433 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:18:16,433 \tHypothesis: Iye angafune kuti akambilane za zinthu zimene zinacitika m’zaka zambili , ngakhale kuti anali ndi zaka zambili .\n2020-07-12 21:18:16,433 Example #2\n2020-07-12 21:18:16,434 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:18:16,434 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:18:16,434 \tHypothesis: Kodi Mose anali kuganizila ciani pa lemba la 2 Timoteyo 2 : 19 ?\n2020-07-12 21:18:16,434 Example #3\n2020-07-12 21:18:16,435 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:18:16,435 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:18:16,435 \tHypothesis: Kucokela kwa Pabanja , July\n2020-07-12 21:18:16,435 Validation result (greedy) at epoch 19, step 11000: bleu: 11.81, loss: 57576.3789, ppl: 10.1989, duration: 22.8182s\n2020-07-12 21:18:25,562 Epoch 19: total training loss 1364.70\n2020-07-12 21:18:25,562 EPOCH 20\n2020-07-12 21:18:34,940 Epoch 20 Step: 11100 Batch Loss: 2.185124 Tokens per Sec: 11070, Lr: 0.000300\n2020-07-12 21:18:53,094 Epoch 20 Step: 11200 Batch Loss: 2.498385 Tokens per Sec: 11016, Lr: 0.000300\n2020-07-12 21:19:11,695 Epoch 20 Step: 11300 Batch Loss: 2.564159 Tokens per Sec: 11289, Lr: 0.000300\n2020-07-12 21:19:30,054 Epoch 20 Step: 11400 Batch Loss: 2.303161 Tokens per Sec: 11185, Lr: 0.000300\n2020-07-12 21:19:48,614 Epoch 20 Step: 11500 Batch Loss: 2.259850 Tokens per Sec: 11408, Lr: 0.000300\n2020-07-12 21:20:06,981 Epoch 20 Step: 11600 Batch Loss: 2.186822 Tokens per Sec: 11156, Lr: 0.000300\n2020-07-12 21:20:13,477 Epoch 20: total training loss 1357.13\n2020-07-12 21:20:13,478 EPOCH 21\n2020-07-12 21:20:25,606 Epoch 21 Step: 11700 Batch Loss: 2.087318 Tokens per Sec: 11080, Lr: 0.000300\n2020-07-12 21:20:44,269 Epoch 21 Step: 11800 Batch Loss: 1.906124 Tokens per Sec: 11319, Lr: 0.000300\n2020-07-12 21:21:02,649 Epoch 21 Step: 11900 Batch Loss: 2.099954 Tokens per Sec: 11268, Lr: 0.000300\n2020-07-12 21:21:21,091 Epoch 21 Step: 12000 Batch Loss: 2.294059 Tokens per Sec: 11470, Lr: 0.000300\n2020-07-12 21:21:52,392 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:21:52,393 Saving new checkpoint.\n2020-07-12 21:21:53,628 Example #0\n2020-07-12 21:21:53,629 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:21:53,629 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:21:53,629 \tHypothesis: Tinali kugona katundu wathu pa malo athu amene tinali kukumana nao m’dziko lonse\n2020-07-12 21:21:53,629 Example #1\n2020-07-12 21:21:53,630 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:21:53,630 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:21:53,630 \tHypothesis: Iye angafunike kucita zinthu zimene anacita kwa zaka zambili .\n2020-07-12 21:21:53,630 Example #2\n2020-07-12 21:21:53,630 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:21:53,630 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:21:53,631 \tHypothesis: Kodi zocitika za Mose zinasonkhezela mau a Paulo pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:21:53,631 Example #3\n2020-07-12 21:21:53,631 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:21:53,631 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:21:53,631 \tHypothesis: Tili na Ciyembekezo Copambana , July\n2020-07-12 21:21:53,632 Validation result (greedy) at epoch 21, step 12000: bleu: 12.65, loss: 56646.6211, ppl: 9.8236, duration: 32.5401s\n2020-07-12 21:22:12,422 Epoch 21 Step: 12100 Batch Loss: 2.145493 Tokens per Sec: 11082, Lr: 0.000300\n2020-07-12 21:22:30,791 Epoch 21 Step: 12200 Batch Loss: 2.475222 Tokens per Sec: 11296, Lr: 0.000300\n2020-07-12 21:22:33,189 Epoch 21: total training loss 1322.48\n2020-07-12 21:22:33,190 EPOCH 22\n2020-07-12 21:22:49,387 Epoch 22 Step: 12300 Batch Loss: 2.111602 Tokens per Sec: 11117, Lr: 0.000300\n2020-07-12 21:23:07,757 Epoch 22 Step: 12400 Batch Loss: 2.236841 Tokens per Sec: 11236, Lr: 0.000300\n2020-07-12 21:23:26,279 Epoch 22 Step: 12500 Batch Loss: 2.356989 Tokens per Sec: 11113, Lr: 0.000300\n2020-07-12 21:23:44,949 Epoch 22 Step: 12600 Batch Loss: 2.198585 Tokens per Sec: 11457, Lr: 0.000300\n2020-07-12 21:24:03,276 Epoch 22 Step: 12700 Batch Loss: 2.277119 Tokens per Sec: 11271, Lr: 0.000300\n2020-07-12 21:24:20,559 Epoch 22: total training loss 1309.67\n2020-07-12 21:24:20,559 EPOCH 23\n2020-07-12 21:24:21,723 Epoch 23 Step: 12800 Batch Loss: 2.211040 Tokens per Sec: 11522, Lr: 0.000300\n2020-07-12 21:24:39,921 Epoch 23 Step: 12900 Batch Loss: 2.081254 Tokens per Sec: 10983, Lr: 0.000300\n2020-07-12 21:24:58,378 Epoch 23 Step: 13000 Batch Loss: 2.620756 Tokens per Sec: 11204, Lr: 0.000300\n2020-07-12 21:25:24,313 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:25:24,314 Saving new checkpoint.\n2020-07-12 21:25:25,485 Example #0\n2020-07-12 21:25:25,486 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:25:25,486 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:25:25,486 \tHypothesis: Anthu athu anali kutilimbikitsa kwambili padziko lonse lapansi\n2020-07-12 21:25:25,487 Example #1\n2020-07-12 21:25:25,487 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:25:25,487 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:25:25,487 \tHypothesis: Iye angaganize kuti anali ndi vuto laciwelewele , ngakhale kuti zaka zambili zapitazo .\n2020-07-12 21:25:25,487 Example #2\n2020-07-12 21:25:25,488 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:25:25,488 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:25:25,488 \tHypothesis: Kodi zocitika za Mose zinathandiza bwanji masiku ano ?\n2020-07-12 21:25:25,488 Example #3\n2020-07-12 21:25:25,488 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:25:25,489 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:25:25,489 \tHypothesis: Ndani Kuti Kuti Kuti Kuti Mumulile Uthenga Wabwino , July\n2020-07-12 21:25:25,489 Validation result (greedy) at epoch 23, step 13000: bleu: 13.00, loss: 55712.6133, ppl: 9.4604, duration: 27.1106s\n2020-07-12 21:25:44,251 Epoch 23 Step: 13100 Batch Loss: 2.018826 Tokens per Sec: 11284, Lr: 0.000300\n2020-07-12 21:26:02,419 Epoch 23 Step: 13200 Batch Loss: 2.134311 Tokens per Sec: 11186, Lr: 0.000300\n2020-07-12 21:26:21,019 Epoch 23 Step: 13300 Batch Loss: 2.013390 Tokens per Sec: 11224, Lr: 0.000300\n2020-07-12 21:26:35,650 Epoch 23: total training loss 1304.75\n2020-07-12 21:26:35,651 EPOCH 24\n2020-07-12 21:26:39,568 Epoch 24 Step: 13400 Batch Loss: 1.963058 Tokens per Sec: 10727, Lr: 0.000300\n2020-07-12 21:26:58,041 Epoch 24 Step: 13500 Batch Loss: 2.269552 Tokens per Sec: 11266, Lr: 0.000300\n2020-07-12 21:27:16,574 Epoch 24 Step: 13600 Batch Loss: 1.387344 Tokens per Sec: 11366, Lr: 0.000300\n2020-07-12 21:27:35,089 Epoch 24 Step: 13700 Batch Loss: 1.838774 Tokens per Sec: 11199, Lr: 0.000300\n2020-07-12 21:27:53,492 Epoch 24 Step: 13800 Batch Loss: 2.099367 Tokens per Sec: 11160, Lr: 0.000300\n2020-07-12 21:28:11,929 Epoch 24 Step: 13900 Batch Loss: 2.407192 Tokens per Sec: 11325, Lr: 0.000300\n2020-07-12 21:28:23,198 Epoch 24: total training loss 1284.61\n2020-07-12 21:28:23,198 EPOCH 25\n2020-07-12 21:28:30,374 Epoch 25 Step: 14000 Batch Loss: 2.420966 Tokens per Sec: 11092, Lr: 0.000300\n2020-07-12 21:28:59,782 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:28:59,782 Saving new checkpoint.\n2020-07-12 21:29:01,000 Example #0\n2020-07-12 21:29:01,001 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:29:01,001 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:29:01,001 \tHypothesis: Tinali kulalikila m’madela athu amene tinali kutilengamo mbali m’madela akutali\n2020-07-12 21:29:01,002 Example #1\n2020-07-12 21:29:01,002 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:29:01,002 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:29:01,003 \tHypothesis: Iye angafune kuti aziimba mlandu cifukwa ca zimene anacita kwa zaka zambili .\n2020-07-12 21:29:01,003 Example #2\n2020-07-12 21:29:01,003 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:29:01,003 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:29:01,003 \tHypothesis: Kodi zimene Mose anacita pa zocitika za m’nthawi ya Mose zimasonkhezela mau a Paulo pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:29:01,004 Example #3\n2020-07-12 21:29:01,004 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:29:01,004 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:29:01,004 \tHypothesis: Tizisangalala Kudziŵa Coonadi Cimene Cimapatsa , July\n2020-07-12 21:29:01,005 Validation result (greedy) at epoch 25, step 14000: bleu: 13.34, loss: 54857.1328, ppl: 9.1395, duration: 30.6306s\n2020-07-12 21:29:19,687 Epoch 25 Step: 14100 Batch Loss: 2.246745 Tokens per Sec: 11063, Lr: 0.000300\n2020-07-12 21:29:38,428 Epoch 25 Step: 14200 Batch Loss: 2.616163 Tokens per Sec: 11016, Lr: 0.000300\n2020-07-12 21:29:57,088 Epoch 25 Step: 14300 Batch Loss: 2.052552 Tokens per Sec: 11069, Lr: 0.000300\n2020-07-12 21:30:15,744 Epoch 25 Step: 14400 Batch Loss: 2.588664 Tokens per Sec: 11401, Lr: 0.000300\n2020-07-12 21:30:34,159 Epoch 25 Step: 14500 Batch Loss: 2.537948 Tokens per Sec: 11180, Lr: 0.000300\n2020-07-12 21:30:41,916 Epoch 25: total training loss 1265.02\n2020-07-12 21:30:41,916 EPOCH 26\n2020-07-12 21:30:52,745 Epoch 26 Step: 14600 Batch Loss: 2.163159 Tokens per Sec: 11315, Lr: 0.000300\n2020-07-12 21:31:11,302 Epoch 26 Step: 14700 Batch Loss: 2.121996 Tokens per Sec: 11139, Lr: 0.000300\n2020-07-12 21:31:29,805 Epoch 26 Step: 14800 Batch Loss: 2.149545 Tokens per Sec: 11109, Lr: 0.000300\n2020-07-12 21:31:48,487 Epoch 26 Step: 14900 Batch Loss: 2.132342 Tokens per Sec: 11199, Lr: 0.000300\n2020-07-12 21:32:07,100 Epoch 26 Step: 15000 Batch Loss: 2.116457 Tokens per Sec: 11188, Lr: 0.000300\n2020-07-12 21:32:29,659 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:32:29,659 Saving new checkpoint.\n2020-07-12 21:32:30,833 Example #0\n2020-07-12 21:32:30,834 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:32:30,834 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:32:30,834 \tHypothesis: Tinali kugula zakudya za anthu amene tinali kutitenga dziko lapansi\n2020-07-12 21:32:30,835 Example #1\n2020-07-12 21:32:30,836 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:32:30,836 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:32:30,836 \tHypothesis: Iye anali ndi maganizo olakwika amene anali nao kale , ngakhale zaka zambili .\n2020-07-12 21:32:30,837 Example #2\n2020-07-12 21:32:30,837 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:32:30,837 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:32:30,837 \tHypothesis: Kodi mau a Mose a m’nthawi ya Mose anali kuimila ciani ?\n2020-07-12 21:32:30,837 Example #3\n2020-07-12 21:32:30,838 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:32:30,838 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:32:30,838 \tHypothesis: Tili ndi Ciyembekezo Cabwino , July\n2020-07-12 21:32:30,838 Validation result (greedy) at epoch 26, step 15000: bleu: 13.85, loss: 53885.8867, ppl: 8.7884, duration: 23.7381s\n2020-07-12 21:32:49,432 Epoch 26 Step: 15100 Batch Loss: 2.359765 Tokens per Sec: 11129, Lr: 0.000300\n2020-07-12 21:32:53,707 Epoch 26: total training loss 1251.62\n2020-07-12 21:32:53,707 EPOCH 27\n2020-07-12 21:33:07,862 Epoch 27 Step: 15200 Batch Loss: 1.612871 Tokens per Sec: 11248, Lr: 0.000300\n2020-07-12 21:33:26,770 Epoch 27 Step: 15300 Batch Loss: 2.410092 Tokens per Sec: 11272, Lr: 0.000300\n2020-07-12 21:33:45,135 Epoch 27 Step: 15400 Batch Loss: 2.515933 Tokens per Sec: 11132, Lr: 0.000300\n2020-07-12 21:34:03,330 Epoch 27 Step: 15500 Batch Loss: 2.067173 Tokens per Sec: 11112, Lr: 0.000300\n2020-07-12 21:34:21,547 Epoch 27 Step: 15600 Batch Loss: 2.100986 Tokens per Sec: 11403, Lr: 0.000300\n2020-07-12 21:34:39,988 Epoch 27 Step: 15700 Batch Loss: 2.350737 Tokens per Sec: 11228, Lr: 0.000300\n2020-07-12 21:34:41,111 Epoch 27: total training loss 1242.77\n2020-07-12 21:34:41,112 EPOCH 28\n2020-07-12 21:34:58,730 Epoch 28 Step: 15800 Batch Loss: 1.252520 Tokens per Sec: 11290, Lr: 0.000300\n2020-07-12 21:35:17,218 Epoch 28 Step: 15900 Batch Loss: 2.261224 Tokens per Sec: 11266, Lr: 0.000300\n2020-07-12 21:35:35,839 Epoch 28 Step: 16000 Batch Loss: 2.414489 Tokens per Sec: 11158, Lr: 0.000300\n2020-07-12 21:36:04,598 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:36:04,599 Saving new checkpoint.\n2020-07-12 21:36:05,824 Example #0\n2020-07-12 21:36:05,825 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:36:05,825 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:36:05,825 \tHypothesis: Tinali kusiyana kwambili ndi anthu a m’dzikoli\n2020-07-12 21:36:05,825 Example #1\n2020-07-12 21:36:05,826 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:36:05,826 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:36:05,826 \tHypothesis: Iye angaganize kuti anali ndi vuto laciwelewele , ndipo anali ndi zaka zambili .\n2020-07-12 21:36:05,826 Example #2\n2020-07-12 21:36:05,827 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:36:05,827 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:36:05,827 \tHypothesis: Kodi zocitika za m’nthawi ya Mose zinasonyeza ciani pa mau a Paulo a pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:36:05,827 Example #3\n2020-07-12 21:36:05,828 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:36:05,828 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:36:05,828 \tHypothesis: Tili ndi Ciyembekezo Cimene Cimene Cimatipatsa , July\n2020-07-12 21:36:05,828 Validation result (greedy) at epoch 28, step 16000: bleu: 14.68, loss: 53204.4258, ppl: 8.5501, duration: 29.9881s\n2020-07-12 21:36:25,203 Epoch 28 Step: 16100 Batch Loss: 2.462816 Tokens per Sec: 10942, Lr: 0.000300\n2020-07-12 21:36:43,579 Epoch 28 Step: 16200 Batch Loss: 2.299822 Tokens per Sec: 11067, Lr: 0.000300\n2020-07-12 21:36:59,264 Epoch 28: total training loss 1222.03\n2020-07-12 21:36:59,264 EPOCH 29\n2020-07-12 21:37:02,248 Epoch 29 Step: 16300 Batch Loss: 1.654314 Tokens per Sec: 10759, Lr: 0.000300\n2020-07-12 21:37:20,690 Epoch 29 Step: 16400 Batch Loss: 2.241484 Tokens per Sec: 11366, Lr: 0.000300\n2020-07-12 21:37:39,530 Epoch 29 Step: 16500 Batch Loss: 2.279921 Tokens per Sec: 10965, Lr: 0.000300\n2020-07-12 21:37:58,074 Epoch 29 Step: 16600 Batch Loss: 2.222358 Tokens per Sec: 11352, Lr: 0.000300\n2020-07-12 21:38:16,390 Epoch 29 Step: 16700 Batch Loss: 2.419949 Tokens per Sec: 11164, Lr: 0.000300\n2020-07-12 21:38:34,734 Epoch 29 Step: 16800 Batch Loss: 2.223499 Tokens per Sec: 11140, Lr: 0.000300\n2020-07-12 21:38:47,135 Epoch 29: total training loss 1219.28\n2020-07-12 21:38:47,135 EPOCH 30\n2020-07-12 21:38:53,392 Epoch 30 Step: 16900 Batch Loss: 1.924732 Tokens per Sec: 11642, Lr: 0.000300\n2020-07-12 21:39:11,811 Epoch 30 Step: 17000 Batch Loss: 2.250193 Tokens per Sec: 11172, Lr: 0.000300\n2020-07-12 21:39:32,606 Example #0\n2020-07-12 21:39:32,607 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:39:32,607 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:39:32,607 \tHypothesis: Pokhala anthu amene tinali kusonkhana ndi anthu amene tinali kutitenga padziko lonse lapansi\n2020-07-12 21:39:32,608 Example #1\n2020-07-12 21:39:32,608 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:39:32,608 \tReference: Taona kale mmene Davide na Paulo anakambilapo za 
nkhawa zawo .\n2020-07-12 21:39:32,608 \tHypothesis: Iye angaone kuti anali ndi vuto loipa kwambili kwa zaka zambili .\n2020-07-12 21:39:32,608 Example #2\n2020-07-12 21:39:32,609 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:39:32,609 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:39:32,609 \tHypothesis: Ndi zocitika ziti zimene Mose anapeleka pamene mau a Paulo analembedwa pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:39:32,609 Example #3\n2020-07-12 21:39:32,610 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:39:32,610 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:39:32,610 \tHypothesis: Palibe Cikondi Cimene Cimene Cisomo Cili pa July\n2020-07-12 21:39:32,610 Validation result (greedy) at epoch 30, step 17000: bleu: 14.18, loss: 53287.4102, ppl: 8.5788, duration: 20.7991s\n2020-07-12 21:39:51,069 Epoch 30 Step: 17100 Batch Loss: 1.996659 Tokens per Sec: 11078, Lr: 0.000300\n2020-07-12 21:40:09,724 Epoch 30 Step: 17200 Batch Loss: 2.441917 Tokens per Sec: 11321, Lr: 0.000300\n2020-07-12 21:40:28,184 Epoch 30 Step: 17300 Batch Loss: 2.245591 Tokens per Sec: 11227, Lr: 0.000300\n2020-07-12 21:40:46,651 Epoch 30 Step: 17400 Batch Loss: 1.991428 Tokens per Sec: 11026, Lr: 0.000300\n2020-07-12 21:40:55,549 Epoch 30: total training loss 1207.48\n2020-07-12 21:40:55,550 EPOCH 31\n2020-07-12 21:41:05,291 Epoch 31 Step: 17500 Batch Loss: 1.878195 Tokens per Sec: 11394, Lr: 0.000300\n2020-07-12 21:41:23,650 Epoch 31 Step: 17600 Batch Loss: 2.384914 Tokens per Sec: 11117, Lr: 0.000300\n2020-07-12 21:41:42,167 Epoch 31 Step: 17700 Batch Loss: 2.224703 Tokens per Sec: 11077, Lr: 0.000300\n2020-07-12 21:42:00,597 Epoch 31 Step: 17800 Batch Loss: 2.243607 Tokens per Sec: 11203, Lr: 0.000300\n2020-07-12 21:42:19,408 Epoch 31 Step: 17900 Batch Loss: 2.156587 Tokens per Sec: 11277, Lr: 0.000300\n2020-07-12 21:42:37,706 Epoch 31 Step: 18000 Batch Loss: 1.582895 Tokens per Sec: 11132, Lr: 0.000300\n2020-07-12 21:43:06,375 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:43:06,375 Saving new checkpoint.\n2020-07-12 21:43:07,651 Example #0\n2020-07-12 21:43:07,651 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:43:07,652 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:43:07,652 \tHypothesis: Tinali kugaŵila anthu ambili ku malo osiyanasiyana amene tinali nao padziko lonse\n2020-07-12 21:43:07,652 Example #1\n2020-07-12 21:43:07,652 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:43:07,652 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:43:07,653 \tHypothesis: Iye anali kufunitsitsa kuvulaza zinthu zimene anali kucita , ngakhale zaka zambili .\n2020-07-12 21:43:07,653 Example #2\n2020-07-12 21:43:07,653 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:43:07,653 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:43:07,654 \tHypothesis: Kodi zocitika za m’nthawi ya Mose zinasonyeza ciani pa mau a Paulo a pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:43:07,654 Example #3\n2020-07-12 21:43:07,654 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:43:07,654 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:43:07,654 \tHypothesis: Tengelani Citsanzo Cake Cisomo Conse , July\n2020-07-12 21:43:07,654 Validation result (greedy) at epoch 31, step 18000: bleu: 14.77, loss: 52131.8594, ppl: 8.1881, duration: 29.9481s\n2020-07-12 21:43:13,475 Epoch 31: total training loss 1202.09\n2020-07-12 21:43:13,476 EPOCH 32\n2020-07-12 21:43:26,535 Epoch 32 Step: 18100 Batch Loss: 2.240680 Tokens per Sec: 10869, Lr: 0.000300\n2020-07-12 21:43:44,600 Epoch 32 Step: 18200 Batch Loss: 1.937984 Tokens per Sec: 11189, Lr: 0.000300\n2020-07-12 21:44:03,105 Epoch 32 Step: 18300 Batch Loss: 1.852296 Tokens per Sec: 11308, Lr: 0.000300\n2020-07-12 21:44:21,722 Epoch 32 Step: 18400 Batch Loss: 2.168491 Tokens per Sec: 11348, Lr: 0.000300\n2020-07-12 21:44:40,145 Epoch 32 Step: 18500 Batch Loss: 2.244599 Tokens per Sec: 11134, Lr: 0.000300\n2020-07-12 21:44:58,643 Epoch 32 Step: 18600 Batch Loss: 2.211453 Tokens per Sec: 11243, Lr: 0.000300\n2020-07-12 21:45:01,378 Epoch 32: total training loss 1191.61\n2020-07-12 21:45:01,379 EPOCH 33\n2020-07-12 21:45:17,130 Epoch 33 Step: 18700 Batch Loss: 2.181301 Tokens per Sec: 11121, Lr: 0.000300\n2020-07-12 21:45:35,671 Epoch 33 Step: 18800 Batch Loss: 2.112555 Tokens per Sec: 11139, Lr: 0.000300\n2020-07-12 21:45:54,058 Epoch 33 Step: 18900 Batch Loss: 1.560102 Tokens per Sec: 11092, Lr: 0.000300\n2020-07-12 21:46:12,215 Epoch 33 Step: 19000 Batch Loss: 2.184921 Tokens per Sec: 11260, Lr: 0.000300\n2020-07-12 21:46:31,492 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:46:31,492 Saving new checkpoint.\n2020-07-12 21:46:32,684 Example #0\n2020-07-12 21:46:32,685 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:46:32,685 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:46:32,685 \tHypothesis: Tinali kugaŵila mabuku athu pa sitima ya anthu amene tinali kutilengako mbali m’dzikolo\n2020-07-12 21:46:32,685 Example #1\n2020-07-12 21:46:32,686 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:46:32,686 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:46:32,686 \tHypothesis: Iye anali kufunitsitsa kuteteza zinthu zina zimene anali kucita m’nthawi yakale , ngakhale kuti zaka zambili zapitazo .\n2020-07-12 21:46:32,686 Example #2\n2020-07-12 21:46:32,686 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:46:32,687 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:46:32,687 \tHypothesis: Kodi zocitika za m’nthawi ya Mose zimasonkhezela mau a Paulo pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:46:32,687 Example #3\n2020-07-12 21:46:32,687 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:46:32,687 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:46:32,688 \tHypothesis: Tengelani Cikondi Cosatha , July\n2020-07-12 21:46:32,688 Validation result (greedy) at epoch 33, step 19000: bleu: 15.43, loss: 52101.8984, ppl: 8.1782, duration: 20.4726s\n2020-07-12 21:46:51,651 Epoch 33 Step: 19100 Batch Loss: 2.147182 Tokens per Sec: 11138, Lr: 0.000300\n2020-07-12 21:47:09,754 Epoch 33: total training loss 1180.51\n2020-07-12 21:47:09,754 EPOCH 34\n2020-07-12 21:47:10,167 Epoch 34 Step: 19200 Batch Loss: 1.254606 Tokens per Sec: 7697, Lr: 0.000300\n2020-07-12 21:47:28,747 Epoch 34 Step: 19300 Batch Loss: 2.237213 Tokens per Sec: 11272, Lr: 0.000300\n2020-07-12 21:47:47,380 Epoch 34 Step: 19400 Batch Loss: 1.689342 Tokens per Sec: 11192, Lr: 0.000300\n2020-07-12 21:48:05,737 Epoch 34 Step: 19500 Batch Loss: 1.540750 Tokens per Sec: 11147, Lr: 0.000300\n2020-07-12 21:48:24,504 Epoch 34 Step: 19600 Batch Loss: 2.257375 Tokens per Sec: 11127, Lr: 0.000300\n2020-07-12 21:48:42,894 Epoch 34 Step: 19700 Batch Loss: 1.333181 Tokens per Sec: 11175, Lr: 0.000300\n2020-07-12 21:48:57,675 Epoch 34: total training loss 1170.53\n2020-07-12 21:48:57,676 EPOCH 35\n2020-07-12 21:49:01,372 Epoch 35 Step: 19800 Batch Loss: 1.743930 Tokens per Sec: 10966, Lr: 0.000300\n2020-07-12 21:49:19,865 Epoch 35 Step: 19900 Batch Loss: 1.776612 Tokens per Sec: 11287, Lr: 0.000300\n2020-07-12 21:49:38,451 Epoch 35 Step: 20000 Batch Loss: 1.984108 Tokens per Sec: 11249, Lr: 0.000300\n2020-07-12 21:49:56,902 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:49:56,903 Saving new checkpoint.\n2020-07-12 21:49:58,122 Example #0\n2020-07-12 21:49:58,123 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:49:58,123 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:49:58,123 \tHypothesis: Tinali kugaŵila ku malo osiyanasiyana kuti tikaloŵe m’dziko latsopano\n2020-07-12 21:49:58,123 Example #1\n2020-07-12 21:49:58,124 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:49:58,124 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:49:58,124 \tHypothesis: Iye angaganize kuti anali ndi nkhawa kwambili zokhudza zinthu zimene anacita , ngakhale zaka zambili .\n2020-07-12 21:49:58,124 Example #2\n2020-07-12 21:49:58,125 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:49:58,125 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:49:58,125 \tHypothesis: Kodi mau a Paulo a pa 2 Timoteyo 2 : 19 anali ndi zinthu ziti zimene zinacitika m’nthawi ya Mose ?\n2020-07-12 21:49:58,125 Example #3\n2020-07-12 21:49:58,125 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:49:58,125 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:49:58,126 \tHypothesis: Tili ndi Ciyembekezo Cabwino Cimene Cimatsogolela , July\n2020-07-12 21:49:58,126 Validation result (greedy) at epoch 35, step 20000: bleu: 15.35, loss: 51433.5742, ppl: 7.9607, duration: 19.6741s\n2020-07-12 21:50:17,304 Epoch 35 Step: 20100 Batch Loss: 2.221691 Tokens per Sec: 10735, Lr: 0.000300\n2020-07-12 21:50:35,839 Epoch 35 Step: 20200 Batch Loss: 2.187058 Tokens per Sec: 11372, Lr: 0.000300\n2020-07-12 21:50:54,215 Epoch 35 Step: 20300 Batch Loss: 2.000558 Tokens per Sec: 11096, Lr: 0.000300\n2020-07-12 21:51:05,351 Epoch 35: total training loss 1156.85\n2020-07-12 21:51:05,352 EPOCH 36\n2020-07-12 21:51:12,908 Epoch 36 Step: 20400 Batch Loss: 2.145525 Tokens per Sec: 11354, Lr: 0.000300\n2020-07-12 21:51:31,419 Epoch 36 Step: 20500 Batch Loss: 2.117764 Tokens per Sec: 11099, Lr: 0.000300\n2020-07-12 21:51:49,883 Epoch 36 Step: 20600 Batch Loss: 2.139196 Tokens per Sec: 11148, Lr: 0.000300\n2020-07-12 21:52:08,267 Epoch 36 Step: 20700 Batch Loss: 1.803431 Tokens per Sec: 11275, Lr: 0.000300\n2020-07-12 21:52:26,560 Epoch 36 Step: 20800 Batch Loss: 2.002237 Tokens per Sec: 11100, Lr: 0.000300\n2020-07-12 21:52:44,998 Epoch 36 Step: 20900 Batch Loss: 2.066098 Tokens per Sec: 11177, Lr: 0.000300\n2020-07-12 21:52:53,298 Epoch 36: total training loss 1158.20\n2020-07-12 21:52:53,299 EPOCH 37\n2020-07-12 21:53:03,462 Epoch 37 Step: 21000 Batch Loss: 1.876578 Tokens per Sec: 11149, Lr: 0.000300\n2020-07-12 21:53:22,061 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:53:22,062 Saving new checkpoint.\n2020-07-12 21:53:23,284 Example #0\n2020-07-12 21:53:23,285 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:53:23,285 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:53:23,285 \tHypothesis: Tinali kugaŵila nchito yoyang’anila dela imene tinali kuyembekezela kudziko lonse\n2020-07-12 21:53:23,285 Example #1\n2020-07-12 21:53:23,286 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:53:23,286 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:53:23,286 \tHypothesis: Iye anali ndi nkhawa kwambili cakuti anali atayamba kale , ngakhale zaka zambili zapitazo .\n2020-07-12 21:53:23,286 Example #2\n2020-07-12 21:53:23,287 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:53:23,287 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:53:23,287 \tHypothesis: Kodi mau a Mose anali pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:53:23,287 Example #3\n2020-07-12 21:53:23,288 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:53:23,288 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:53:23,288 \tHypothesis: Tili na Ciyembekezo Cabwino Cimene Cisomo Cimatsogolela , July\n2020-07-12 21:53:23,288 Validation result (greedy) at epoch 37, step 21000: bleu: 15.83, loss: 51151.2070, ppl: 7.8706, duration: 19.8251s\n2020-07-12 21:53:42,088 Epoch 37 Step: 21100 Batch Loss: 2.115872 Tokens per Sec: 11126, Lr: 0.000300\n2020-07-12 21:54:00,599 Epoch 37 Step: 21200 Batch Loss: 2.029389 Tokens per Sec: 11116, Lr: 0.000300\n2020-07-12 21:54:19,420 Epoch 37 Step: 21300 Batch Loss: 2.030703 Tokens per Sec: 11442, Lr: 0.000300\n2020-07-12 21:54:37,743 Epoch 37 Step: 21400 Batch Loss: 2.162308 Tokens per Sec: 11304, Lr: 0.000300\n2020-07-12 21:54:56,431 Epoch 37 Step: 21500 Batch Loss: 2.328736 Tokens per Sec: 11388, Lr: 0.000300\n2020-07-12 21:55:00,472 Epoch 37: total training loss 1133.69\n2020-07-12 21:55:00,472 EPOCH 38\n2020-07-12 21:55:14,795 Epoch 38 Step: 21600 Batch Loss: 1.523328 Tokens per Sec: 10858, Lr: 0.000300\n2020-07-12 21:55:33,693 Epoch 38 Step: 21700 Batch Loss: 2.188564 Tokens per Sec: 11432, Lr: 0.000300\n2020-07-12 21:55:52,043 Epoch 38 Step: 21800 Batch Loss: 2.258023 Tokens per Sec: 11232, Lr: 0.000300\n2020-07-12 21:56:10,512 Epoch 38 Step: 21900 Batch Loss: 2.049119 Tokens per Sec: 11190, Lr: 0.000300\n2020-07-12 21:56:29,208 Epoch 38 Step: 22000 Batch Loss: 2.075769 Tokens per Sec: 11375, Lr: 0.000300\n2020-07-12 21:56:55,370 Hooray! 
New best validation result [ppl]!\n2020-07-12 21:56:55,370 Saving new checkpoint.\n2020-07-12 21:56:56,543 Example #0\n2020-07-12 21:56:56,544 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 21:56:56,544 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 21:56:56,544 \tHypothesis: Tinali kusonkhana ndi anthu ambili amene tinali kukhala nao m’dziko lathu\n2020-07-12 21:56:56,544 Example #1\n2020-07-12 21:56:56,545 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 21:56:56,545 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 21:56:56,545 \tHypothesis: Iye anali kufunitsitsa kuteteza zinthu zina zimene anacita , ngakhale zaka zambili zapitazo .\n2020-07-12 21:56:56,545 Example #2\n2020-07-12 21:56:56,545 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 21:56:56,545 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:56:56,546 \tHypothesis: Ndi zocitika ziti zimene zinacitikila Mose pamene mau a Paulo a pa 2 Timoteyo 2 : 19 ?\n2020-07-12 21:56:56,546 Example #3\n2020-07-12 21:56:56,546 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 21:56:56,546 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 21:56:56,546 \tHypothesis: Tengelani Ciyembekezo Cabwino Cimene Cili pa Cisomo , July\n2020-07-12 21:56:56,547 Validation result (greedy) at epoch 38, step 22000: bleu: 15.78, loss: 50394.4102, ppl: 7.6340, duration: 27.3318s\n2020-07-12 21:57:15,159 Epoch 38 Step: 22100 Batch Loss: 1.969265 Tokens per Sec: 10923, Lr: 0.000300\n2020-07-12 21:57:15,721 Epoch 38: total training loss 1131.84\n2020-07-12 21:57:15,721 EPOCH 39\n2020-07-12 21:57:33,739 Epoch 39 Step: 22200 Batch Loss: 1.735266 Tokens per Sec: 11283, Lr: 0.000300\n2020-07-12 21:57:52,243 Epoch 39 Step: 22300 Batch Loss: 2.093395 Tokens per Sec: 11086, Lr: 0.000300\n2020-07-12 21:58:10,911 Epoch 39 Step: 22400 Batch Loss: 1.781759 Tokens per Sec: 11207, Lr: 0.000300\n2020-07-12 21:58:29,487 Epoch 39 Step: 22500 Batch Loss: 1.843250 Tokens per Sec: 11430, Lr: 0.000300\n2020-07-12 21:58:48,064 Epoch 39 Step: 22600 Batch Loss: 2.179653 Tokens per Sec: 11112, Lr: 0.000300\n2020-07-12 21:59:03,220 Epoch 39: total training loss 1123.27\n2020-07-12 21:59:03,221 EPOCH 40\n2020-07-12 21:59:06,699 Epoch 40 Step: 22700 Batch Loss: 2.049891 Tokens per Sec: 11025, Lr: 0.000300\n2020-07-12 21:59:24,988 Epoch 40 Step: 22800 Batch Loss: 1.811621 Tokens per Sec: 11263, Lr: 0.000300\n2020-07-12 21:59:43,458 Epoch 40 Step: 22900 Batch Loss: 2.082726 Tokens per Sec: 11251, Lr: 0.000300\n2020-07-12 22:00:01,795 Epoch 40 Step: 23000 Batch Loss: 1.971813 Tokens per Sec: 11260, Lr: 0.000300\n2020-07-12 22:00:23,644 Hooray! 
New best validation result [ppl]!\n2020-07-12 22:00:23,644 Saving new checkpoint.\n2020-07-12 22:00:24,843 Example #0\n2020-07-12 22:00:24,844 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 22:00:24,844 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 22:00:24,844 \tHypothesis: Tinali kusukulu athu ambili amene tinali kutitenga ku dziko lonse\n2020-07-12 22:00:24,844 Example #1\n2020-07-12 22:00:24,845 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 22:00:24,845 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 22:00:24,845 \tHypothesis: Mwina iye anali ndi maganizo olakwika ponena za zinthu zimene anacita , ngakhale zaka zambili .\n2020-07-12 22:00:24,846 Example #2\n2020-07-12 22:00:24,846 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 22:00:24,846 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:00:24,847 \tHypothesis: Ndi zocitika ziti zimene Mose anapeleka pamene Paulo analembedwa pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:00:24,847 Example #3\n2020-07-12 22:00:24,848 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 22:00:24,848 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 22:00:24,848 \tHypothesis: Tiziseŵenzetsa Uthenga Wabwino wa Uthenga Wabwino , July\n2020-07-12 22:00:24,848 Validation result (greedy) at epoch 40, step 23000: bleu: 16.34, loss: 50194.8047, ppl: 7.5727, duration: 23.0523s\n2020-07-12 22:00:43,690 Epoch 40 Step: 23100 Batch Loss: 2.083548 Tokens per Sec: 11131, Lr: 0.000300\n2020-07-12 22:01:01,894 Epoch 40 Step: 23200 Batch Loss: 2.167542 Tokens per Sec: 11281, Lr: 0.000300\n2020-07-12 22:01:13,658 Epoch 40: total training loss 1120.55\n2020-07-12 22:01:13,658 EPOCH 41\n2020-07-12 22:01:20,741 Epoch 41 Step: 23300 Batch Loss: 1.966002 Tokens per Sec: 11656, Lr: 0.000300\n2020-07-12 22:01:39,354 Epoch 41 Step: 23400 Batch Loss: 2.233330 Tokens per Sec: 11330, Lr: 0.000300\n2020-07-12 22:01:57,616 Epoch 41 Step: 23500 Batch Loss: 2.065772 Tokens per Sec: 11154, Lr: 0.000300\n2020-07-12 22:02:16,084 Epoch 41 Step: 23600 Batch Loss: 1.971395 Tokens per Sec: 11143, Lr: 0.000300\n2020-07-12 22:02:34,781 Epoch 41 Step: 23700 Batch Loss: 2.025626 Tokens per Sec: 11371, Lr: 0.000300\n2020-07-12 22:02:53,147 Epoch 41 Step: 23800 Batch Loss: 2.111092 Tokens per Sec: 10892, Lr: 0.000300\n2020-07-12 22:03:01,268 Epoch 41: total training loss 1111.13\n2020-07-12 22:03:01,268 EPOCH 42\n2020-07-12 22:03:11,680 Epoch 42 Step: 23900 Batch Loss: 1.773509 Tokens per Sec: 11092, Lr: 0.000300\n2020-07-12 22:03:30,104 Epoch 42 Step: 24000 Batch Loss: 1.953830 Tokens per Sec: 11281, Lr: 0.000300\n2020-07-12 22:03:51,841 Hooray! 
New best validation result [ppl]!\n2020-07-12 22:03:51,841 Saving new checkpoint.\n2020-07-12 22:03:53,453 Example #0\n2020-07-12 22:03:53,454 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 22:03:53,454 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 22:03:53,454 \tHypothesis: Padziko lonse lapansi , anthu amene tinali kusamalila dziko lapansi\n2020-07-12 22:03:53,454 Example #1\n2020-07-12 22:03:53,455 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 22:03:53,455 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 22:03:53,455 \tHypothesis: Iye angafunike kukangana ndi munthu amene anacitapo zinthu zakale , ngakhale kuti zaka zambili zapitazo .\n2020-07-12 22:03:53,455 Example #2\n2020-07-12 22:03:53,455 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 22:03:53,456 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:03:53,456 \tHypothesis: Kodi mau a Paulo a pa 2 Timoteyo 2 : 19 , ati : 19 .\n2020-07-12 22:03:53,456 Example #3\n2020-07-12 22:03:53,456 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 22:03:53,456 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 22:03:53,457 \tHypothesis: Tizionetsa Kuti Mmene Mumalandila Cikondi Conse , July\n2020-07-12 22:03:53,457 Validation result (greedy) at epoch 42, step 24000: bleu: 16.58, loss: 50057.7031, ppl: 7.5310, duration: 23.3520s\n2020-07-12 22:04:12,268 Epoch 42 Step: 24100 Batch Loss: 2.251055 Tokens per Sec: 11104, Lr: 0.000300\n2020-07-12 22:04:30,993 Epoch 42 Step: 24200 Batch Loss: 2.124293 Tokens per Sec: 11434, Lr: 0.000300\n2020-07-12 22:04:49,415 Epoch 42 Step: 24300 Batch Loss: 1.837537 Tokens per Sec: 11014, Lr: 0.000300\n2020-07-12 22:05:07,639 Epoch 42 Step: 24400 Batch Loss: 1.755044 Tokens per Sec: 10997, Lr: 0.000300\n2020-07-12 22:05:12,681 Epoch 42: total training loss 1109.49\n2020-07-12 22:05:12,682 EPOCH 43\n2020-07-12 22:05:26,184 Epoch 43 Step: 24500 Batch Loss: 1.948527 Tokens per Sec: 10963, Lr: 0.000300\n2020-07-12 22:05:44,812 Epoch 43 Step: 24600 Batch Loss: 1.826027 Tokens per Sec: 11271, Lr: 0.000300\n2020-07-12 22:06:03,454 Epoch 43 Step: 24700 Batch Loss: 1.973153 Tokens per Sec: 11319, Lr: 0.000300\n2020-07-12 22:06:21,788 Epoch 43 Step: 24800 Batch Loss: 1.975532 Tokens per Sec: 11232, Lr: 0.000300\n2020-07-12 22:06:40,262 Epoch 43 Step: 24900 Batch Loss: 1.272901 Tokens per Sec: 11025, Lr: 0.000300\n2020-07-12 22:06:58,732 Epoch 43 Step: 25000 Batch Loss: 2.216368 Tokens per Sec: 11351, Lr: 0.000300\n2020-07-12 22:07:22,825 Hooray! 
New best validation result [ppl]!\n2020-07-12 22:07:22,825 Saving new checkpoint.\n2020-07-12 22:07:24,023 Example #0\n2020-07-12 22:07:24,024 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 22:07:24,024 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 22:07:24,024 \tHypothesis: Tinali kugula makilomita a pa nyumba yathu imene tinali kutitenga padziko lonse lapansi\n2020-07-12 22:07:24,024 Example #1\n2020-07-12 22:07:24,024 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 22:07:24,025 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 22:07:24,025 \tHypothesis: Iye angagonje ndi kuteteza zinthu zina zimene anacita , ngakhale zaka zambili zapitazo .\n2020-07-12 22:07:24,025 Example #2\n2020-07-12 22:07:24,025 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 22:07:24,025 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:07:24,026 \tHypothesis: Kodi mau a Mose a pa 2 Timoteyo 2 : 19 amati ?\n2020-07-12 22:07:24,026 Example #3\n2020-07-12 22:07:24,026 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 22:07:24,026 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 22:07:24,026 \tHypothesis: Tiziyamikila Cikondi Cosatha , July\n2020-07-12 22:07:24,027 Validation result (greedy) at epoch 43, step 25000: bleu: 16.62, loss: 49733.6367, ppl: 7.4332, duration: 25.2938s\n2020-07-12 22:07:25,611 Epoch 43: total training loss 1100.98\n2020-07-12 22:07:25,611 EPOCH 44\n2020-07-12 22:07:43,008 Epoch 44 Step: 25100 Batch Loss: 1.881896 Tokens per Sec: 11029, Lr: 0.000300\n2020-07-12 22:08:01,573 Epoch 44 Step: 25200 Batch Loss: 1.957312 Tokens per Sec: 11001, Lr: 0.000300\n2020-07-12 22:08:20,036 Epoch 44 Step: 25300 Batch Loss: 2.177895 Tokens per Sec: 11360, Lr: 0.000300\n2020-07-12 22:08:38,535 Epoch 44 Step: 25400 Batch Loss: 1.982558 Tokens per Sec: 11313, Lr: 0.000300\n2020-07-12 22:08:57,047 Epoch 44 Step: 25500 Batch Loss: 1.973275 Tokens per Sec: 11074, Lr: 0.000300\n2020-07-12 22:09:13,600 Epoch 44: total training loss 1091.23\n2020-07-12 22:09:13,600 EPOCH 45\n2020-07-12 22:09:15,716 Epoch 45 Step: 25600 Batch Loss: 1.971170 Tokens per Sec: 11097, Lr: 0.000300\n2020-07-12 22:09:34,280 Epoch 45 Step: 25700 Batch Loss: 1.735779 Tokens per Sec: 11237, Lr: 0.000300\n2020-07-12 22:09:53,076 Epoch 45 Step: 25800 Batch Loss: 2.019991 Tokens per Sec: 11135, Lr: 0.000300\n2020-07-12 22:10:11,497 Epoch 45 Step: 25900 Batch Loss: 2.067264 Tokens per Sec: 11276, Lr: 0.000300\n2020-07-12 22:10:29,870 Epoch 45 Step: 26000 Batch Loss: 1.724764 Tokens per Sec: 11282, Lr: 0.000300\n2020-07-12 22:10:58,947 Hooray! 
New best validation result [ppl]!\n2020-07-12 22:10:58,947 Saving new checkpoint.\n2020-07-12 22:11:00,070 Example #0\n2020-07-12 22:11:00,071 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 22:11:00,071 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 22:11:00,071 \tHypothesis: Nchito yathu inali yosamalila nyumba yathu imene tinali kutitenga padziko lonse lapansi\n2020-07-12 22:11:00,071 Example #1\n2020-07-12 22:11:00,072 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 22:11:00,072 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 22:11:00,072 \tHypothesis: Iye ayenela kuti anali ndi maganizo olakwika ponena za zimene anacita zaka zambili , ngakhale kuti anali ndi zaka zambili .\n2020-07-12 22:11:00,072 Example #2\n2020-07-12 22:11:00,073 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 22:11:00,073 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:11:00,073 \tHypothesis: Ndi zocitika ziti zimene Mose anapeleka pamene mau a Paulo a pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:11:00,073 Example #3\n2020-07-12 22:11:00,073 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 22:11:00,074 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 22:11:00,074 \tHypothesis: Pitilizani Kukonda Uthenga Wabwino wa Uthenga Wabwino , July\n2020-07-12 22:11:00,074 Validation result (greedy) at epoch 45, step 26000: bleu: 16.82, loss: 49328.4375, ppl: 7.3127, duration: 30.2034s\n2020-07-12 22:11:18,810 Epoch 45 Step: 26100 Batch Loss: 2.169725 Tokens per Sec: 10929, Lr: 0.000300\n2020-07-12 22:11:31,845 Epoch 45: total training loss 1088.19\n2020-07-12 22:11:31,845 EPOCH 46\n2020-07-12 22:11:37,251 Epoch 46 Step: 26200 Batch Loss: 1.817476 Tokens per Sec: 10952, Lr: 0.000300\n2020-07-12 22:11:55,657 Epoch 46 Step: 26300 Batch Loss: 2.044828 Tokens per Sec: 11302, Lr: 0.000300\n2020-07-12 22:12:14,261 Epoch 46 Step: 26400 Batch Loss: 1.758926 Tokens per Sec: 11395, Lr: 0.000300\n2020-07-12 22:12:32,741 Epoch 46 Step: 26500 Batch Loss: 1.957885 Tokens per Sec: 11173, Lr: 0.000300\n2020-07-12 22:12:51,217 Epoch 46 Step: 26600 Batch Loss: 1.730212 Tokens per Sec: 11167, Lr: 0.000300\n2020-07-12 22:13:09,649 Epoch 46 Step: 26700 Batch Loss: 2.047805 Tokens per Sec: 11242, Lr: 0.000300\n2020-07-12 22:13:19,165 Epoch 46: total training loss 1079.66\n2020-07-12 22:13:19,165 EPOCH 47\n2020-07-12 22:13:28,404 Epoch 47 Step: 26800 Batch Loss: 1.963683 Tokens per Sec: 11235, Lr: 0.000300\n2020-07-12 22:13:47,092 Epoch 47 Step: 26900 Batch Loss: 1.714284 Tokens per Sec: 11292, Lr: 0.000300\n2020-07-12 22:14:05,542 Epoch 47 Step: 27000 Batch Loss: 1.860049 Tokens per Sec: 11263, Lr: 0.000300\n2020-07-12 22:14:33,907 Hooray! 
New best validation result [ppl]!\n2020-07-12 22:14:33,908 Saving new checkpoint.\n2020-07-12 22:14:35,080 Example #0\n2020-07-12 22:14:35,080 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 22:14:35,080 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 22:14:35,081 \tHypothesis: Pokhala nchito yapamwamba imene tinali kugwilila nchito , tinali kutitenga ku dziko lonse\n2020-07-12 22:14:35,081 Example #1\n2020-07-12 22:14:35,081 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 22:14:35,081 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 22:14:35,081 \tHypothesis: Iye ayenela kuti anali ndi nkhawa kwambili ponena za zinthu zimene anacita kwa zaka zambili .\n2020-07-12 22:14:35,081 Example #2\n2020-07-12 22:14:35,082 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 22:14:35,082 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:14:35,082 \tHypothesis: Ndi zocitika ziti zimene zinacitikila m’nthawi ya Mose pamene mau a Paulo analembedwa pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:14:35,082 Example #3\n2020-07-12 22:14:35,083 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 22:14:35,083 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 22:14:35,083 \tHypothesis: Tiziyamikila Cisomo Cimene Cisomo Cili pa July\n2020-07-12 22:14:35,083 Validation result (greedy) at epoch 47, step 27000: bleu: 16.73, loss: 49233.0117, ppl: 7.2846, duration: 29.5404s\n2020-07-12 22:14:53,720 Epoch 47 Step: 27100 Batch Loss: 1.741834 Tokens per Sec: 11139, Lr: 0.000300\n2020-07-12 22:15:12,052 Epoch 47 Step: 27200 Batch Loss: 2.265993 Tokens per Sec: 11159, Lr: 0.000300\n2020-07-12 22:15:30,733 Epoch 47 Step: 27300 Batch Loss: 1.931313 Tokens per Sec: 11214, Lr: 0.000300\n2020-07-12 22:15:36,313 Epoch 47: total training loss 1070.92\n2020-07-12 22:15:36,313 EPOCH 48\n2020-07-12 22:15:49,093 Epoch 48 Step: 27400 Batch Loss: 1.875881 Tokens per Sec: 11401, Lr: 0.000300\n2020-07-12 22:16:07,463 Epoch 48 Step: 27500 Batch Loss: 1.767903 Tokens per Sec: 11273, Lr: 0.000300\n2020-07-12 22:16:25,958 Epoch 48 Step: 27600 Batch Loss: 2.175007 Tokens per Sec: 11324, Lr: 0.000300\n2020-07-12 22:16:44,312 Epoch 48 Step: 27700 Batch Loss: 1.650685 Tokens per Sec: 11127, Lr: 0.000300\n2020-07-12 22:17:02,594 Epoch 48 Step: 27800 Batch Loss: 1.905820 Tokens per Sec: 11334, Lr: 0.000300\n2020-07-12 22:17:21,133 Epoch 48 Step: 27900 Batch Loss: 1.916827 Tokens per Sec: 10965, Lr: 0.000300\n2020-07-12 22:17:23,769 Epoch 48: total training loss 1075.79\n2020-07-12 22:17:23,770 EPOCH 49\n2020-07-12 22:17:39,734 Epoch 49 Step: 28000 Batch Loss: 1.575364 Tokens per Sec: 11227, Lr: 0.000300\n2020-07-12 22:18:05,316 Hooray! 
New best validation result [ppl]!\n2020-07-12 22:18:05,317 Saving new checkpoint.\n2020-07-12 22:18:06,639 Example #0\n2020-07-12 22:18:06,640 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 22:18:06,640 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 22:18:06,640 \tHypothesis: Nchito yathu yapamwamba imene tinali kugula inali itayamba padziko lonse\n2020-07-12 22:18:06,640 Example #1\n2020-07-12 22:18:06,641 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 22:18:06,641 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 22:18:06,641 \tHypothesis: Iye ayenela kuti anali kufunitsitsa kuteteza zinthu zimene anacita kale , ngakhale kuti anali ndi zaka zambili .\n2020-07-12 22:18:06,641 Example #2\n2020-07-12 22:18:06,642 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 22:18:06,642 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:18:06,642 \tHypothesis: Kodi mau a Mose a m’nthawi ya Mose analembedwa pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:18:06,642 Example #3\n2020-07-12 22:18:06,642 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 22:18:06,643 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 22:18:06,643 \tHypothesis: Tisamalile Cikondi Cosatha , July\n2020-07-12 22:18:06,643 Validation result (greedy) at epoch 49, step 28000: bleu: 16.77, loss: 49109.4180, ppl: 7.2484, duration: 26.9085s\n2020-07-12 22:18:25,231 Epoch 49 Step: 28100 Batch Loss: 1.950407 Tokens per Sec: 11020, Lr: 0.000300\n2020-07-12 22:18:43,852 Epoch 49 Step: 28200 Batch Loss: 1.889157 Tokens per Sec: 11197, Lr: 0.000300\n2020-07-12 22:19:02,298 Epoch 49 Step: 28300 Batch Loss: 1.516591 Tokens per Sec: 11353, Lr: 0.000300\n2020-07-12 22:19:20,672 Epoch 49 Step: 28400 Batch Loss: 1.849100 Tokens per Sec: 10975, Lr: 0.000300\n2020-07-12 22:19:38,587 Epoch 49: total training loss 1066.71\n2020-07-12 22:19:38,587 EPOCH 50\n2020-07-12 22:19:39,232 Epoch 50 Step: 28500 Batch Loss: 2.093219 Tokens per Sec: 10509, Lr: 0.000300\n2020-07-12 22:19:57,742 Epoch 50 Step: 28600 Batch Loss: 1.851735 Tokens per Sec: 11065, Lr: 0.000300\n2020-07-12 22:20:16,555 Epoch 50 Step: 28700 Batch Loss: 1.862564 Tokens per Sec: 11354, Lr: 0.000300\n2020-07-12 22:20:35,050 Epoch 50 Step: 28800 Batch Loss: 2.086834 Tokens per Sec: 11289, Lr: 0.000300\n2020-07-12 22:20:53,602 Epoch 50 Step: 28900 Batch Loss: 1.967869 Tokens per Sec: 11367, Lr: 0.000300\n2020-07-12 22:21:12,089 Epoch 50 Step: 29000 Batch Loss: 1.799863 Tokens per Sec: 10979, Lr: 0.000300\n2020-07-12 22:21:33,248 Hooray! 
New best validation result [ppl]!\n2020-07-12 22:21:33,249 Saving new checkpoint.\n2020-07-12 22:21:34,414 Example #0\n2020-07-12 22:21:34,415 \tSource: Our professional ballet careers took us around the world to dance\n2020-07-12 22:21:34,415 \tReference: Cifukwa cokhala ndi maluso ovina , tinayenda m’madela ambili padziko lapansi\n2020-07-12 22:21:34,415 \tHypothesis: Nchito yathu yolalikila inali yosiyana kwambili ndi anthu amene tinali kukhala m’dziko la Japan\n2020-07-12 22:21:34,415 Example #1\n2020-07-12 22:21:34,415 \tSource: He may be plagued with guilt about something he did in the past , even many years ago .\n2020-07-12 22:21:34,415 \tReference: Taona kale mmene Davide na Paulo anakambilapo za nkhawa zawo .\n2020-07-12 22:21:34,416 \tHypothesis: Iye ayenela kuti anali ndi nkhawa kwambili za zinthu zimene anacita zaka zambili , ngakhale kuti anali ndi zaka zambili .\n2020-07-12 22:21:34,416 Example #2\n2020-07-12 22:21:34,416 \tSource: What events in Moses ’ day provide the background for Paul’s words recorded at 2 Timothy 2 : 19 ?\n2020-07-12 22:21:34,416 \tReference: Ndi zocitika ziti za m’nthawi ya Mose zimene zimagwilizana ndi zimene Paulo analemba pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:21:34,416 \tHypothesis: Ndi zocitika ziti zimene zinacitikila Mose pa mau a Paulo a pa 2 Timoteyo 2 : 19 ?\n2020-07-12 22:21:34,417 Example #3\n2020-07-12 22:21:34,417 \tSource: Spread the Good News of Undeserved Kindness , July\n2020-07-12 22:21:34,417 \tReference: Lalikilani Uthenga Wabwino Wokamba za Kukoma Mtima , July\n2020-07-12 22:21:34,417 \tHypothesis: Tiziyesetsa Kuonetsa Cikondi Cosatha , July\n2020-07-12 22:21:34,420 Validation result (greedy) at epoch 50, step 29000: bleu: 16.81, loss: 48810.3203, ppl: 7.1615, duration: 22.3307s\n2020-07-12 22:21:48,865 Epoch 50: total training loss 1056.90\n2020-07-12 22:21:48,866 Training ended after 50 epochs.\n2020-07-12 22:21:48,866 Best validation result (greedy) at step 29000: 7.16 ppl.\n/pytorch/aten/src/ATen/native/BinaryOps.cpp:81: UserWarning: Integer division of tensors using div or / is deprecated, and in a future release div will perform true division as in Python 3. Use true_divide or floor_divide (// in Python) instead.\n2020-07-12 22:22:12,383 dev bleu: 17.72 [Beam search decoding with beam size = 5 and alpha = 1.0]\n2020-07-12 22:22:12,388 Translations saved to: /content/drive/My Drive/masakhane/model-temp/00029000.hyps.dev\n2020-07-12 22:22:57,597 test bleu: 30.09 [Beam search decoding with beam size = 5 and alpha = 1.0]\n2020-07-12 22:22:57,602 Translations saved to: /content/drive/My Drive/masakhane/model-temp/00029000.hyps.test\n" ], [ "# Copy the created models from the temporary storage to main storage on google drive for persistant storage \n!cp -r \"/content/drive/My Drive/masakhane/model-temp/\"* \"$gdrive_path/models/${src}${tgt}_transformer/\"", "_____no_output_____" ], [ "# Output our validation accuracy\n! 
cat \"$gdrive_path/models/${src}${tgt}_transformer/validations.txt\"", "Steps: 1000\tLoss: 104985.17969\tPPL: 69.02498\tbleu: 0.72913\tLR: 0.00030000\t*\nSteps: 2000\tLoss: 87342.05469\tPPL: 33.88090\tbleu: 2.09997\tLR: 0.00030000\t*\nSteps: 3000\tLoss: 78307.31250\tPPL: 23.53396\tbleu: 3.31598\tLR: 0.00030000\t*\nSteps: 4000\tLoss: 72736.87500\tPPL: 18.79830\tbleu: 5.24072\tLR: 0.00030000\t*\nSteps: 5000\tLoss: 68773.72656\tPPL: 16.02127\tbleu: 6.90910\tLR: 0.00030000\t*\nSteps: 6000\tLoss: 66082.85938\tPPL: 14.37346\tbleu: 8.21670\tLR: 0.00030000\t*\nSteps: 7000\tLoss: 63526.49609\tPPL: 12.96529\tbleu: 9.07820\tLR: 0.00030000\t*\nSteps: 8000\tLoss: 61911.11719\tPPL: 12.14747\tbleu: 9.82287\tLR: 0.00030000\t*\nSteps: 9000\tLoss: 60281.30469\tPPL: 11.37461\tbleu: 10.86482\tLR: 0.00030000\t*\nSteps: 10000\tLoss: 58961.61328\tPPL: 10.78499\tbleu: 11.17248\tLR: 0.00030000\t*\nSteps: 11000\tLoss: 57576.37891\tPPL: 10.19894\tbleu: 11.80842\tLR: 0.00030000\t*\nSteps: 12000\tLoss: 56646.62109\tPPL: 9.82355\tbleu: 12.65364\tLR: 0.00030000\t*\nSteps: 13000\tLoss: 55712.61328\tPPL: 9.46036\tbleu: 13.00193\tLR: 0.00030000\t*\nSteps: 14000\tLoss: 54857.13281\tPPL: 9.13950\tbleu: 13.33962\tLR: 0.00030000\t*\nSteps: 15000\tLoss: 53885.88672\tPPL: 8.78839\tbleu: 13.85405\tLR: 0.00030000\t*\nSteps: 16000\tLoss: 53204.42578\tPPL: 8.55012\tbleu: 14.67869\tLR: 0.00030000\t*\nSteps: 17000\tLoss: 53287.41016\tPPL: 8.57879\tbleu: 14.17911\tLR: 0.00030000\t\nSteps: 18000\tLoss: 52131.85938\tPPL: 8.18812\tbleu: 14.77395\tLR: 0.00030000\t*\nSteps: 19000\tLoss: 52101.89844\tPPL: 8.17823\tbleu: 15.42948\tLR: 0.00030000\t*\nSteps: 20000\tLoss: 51433.57422\tPPL: 7.96072\tbleu: 15.35123\tLR: 0.00030000\t*\nSteps: 21000\tLoss: 51151.20703\tPPL: 7.87057\tbleu: 15.82799\tLR: 0.00030000\t*\nSteps: 22000\tLoss: 50394.41016\tPPL: 7.63396\tbleu: 15.77829\tLR: 0.00030000\t*\nSteps: 23000\tLoss: 50194.80469\tPPL: 7.57274\tbleu: 16.33550\tLR: 0.00030000\t*\nSteps: 24000\tLoss: 50057.70312\tPPL: 7.53098\tbleu: 16.58389\tLR: 0.00030000\t*\nSteps: 25000\tLoss: 49733.63672\tPPL: 7.43319\tbleu: 16.61687\tLR: 0.00030000\t*\nSteps: 26000\tLoss: 49328.43750\tPPL: 7.31269\tbleu: 16.81934\tLR: 0.00030000\t*\nSteps: 27000\tLoss: 49233.01172\tPPL: 7.28460\tbleu: 16.73319\tLR: 0.00030000\t*\nSteps: 28000\tLoss: 49109.41797\tPPL: 7.24838\tbleu: 16.76788\tLR: 0.00030000\t*\nSteps: 29000\tLoss: 48810.32031\tPPL: 7.16146\tbleu: 16.81243\tLR: 0.00030000\t*\n" ], [ "# Test our model\n! cd joeynmt; python3 -m joeynmt test \"$gdrive_path/models/${src}${tgt}_transformer/config.yaml\"\n", "2020-07-12 22:31:35,573 Hello! This is Joey-NMT.\n/pytorch/aten/src/ATen/native/BinaryOps.cpp:81: UserWarning: Integer division of tensors using div or / is deprecated, and in a future release div will perform true division as in Python 3. Use true_divide or floor_divide (// in Python) instead.\n2020-07-12 22:32:01,727 dev bleu: 17.72 [Beam search decoding with beam size = 5 and alpha = 1.0]\n2020-07-12 22:32:46,046 test bleu: 30.09 [Beam search decoding with beam size = 5 and alpha = 1.0]\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4af4d346fcad56ba4cfd34bae98b8272aa46138e
267,899
ipynb
Jupyter Notebook
challenge-2/1_evaluate-models.ipynb
exalearn/hydronet
bd6fc4cb0962226a975057978d5de9f0053985ba
[ "Apache-2.0" ]
7
2021-07-04T04:11:32.000Z
2022-01-25T07:11:55.000Z
challenge-2/1_evaluate-models.ipynb
exalearn/hydronet
bd6fc4cb0962226a975057978d5de9f0053985ba
[ "Apache-2.0" ]
11
2021-04-30T18:16:16.000Z
2022-03-18T17:39:36.000Z
challenge-2/1_evaluate-models.ipynb
exalearn/hydronet
bd6fc4cb0962226a975057978d5de9f0053985ba
[ "Apache-2.0" ]
3
2020-12-04T17:48:42.000Z
2021-10-07T20:06:07.000Z
193.010807
43,616
0.894718
[ [ [ "# Evaluate the Performance of MPNN models\nGet all of the models, regardless how we trained them and evaluate their performance", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\nfrom sklearn import metrics\nfrom tqdm import tqdm\nfrom glob import glob\nimport pandas as pd\nimport numpy as np\nimport json\nimport os", "_____no_output_____" ] ], [ [ "## Find the Models and Summarize Them\nThere are `best_model.h5` files in subdirectories that contain data on their configuration.", "_____no_output_____" ] ], [ [ "models = glob(os.path.join('**', 'test_predictions.csv'), recursive=True)\nprint(f'Found {len(models)} models')", "Found 35 models\n" ], [ "def generate_summary(path):\n \"\"\"Generate the summary of a model, given path to its output\n \n Args:\n path (str): Path ot the trained weights\n Returns:\n (dict) Model information\n \"\"\"\n \n # Store the directory first\n dir_name = os.path.dirname(path)\n output = {'path': dir_name}\n \n # Get the host and run parameters\n for f in ['host_info.json', 'run_params.json']:\n with open(os.path.join(dir_name, f)) as fp:\n output.update(json.load(fp))\n \n # Compute the number of nodes\n output['n_nodes'] = output['total_ranks'] // output['ranks_per_node'] \\\n if 'total_ranks' in output else 1\n \n # Convert the start time to a datetime\n output['start_time'] = datetime.fromisoformat(output['start_time'])\n \n # Get the log infomration\n log_file = os.path.join(dir_name, 'log.csv')\n log = pd.read_csv(log_file)\n output['completed_epochs'] = len(log)\n output['val_loss'] = log['val_loss'].min()\n output['loss'] = log['loss'].min()\n output['epoch_time'] = np.percentile(log['epoch_time'], 50)\n output['total_train_time'] = log['epoch_time'].sum()\n output['total_node_hours'] = output['total_train_time'] * output['n_nodes']\n \n # Compute performance on hold-out set\n results = pd.read_csv(os.path.join(output['path'], 'test_predictions.csv'))\n for m in ['r2_score', 'mean_squared_error', 'mean_absolute_error', 'median_absolute_error']:\n v = getattr(metrics, m)(results['y_true'], results['y_pred'])\n output[m] = v\n \n return output", "_____no_output_____" ], [ "model_info = pd.DataFrame([generate_summary(m) for m in models])\nprint(f'Found {len(model_info)} models')", "Found 35 models\n" ] ], [ [ "## Print out Best Performer\nWe are going to pick the one that has the best performance on the test set", "_____no_output_____" ], [ "### Coarse Network\nSee how we did on the \"node per water\" network", "_____no_output_____" ] ], [ [ "model = model_info.query('network_choice==\"coarse\"').sort_values('mean_absolute_error').iloc[0]\nprint(f'Model being evaluated: {model[\"path\"]}')", "Model being evaluated: train-keras/lambda3-T4-f64-N32-ed9a2d\n" ], [ "model[['path', 'network_choice', 'activation', 'message_steps', 'dropout', 'features', 'batch_size']]", "_____no_output_____" ], [ "model[['loss', 'val_loss', 'mean_squared_error']]", "_____no_output_____" ] ], [ [ "Plot the logs", "_____no_output_____" ] ], [ [ "log = pd.read_csv(os.path.join(model['path'], 'log.csv'))", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(3.5, 2.5))\n\nax.semilogy(log['epoch'], log['loss'], label='Train')\nax.semilogy(log['epoch'], log['val_loss'], label='Validation')\n\nax.legend()\nax.set_xlabel('Epoch')\nax.set_ylabel('Loss')", "_____no_output_____" ] ], [ [ "*Finding*: Huge variance in validation loss is indicative of overfitting", "_____no_output_____" ], [ "Plot the 
performance on the test set", "_____no_output_____" ] ], [ [ "results = pd.read_csv(os.path.join(model['path'], 'test_predictions.csv'))", "_____no_output_____" ], [ "for m in ['r2_score', 'mean_squared_error', 'mean_absolute_error']:\n v = getattr(metrics, m)(results['y_true'], results['y_pred'])\n print(f'{m}: {v: .2f}')", "r2_score: 1.00\nmean_squared_error: 6.13\nmean_absolute_error: 1.82\n" ] ], [ [ "Plot the true vs predicted", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\n\nax.scatter(results['y_true'], results['y_pred'], s=0.5, alpha=0.2)\n\nax.plot(ax.get_xlim(), ax.get_ylim(), 'k--')\nax.set_xlabel('$E$, True')\nax.set_ylabel('$E$, ML')\n\nfig.set_size_inches(3.5, 3.5)", "_____no_output_____" ] ], [ [ "Plot only the largest cluster size", "_____no_output_____" ] ], [ [ "subset = results.query(f'n_waters == {results[\"n_waters\"].max()}')\nprint(f'Scores for the {len(subset)} largest molecules with {results[\"n_waters\"].max()} waters')\nfor m in ['r2_score', 'mean_squared_error', 'mean_absolute_error']:\n v = getattr(metrics, m)(subset['y_true'], subset['y_pred'])\n print(f'{m}: {v: .2f}')", "Scores for the 12333 largest molecules with 30 waters\nr2_score: 0.36\nmean_squared_error: 10.41\nmean_absolute_error: 2.39\n" ], [ "fig, ax = plt.subplots()\n\nerrors = subset['y_pred'] - subset['y_true']\nbins = np.linspace(-10, 10, 256)\nax.hist(errors, bins=bins, density=False)\n\nax.set_xlabel('Error (kcal/mol)')\nax.set_ylabel('Frequency')\nfig.set_size_inches(3.5, 2)", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(3.5, 3.5))\n\nax.scatter(subset['y_true'], subset['y_pred'], s=0.5, alpha=0.1)\n\nax.set_ylim(-340, -305)\nax.set_xlim(ax.get_ylim())\nax.set_ylim(ax.get_xlim())\nax.plot(ax.get_xlim(), ax.get_xlim(), 'k--')\nax.set_xlabel('$E$ (kcal/mol), True')\nax.set_ylabel('$E$ (kcal/mol), ML')\n\nfig.tight_layout()", "_____no_output_____" ] ], [ [ "### Atomic Network\nSee how we did for the \"node per atom\" network", "_____no_output_____" ] ], [ [ "model = model_info.query('network_choice==\"atomic\"').sort_values('mean_absolute_error').iloc[0]\nprint(f'Model being evaluated: {model[\"path\"]}')", "Model being evaluated: train-keras/lambda3-T32-f64-N32-be165f\n" ], [ "model[['path', 'network_choice', 'activation', 'message_steps', 'dropout', 'features', 'batch_size']]", "_____no_output_____" ], [ "model[['loss', 'val_loss', 'mean_squared_error']]", "_____no_output_____" ] ], [ [ "Plot the logs", "_____no_output_____" ] ], [ [ "log = pd.read_csv(os.path.join(model['path'], 'log.csv'))", "_____no_output_____" ], [ "fig, ax = plt.subplots()\n\nax.semilogy(log['epoch'], log['loss'], label='Train')\nax.semilogy(log['epoch'], log['val_loss'], label='Validation')\n\nax.legend()\nax.set_xlabel('Epoch')\nax.set_ylabel('Loss')", "_____no_output_____" ] ], [ [ "*Finding*: Huge variance in validation loss is indicative of overfitting", "_____no_output_____" ], [ "Plot the performance on the test set", "_____no_output_____" ] ], [ [ "results = pd.read_csv(os.path.join(model['path'], 'test_predictions.csv'))", "_____no_output_____" ], [ "for m in ['r2_score', 'mean_squared_error', 'mean_absolute_error']:\n v = getattr(metrics, m)(results['y_true'], results['y_pred'])\n print(f'{m}: {v: .2f}')", "r2_score: 1.00\nmean_squared_error: 1.99\nmean_absolute_error: 1.03\n" ] ], [ [ "Plot the true vs predicted", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(3.5, 3.5))\n\nax.set_title('Performance on hold-out set')\nax.scatter(results['y_true'], results['y_pred'], 
s=0.5, alpha=0.2)\n\nax.plot(ax.get_xlim(), ax.get_ylim(), 'k--')\nax.set_xlabel('$E$, True')\nax.set_ylabel('$E$, ML')\n\nfig.set_size_inches(3.5, 3.5)", "_____no_output_____" ] ], [ [ "Plot only the largest cluster size", "_____no_output_____" ] ], [ [ "subset = results.query(f'n_waters == {results[\"n_waters\"].max()}')\nprint(f'Scores for the {len(subset)} largest molecules with {results[\"n_waters\"].max()} waters')\nfor m in ['r2_score', 'mean_squared_error', 'mean_absolute_error']:\n v = getattr(metrics, m)(subset['y_true'], subset['y_pred'])\n print(f'{m}: {v: .2f}')", "Scores for the 12333 largest molecules with 30 waters\nr2_score: 0.84\nmean_squared_error: 2.62\nmean_absolute_error: 1.16\n" ], [ "fig, ax = plt.subplots()\n\nerrors = subset['y_pred'] - subset['y_true']\nbins = np.linspace(-10, 10, 256)\nax.hist(errors, bins=bins, density=False)\n\nax.set_xlabel('Error (kcal/mol)')\nax.set_ylabel('Frequency')\nfig.set_size_inches(3.5, 2)", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(3.5, 3.5))\n\nax.set_title('Clusters with 30 waters')\nax.scatter(subset['y_true'], subset['y_pred'], s=0.5, alpha=0.1)\n\nax.set_ylim(-340, -305)\nax.set_xlim(ax.get_ylim())\nax.set_ylim(ax.get_xlim())\nax.plot(ax.get_xlim(), ax.get_xlim(), 'k--')\nax.set_xlabel('$E$ (kcal/mol), True')\nax.set_ylabel('$E$ (kcal/mol), ML')\n\nfig.tight_layout()", "_____no_output_____" ] ], [ [ "Make a publication-ready figure", "_____no_output_____" ] ], [ [ "fig, axs = plt.subplots(1, 3, figsize=(6.5, 2.5))\n\n# Predicted vs actual plots\nn_waters = results[\"n_waters\"].max()\nsubset = results.query(f'n_waters == {n_waters}')\nfor d, ax, title in zip([results, subset], axs,\n ['Full Dataset', '30-Water Clusters']):\n ax.set_title(title)\n ax.scatter(d['y_true'], d['y_pred'], s=0.7, alpha=0.2, edgecolor='none')\n\n max_ = max(ax.get_xlim()[1], ax.get_ylim()[1])\n min_ = min(ax.get_xlim()[0], ax.get_ylim()[0])\n ax.set_xlim([min_, max_])\n ax.set_ylim(ax.get_xlim())\n ax.plot(ax.get_xlim(), ax.get_xlim(), 'k--')\n ax.set_xlabel('$E$ (kcal/mol), True')\n ax.set_ylabel('$E$ (kcal/mol), ML')\n \n mae = metrics.mean_absolute_error(d['y_true'], d['y_pred'])\n r2 = metrics.r2_score(d['y_true'], d['y_pred'])\n ax.text(0.99, 0, f'MAE: {mae:.2f}\\n$R^2$: {r2:.2f}',\n ha='right', va='bottom', transform=ax.transAxes,\n fontsize=10)\n \n# Box and wisker plot\nax = axs[2]\nerror_stats = []\nfor s, subset in results.groupby('n_waters'):\n error = np.abs(subset['y_pred'] - subset['y_true']) / s\n error_stats.append({'size': s, 'mae': error.mean()})\nerror_stats = pd.DataFrame(error_stats)\nax.plot(error_stats['size'], error_stats['mae'], '--o', ms=3)\nax.set_xlabel('# Waters')\nax.set_ylabel('MAE (kcal/mol/water)')\n\n# Add figure labels\nfor ax, l in zip(axs[:2], ['a', 'b']):\n ax.text(0.02, 0.9, f'({l})', transform=ax.transAxes)\naxs[2].text(0.82, 0.9, '(c)', transform=axs[2].transAxes)\n \nfig.tight_layout()\nfig.savefig(os.path.join('figures', 'mpnn-performance.png'), dpi=320)", "_____no_output_____" ] ], [ [ "## Make the Box Plot\nTo match Jenna's", "_____no_output_____" ] ], [ [ "results['abs_error_per_water'] = np.abs(results['y_true'] - results['y_pred']) / results['n_waters']", "_____no_output_____" ], [ "def make_box_plot(df, metric='abs_error_per_water'): \n boxplot = df.query('n_waters >= 10 and n_waters <= 30').boxplot(metric, 'n_waters', grid=False, fontsize=20, figsize=(12,6), return_type='both')\n plt.ylim(-0.01,0.7)\n plt.ylabel('Absolute Error\\n(kcal/mol/water)', fontsize=22, fontweight='bold', 
labelpad=15)\n plt.xlabel('Cluster Size', fontsize=22, fontweight='bold', labelpad=15)\n plt.xticks(range(1,23,2), ['10','12','14','16','18','20','22','24','26','28','30'])\n plt.xlim(0, 22)\n plt.suptitle('')\n plt.title('')\n plt.tight_layout()\n plt.savefig('figures/mpnn_boxplot-horz.png',dpi=600)\nmake_box_plot(results)", "_____no_output_____" ] ], [ [ "## Evaluate Hyperparameter Sweeps\nWe did some manual hyperparameter tuning for the atomic model", "_____no_output_____" ], [ "### Batch Sizes\nEvaluate different batch sizes to get a tradeoff between accuracy and using the full GPU", "_____no_output_____" ] ], [ [ "base_query = ('epochs==32 and shuffle_buffer_size==2097152 and activation==\"sigmoid\" '\n 'and message_steps==4 and network_choice==\"atomic\" and dropout==0 and features==64')", "_____no_output_____" ], [ "model_info.query(base_query).sort_values('val_loss')[['batch_size', 'loss', 'val_loss', 'mean_squared_error', 'epoch_time']]", "_____no_output_____" ] ], [ [ "*Finding*: We get decent accuracy with a batch size of 1024 and still use 90% of the GPU", "_____no_output_____" ], [ "### Activation Function\nWe evaluated different activation functions for the message steps", "_____no_output_____" ] ], [ [ "base_query = ('batch_size==1024 and epochs==32 and shuffle_buffer_size==2097152 '\n 'and message_steps==4 and network_choice==\"atomic\" and dropout==0 and features==64')", "_____no_output_____" ], [ "model_info.query(base_query).sort_values('mean_squared_error')[['activation', 'loss', 'val_loss', 'mean_squared_error', 'epoch_time']]", "_____no_output_____" ] ], [ [ "*Finding*: We should go with the softplus. Fastest and most accurate", "_____no_output_____" ], [ "### Number of Message Passing Layers\nWe compared increasing the number of message passing layers", "_____no_output_____" ] ], [ [ "base_query = ('hostname==\"lambda3\" and shuffle_buffer_size==2097152 and batch_size==1024 and activation==\"softplus\" and epochs==32 '\n 'and network_choice==\"atomic\"')", "_____no_output_____" ], [ "model_info.query(base_query).sort_values('message_steps')[['network_choice', 'message_steps', \n 'loss', 'val_loss', 'mean_squared_error',\n 'epoch_time']]", "_____no_output_____" ], [ "fig, ax = plt.subplots()\n\nfor label, subset in model_info.query(base_query).sort_values('message_steps').groupby('network_choice'):\n ax.plot(subset['message_steps'], subset['mean_absolute_error'], '-o', label=label)\n\nax.set_xscale('log', base=2)\nax.set_xlabel('Message Steps')\nax.set_ylabel('Mean Absolute Error')\nax.legend()", "_____no_output_____" ] ], [ [ "*Finding*: We need many message passing layers, which can get expensive", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
4af4de70f5e42c2072245d6a34a20554dbad64aa
581,618
ipynb
Jupyter Notebook
notebooks/BalancedMultiCenterNestedCrossValidation.ipynb
spisakt/RPN-siganture
7870554122551af270b3fa4be63592fa2df98e1f
[ "BSD-3-Clause" ]
11
2019-08-19T16:13:37.000Z
2022-02-25T16:41:27.000Z
notebooks/BalancedMultiCenterNestedCrossValidation.ipynb
spisakt/RPN-siganture
7870554122551af270b3fa4be63592fa2df98e1f
[ "BSD-3-Clause" ]
3
2019-04-26T09:42:56.000Z
2020-04-19T11:28:38.000Z
notebooks/BalancedMultiCenterNestedCrossValidation.ipynb
spisakt/RPN-siganture
7870554122551af270b3fa4be63592fa2df98e1f
[ "BSD-3-Clause" ]
4
2019-09-28T10:12:49.000Z
2020-09-20T11:58:48.000Z
322.404656
191,860
0.911401
[ [ [ "# Multi-center analysis", "_____no_output_____" ], [ "### Imports", "_____no_output_____" ] ], [ [ " import sys\n sys.path.append('../')\n from PAINTeR import connectivity # in-house lib used for the RPN-signature\n from PAINTeR import plot # in-house lib used for the RPN-signature\n from PAINTeR import model # in-house lib used for the RPN-signature\n import numpy as np # hi old friend\n import pandas as pd\n\n from sklearn.preprocessing import StandardScaler\n from nilearn.connectome import ConnectivityMeasure\n\n from matplotlib.colors import ListedColormap\n from matplotlib.colors import Normalize\n import matplotlib.pyplot as plt\n import seaborn as sns\n sns.set_style(\"white\")\n\n from sklearn.linear_model import ElasticNet, Ridge\n from sklearn.feature_selection import SelectKBest, f_regression\n from sklearn import preprocessing\n from sklearn.pipeline import Pipeline\n from sklearn.model_selection import LeaveOneOut, KFold, GroupKFold, LeavePGroupsOut\n from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score, explained_variance_score\n from sklearn.model_selection import GridSearchCV\n from sklearn.model_selection import cross_val_predict\n from sklearn.model_selection import cross_validate", "_____no_output_____" ] ], [ [ "### Processing parameters", "_____no_output_____" ] ], [ [ "thres_mean_FD = 0.15 # mm\nscrub_threshold = 0.15 # mm\nthres_perc_scrub = 30 # % scubbed out", "_____no_output_____" ] ], [ [ "### Load all behavioral data", "_____no_output_____" ] ], [ [ "# load bochum data\ndf_bochum = pd.read_csv(\"../res/bochum_sample_excl.csv\")\ndf_essen = pd.read_csv(\"../res/essen_sample_excl.csv\")\ndf_szeged = pd.read_csv(\"../res/szeged_sample_excl.csv\")\ndf_bochum['study']='bochum'\ndf_essen['study']='essen'\ndf_szeged['study']='szeged'\ndf=pd.concat((df_bochum, df_essen, df_szeged), sort=False)\ndf=df.reset_index()", "_____no_output_____" ], [ "df.groupby('study').hist('mean_QST_pain_sensitivity', bins=6)", "_____no_output_____" ] ], [ [ "### Load standardized scrubbed timeseries", "_____no_output_____" ] ], [ [ "timeseries = []\nperc_scrubbed = []\nfor i, f in enumerate(df['ts_file']):\n f = '..' + f.split('/..')[1]\n f_scrub = f.split('.tsv')[0] + '-scrubbed.tsv'\n \n ts = pd.read_csv(f_scrub).iloc[:,1:] # here we can omit global signal...\n \n fd_file = df[\"fd_file\"].values[i]\n fd_file = '..' 
+ fd_file.split('/..')[1]\n fd = pd.read_csv(fd_file).values.ravel().tolist()\n fd = [0] + fd \n \n perc_scrubbed.append(100 - 100*len(ts.shape)/len(fd) )\n timeseries.append(ts.values)\n ", "_____no_output_____" ], [ "#region names\nlabels=ts.columns.values\nl = pd.read_csv('../data/atlas_relabeled.tsv', sep=\"\\t\")\nmodules=np.insert(l['modules'].values, 0, \"GlobSig\")", "_____no_output_____" ], [ "# plot a specific timeseries\nsub_idx=10\npd.DataFrame(timeseries[sub_idx], columns=ts.columns.values).loc[:, ['AINS_pd', 'AINS_v', 'PINS_v']].plot()", "_____no_output_____" ] ], [ [ "### Calculate connectivity", "_____no_output_____" ] ], [ [ "correlation_measure = ConnectivityMeasure(kind='partial correlation', vectorize=True, discard_diagonal=True)\nX = correlation_measure.fit_transform(timeseries) # these are the features\nmat=correlation_measure.mean_\n#mat=mat[1:, 1:] #fisrt row and column is global signal\nmat[range(mat.shape[0]), range(mat.shape[0])] = 0 # zero diag", "_____no_output_____" ], [ "# 3d plot in browser window\n#coords = plotting.find_parcellation_cut_coords(\"../data/atlas_relabeled.nii.gz\")\n#view = plotting.view_connectome(mat, coords)\n#view.open_in_browser()", "_____no_output_____" ], [ "plot.plot_matrix(mat, labels, modules)", "_____no_output_____" ], [ "y = df.mean_QST_pain_sensitivity", "_____no_output_____" ], [ "sns.distplot(y[df.study=='bochum'], hist=False, rug=True)\nsns.distplot(y[df.study=='essen'], hist=False, rug=True)\nsns.distplot(y[df.study=='szeged'], hist=False, rug=True)\nprint(X.shape, len(y))", "(91, 7503) 91\n" ] ], [ [ "### Group data to get balanced splits in a 30-fold cross-validation", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(12, 0.3))\nsns.heatmap([df.study.astype(\"category\").cat.codes.values]).set_title('study center')\nplt.show()\nn_szeged = np.sum(df.study == 'szeged') # size of the smallest study\nn_essen = np.sum(df.study == 'essen')\nn_bochum = np.sum(df.study == 'bochum')\nprint(n_bochum, n_essen, n_szeged)\n\ngroups=np.zeros(len(df), dtype=int)\n\ng=0\ni=0\nwhile i < n_bochum:\n groups[i] = g\n #groups[i+1] = g\n i += 1\n g += 1\n \ng=0\ni=n_bochum\nwhile i < n_bochum+n_essen:\n groups[i] = g\n #groups[i+1] = g\n i += 1\n g += 1\ng=0\ni=n_bochum+n_essen\nwhile i < len(df):\n groups[i] = g\n i += 1\n g += 1\n \nplt.figure(figsize=(12, 0.3))\nsns.heatmap([groups]).set_title('groups')\nplt.show()\n\ngroups", "_____no_output_____" ] ], [ [ "## Model training - non nested", "_____no_output_____" ] ], [ [ "def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(),\n fsel=SelectKBest(f_regression),\n model=ElasticNet(max_iter=100000),\n p_grid = {'fsel__k': [25, 50, 100, 1000, 3000, 'all'],\n 'model__alpha': [ 0.001, 0.01, 0.1, 1, 10],\n 'model__l1_ratio': [0.0001, .25, .5, .75, 0.9999] \n }):\n \n mymodel = Pipeline(\n [('scaler', scaler),\n ('fsel', fsel),\n ('model', model)])\n return mymodel, p_grid\n\nmodel, p_grid = pipe_scale_fsel_elnet()\n\ncv = GroupKFold(30)\nclf = GridSearchCV(estimator=model, param_grid=p_grid, cv=cv,\n scoring=\"neg_mean_squared_error\", verbose=True, return_train_score=False,\n n_jobs=-1)\nclf.fit(X, y, groups=groups)\nprint(\"**** Non-nested analysis ****\")\nprint(\"** Best hyperparameters: \" + str(clf.best_params_))\n\nprint(\"** Score on full data as training set:\\t\" + str(-mean_squared_error(y_pred=clf.best_estimator_.predict(X), y_true=y)))\nprint(\"** Score on mean as model: \" + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y)))\nprint(\"** Best Non-nested cross-validated 
score on test:\\t\" + str(clf.best_score_))\nprint(\"XXXXX Explained Variance: \" + str(\n 1 - clf.best_score_ / -mean_squared_error(np.repeat(y.mean(), len(y)), y)))\n\n\ncv_pred = cross_val_predict(clf.best_estimator_, X, y, cv=cv, groups=groups, n_jobs=-1) \nplot.plot_prediction(y, predicted, sd=True, covar=[])\n\n#for train_index, test_index in group_kfold.split(X, y, groups):\n# #print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n# #print(df.study[train_index].values)\n# print('test:', df.study[test_index].values)", "Fitting 30 folds for each of 150 candidates, totalling 4500 fits\n" ] ], [ [ "## Model training - nested", "_____no_output_____" ] ], [ [ "def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(),\n fsel=SelectKBest(f_regression),\n model=ElasticNet(max_iter=100000),\n p_grid = {'fsel__k': [25, 2000, 4000, 6000],\n 'model__alpha': [ 0.001, 0.01, 0.1, 1],\n 'model__l1_ratio': [0.0001, .25, .5, .75, 0.9999] \n }):\n \n mymodel = Pipeline(\n [('scaler', scaler),\n ('fsel', fsel),\n ('model', model)])\n return mymodel, p_grid\n\nmodel, p_grid = pipe_scale_fsel_elnet()\n\ncv = GroupKFold(30)\nclf = GridSearchCV(estimator=model, param_grid=p_grid, cv=cv,\n scoring=\"neg_mean_squared_error\", verbose=True, return_train_score=False,\n n_jobs=-1)\nclf.fit(X, y, groups=groups)\nprint(\"**** Non-nested analysis ****\")\nprint(\"** Best hyperparameters: \" + str(clf.best_params_))\n\nprint(\"** Score on full data as training set:\\t\" + str(-mean_squared_error(y_pred=clf.best_estimator_.predict(X), y_true=y)))\nprint(\"** Score on mean as model: \" + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y)))\nprint(\"** Best Non-nested cross-validated score on test:\\t\" + str(clf.best_score_))\nprint(\"XXXXX Explained Variance: \" + str(\n 1 - clf.best_score_ / -mean_squared_error(np.repeat(y.mean(), len(y)), y)))\n\n\ncv_pred = cross_val_predict(clf.best_estimator_, X, y, cv=cv, groups=groups, n_jobs=-1) \nplot.plot_prediction(y, predicted, sd=True, covar=[])\n\n#for train_index, test_index in group_kfold.split(X, y, groups):\n# #print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n# #print(df.study[train_index].values)\n# print('test:', df.study[test_index].values)", "Fitting 30 folds for each of 80 candidates, totalling 2400 fits\n" ], [ "def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(),\n fsel=SelectKBest(f_regression),\n model=ElasticNet(max_iter=100000),\n p_grid = {'fsel__k': [10, 50, 100, 200, 500, 700, 1000, 2000, 3000, 4000, 5000, 'all'], 'model__alpha': [.001, .01, .1, 1, 10], 'model__l1_ratio': [0.001, .1, .3, .5, .7, .9, .999] \n #p_grid = {'fsel__k': [1000, 2000, 5000], 'model__alpha': [.001, .005, .01, .05, .1], 'model__l1_ratio': [.999]\n }):\n mymodel = Pipeline(\n [('scaler', scaler),\n ('fsel', fsel),\n ('model', model)])\n return mymodel, p_grid\n\nmodel, p_grid = pipe_scale_fsel_elnet()\n\nouter_cv = GroupKFold(30)\ninner_cv = GroupKFold(30) \nclf = GridSearchCV(estimator=model, param_grid=p_grid, cv=inner_cv,\n scoring=\"neg_mean_squared_error\", verbose=True, return_train_score=False,\n n_jobs=-1)\n\nall_models = []\nbest_params = []\npredicted = np.zeros(len(y))\nnested_scores_train = np.zeros(outer_cv.get_n_splits(X))\nnested_scores_test = np.zeros(outer_cv.get_n_splits(X)) \n \nprint(\"model\\tinner_cv mean score\\touter vc score\")\ni=0\nfor train, test in outer_cv.split(X, y, groups=groups):\n group_train = groups[train] \n clf.fit(X[train], y[train], groups=group_train)\n \n print(str(clf.best_params_) + \" \" + 
str(clf.best_score_) + \" \" + str(clf.score(X[test], y[test])))\n \n all_models.append(clf.best_estimator_)\n best_params.append(clf.best_params_)\n \n predicted[test] = clf.predict(X[test])\n \n nested_scores_train[i] = clf.best_score_\n nested_scores_test[i] = clf.score(X[test], y[test])\n i = i+1\n \nprint(\"*** Score on mean as model:\\t\" + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y)))\nprint(\"** Mean score in the inner crossvaludation (inner_cv):\\t\" + str(nested_scores_train.mean()))\nprint(\"** Mean Nested Crossvalidation Score (outer_cv):\\t\" + str(nested_scores_test.mean()))\nprint(\"Explained Variance: \" + str( 1- nested_scores_test.mean()/-mean_squared_error(np.repeat(y.mean(), len(y)), y) ))\nprint(\"Correlation: \" + str(np.corrcoef(y, predicted)[0,1]))\n \nplot.plot_prediction(y, predicted, sd=True, covar=[])", "model\tinner_cv mean score\touter vc score\nFitting 30 folds for each of 420 candidates, totalling 12600 fits\n" ], [ "print(\"*** Score on mean as model:\\t\" + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y)))\nprint(\"** Mean score in the inner crossvaludation (inner_cv):\\t\" + str(nested_scores_train.mean()))\nprint(\"** Mean Nested Crossvalidation Score (outer_cv):\\t\" + str(nested_scores_test.mean()))\nprint(\"Explained Variance: \" + str( 1- nested_scores_test.mean()/-mean_squared_error(np.repeat(y.mean(), len(y)), y) ))\nprint(\"Correlation: \" + str(np.corrcoef(y, predicted)[0,1]))\n \nplot.plot_prediction(y, predicted, sd=True, covar=[])", "*** Score on mean as model:\t-0.5655535039999882\n** Mean score in the inner crossvaludation (inner_cv):\t-0.4213646506581069\n** Mean Nested Crossvalidation Score (outer_cv):\t-0.39838520258572474\nExplained Variance: 0.295583530527055\nCorrelation: 0.5557563502360584\n" ] ], [ [ "## Finalize and save model", "_____no_output_____" ], [ "## Obtain predictive network and compare to the RPN-signature", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
4af4e29b96bdc6eccbb1ec96d6c6e50d37da5618
4,009
ipynb
Jupyter Notebook
notebook/2018-02-08_expressed_gene_list_for_mimic.ipynb
jfear/larval_gonad
624a71741864b74e0372f89bdcca578e5cca3722
[ "MIT" ]
1
2019-09-13T13:24:18.000Z
2019-09-13T13:24:18.000Z
notebook/2018-02-08_expressed_gene_list_for_mimic.ipynb
jfear/larval_gonad
624a71741864b74e0372f89bdcca578e5cca3722
[ "MIT" ]
65
2019-07-24T16:23:08.000Z
2020-03-06T22:18:47.000Z
notebook/2018-02-08_expressed_gene_list_for_mimic.ipynb
jfear/larval_gonad
624a71741864b74e0372f89bdcca578e5cca3722
[ "MIT" ]
1
2021-06-02T19:09:35.000Z
2021-06-02T19:09:35.000Z
23.30814
184
0.566475
[ [ [ "# Expressed Gene List of Mimic", "_____no_output_____" ], [ "Brian asked Sharvani and Max to intersect the Mimic list with all the commonly expressed genes on the the X and 4th. I need to dump out these gene lists for them to do the merge.", "_____no_output_____" ] ], [ [ "import os\nimport sys\nfrom pathlib import Path\n\nfrom IPython.display import display, HTML, Markdown\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import chi2_contingency\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Project level imports\nsys.path.insert(0, '../lib')\nfrom larval_gonad.notebook import Nb\nfrom larval_gonad.x_to_a import (CHROMS_CHR, MAJOR_ARMS_CHR, idx_stats_by_cluster, commonly_expressed)\nfrom larval_gonad.plotting import make_figs\nfrom larval_gonad.io import memory\n\n# Setup notebook\nnbconfig = Nb.setup_notebook('2018-02-08_expressed_gene_list_for_mimic', subproject_dir='../output/testis_scRNAseq_pilot')", "last updated: 2018-02-08 \nGit hash: be8ef9727c59afa8d3497e24ff289ad4e61e8c95\n" ], [ "# Import data from testes\nnorm = nbconfig.seurat.get_normalized_read_counts()\n\n# Get list of commonly expressed gene\nexpressed = commonly_expressed(norm)", "_____no_output_____" ], [ "expressed2Chrom = nbconfig.fbgn2chrom.loc[expressed]", "_____no_output_____" ], [ "interest = expressed2Chrom[(expressed2Chrom.chrom == 'chrX') | (expressed2Chrom.chrom == 'chr4')].copy()\ninterest['gene'] = interest.index.map(lambda x: nbconfig.fbgn2symbol[x])\ninterest.set_index('gene', append=True, inplace=True)", "_____no_output_____" ], [ "interest.groupby('chrom').size()", "_____no_output_____" ], [ "nbconfig.table_name('target')", "_____no_output_____" ], [ "fname = '../output/2018-02-08_expressed_gene_list_for_mimic_target.tsv'\ninterest.to_csv(fname, sep='\\t')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
4af4e74ec82e9fafb1871afc46f1d7c4d62866b9
5,129
ipynb
Jupyter Notebook
doc/source/tune/examples/tune_mnist_keras.ipynb
kyle-chen-uber/ray
d777ff34de4ddd82b47caa46a24677fda62a7557
[ "Apache-2.0" ]
1
2021-09-20T15:45:59.000Z
2021-09-20T15:45:59.000Z
doc/source/tune/examples/tune_mnist_keras.ipynb
ArneTroch99/ray
f51cb09e02c2797fd857adb0e7fd44d4dd4222d5
[ "Apache-2.0" ]
53
2021-10-06T20:08:04.000Z
2022-03-21T20:17:25.000Z
doc/source/tune/examples/tune_mnist_keras.ipynb
jinnovation/ray
a8d8d0e1a6307ee4a92df5a57b69f7379b11187a
[ "Apache-2.0" ]
null
null
null
33.305195
122
0.520179
[ [ [ "(tune-mnist-keras)=\n\n# Using Keras & TensorFlow with Tune\n\n```{image} /images/tf_keras_logo.jpeg\n:align: center\n:alt: Keras & TensorFlow Logo\n:height: 120px\n:target: https://www.keras.io\n```\n\n```{contents}\n:backlinks: none\n:local: true\n```\n\n## Example", "_____no_output_____" ] ], [ [ "import argparse\nimport os\n\nfrom filelock import FileLock\nfrom tensorflow.keras.datasets import mnist\n\nimport ray\nfrom ray import tune\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\nfrom ray.tune.integration.keras import TuneReportCallback\n\n\ndef train_mnist(config):\n # https://github.com/tensorflow/tensorflow/issues/32159\n import tensorflow as tf\n\n batch_size = 128\n num_classes = 10\n epochs = 12\n\n with FileLock(os.path.expanduser(\"~/.data.lock\")):\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train, x_test = x_train / 255.0, x_test / 255.0\n model = tf.keras.models.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(config[\"hidden\"], activation=\"relu\"),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(num_classes, activation=\"softmax\"),\n ]\n )\n\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=tf.keras.optimizers.SGD(lr=config[\"lr\"], momentum=config[\"momentum\"]),\n metrics=[\"accuracy\"],\n )\n\n model.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=0,\n validation_data=(x_test, y_test),\n callbacks=[TuneReportCallback({\"mean_accuracy\": \"accuracy\"})],\n )\n\n\ndef tune_mnist(num_training_iterations):\n sched = AsyncHyperBandScheduler(\n time_attr=\"training_iteration\", max_t=400, grace_period=20\n )\n\n analysis = tune.run(\n train_mnist,\n name=\"exp\",\n scheduler=sched,\n metric=\"mean_accuracy\",\n mode=\"max\",\n stop={\"mean_accuracy\": 0.99, \"training_iteration\": num_training_iterations},\n num_samples=10,\n resources_per_trial={\"cpu\": 2, \"gpu\": 0},\n config={\n \"threads\": 2,\n \"lr\": tune.uniform(0.001, 0.1),\n \"momentum\": tune.uniform(0.1, 0.9),\n \"hidden\": tune.randint(32, 512),\n },\n )\n print(\"Best hyperparameters found were: \", analysis.best_config)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--smoke-test\", action=\"store_true\", help=\"Finish quickly for testing\"\n )\n parser.add_argument(\n \"--server-address\",\n type=str,\n default=None,\n required=False,\n help=\"The address of server to connect to if using \" \"Ray Client.\",\n )\n args, _ = parser.parse_known_args()\n if args.smoke_test:\n ray.init(num_cpus=4)\n elif args.server_address:\n ray.init(f\"ray://{args.server_address}\")\n\n tune_mnist(num_training_iterations=5 if args.smoke_test else 300)\n", "_____no_output_____" ] ], [ [ "## More Keras and TensorFlow Examples\n\n- {doc}`/tune/examples/includes/pbt_memnn_example`: Example of training a Memory NN on bAbI with Keras using PBT.\n- {doc}`/tune/examples/includes/tf_mnist_example`: Converts the Advanced TF2.0 MNIST example to use Tune\n with the Trainable. This uses `tf.function`.\n Original code from tensorflow: https://www.tensorflow.org/tutorials/quickstart/advanced\n- {doc}`/tune/examples/includes/pbt_tune_cifar10_with_keras`:\n A contributed example of tuning a Keras model on CIFAR10 with the PopulationBasedTraining scheduler.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ] ]
4af4f1bdaad527358b8a5b93f87183b1c1c8cd7c
9,486
ipynb
Jupyter Notebook
_posts/scikit/Comparing-various-online-solvers/Comparing Various Online Solvers.ipynb
bmb804/documentation
57826d25e0afea7fff6a8da9abab8be2f7a4b48c
[ "CC-BY-3.0" ]
2
2019-06-24T23:55:53.000Z
2019-07-08T12:22:56.000Z
_posts/scikit/Comparing-various-online-solvers/Comparing Various Online Solvers.ipynb
bmb804/documentation
57826d25e0afea7fff6a8da9abab8be2f7a4b48c
[ "CC-BY-3.0" ]
15
2020-06-30T21:21:30.000Z
2021-08-02T21:16:33.000Z
_posts/scikit/Comparing-various-online-solvers/Comparing Various Online Solvers.ipynb
bmb804/documentation
57826d25e0afea7fff6a8da9abab8be2f7a4b48c
[ "CC-BY-3.0" ]
1
2019-11-10T04:01:48.000Z
2019-11-10T04:01:48.000Z
30.114286
803
0.562092
[ [ [ "An example showing how different online solvers perform on the hand-written digits dataset.", "_____no_output_____" ], [ "#### New to Plotly?\nPlotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).\n<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).\n<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!", "_____no_output_____" ], [ "### Version", "_____no_output_____" ] ], [ [ "import sklearn\nsklearn.__version__", "_____no_output_____" ] ], [ [ "### Imports", "_____no_output_____" ], [ "This tutorial imports [train_test_split](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html#sklearn.model_selection.train_test_split), [SGDClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html#sklearn.linear_model.SGDClassifier), [Perceptron](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html#sklearn.linear_model.Perceptron), [PassiveAggressiveClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.PassiveAggressiveClassifier.html#sklearn.linear_model.PassiveAggressiveClassifier) and [LogisticRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression).", "_____no_output_____" ] ], [ [ "import plotly.plotly as py\nimport plotly.graph_objs as go\n\nimport numpy as np\nfrom sklearn import datasets\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import SGDClassifier, Perceptron\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import LogisticRegression\n", "_____no_output_____" ] ], [ [ "### Calculations", "_____no_output_____" ] ], [ [ "heldout = [0.95, 0.90, 0.75, 0.50, 0.01]\nrounds = 20\ndigits = datasets.load_digits()\nX, y = digits.data, digits.target\n\nclassifiers = [\n (\"SGD\", SGDClassifier()),\n (\"ASGD\", SGDClassifier(average=True)),\n (\"Perceptron\", Perceptron()),\n (\"Passive-Aggressive I\", PassiveAggressiveClassifier(loss='hinge',\n C=1.0)),\n (\"Passive-Aggressive II\", PassiveAggressiveClassifier(loss='squared_hinge',\n C=1.0)),\n (\"SAG\", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))\n]\n\nxx = 1. 
- np.array(heldout)\n", "_____no_output_____" ] ], [ [ "### Plot Results", "_____no_output_____" ] ], [ [ "data = []\n\nfor name, clf in classifiers:\n print(\"training %s\" % name)\n rng = np.random.RandomState(42)\n yy = []\n for i in heldout:\n yy_ = []\n for r in range(rounds):\n X_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=i, random_state=rng)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n yy_.append(1 - np.mean(y_pred == y_test))\n yy.append(np.mean(yy_))\n trace = go.Scatter(x=xx, y=yy, \n mode='lines',\n name=name)\n data.append(trace)\n \nlayout = go.Layout(xaxis=dict(title=\"Proportion train\"),\n yaxis=dict(title=\"Test Error Rate\")\n )\nfig = go.Figure(data=data, layout=layout)", "training SGD\ntraining ASGD\ntraining Perceptron\ntraining Passive-Aggressive I\ntraining Passive-Aggressive II\ntraining SAG\n" ], [ "py.iplot(fig)", "_____no_output_____" ] ], [ [ "### License", "_____no_output_____" ], [ "Author: \n \n Rob Zinkov <[email protected]>\n\nLicense:\n \n BSD 3 clause", "_____no_output_____" ] ], [ [ "from IPython.display import display, HTML\n\ndisplay(HTML('<link href=\"//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700\" rel=\"stylesheet\" type=\"text/css\" />'))\ndisplay(HTML('<link rel=\"stylesheet\" type=\"text/css\" href=\"http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css\">'))\n\n! pip install git+https://github.com/plotly/publisher.git --upgrade\nimport publisher\npublisher.publish(\n 'Comparing Various Online Solvers.ipynb', 'scikit-learn/plot-sgd-comparison/', 'Comparing Various Online Solvers | plotly',\n ' ',\n title = 'Comparing Various Online Solvers | plotly',\n name = 'Comparing Various Online Solvers',\n has_thumbnail='true', thumbnail='thumbnail/sgd-comparision.jpg', \n language='scikit-learn', page_type='example_index',\n display_as='linear_models', order=17,\n ipynb= '~Diksha_Gabha/3220')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4af4f22230034d8c168f9387f2f57521882914ce
34,243
ipynb
Jupyter Notebook
06_sequences_part_II.ipynb
zexhan17/Intro-to-computing-using-python
0f040a4f3cfe08b15af317cb27eafa0a1a38c573
[ "MIT" ]
null
null
null
06_sequences_part_II.ipynb
zexhan17/Intro-to-computing-using-python
0f040a4f3cfe08b15af317cb27eafa0a1a38c573
[ "MIT" ]
null
null
null
06_sequences_part_II.ipynb
zexhan17/Intro-to-computing-using-python
0f040a4f3cfe08b15af317cb27eafa0a1a38c573
[ "MIT" ]
null
null
null
27.772101
362
0.442981
[ [ [ "## Nested sequences ", "_____no_output_____" ], [ "Just as we had nested expressions, we can have nested sequences. ", "_____no_output_____" ] ], [ [ "digits = [ 1, 2, 3, 4, 5 ]\n\npairs = [ \n [1, 20], \n [2, 30], \n [3, 40], \n [4, 50] \n]\n\n", "_____no_output_____" ], [ "pairs[2][1] # also add second index ", "_____no_output_____" ], [ "for x, y in pairs: # but we know that a is a \"pair\" \n print \"x=\", x, \"and y=\", y\n \n", "x= 1 and y= 20\nx= 2 and y= 30\nx= 3 and y= 40\nx= 4 and y= 50\n" ], [ "pairs = [[1, 2], [2, 2], [2, 3], [4, 4], [6, 8]]", "_____no_output_____" ], [ "same_count = 0 \n\nfor x, y in pairs: \n if x == y : \n same_count += 1\n \nprint same_count", "2\n" ] ], [ [ "### Other operations on lists ", "_____no_output_____" ] ], [ [ "a = [66.25, 333, 333, 1, 1234.5]", "_____no_output_____" ], [ "print a\n\na.append(987)\n\nprint a", "[66.25, 333, 333, 1, 1234.5]\n[66.25, 333, 333, 1, 1234.5, 987]\n" ], [ "print a\n\na.insert(2, -1)\n\nprint a", "[66.25, 333, 333, 1, 1234.5, 987]\n[66.25, 333, -1, 333, 1, 1234.5, 987]\n" ], [ "print a\n\na.index(333)", "[66.25, 333, -1, 333, 1, 1234.5, 987]\n" ], [ "print a\n\na.remove(333)\n\nprint a", "[66.25, 333, -1, 333, 1, 1234.5, 987]\n[66.25, -1, 333, 1, 1234.5, 987]\n" ], [ "print a\n\na.reverse() # notice that no assignment is needed \n\nprint a", "[66.25, -1, 333, 1, 1234.5, 987]\n[987, 1234.5, 1, 333, -1, 66.25]\n" ], [ "print a\n\na.sort(reverse=True)\n\nprint a", "[-1, 1, 66.25, 333, 987, 1234.5]\n[1234.5, 987, 333, 66.25, 1, -1]\n" ], [ "help(list)", "Help on class list in module __builtin__:\n\nclass list(object)\n | list() -> new empty list\n | list(iterable) -> new list initialized from iterable's items\n | \n | Methods defined here:\n | \n | __add__(...)\n | x.__add__(y) <==> x+y\n | \n | __contains__(...)\n | x.__contains__(y) <==> y in x\n | \n | __delitem__(...)\n | x.__delitem__(y) <==> del x[y]\n | \n | __delslice__(...)\n | x.__delslice__(i, j) <==> del x[i:j]\n | \n | Use of negative indices is not supported.\n | \n | __eq__(...)\n | x.__eq__(y) <==> x==y\n | \n | __ge__(...)\n | x.__ge__(y) <==> x>=y\n | \n | __getattribute__(...)\n | x.__getattribute__('name') <==> x.name\n | \n | __getitem__(...)\n | x.__getitem__(y) <==> x[y]\n | \n | __getslice__(...)\n | x.__getslice__(i, j) <==> x[i:j]\n | \n | Use of negative indices is not supported.\n | \n | __gt__(...)\n | x.__gt__(y) <==> x>y\n | \n | __iadd__(...)\n | x.__iadd__(y) <==> x+=y\n | \n | __imul__(...)\n | x.__imul__(y) <==> x*=y\n | \n | __init__(...)\n | x.__init__(...) 
initializes x; see help(type(x)) for signature\n | \n | __iter__(...)\n | x.__iter__() <==> iter(x)\n | \n | __le__(...)\n | x.__le__(y) <==> x<=y\n | \n | __len__(...)\n | x.__len__() <==> len(x)\n | \n | __lt__(...)\n | x.__lt__(y) <==> x<y\n | \n | __mul__(...)\n | x.__mul__(n) <==> x*n\n | \n | __ne__(...)\n | x.__ne__(y) <==> x!=y\n | \n | __repr__(...)\n | x.__repr__() <==> repr(x)\n | \n | __reversed__(...)\n | L.__reversed__() -- return a reverse iterator over the list\n | \n | __rmul__(...)\n | x.__rmul__(n) <==> n*x\n | \n | __setitem__(...)\n | x.__setitem__(i, y) <==> x[i]=y\n | \n | __setslice__(...)\n | x.__setslice__(i, j, y) <==> x[i:j]=y\n | \n | Use of negative indices is not supported.\n | \n | __sizeof__(...)\n | L.__sizeof__() -- size of L in memory, in bytes\n | \n | append(...)\n | L.append(object) -- append object to end\n | \n | count(...)\n | L.count(value) -> integer -- return number of occurrences of value\n | \n | extend(...)\n | L.extend(iterable) -- extend list by appending elements from the iterable\n | \n | index(...)\n | L.index(value, [start, [stop]]) -> integer -- return first index of value.\n | Raises ValueError if the value is not present.\n | \n | insert(...)\n | L.insert(index, object) -- insert object before index\n | \n | pop(...)\n | L.pop([index]) -> item -- remove and return item at index (default last).\n | Raises IndexError if list is empty or index is out of range.\n | \n | remove(...)\n | L.remove(value) -- remove first occurrence of value.\n | Raises ValueError if the value is not present.\n | \n | reverse(...)\n | L.reverse() -- reverse *IN PLACE*\n | \n | sort(...)\n | L.sort(cmp=None, key=None, reverse=False) -- stable sort *IN PLACE*;\n | cmp(x, y) -> -1, 0, 1\n | \n | ----------------------------------------------------------------------\n | Data and other attributes defined here:\n | \n | __hash__ = None\n | \n | __new__ = <built-in method __new__ of type object>\n | T.__new__(S, ...) -> a new object with type S, a subtype of T\n\n" ] ], [ [ "### Strings", "_____no_output_____" ] ], [ [ "# Strings are also sequences \ns = \"I am a dummy string. Lorem ipsum.\"\ns[8]", "_____no_output_____" ], [ "len(s)", "_____no_output_____" ], [ "s[2:4]", "_____no_output_____" ], [ "s[1] = 'x' # except for this! 
", "_____no_output_____" ], [ "help(str) # see what you can do with strings ", "Help on class str in module __builtin__:\n\nclass str(basestring)\n | str(object='') -> string\n | \n | Return a nice string representation of the object.\n | If the argument is a string, the return value is the same object.\n | \n | Method resolution order:\n | str\n | basestring\n | object\n | \n | Methods defined here:\n | \n | __add__(...)\n | x.__add__(y) <==> x+y\n | \n | __contains__(...)\n | x.__contains__(y) <==> y in x\n | \n | __eq__(...)\n | x.__eq__(y) <==> x==y\n | \n | __format__(...)\n | S.__format__(format_spec) -> string\n | \n | Return a formatted version of S as described by format_spec.\n | \n | __ge__(...)\n | x.__ge__(y) <==> x>=y\n | \n | __getattribute__(...)\n | x.__getattribute__('name') <==> x.name\n | \n | __getitem__(...)\n | x.__getitem__(y) <==> x[y]\n | \n | __getnewargs__(...)\n | \n | __getslice__(...)\n | x.__getslice__(i, j) <==> x[i:j]\n | \n | Use of negative indices is not supported.\n | \n | __gt__(...)\n | x.__gt__(y) <==> x>y\n | \n | __hash__(...)\n | x.__hash__() <==> hash(x)\n | \n | __le__(...)\n | x.__le__(y) <==> x<=y\n | \n | __len__(...)\n | x.__len__() <==> len(x)\n | \n | __lt__(...)\n | x.__lt__(y) <==> x<y\n | \n | __mod__(...)\n | x.__mod__(y) <==> x%y\n | \n | __mul__(...)\n | x.__mul__(n) <==> x*n\n | \n | __ne__(...)\n | x.__ne__(y) <==> x!=y\n | \n | __repr__(...)\n | x.__repr__() <==> repr(x)\n | \n | __rmod__(...)\n | x.__rmod__(y) <==> y%x\n | \n | __rmul__(...)\n | x.__rmul__(n) <==> n*x\n | \n | __sizeof__(...)\n | S.__sizeof__() -> size of S in memory, in bytes\n | \n | __str__(...)\n | x.__str__() <==> str(x)\n | \n | capitalize(...)\n | S.capitalize() -> string\n | \n | Return a copy of the string S with only its first character\n | capitalized.\n | \n | center(...)\n | S.center(width[, fillchar]) -> string\n | \n | Return S centered in a string of length width. Padding is\n | done using the specified fill character (default is a space)\n | \n | count(...)\n | S.count(sub[, start[, end]]) -> int\n | \n | Return the number of non-overlapping occurrences of substring sub in\n | string S[start:end]. Optional arguments start and end are interpreted\n | as in slice notation.\n | \n | decode(...)\n | S.decode([encoding[,errors]]) -> object\n | \n | Decodes S using the codec registered for encoding. encoding defaults\n | to the default encoding. errors may be given to set a different error\n | handling scheme. Default is 'strict' meaning that encoding errors raise\n | a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'\n | as well as any other name registered with codecs.register_error that is\n | able to handle UnicodeDecodeErrors.\n | \n | encode(...)\n | S.encode([encoding[,errors]]) -> object\n | \n | Encodes S using the codec registered for encoding. encoding defaults\n | to the default encoding. errors may be given to set a different error\n | handling scheme. Default is 'strict' meaning that encoding errors raise\n | a UnicodeEncodeError. 
Other possible values are 'ignore', 'replace' and\n | 'xmlcharrefreplace' as well as any other name registered with\n | codecs.register_error that is able to handle UnicodeEncodeErrors.\n | \n | endswith(...)\n | S.endswith(suffix[, start[, end]]) -> bool\n | \n | Return True if S ends with the specified suffix, False otherwise.\n | With optional start, test S beginning at that position.\n | With optional end, stop comparing S at that position.\n | suffix can also be a tuple of strings to try.\n | \n | expandtabs(...)\n | S.expandtabs([tabsize]) -> string\n | \n | Return a copy of S where all tab characters are expanded using spaces.\n | If tabsize is not given, a tab size of 8 characters is assumed.\n | \n | find(...)\n | S.find(sub [,start [,end]]) -> int\n | \n | Return the lowest index in S where substring sub is found,\n | such that sub is contained within S[start:end]. Optional\n | arguments start and end are interpreted as in slice notation.\n | \n | Return -1 on failure.\n | \n | format(...)\n | S.format(*args, **kwargs) -> string\n | \n | Return a formatted version of S, using substitutions from args and kwargs.\n | The substitutions are identified by braces ('{' and '}').\n | \n | index(...)\n | S.index(sub [,start [,end]]) -> int\n | \n | Like S.find() but raise ValueError when the substring is not found.\n | \n | isalnum(...)\n | S.isalnum() -> bool\n | \n | Return True if all characters in S are alphanumeric\n | and there is at least one character in S, False otherwise.\n | \n | isalpha(...)\n | S.isalpha() -> bool\n | \n | Return True if all characters in S are alphabetic\n | and there is at least one character in S, False otherwise.\n | \n | isdigit(...)\n | S.isdigit() -> bool\n | \n | Return True if all characters in S are digits\n | and there is at least one character in S, False otherwise.\n | \n | islower(...)\n | S.islower() -> bool\n | \n | Return True if all cased characters in S are lowercase and there is\n | at least one cased character in S, False otherwise.\n | \n | isspace(...)\n | S.isspace() -> bool\n | \n | Return True if all characters in S are whitespace\n | and there is at least one character in S, False otherwise.\n | \n | istitle(...)\n | S.istitle() -> bool\n | \n | Return True if S is a titlecased string and there is at least one\n | character in S, i.e. uppercase characters may only follow uncased\n | characters and lowercase characters only cased ones. Return False\n | otherwise.\n | \n | isupper(...)\n | S.isupper() -> bool\n | \n | Return True if all cased characters in S are uppercase and there is\n | at least one cased character in S, False otherwise.\n | \n | join(...)\n | S.join(iterable) -> string\n | \n | Return a string which is the concatenation of the strings in the\n | iterable. The separator between elements is S.\n | \n | ljust(...)\n | S.ljust(width[, fillchar]) -> string\n | \n | Return S left-justified in a string of length width. 
Padding is\n | done using the specified fill character (default is a space).\n | \n | lower(...)\n | S.lower() -> string\n | \n | Return a copy of the string S converted to lowercase.\n | \n | lstrip(...)\n | S.lstrip([chars]) -> string or unicode\n | \n | Return a copy of the string S with leading whitespace removed.\n | If chars is given and not None, remove characters in chars instead.\n | If chars is unicode, S will be converted to unicode before stripping\n | \n | partition(...)\n | S.partition(sep) -> (head, sep, tail)\n | \n | Search for the separator sep in S, and return the part before it,\n | the separator itself, and the part after it. If the separator is not\n | found, return S and two empty strings.\n | \n | replace(...)\n | S.replace(old, new[, count]) -> string\n | \n | Return a copy of string S with all occurrences of substring\n | old replaced by new. If the optional argument count is\n | given, only the first count occurrences are replaced.\n | \n | rfind(...)\n | S.rfind(sub [,start [,end]]) -> int\n | \n | Return the highest index in S where substring sub is found,\n | such that sub is contained within S[start:end]. Optional\n | arguments start and end are interpreted as in slice notation.\n | \n | Return -1 on failure.\n | \n | rindex(...)\n | S.rindex(sub [,start [,end]]) -> int\n | \n | Like S.rfind() but raise ValueError when the substring is not found.\n | \n | rjust(...)\n | S.rjust(width[, fillchar]) -> string\n | \n | Return S right-justified in a string of length width. Padding is\n | done using the specified fill character (default is a space)\n | \n | rpartition(...)\n | S.rpartition(sep) -> (head, sep, tail)\n | \n | Search for the separator sep in S, starting at the end of S, and return\n | the part before it, the separator itself, and the part after it. If the\n | separator is not found, return two empty strings and S.\n | \n | rsplit(...)\n | S.rsplit([sep [,maxsplit]]) -> list of strings\n | \n | Return a list of the words in the string S, using sep as the\n | delimiter string, starting at the end of the string and working\n | to the front. If maxsplit is given, at most maxsplit splits are\n | done. If sep is not specified or is None, any whitespace string\n | is a separator.\n | \n | rstrip(...)\n | S.rstrip([chars]) -> string or unicode\n | \n | Return a copy of the string S with trailing whitespace removed.\n | If chars is given and not None, remove characters in chars instead.\n | If chars is unicode, S will be converted to unicode before stripping\n | \n | split(...)\n | S.split([sep [,maxsplit]]) -> list of strings\n | \n | Return a list of the words in the string S, using sep as the\n | delimiter string. If maxsplit is given, at most maxsplit\n | splits are done. 
If sep is not specified or is None, any\n | whitespace string is a separator and empty strings are removed\n | from the result.\n | \n | splitlines(...)\n | S.splitlines(keepends=False) -> list of strings\n | \n | Return a list of the lines in S, breaking at line boundaries.\n | Line breaks are not included in the resulting list unless keepends\n | is given and true.\n | \n | startswith(...)\n | S.startswith(prefix[, start[, end]]) -> bool\n | \n | Return True if S starts with the specified prefix, False otherwise.\n | With optional start, test S beginning at that position.\n | With optional end, stop comparing S at that position.\n | prefix can also be a tuple of strings to try.\n | \n | strip(...)\n | S.strip([chars]) -> string or unicode\n | \n | Return a copy of the string S with leading and trailing\n | whitespace removed.\n | If chars is given and not None, remove characters in chars instead.\n | If chars is unicode, S will be converted to unicode before stripping\n | \n | swapcase(...)\n | S.swapcase() -> string\n | \n | Return a copy of the string S with uppercase characters\n | converted to lowercase and vice versa.\n | \n | title(...)\n | S.title() -> string\n | \n | Return a titlecased version of S, i.e. words start with uppercase\n | characters, all remaining cased characters have lowercase.\n | \n | translate(...)\n | S.translate(table [,deletechars]) -> string\n | \n | Return a copy of the string S, where all characters occurring\n | in the optional argument deletechars are removed, and the\n | remaining characters have been mapped through the given\n | translation table, which must be a string of length 256 or None.\n | If the table argument is None, no translation is applied and\n | the operation simply removes the characters in deletechars.\n | \n | upper(...)\n | S.upper() -> string\n | \n | Return a copy of the string S converted to uppercase.\n | \n | zfill(...)\n | S.zfill(width) -> string\n | \n | Pad a numeric string S with zeros on the left, to fill a field\n | of the specified width. The string S is never truncated.\n | \n | ----------------------------------------------------------------------\n | Data and other attributes defined here:\n | \n | __new__ = <built-in method __new__ of type object>\n | T.__new__(S, ...) -> a new object with type S, a subtype of T\n\n" ] ], [ [ "## Ranges ", "_____no_output_____" ] ], [ [ "range(1, 10, 3) # includes 1 but stops just before 10 ", "_____no_output_____" ], [ "for j in range(1, 4): \n print \"Printing table for:\", j\n for i in range(1, 3):\n print j, \"X\", i, \"=\", j * i # 2 needs to change\n", "Printing table for: 1\n1 X 1 = 1\n1 X 2 = 2\nPrinting table for: 2\n2 X 1 = 2\n2 X 2 = 4\nPrinting table for: 3\n3 X 1 = 3\n3 X 2 = 6\n" ] ], [ [ "These are called **nested loops**. ", "_____no_output_____" ], [ "## List comprehensions ", "_____no_output_____" ] ], [ [ "nums = [1, 2, 3, 4, 5, 6, 101, 105, 1000]\nevens = [] \n\nfor i in nums: \n if i % 2 == 0: \n evens.append(i) # append in output \n \nprint evens", "[2, 4, 6, 1000]\n" ], [ "evens = [i for i in nums if i%5==0]\nprint evens", "[5, 105, 1000]\n" ], [ "n = 11\n[ x for x in range(1, n+1) if n % x == 0] \n", "_____no_output_____" ] ], [ [ "## Tuples - Immutable datatypes", "_____no_output_____" ] ], [ [ "digits = (1, 8, 2, 8) # Almost the same as lists ", "_____no_output_____" ], [ "len(digits)", "_____no_output_____" ], [ "digits[1]", "_____no_output_____" ], [ "digits[1:3]", "_____no_output_____" ], [ "digits[0] = 11 # except for this! 
", "_____no_output_____" ] ], [ [ "## Reading Assignment\nComposing Programs: Section 2.3 Sequences (http://www.composingprograms.com/pages/23-sequences.html) -- Stop when you reach \"Higher-Order Functions\"", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4af4f9e6a5e5baafa16305c922f66fd298a090a8
84,506
ipynb
Jupyter Notebook
8_Getting_started_with_text_preprocessing/notebook/Getting_started_with_text_preprocessing.ipynb
hirenhk15/ga-code-alongs
fc8ba845eb60668e297fd53061c607f6986a8c25
[ "Apache-2.0" ]
null
null
null
8_Getting_started_with_text_preprocessing/notebook/Getting_started_with_text_preprocessing.ipynb
hirenhk15/ga-code-alongs
fc8ba845eb60668e297fd53061c607f6986a8c25
[ "Apache-2.0" ]
null
null
null
8_Getting_started_with_text_preprocessing/notebook/Getting_started_with_text_preprocessing.ipynb
hirenhk15/ga-code-alongs
fc8ba845eb60668e297fd53061c607f6986a8c25
[ "Apache-2.0" ]
null
null
null
40.491615
1,126
0.47083
[ [ [ "## Text Data Preprocessing\n\nIn any machine learning task, cleaning or preprocessing the data is as important as model building if not more. And when it comes to unstructured data like text, this process is even more important.\n\nObjective of this notebook is to understand the various text preprocessing steps with code examples.\n\nSome of the common text preprocessing / cleaning steps are:\n\n* Lower casing\n* Removal of Punctuations\n* Removal of Stopwords\n* Removal of Frequent words\n* Removal of Rare words\n* Stemming\n* Lemmatization\n* Removal of emojis\n* Removal of URLs\n\n\nSo these are the different types of text preprocessing steps which we can do on text data. But we need not do all of these all the times. We need to carefully choose the preprocessing steps based on our use case since that also play an important role.\n\nFor example, in sentiment analysis use case, we need not remove the emojis as it will convey some important information about the sentiment. Similarly we need to decide based on our use cases.", "_____no_output_____" ], [ "## Import libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport re\nimport nltk\nimport spacy\nimport string\npd.options.mode.chained_assignment = None", "_____no_output_____" ] ], [ [ "## Read the data", "_____no_output_____" ] ], [ [ "pd.set_option('max_colwidth', 100)", "_____no_output_____" ], [ "df = pd.read_csv('../data/text.csv', lineterminator='\\n')\ndf.head()", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ] ], [ [ "## Lower Casing\nLower casing is a common text preprocessing technique. The idea is to convert the input text into same casing format so that 'text', 'Text' and 'TEXT' are treated the same way.\n\n", "_____no_output_____" ] ], [ [ "df['text_lower'] = df['text'].str.lower()\ndf.head()", "_____no_output_____" ] ], [ [ "## Removal of Punctuations\n\nOne another common text preprocessing technique is to remove the punctuations from the text data. This is again a text standardization process that will help to treat 'hurray' and 'hurray!' in the same way.\n\nWe also need to carefully choose the list of punctuations to exclude depending on the use case. For example, the string.punctuation in python contains the following punctuation symbols !\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_{|}~`\n\nWe can add or remove more punctuations as per our need.", "_____no_output_____" ] ], [ [ "string.punctuation", "_____no_output_____" ], [ "# Regex pattern to remove punctuations\n# here one slash is required to escape regex characters such as \"[]\" and one slash for python requirements\nregex_pattern = '[' + ''.join('\\\\'+c for c in string.punctuation) + ']'\nprint(regex_pattern)", "[\\!\\\"\\#\\$\\%\\&\\'\\(\\)\\*\\+\\,\\-\\.\\/\\:\\;\\<\\=\\>\\?\\@\\[\\\\\\]\\^\\_\\`\\{\\|\\}\\~]\n" ], [ "df['text_wo_punct'] = df['text_lower'].str.replace(regex_pattern, '')\ndf.head()", "_____no_output_____" ] ], [ [ "## Removal of stopwords\nStopwords are commonly occuring words in a language like 'the', 'a' and so on. They can be removed from the text most of the times, as they don't provide valuable information for downstream analysis. In cases like Part of Speech tagging, we should not remove them as provide very valuable information about the POS.\n\nThese stopword lists are already compiled for different languages and we can safely use them. 
For example, the stopword list for english language from the nltk package can be seen below.", "_____no_output_____" ] ], [ [ "import nltk\nnltk.download('stopwords')", "[nltk_data] Downloading package stopwords to C:\\Users\\Hiren\n[nltk_data] Kelaiya\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "from nltk.corpus import stopwords\n\nSTOP_WORDS = stopwords.words('english') \n', '.join(STOP_WORDS)", "_____no_output_____" ], [ "df['text_wo_punct'].iloc[0]", "_____no_output_____" ], [ "' '.join([word for word in df['text_wo_punct'].iloc[0].split() if word not in STOP_WORDS])", "_____no_output_____" ], [ "def remove_stop_word(text: str, stopwords: list) -> str:\n \"\"\"Custom function to remove stopwords\n \n Args:\n text (str): A string\n \"\"\"\n return ' '.join([word for word in text.split() if word not in stopwords])", "_____no_output_____" ], [ "df['text_wo_stop'] = df['text_wo_punct'].apply(lambda text: remove_stop_word(text, STOP_WORDS))\ndf.head()", "_____no_output_____" ] ], [ [ "## Removal of Frequent words\nIn the previos preprocessing step, we removed the stopwords based on language information. But say, if we have a domain specific corpus, we might also have some frequent words which are of not so much importance to us.\n\nSo this step is to remove the frequent words in the given corpus. If we use something like tfidf, this is automatically taken care of.\n\nLet us get the most common words adn then remove them in the next step", "_____no_output_____" ] ], [ [ "from collections import Counter\n\ncnt = Counter()\n\nfor text in df['text_wo_stop'].values:\n for word in text.split():\n cnt[word] += 1\n\ncnt.most_common(10)", "_____no_output_____" ], [ "FREQ_WORDS = [word for word, _ in cnt.most_common(10)]\nFREQ_WORDS", "_____no_output_____" ], [ "def remove_frequent_word(text: str, freqwords: list) -> str:\n \"\"\"Custom function to remove frequent words\n \n Args:\n text (str): A string\n \"\"\"\n return ' '.join([word for word in text.split() if word not in freqwords])", "_____no_output_____" ], [ "df['text_wo_stopfreq'] = df['text_wo_stop'].apply(lambda text: remove_frequent_word(text, FREQ_WORDS))\ndf.head()", "_____no_output_____" ] ], [ [ "## Removal of Rare words\nThis is very similar to previous preprocessing step but we will remove the rare words from the corpus.", "_____no_output_____" ] ], [ [ "pd.DataFrame(cnt.most_common(), columns=['word', 'count']).query('count == 1')", "_____no_output_____" ], [ "n_rare_words = 10\nRARE_WORDS = [word for word, _ in cnt.most_common()[:-n_rare_words:-1]]\nRARE_WORDS", "_____no_output_____" ], [ "def remove_rare_word(text: str, rarewords: list) -> str:\n \"\"\"Custom function to remove rare words\n \n Args:\n text (str): A string\n \"\"\"\n return ' '.join([word for word in text.split() if word not in rarewords])", "_____no_output_____" ], [ "df['text_wo_stopfreqrare'] = df['text_wo_stopfreq'].apply(lambda text: remove_rare_word(text, RARE_WORDS))\ndf[['text', 'text_wo_stopfreq', 'text_wo_stopfreqrare']].head()", "_____no_output_____" ] ], [ [ "## Stemming\nStemming is the process of reducing inflected (or sometimes derived) words to their word stem, base or root form (From Wikipedia)\n\nFor example, if there are two words in the corpus walks and walking, then stemming will stem the suffix to make them walk. 
But say in another example, we have two words console and consoling, the stemmer will remove the suffix and make them consol which is not a proper english word.\n\nThere are several type of stemming algorithms available and one of the famous one is porter stemmer which is widely used. We can use nltk package for the same.", "_____no_output_____" ] ], [ [ "from nltk.stem import PorterStemmer\n\nstemmer = PorterStemmer()", "_____no_output_____" ], [ "def stem_words(text: str) -> str:\n \"\"\"Custom function to stem the words\n \n Args:\n text (str): A string\n \"\"\"\n return ' '.join([stemmer.stem(word) for word in text.split()])", "_____no_output_____" ], [ "df['text_stemmed'] = df['text_lower'].apply(stem_words)\ndf[['text', 'text_lower', 'text_stemmed']].head()", "_____no_output_____" ] ], [ [ "We can see that words like probable, unstable, update and website have their e at the end chopped off due to stemming. This is not intented. What can we do for that? We can use Lemmatization in such cases.\n\nAlso this porter stemmer is for English language. If we are working with other languages, we can use snowball stemmer. The supported languages for snowball stemmer are:", "_____no_output_____" ] ], [ [ "from nltk.stem.snowball import SnowballStemmer\nSnowballStemmer.languages", "_____no_output_____" ] ], [ [ "## Lemmatization\nLemmatization is similar to stemming in reducing inflected words to their word stem but differs in the way that it makes sure the root word (also called as lemma) belongs to the language.\n\nAs a result, this one is generally slower than stemming process. So depending on the speed requirement, we can choose to use either stemming or lemmatization.\n\nLet us use the WordNetLemmatizer in nltk to lemmatize our sentences", "_____no_output_____" ] ], [ [ "import nltk\nnltk.download('wordnet')", "[nltk_data] Downloading package wordnet to C:\\Users\\Hiren\n[nltk_data] Kelaiya\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n" ], [ "from nltk.stem import WordNetLemmatizer\n\nlemmatizer = WordNetLemmatizer()", "_____no_output_____" ], [ "def lemmatize_words(text: str) -> str:\n \"\"\"Custom function to lemmatize the words\n \n Args:\n text (str): A string\n \"\"\"\n return ' '.join([lemmatizer.lemmatize(word) for word in text.split()])", "_____no_output_____" ], [ "df['text_lemmatized'] = df['text_lower'].apply(lambda text: lemmatize_words(text))\ndf[['text', 'text_lower', 'text_stemmed', 'text_lemmatized']].head()", "_____no_output_____" ] ], [ [ "We can see that the trailing e in the unstable and website are retained when we use lemmatization unlike stemming.\n\nWait. There is one more thing in lemmatization. Let us try to lemmatize running now.", "_____no_output_____" ] ], [ [ "lemmatizer.lemmatize('running')", "_____no_output_____" ] ], [ [ "Wow! It returned running as such without converting it to the root form run. This is because the lemmatization process depends on the POS tag to come up with the correct lemma. 
Now let us lemmatize again by providing the POS tag for the word", "_____no_output_____" ] ], [ [ "lemmatizer.lemmatize('running', 'v')", "_____no_output_____" ] ], [ [ "## Redo the lemmatization process with POS tag for our dataset.", "_____no_output_____" ] ], [ [ "import nltk\nnltk.download('averaged_perceptron_tagger')", "[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] C:\\Users\\Hiren Kelaiya\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package averaged_perceptron_tagger is already up-to-\n[nltk_data] date!\n" ], [ "from nltk.corpus import wordnet\nfrom nltk.stem import WordNetLemmatizer\n\nlemmatizer = WordNetLemmatizer()\n\nwordnet_map = {\n 'N': wordnet.NOUN,\n 'V': wordnet.VERB,\n 'J': wordnet.ADJ,\n 'R': wordnet.ADV\n}\n\nwordnet_map", "_____no_output_____" ], [ "def lemmatize_words(text: str) -> str:\n \"\"\"Custom function to lemmatize the words\n \n Args:\n text (str): A string\n \"\"\"\n pos_tagged_text = nltk.pos_tag(text.split())\n \n return ' '.join([lemmatizer.lemmatize(word, wordnet_map.get(pos[0], wordnet.NOUN)) for word, pos in pos_tagged_text])", "_____no_output_____" ], [ "pos_tagged_text = nltk.pos_tag(df['text'].iloc[0].split())\npos_tagged_text", "_____no_output_____" ], [ "df['text_lemmatized'] = df['text_lower'].apply(lambda text: lemmatize_words(text))\ndf[['text', 'text_lower', 'text_stemmed', 'text_lemmatized']].head()", "_____no_output_____" ] ], [ [ "## Removal of Emojis\n\nWith more and more usage of social media platforms, there is an explosion in the usage of emojis in our day to day life as well. Probably we might need to remove these emojis for some of our textual analysis.\n\nThanks to [this code](https://gist.github.com/slowkow/7a7f61f495e3dbb7e3d767f97bd7304b), please find below a helper function to remove emojis from our text.", "_____no_output_____" ] ], [ [ "# Reference : https://gist.github.com/slowkow/7a7f61f495e3dbb7e3d767f97bd7304b\ndef remove_emoji(string: str) -> str:\n \"\"\"Custom function to remove emojis\"\"\"\n emoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n \"]+\", flags=re.UNICODE)\n return emoji_pattern.sub(r'', string)", "_____no_output_____" ], [ "remove_emoji(\"game is on 🔥🔥\")", "_____no_output_____" ], [ "remove_emoji(\"Hilarious😂\")", "_____no_output_____" ], [ "df['text_no_emoji'] = df['text'].apply(remove_emoji)\ndf[['text', 'text_no_emoji']].head()", "_____no_output_____" ] ], [ [ "## Removal of URLs\n\nNext preprocessing step is to remove any URLs present in the data. For example, if we are doing a twitter analysis, then there is a good chance that the tweet will have some URL in it. 
Probably we might need to remove them for our further analysis.\n\nWe can use the code snippet below to do that.", "_____no_output_____" ] ], [ [ "def remove_urls(text: str) -> str:\n    \"\"\"Custom function to remove URLs\"\"\"\n    url_pattern = re.compile(r'https?://\\S+|www\\.\\S+')\n    \n    return url_pattern.sub(r'', text)", "_____no_output_____" ], [ "text = \"Driverless AI NLP blog post on https://www.h2o.ai/blog/detecting-sarcasm-is-difficult-but-ai-may-have-an-answer/\"\nremove_urls(text)", "_____no_output_____" ], [ "df['text_no_url'] = df['text'].apply(remove_urls)\ndf[['text', 'text_no_url']].head()", "_____no_output_____" ] ], [ [ "## Discussion activity:\n\n* What use cases can you think of for NLP?\n  - analysis of speech - news article, transcription of speeches -- topic modelling vs topic classification\n* What role does preprocessing play in the application of NLP?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
4af4fa81f48d3c1850b20506d4141e4f4c85b599
25,021
ipynb
Jupyter Notebook
predict-employee-attrition/attrition_EDA.ipynb
Kim-Sha/mlp-binary-classifier
2ee5dbe0ce314b602b05b4546167ddd3ed64d164
[ "MIT" ]
null
null
null
predict-employee-attrition/attrition_EDA.ipynb
Kim-Sha/mlp-binary-classifier
2ee5dbe0ce314b602b05b4546167ddd3ed64d164
[ "MIT" ]
null
null
null
predict-employee-attrition/attrition_EDA.ipynb
Kim-Sha/mlp-binary-classifier
2ee5dbe0ce314b602b05b4546167ddd3ed64d164
[ "MIT" ]
null
null
null
34.751389
93
0.35746
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib as plt\nimport plotly as plotly", "_____no_output_____" ] ], [ [ "# IBM Attrition Dataset EDA", "_____no_output_____" ] ], [ [ "df = pd.read_csv(\"WA_Fn-UseC_-HR-Employee-Attrition.csv\")", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "print(sum(df.duplicated()), \"duplicated rows\")\ndf.describe()", "0 duplicated rows\n" ], [ "df.groupby('Attrition').Age.count()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4af528de5b6f8c2fc7061be8169cbb162b2e3933
28,923
ipynb
Jupyter Notebook
Chapter01/Recipe-2-Quantifying-missing-data.ipynb
paulorobertolds/Python-Feature-Engineering-Cookbook
192e97743c7a586ee8a776a4fe5501edec908a26
[ "MIT" ]
245
2019-12-24T01:54:34.000Z
2022-03-25T02:59:45.000Z
Chapter01/Recipe-2-Quantifying-missing-data.ipynb
vlasvlasvlas/Python-Feature-Engineering-Cookbook
e140311b506cc156ab8b7dbe2862b4ba722139b8
[ "MIT" ]
3
2020-02-21T19:06:35.000Z
2021-09-29T07:08:58.000Z
Chapter01/Recipe-2-Quantifying-missing-data.ipynb
vlasvlasvlas/Python-Feature-Engineering-Cookbook
e140311b506cc156ab8b7dbe2862b4ba722139b8
[ "MIT" ]
130
2019-12-24T18:18:54.000Z
2022-03-10T10:07:55.000Z
87.380665
19,452
0.786468
[ [ [ "Missing data occurs when no data is stored for certain observations within a variable. In other words, missing data is the absence of values, and is a common occurrence in most data sets. In this recipe, we will quantify and visualize missing information in variables, utilizing the dataset from the [KDD-CUP-98](https://archive.ics.uci.edu/ml/datasets/KDD+Cup+1998+Data) available in the UCI Machine Learning Repository.\n\nDua, D. and Graff, C. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.\n\n====================================================================================================\n\nTo download the data, visit this [website](https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup98-mld/epsilon_mirror/)\n\nClick the 'cup98lrn.zip' to begin the download. Unzip the file and save 'cup98LRN.txt' to the parent directory of this repo (../cup98LRN.txt).\n\n====================================================================================================", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# to display the total number columns present in the dataset\npd.set_option('display.max_columns', None)", "_____no_output_____" ], [ "# we will use the selected variables for the recipe\ncols = [\n 'AGE',\n 'NUMCHLD',\n 'INCOME',\n 'WEALTH1',\n 'MBCRAFT',\n 'MBGARDEN',\n 'MBBOOKS',\n 'MBCOLECT',\n 'MAGFAML',\n 'MAGFEM',\n 'MAGMALE',\n]\n\n# load the dataset\ndata = pd.read_csv('cup98LRN.txt', usecols=cols)\n\n# let's inspect the first 5 rows\ndata.head()", "_____no_output_____" ], [ "# we can quantify the total number of missing values using\n# the isnull() method plus the sum() method on the dataframe\n\ndata.isnull().sum()", "_____no_output_____" ], [ "# alternatively, we can use the mean() method after isnull()\n# to visualise the percentage of missing values for each variable\n\ndata.isnull().mean()", "_____no_output_____" ], [ "# we can also plot the percentages of missing data utilising\n# pandas plot.bar(), and add labels with matplotlib methods \n# as shown below\n\ndata.isnull().mean().plot.bar(figsize=(12,6))\nplt.ylabel('Percentage of missing values')\nplt.xlabel('Variables')\nplt.title('Quantifying missing data')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4af52bc8dce6ddaeaf3d5c00dcc0dda6078387f5
5,567
ipynb
Jupyter Notebook
lectures/ch04.if.ipynb
HUFS-Programming-2022/JongbeenSong_202001862
769094558c19d41527c8b8e6b396c716932bc457
[ "MIT" ]
null
null
null
lectures/ch04.if.ipynb
HUFS-Programming-2022/JongbeenSong_202001862
769094558c19d41527c8b8e6b396c716932bc457
[ "MIT" ]
null
null
null
lectures/ch04.if.ipynb
HUFS-Programming-2022/JongbeenSong_202001862
769094558c19d41527c8b8e6b396c716932bc457
[ "MIT" ]
null
null
null
17.396875
69
0.406503
[ [ [ "### IF 조건문", "_____no_output_____" ], [ "#### 코드 짜는 팁\n- 주석 달기\n - '#' hash, sharp, ..\n - '#' 시작지점부터 라인 끝까지가 주석 처리됨. 라인이 바뀌면 코드로 인식\n - \"\"\" <주석> \"\"\"\n - 문자열 안에 있는 sharp는 부호로 인식한다.\n - 이유? 코드에 대한 설명은 최대한 하지 않는다. 의도를 나타내기 위해, 오류가 있다거나, ...\n \n- 라인 유지하기\n - 들여쓰기 (default space 4개, 탭을 이용해서)\n - 논리 구조를 나타내기 때문에 굉장히 중요함.\n - 한 라인에 80자 정도 (암묵적인 합의) -> 백슬래시('\\\\')로 라인 끊어줌)", "_____no_output_____" ] ], [ [ "a = 3 # 주석입니다", "_____no_output_____" ], [ "\"\"\"\n주석1\n주석2\n주석3\n\"\"\"\n\nprint(a)", "3\n" ], [ "print('hey, there #')", "hey, there #\n" ] ], [ [ "### if\nif <조건식>:\n <실행할 코드>", "_____no_output_____" ] ], [ [ "if 3 != 1:\n print('두 수는 같지 않다')", "두 수는 같지 않다\n" ] ], [ [ "- if-else\nif <조건식>:\n <실행할 코드1>\nelse:\n <실행할 코드2>", "_____no_output_____" ] ], [ [ "if 3 != 1:\n print('두 수는 같지 않다')\nelse:\n print('두 수가 같다')", "두 수는 같지 않다\n" ] ], [ [ "- if-elif-else\nif <조건식1>:\n <실행할 코드1>\nelif <조건식2>:\n <실행할 코드2>\nelse:\n <실행할 코드3>", "_____no_output_____" ] ], [ [ "a = 4\nif a < 3:\n print('3보다 작다')\nelif a == 3:\n print('3이다')\nelse:\n print('3보다 크다')", "3보다 크다\n" ], [ "if a < 3:\n print('3보다 작다')\n \nif a == 3:\n print('3이다')\n\nif a > 4:\n print('4보다 크다')", "_____no_output_____" ] ], [ [ "### 비교연산자\n- x < y\n- x > y\n- x >= y\n- x <= y\n- x == y (값이)\n- x != y\n- x is y (값, 메모리 주소)\n- x is not y", "_____no_output_____" ], [ "### 퀴즈\n- 1. 시험 점수를 입력받고, 80점 이상이면 True, 아니면 False 반환하기\n- 2. 시험 점수를 입력받고, 95점 이상 A+, 90점 이상 A, 85점 이상 B+, 나머지 F로 출력하세요.", "_____no_output_____" ] ], [ [ "test_score = int(input('input your test score: '))", "input your test score: 80\n" ], [ "if test_score >= 80:\n print(True)\nelse:\n print(False)", "True\n" ], [ "if test_score >= 95:\n print('A+')\nelif test_score >= 90:\n print('A')\nelif test_score >= 85:\n print('B+')\nelse:\n print('F')", "F\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
4af5333b5062147a55030f69cb690c21d627d712
319,503
ipynb
Jupyter Notebook
multiclass.ipynb
marshall4471/for_my_class_on_udemy_for_cnn
c6522cb89666f4e8f68f85d353ea969826060dc3
[ "MIT" ]
null
null
null
multiclass.ipynb
marshall4471/for_my_class_on_udemy_for_cnn
c6522cb89666f4e8f68f85d353ea969826060dc3
[ "MIT" ]
null
null
null
multiclass.ipynb
marshall4471/for_my_class_on_udemy_for_cnn
c6522cb89666f4e8f68f85d353ea969826060dc3
[ "MIT" ]
1
2022-01-10T07:50:40.000Z
2022-01-10T07:50:40.000Z
668.416318
120,814
0.939393
[ [ [ "\nfrom google.colab import drive\n\ndrive.mount('/content/drive/')\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom keras.layers import Conv2D, Activation, GlobalAvgPool2D, MaxPooling2D, Dense, Flatten, Dropout\n\nfrom keras.models import Sequential\n\nfile1='/content/drive/MyDrive/archive (1)/Train'\n\nfile2= '/content/drive/MyDrive/archive (1)/Test'\n\nfrom keras.preprocessing.image import ImageDataGenerator", "Drive already mounted at /content/drive/; to attempt to forcibly remount, call drive.mount(\"/content/drive/\", force_remount=True).\n" ], [ "train_datagen = ImageDataGenerator(\n horizontal_flip = True)", "_____no_output_____" ], [ "train_set = train_datagen.flow_from_directory(file1,\n target_size = (384, 384),\n class_mode='categorical',\n batch_size = 6)\n", "Found 64 images belonging to 3 classes.\n" ], [ "test_datagen = ImageDataGenerator(horizontal_flip = True)\n\ntest_set = test_datagen.flow_from_directory(file2,\n target_size = (384, 384),\n class_mode='categorical',\n batch_size =3)", "Found 58 images belonging to 3 classes.\n" ], [ "cnn = tf.keras.models.Sequential()\ncnn.add(tf.keras.layers.Dense(3, activation='relu', input_shape=[384,384,3]))\ncnn.add(tf.keras.layers.Conv2D(128, kernel_size=[3,3], padding='valid', activation='relu'))\ncnn.add(tf.keras.layers.MaxPooling2D(pool_size=[3,3], strides=2, padding='valid'))\ncnn.add(tf.keras.layers.Conv2D(64, kernel_size=[2,2],padding='valid', activation='relu' ))\ncnn.add(tf.keras.layers.MaxPooling2D(pool_size=[2,2], strides=2, padding='valid'))\ncnn.add(tf.keras.layers.Conv2D(32, kernel_size=[2,2],padding='valid', activation='relu' ))\ncnn.add(tf.keras.layers.MaxPooling2D(pool_size=[2,2], strides=2, padding='valid'))\ncnn.add(tf.keras.layers.Conv2D(16, kernel_size=[2,2],padding='valid', activation='relu' ))\ncnn.add(tf.keras.layers.MaxPooling2D(pool_size=[2,2], strides=2, padding='valid'))\ncnn.add(tf.keras.layers.Conv2D(8, kernel_size=[2,2],padding='valid', activation='relu' ))\ncnn.add(tf.keras.layers.MaxPooling2D(pool_size=[2,2], strides=2, padding='valid'))\ncnn.add(tf.keras.layers.Conv2D(4, kernel_size=[2,2],padding='valid', activation='relu' ))\ncnn.add(tf.keras.layers.MaxPooling2D(pool_size=[2,2], strides=2, padding='valid'))\ncnn.add(tf.keras.layers.Flatten())\ncnn.add(tf.keras.layers.Dense(3, activation='softmax'))\ncnn.compile(optimizer = 'adam', loss='categorical_crossentropy', metrics=['categorical_accuracy'])", "_____no_output_____" ], [ "cnn.summary()", "Model: \"sequential\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n dense (Dense) (None, 384, 384, 3) 12 \n \n conv2d (Conv2D) (None, 382, 382, 128) 3584 \n \n max_pooling2d (MaxPooling2D (None, 190, 190, 128) 0 \n ) \n \n conv2d_1 (Conv2D) (None, 189, 189, 64) 32832 \n \n max_pooling2d_1 (MaxPooling (None, 94, 94, 64) 0 \n 2D) \n \n conv2d_2 (Conv2D) (None, 93, 93, 32) 8224 \n \n max_pooling2d_2 (MaxPooling (None, 46, 46, 32) 0 \n 2D) \n \n conv2d_3 (Conv2D) (None, 45, 45, 16) 2064 \n \n max_pooling2d_3 (MaxPooling (None, 22, 22, 16) 0 \n 2D) \n \n conv2d_4 (Conv2D) (None, 21, 21, 8) 520 \n \n max_pooling2d_4 (MaxPooling (None, 10, 10, 8) 0 \n 2D) \n \n conv2d_5 (Conv2D) (None, 9, 9, 4) 132 \n \n max_pooling2d_5 (MaxPooling (None, 4, 4, 4) 0 \n 2D) \n \n flatten (Flatten) (None, 64) 0 \n \n dense_1 (Dense) (None, 3) 195 \n \n=================================================================\nTotal params: 
47,563\nTrainable params: 47,563\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "history = cnn.fit(train_set, validation_data =test_set, epochs=5, verbose=2)", "Epoch 1/5\n11/11 - 8s - loss: 0.3241 - categorical_accuracy: 0.8438 - val_loss: 1.4380 - val_categorical_accuracy: 0.6379 - 8s/epoch - 685ms/step\nEpoch 2/5\n11/11 - 7s - loss: 0.1869 - categorical_accuracy: 0.9531 - val_loss: 1.4695 - val_categorical_accuracy: 0.5517 - 7s/epoch - 677ms/step\nEpoch 3/5\n11/11 - 7s - loss: 0.2757 - categorical_accuracy: 0.8906 - val_loss: 1.1937 - val_categorical_accuracy: 0.6724 - 7s/epoch - 678ms/step\nEpoch 4/5\n11/11 - 8s - loss: 0.2841 - categorical_accuracy: 0.9219 - val_loss: 1.7580 - val_categorical_accuracy: 0.6034 - 8s/epoch - 688ms/step\nEpoch 5/5\n11/11 - 8s - loss: 0.1666 - categorical_accuracy: 0.9844 - val_loss: 1.4524 - val_categorical_accuracy: 0.6552 - 8s/epoch - 687ms/step\n" ], [ "import cv2\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport cv2\n\ndef prepare(image):\n IMG_SIZE=384\n img_array = cv2.imread('/content/audi.jpg', cv2.IMREAD_COLOR)\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\ny=cnn.predict([prepare('/content/audi.jpg')])\n\nprint(y)\n\nimg = cv2.imread('/content/audi.jpg',0)\nplt.imshow(img, interpolation = 'bicubic')\nplt.xticks([]), plt.yticks([]) \nplt.show()", "[[0.9016767 0.08088464 0.01743862]]\n" ], [ "np.argmax(y)", "_____no_output_____" ], [ "def prepare(image):\n IMG_SIZE=384\n img_array = cv2.imread('/lamborg.jpg', cv2.IMREAD_COLOR)\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\nx=cnn.predict([prepare('/lamborg.jpg')])\n\nprint(x)\n\nimg = cv2.imread('/lamborg.jpg',0)\nplt.imshow(img, interpolation = 'bicubic')\nplt.xticks([]), plt.yticks([]) \nplt.show()", "[[0.00205513 0.98969054 0.00825442]]\n" ], [ "np.argmax(x)", "_____no_output_____" ], [ "def prepare(image):\n IMG_SIZE=384\n img_array = cv2.imread('/content/mercedez.jpg', cv2.IMREAD_COLOR)\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\nz=cnn.predict([prepare('/content/mercedez.jpg')])\n\nprint(z)\n\nimg = cv2.imread('/content/mercedez.jpg',0)\nplt.imshow(img, interpolation = 'bicubic')\nplt.xticks([]), plt.yticks([]) \nplt.show()", "[[0.03100607 0.03305162 0.93594235]]\n" ], [ "np.argmax(z)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af533c5d3fe4d89295e1aa0060b75aa6215d7d4
495,591
ipynb
Jupyter Notebook
examples/distgen_example.ipynb
wlin6927/lume-impact
f7946ae8abc1eb3171858bb52f56029733648c39
[ "Apache-2.0" ]
1
2021-08-06T06:37:35.000Z
2021-08-06T06:37:35.000Z
examples/distgen_example.ipynb
wlin6927/lume-impact
f7946ae8abc1eb3171858bb52f56029733648c39
[ "Apache-2.0" ]
2
2021-11-11T22:01:49.000Z
2022-01-04T01:02:55.000Z
examples/distgen_example.ipynb
wlin6927/lume-impact
f7946ae8abc1eb3171858bb52f56029733648c39
[ "Apache-2.0" ]
6
2021-01-20T19:17:58.000Z
2022-03-29T07:45:12.000Z
646.142112
143,324
0.945774
[ [ [ "# Useful for debugging\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "# Nicer plotting\nimport matplotlib\nimport matplotlib.pyplot as plt\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\nmatplotlib.rcParams['figure.figsize'] = (8,4)", "_____no_output_____" ] ], [ [ "# Disgten example\n\nSimilar to the simple example, but generating particles with Distgen", "_____no_output_____" ] ], [ [ "from distgen import Generator\nYAML=\"\"\"\n\nn_particle: 10000\nrandom_type: hammersley\n\nstart:\n type: cathode\n MTE:\n value: 414\n units: meV \n\ntotal_charge:\n value: 250\n units: pC\n \nr_dist:\n n_sigma_cutoff: 1.5\n sigma_xy:\n value: 0.4\n units: mm\n type: radial_gaussian\n\nt_dist:\n type: superposition\n dists: \n d1: \n type: gaussian\n avg_t:\n units: ps\n value: -1\n sigma_t:\n units: ps\n value: 1\n d2: \n type: gaussian\n avg_t:\n units: ps\n value: 1\n sigma_t:\n units: ps\n value: 1\n \n\n\"\"\"\n\nG = Generator(YAML)", "_____no_output_____" ], [ "# Tune the two dist separation\nG['t_dist:dists:d1:avg_t:value'] = -1\nG['t_dist:dists:d2:avg_t:value'] = 1\nG.run()\nGP = G.particles\nGP.plot('t')\nGP.plot('pz')", "_____no_output_____" ], [ "from impact import Impact\n\nimport matplotlib.pyplot as plt\nimport os", "_____no_output_____" ], [ "ifile = 'templates/lcls_injector/ImpactT.in'\nos.path.exists(ifile)", "_____no_output_____" ], [ "# Make Impact object\nI = Impact(ifile, initial_particles = G.particles, verbose=True)", "Configured to run in: /var/folders/wj/lfgr01993dx79p9cm_skykbw0000gn/T/tmpuftj333y\n" ], [ "# This will use the initial particles\nI.write_initial_particles(update_header=True)", "writing 10000 particles to /var/folders/wj/lfgr01993dx79p9cm_skykbw0000gn/T/tmpuftj333y/partcl.data\nCathode start with cathode_kinetic_energy_ref = 1.0 eV\nCathode start: Replaced Np with 10000 according to initial particles\nCathode start: Replaced Bkenergy with 1.0 according to initial particles\nCathode start: Replaced Temission with 9.145639807439135e-12 according to initial particles\nCathode start: Replaced Tini with -4.714953504413114e-12 according to initial particles\nSetting total charge to 2.4999999999999996e-10 C\n" ], [ "# Change some things\nI.header['Nx'] = 16\nI.header['Ny'] = 16\nI.header['Nz'] = 16\nI.header['Dt'] = 5e-13\n\n# Turn Space Charge off\nI.header['Bcurr'] = 0 \n\n# Other switches\nI.timeout = 1000\n# Switches for MPI\nI.use_mpi=True\nI.header['Nprow'] = 1\nI.header['Npcol'] = 4", "_____no_output_____" ], [ "# Change stop location\nI.stop = 1.5\n#I.ele['stop_1']['s'] = I.ele['OTR2']['s']+.001", "Removed element: stop_1\nSet stop to s = 1.5\n" ], [ "I.run()", "Running Impact-T in /var/folders/wj/lfgr01993dx79p9cm_skykbw0000gn/T/tmpuftj333y\nmpirun -n 4 /Users/chrisonian/Code/miniconda3/envs/devel/bin/ImpactTexe-mpi\nwriting 10000 particles to /var/folders/wj/lfgr01993dx79p9cm_skykbw0000gn/T/tmpuftj333y/partcl.data\nCathode start with cathode_kinetic_energy_ref = 1.0 eV\nCathode start: Replaced Np with 10000 according to initial particles\nCathode start: Replaced Bkenergy with 1.0 according to initial particles\nCathode start: Replaced Temission with 9.145639807439135e-12 according to initial particles\nCathode start: Replaced Tini with -4.714953504413114e-12 according to initial particles\nLoaded fort 30 : Fourth root of the fourth moments of the beam distribution\nLoaded fort 25 : RMS Y information\nLoaded fort 24 : RMS X information\nLoaded fort 26 : RMS Z information\nLoaded fort 28 : Load balance and loss 
diagnostics\nLoaded fort 29 : Cube root of third moments of the beam distribution\nLoaded fort 18 : Time and energy\nLoaded fort 27 : Max amplitude information\nLoaded fort 70 : Slice information of the final distribution\nLoaded fort 60 : Slice information of the initial distribution\nLoading particles\nLoaded fort 40 : initial particle distribution at t = 0\nLoaded fort 50 : final particle distribution projected to the centroid location of the bunch\nLoaded write beam particles YAG02 fort.102\nConverting z to t according to cathode_kinetic_energy_ref = 1.0 eV\nConverted initial_particles to ParticleGroup\nConverted final_particles to ParticleGroup\nConverted YAG02 to ParticleGroup\n" ], [ "I.input.keys()", "_____no_output_____" ], [ "I.output.keys()", "_____no_output_____" ], [ "I.output['stats'].keys()", "_____no_output_____" ], [ "I.output['slice_info'].keys()", "_____no_output_____" ] ], [ [ "# Particles", "_____no_output_____" ] ], [ [ "# Particles are automatically parsed in to openpmd-beamphysics ParticleGroup objects\nI.output['particles']", "_____no_output_____" ], [ "PI = I.output['particles']['initial_particles']\nPF = I.output['particles']['final_particles']", "_____no_output_____" ], [ "# Original particles\nGP.plot('t', 'pz')", "_____no_output_____" ], [ "# Readback of initial particles from Impact-T. \nPI.plot('t', 'pz')", "_____no_output_____" ], [ "# The initial time was shifted to account for this\nI.header['Tini']", "_____no_output_____" ], [ "# Get the final particles, calculate some statistic\nP = I.output['particles']['final_particles']\nP['mean_energy']", "_____no_output_____" ], [ "# Show the units\nP.units('mean_energy')", "_____no_output_____" ], [ "P.plot('z', 'pz')", "_____no_output_____" ] ], [ [ "# Stats", "_____no_output_____" ] ], [ [ "# Impact's own calculated statistics can be retieved\nlen(I.stat('norm_emit_x')), I.stat('norm_emit_x')[-1]", "_____no_output_____" ], [ "# Compare these. \nkey1 = 'mean_z'\nkey2 = 'sigma_x'\nunits1 = str(I.units(key1))\nunits2 = str(I.units(key2))\nplt.xlabel(key1+f' ({units1})')\nplt.ylabel(key2+f' ({units2})')\nplt.plot(I.stat(key1), I.stat(key2))\nplt.scatter(\n [I.particles[name][key1] for name in I.particles], \n [I.particles[name][key2] for name in I.particles], color='red')", "_____no_output_____" ] ], [ [ "# Archive, and restart from the middle", "_____no_output_____" ] ], [ [ "afile = I.archive()\nI2 = Impact(verbose=False)\nI2.load_archive(afile)\n\n# Patch in these particles\nI2.initial_particles = I2.particles['YAG02']\n\n# Turn off cathode start\nI2.header['Flagimg'] = 0\nI2.configure()\n", "Archiving to file impact_9993c786f906489cbbdd0a7025dd75a5.h5\n" ], [ "# Run again\nI2.use_mpi=True\nI2.run()", "_____no_output_____" ], [ "# Compare these. \nkey1 = 'mean_z'\nkey2 = 'sigma_x'\nunits1 = str(I.units(key1))\nunits2 = str(I.units(key2))\nplt.xlabel(key1+f' ({units1})')\nplt.ylabel(key2+f' ({units2})')\nplt.plot(I.stat(key1), I.stat(key2), color='black', label='original run')\nplt.plot(I2.stat(key1), I2.stat(key2), color='red', label='restart run')\nplt.scatter(\n [I.particles[name][key1] for name in I.particles], \n [I.particles[name][key2] for name in I.particles], color='black')\n\nplt.scatter(\n [I2.particles[name][key1] for name in I2.particles], \n [I2.particles[name][key2] for name in I2.particles], color='red', marker='x')\nplt.legend()", "_____no_output_____" ], [ "# Cleanup\nos.remove(afile)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4af5349f630e021f879e4cd34d8f5207aaffa8e2
3,057
ipynb
Jupyter Notebook
ipyfilechooser_examples.ipynb
danjjl/ipyfilechooser
19d2e906207b2c3426675eda7889267f5956b182
[ "MIT" ]
null
null
null
ipyfilechooser_examples.ipynb
danjjl/ipyfilechooser
19d2e906207b2c3426675eda7889267f5956b182
[ "MIT" ]
null
null
null
ipyfilechooser_examples.ipynb
danjjl/ipyfilechooser
19d2e906207b2c3426675eda7889267f5956b182
[ "MIT" ]
null
null
null
20.938356
80
0.535165
[ [ [ "# ipyfilechooser examples", "_____no_output_____" ] ], [ [ "from ipyfilechooser import FileChooser\nimport os\n\n# Create new FileChooser:\n# Path: current directory\n# File: test.txt\n# Title: <b>FileChooser example</b>\n# Show hidden files: no\n# Use the default path and filename as selection: yes\nfdialog = FileChooser(\n os.getcwd(),\n filename='test.txt',\n title='<b>FileChooser example</b>',\n show_hidden=False,\n select_default=True\n)\n\ndisplay(fdialog)", "_____no_output_____" ], [ "# Get the selected value\nfdialog.selected", "_____no_output_____" ], [ "# Callback example\ndef change_title():\n fdialog.title = '<b>Callback function executed</b>'\n\n# Register callback function\nfdialog.register_callback(change_title)", "_____no_output_____" ], [ "# Set or change the title\nfdialog.title = '<b>Select the output file</b>'", "_____no_output_____" ], [ "# Show hidden files and change rows to 10\nfdialog.show_hidden = True\nfdialog.rows = 10", "_____no_output_____" ], [ "# Change the default path and filename\nfdialog.default_path = os.path.abspath(os.path.join(os.getcwd(), '..'))\nfdialog.default_filename = 'foobar.txt'", "_____no_output_____" ], [ "# Reset to defaults and clear the selected value\nfdialog.reset()", "_____no_output_____" ], [ "# String representation\nprint(fdialog)", "_____no_output_____" ], [ "# Print the version number\nimport ipyfilechooser\nipyfilechooser.__version__", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af543daa162db5cbe52fd8cffc5117e8d58cbb0
5,518
ipynb
Jupyter Notebook
scikit/Chapter 5/Model Selection for Density Models.ipynb
KarthikKothareddy/Data-Science-Practice
2c12128e29f5e3455882db6c83d1e1ffafa7126f
[ "Apache-2.0" ]
2
2017-01-06T23:51:52.000Z
2018-12-26T08:42:19.000Z
scikit/Chapter 5/Model Selection for Density Models.ipynb
KarthikKothareddy/Data-Science-Practice
2c12128e29f5e3455882db6c83d1e1ffafa7126f
[ "Apache-2.0" ]
null
null
null
scikit/Chapter 5/Model Selection for Density Models.ipynb
KarthikKothareddy/Data-Science-Practice
2c12128e29f5e3455882db6c83d1e1ffafa7126f
[ "Apache-2.0" ]
5
2015-12-20T02:47:03.000Z
2018-12-26T08:42:22.000Z
20.138686
107
0.531171
[ [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ], [ [ "## Kernel Density Estimation", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_blobs\n\nX, y = make_blobs(n_features=1, n_samples=30, random_state=1)", "_____no_output_____" ], [ "plt.hist(X);\nplt.scatter(X.ravel(), -np.ones(len(X)))\nplt.yticks(())", "_____no_output_____" ], [ "from sklearn.neighbors import KernelDensity\n\nkde = KernelDensity(bandwidth=1).fit(X)", "_____no_output_____" ], [ "line = np.linspace(X.min() - 2, X.max() + 2, 100)\nline_scores = np.exp(kde.score_samples(line[:, np.newaxis]))\nplt.plot(line, line_scores)\nplt.twinx().scatter(X.ravel(), np.ones(len(X)))", "_____no_output_____" ], [ "kde.score(X)", "_____no_output_____" ], [ "kde = KernelDensity(bandwidth=0.2).fit(X)", "_____no_output_____" ], [ "line = np.linspace(X.min() - 2, X.max() + 2, 1000)\nline_scores = np.exp(kde.score_samples(line[:, np.newaxis]))\nplt.plot(line, line_scores)\nplt.twinx().scatter(X.ravel(), np.ones(len(X)))\n ", "_____no_output_____" ], [ "kde.score(X)", "_____no_output_____" ], [ "from sklearn.grid_search import GridSearchCV\nparam_grid = {'bandwidth': np.logspace(-1, 1, 20)}\ngrid = GridSearchCV(KernelDensity(), param_grid, cv=10)\ngrid.fit(X)", "_____no_output_____" ], [ "grid.best_params_", "_____no_output_____" ], [ "line_scores = np.exp(grid.best_estimator_.score_samples(line[:, np.newaxis]))\nplt.plot(line, line_scores)\nplt.twinx().scatter(X.ravel(), -np.ones(len(X)))", "_____no_output_____" ] ], [ [ "## PCA as probabilistic model", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_low_rank_matrix\nfrom sklearn.decomposition import PCA\n\nX = make_low_rank_matrix(n_features=100, effective_rank=10, random_state=0)", "_____no_output_____" ], [ "pca = PCA(n_components=20).fit(X)\npca.score(X)", "_____no_output_____" ], [ "pca = PCA(n_components=50).fit(X)\npca.score(X)", "_____no_output_____" ], [ "from sklearn.learning_curve import validation_curve\nparam_range = range(2, 40, 2)\ntraining_scores, validation_scores = validation_curve(PCA(), X, None, param_name=\"n_components\",\n param_range=param_range, cv=10)", "_____no_output_____" ], [ "from figures import plot_validation_curve\nplot_validation_curve(param_range, training_scores, validation_scores)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4af5494626235f3b7f0b886a59627268f758a44e
8,649
ipynb
Jupyter Notebook
staging/graphs_trees/binary_tree/binary_tree_challenge.ipynb
filippovitale/interactive-coding-challenges
8380a7aa98618c3cc9c0271c30bd320937d431ad
[ "Apache-2.0" ]
null
null
null
staging/graphs_trees/binary_tree/binary_tree_challenge.ipynb
filippovitale/interactive-coding-challenges
8380a7aa98618c3cc9c0271c30bd320937d431ad
[ "Apache-2.0" ]
null
null
null
staging/graphs_trees/binary_tree/binary_tree_challenge.ipynb
filippovitale/interactive-coding-challenges
8380a7aa98618c3cc9c0271c30bd320937d431ad
[ "Apache-2.0" ]
1
2020-01-05T11:28:00.000Z
2020-01-05T11:28:00.000Z
29.518771
308
0.54145
[ [ [ "<small><i>This notebook was prepared by Marco Guajardo. Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).</i></small>", "_____no_output_____" ], [ "# Challenge Notebook", "_____no_output_____" ], [ "## Problem: Implement a binary search tree with insert, delete, different traversals & max/min node values\n* [Constraints](#Constraints)\n* [Test Cases](#Test-Cases)\n* [Algorithm](#Algorithm)\n* [Code](#Code)\n* [Unit Test](#Unit-Test)\n* [Solution Notebook](#Solution-Notebook)", "_____no_output_____" ], [ "## Constraints\n* Is this a binary tree?\n * Yes\n* Is the root set to None initially?\n * Yes\n* Do we care if the tree is balanced?\n * No\n* What do we return for the traversals?\n * Return a list of the data in the desired order\n* What type of data can the tree hold?\n * Assume the tree only takes ints. In a realistic example, we'd use a hash table to convert other types to ints.", "_____no_output_____" ], [ "## Test Cases\n\n### Insert \n\n* Always start with the root\n* If value is less than the root, go to the left child\n* if value is more than the root, go to the right child\n\n\n### Delete\n\n* Deleting a node from a binary tree is tricky. Make sure you arrange the tree correctly when deleting a node.\n* Here are some basic [instructions](http://www.algolist.net/Data_structures/Binary_search_tree/Removal)\n* If the value to delete isn't on the tree return False\n\n### Traverals \n\n* In order traversal -left, center, right\n* Pre order traversal - center, left, right\n* Post order traversal - left, right, center\n* Return list for all traverals \n\n### Max & Min\n* Find the max node in the binary search tree\n* Find the min node in the binary search tree\n\n### treeIsEmpty\n* check if the tree is empty\n\n\n## Algorithm\n\nRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/binary_tree_implementation/binary_tree_solution.ipynb). 
If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.", "_____no_output_____" ], [ "## Code", "_____no_output_____" ] ], [ [ "class Node (object):\n def __init__ (self, data=None):\n #TODO:implement me\n pass\n \n def __str__ (self):\n #TODO:implement me\n pass", "_____no_output_____" ], [ "class BinaryTree (object):\n def __init__ (self):\n #TODO:implement me\n pass\n \n def insert (self, newData):\n #TODO:implement me\n pass\n \n def delete (self, key):\n #TODO:implement me\n pass\n \n def maxNode (self):\n #TODO:implement me\n pass\n \n def minNode (self):\n #TODO:implement me\n pass\n \n def printPostOrder (self):\n #TODO:implement me\n pass\n \n def printPreOrder (self):\n #TODO:implement me\n pass\n \n def printInOrder (self):\n #TODO:implement me\n pass\n \n def treeIsEmpty (self):\n #TODO: implement me\n pass\n", "_____no_output_____" ] ], [ [ "## Unit Test", "_____no_output_____" ] ], [ [ "from nose.tools import assert_equal\n\nclass TestBinaryTree(object):\n\n\tdef test_insert_traversals (self):\n\t\tmyTree = BinaryTree()\n\t\tmyTree2 = BinaryTree()\n\t\tfor num in [50, 30, 70, 10, 40, 60, 80, 7, 25, 38]:\n\t\t\tmyTree.insert(num)\n\t\t[myTree2.insert(num) for num in range (1, 100, 10)]\n\n\t\tprint(\"Test: insert checking with in order traversal\")\n\t\texpectVal = [7, 10, 25, 30, 38, 40, 50, 60, 70, 80]\n\t\tassert_equal(myTree.printInOrder(), expectVal)\n\t\texpectVal = [1, 11, 21, 31, 41, 51, 61, 71, 81, 91]\n\t\tassert_equal(myTree2.printInOrder(), expectVal)\n\n\t\tprint(\"Test: insert checking with post order traversal\")\n\t\texpectVal = [7, 25, 10, 38, 40, 30, 60, 80, 70, 50]\n\t\tassert_equal(myTree.printPostOrder(), expectVal)\n\t\texpectVal = [91, 81, 71, 61, 51, 41, 31, 21, 11, 1]\n\t\tassert_equal(myTree2.printPostOrder(), expectVal)\n\n\n\t\tprint(\"Test: insert checking with pre order traversal\")\n\t\texpectVal = [50, 30, 10, 7, 25, 40, 38, 70, 60, 80]\n\t\tassert_equal(myTree.printPreOrder(), expectVal)\n\t\texpectVal = [1, 11, 21, 31, 41, 51, 61, 71, 81, 91]\n\t\tassert_equal(myTree2.printPreOrder(), expectVal)\n\n\n\t\tprint(\"Success: test_insert_traversals\")\n\n\tdef test_max_min_nodes (self):\n\t\tmyTree = BinaryTree()\n\t\tmyTree.insert(5)\n\t\tmyTree.insert(1)\n\t\tmyTree.insert(21)\n\n\t\tprint(\"Test: max node\")\n\t\tassert_equal(myTree.maxNode(), 21)\n\t\tmyTree.insert(32)\n\t\tassert_equal(myTree.maxNode(), 32)\n\n\t\tprint(\"Test: min node\")\n\t\tassert_equal(myTree.minNode(), 1)\n\n\t\tprint(\"Test: min node inserting negative number\")\n\t\tmyTree.insert(-10)\n\t\tassert_equal(myTree.minNode(), -10)\n\n\t\tprint(\"Success: test_max_min_nodes\")\n\n\tdef test_delete (self):\n\t\tmyTree = BinaryTree()\n\t\tmyTree.insert(5)\n\n\t\tprint(\"Test: delete\")\n\t\tmyTree.delete(5)\n\t\tassert_equal(myTree.treeIsEmpty(), True)\n\t\t\n\t\tprint(\"Test: more complex deletions\")\n\t\t[myTree.insert(x) for x in range(1, 5)]\n\t\tmyTree.delete(2)\n\t\tassert_equal(myTree.root.rightChild.data, 3)\n \n\t\tprint(\"Test: delete invalid value\")\n\t\tassert_equal(myTree.delete(100), False)\n\n\n\t\tprint(\"Success: test_delete\")\n\ndef main():\n testing = TestBinaryTree()\n testing.test_insert_traversals()\n testing.test_max_min_nodes()\n testing.test_delete()\n \nif __name__=='__main__':\n main()", "_____no_output_____" ] ], [ [ "**The following unit test is expected to fail until you solve the challenge.**", "_____no_output_____" ], [ "## Solution NoteBook\n\nReview the [Solution 
Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/binary_tree_implementation/binary_tree_solution.ipynb) for a discussion on algorithms and code solutions.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4af549c564bd6a864690f3e6fd6a2aedd0e0eb76
50,770
ipynb
Jupyter Notebook
.ipynb_checkpoints/machine_learning_tutorial-checkpoint.ipynb
lzt68/Online-Learning-Implementation
303692a901dcc58555bd2acf0aa6cf6ae5b392da
[ "MIT" ]
1
2021-11-26T08:46:01.000Z
2021-11-26T08:46:01.000Z
.ipynb_checkpoints/machine_learning_tutorial-checkpoint.ipynb
lzt68/Online-Learning-Implementation
303692a901dcc58555bd2acf0aa6cf6ae5b392da
[ "MIT" ]
null
null
null
.ipynb_checkpoints/machine_learning_tutorial-checkpoint.ipynb
lzt68/Online-Learning-Implementation
303692a901dcc58555bd2acf0aa6cf6ae5b392da
[ "MIT" ]
null
null
null
40.877617
5,148
0.529013
[ [ [ "#https://pytorch.org/tutorials/beginner/pytorch_with_examples.html", "_____no_output_____" ] ], [ [ "# MNIST Dataset\n### http://yann.lecun.com/exdb/mnist/\n### The MNIST database of handwritten digits, available from this page, has a training set of 60,000 examples, and a test set of 10,000 examples. It is a subset of a larger set available from NIST. The digits have been size-normalized and centered in a fixed-size image.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport h5py #pip install h5py -- https://www.h5py.org/\n\n#load train\nf = h5py.File('MNISTdata.hdf5', 'r')\ntrain_x, train_y = f['x_train'][:], f['y_train'][:,0]\nf.close()\n\nprint(\"train_x\", train_x.shape, train_x.dtype)\n#each image is stored in 784*1 numpy.ndarray, basically 28*28 image", "train_x (60000, 784) float32\n" ], [ "type(train_x)", "_____no_output_____" ], [ "plt.imshow(train_x[0].reshape(28, 28)), train_y[0]", "_____no_output_____" ], [ "import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.utils.data\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport numpy as np\nimport os\nimport os.path\nimport argparse\nfrom torch.autograd import Variable", "_____no_output_____" ], [ "class FNN(nn.Module):#Fully connected Neural Network\n \"\"\"FNN.\"\"\"\n\n def __init__(self):\n \"\"\"FNN Builder.\"\"\"\n super(FNN, self).__init__()\n \n self.fc_layer = nn.Sequential(\n nn.Linear(784, 100),#100 is the number of hidden nodes in the hidden layer\n nn.ReLU(inplace=True),\n nn.Linear(100, 10)\n )\n #self.layer1 = nn.Linear(784, 100)\n #self.layer2 = nn.ReLU(inplace=True)\n #self.layer3 = nn.Linear(100, 10)\n\n\n\n def forward(self, x):\n \"\"\"Perform forward.\"\"\"\n x = self.fc_layer(x)\n return x\n \n #x = self.layer1(x)\n #x = self.layer2(x)\n #x = self.layer3(x)\n \n #y = self.fc_layer(x)\n #return y", "_____no_output_____" ], [ "# 784*100 + 100*10 - NN\n# 784", "_____no_output_____" ], [ "def calculate_accuracy(loader, is_gpu):\n \"\"\"Calculate accuracy.\n\n Args:\n loader (torch.utils.data.DataLoader): training / test set loader\n is_gpu (bool): whether to run on GPU\n Returns:\n tuple: (overall accuracy, class level accuracy)\n \"\"\"\n correct = 0\n total = 0\n\n for data in loader:\n inputs, labels = data\n if is_gpu:\n inputs = inputs.cuda()\n labels = labels.cuda()\n inputs, labels = Variable(inputs), Variable(labels)\n outputs = net(inputs)\n \n _, predicted = torch.max(outputs.data, 1)\n \n\n\n \n\n # forward + backward + optimize\n outputs = net(inputs)#forward\n \n \n total += labels.size(0)\n #correct += (predicted == labels).sum()\n correct += (predicted == labels[:,0].T).sum()\n\n return 100*correct.item()/float(total)", "_____no_output_____" ], [ "parser = argparse.ArgumentParser()\n\n# hyperparameters settings\nparser.add_argument('--lr', type=float, default=0.001, help='learning rate')\nparser.add_argument('--wd', type=float, default=5e-4, help='weight decay')#lr/(c+wd)\nparser.add_argument('--epochs', type=int, default=50,\n help='number of epochs to train')\nparser.add_argument('--batch_size_train', type=int,\n default=16, help='training set input batch size')\nparser.add_argument('--batch_size_test', type=int,\n default=16, help='test set input batch size')\nparser.add_argument('--is_gpu', type=bool, default=False,\n help='whether training using GPU')\nimport sys\nsys.argv=['']\ndel sys\n \n# parse the arguments\nopt = parser.parse_args()", "_____no_output_____" ], [ "f = 
h5py.File('MNISTdata.hdf5','r')\n\nx_test_set=np.float32(f['x_test'][:])\ny_test_set=np.int32(np.array(f['y_test'][:,0])).reshape(-1,1)\nx_train_set=np.float32(f['x_train'][:])\ny_train_set=np.int32(np.array(f['y_train'][:,0])).reshape(-1,1)\n\nf.close()\n\n#num_samples = y_train_set.shape[0]\n#y_train_set = y_train_set.reshape(1, num_samples)\n#y_train_set = np.eye(10)[y_train_set.astype('int32')]\n#y_train_set = y_train_set.T.reshape(10, num_samples)\n\n#num_samples = y_test_set.shape[0]\n#y_test_set = y_test_set.reshape(1, num_samples)\n#y_test_set = np.eye(10)[y_test_set.astype('int32')]\n#y_test_set = y_test_set.T.reshape(10, num_samples)\n\n\ntrainset = torch.utils.data.TensorDataset(torch.Tensor(x_train_set), torch.Tensor(y_train_set)) # create your datset\ntrainloader = torch.utils.data.DataLoader(\n trainset, batch_size=opt.batch_size_train, shuffle=True)\n#mini-batch gradient, stochastic gradient descent - 1 sample\n\ntestset = torch.utils.data.TensorDataset(torch.Tensor(x_test_set), torch.Tensor(y_test_set)) # create your datset\ntestloader = torch.utils.data.DataLoader(\n testset, batch_size=opt.batch_size_test, shuffle=False)", "_____no_output_____" ], [ "type(trainset), type(trainloader)", "_____no_output_____" ], [ "# create the FNN instance\nnet = FNN()\n# For training on GPU, transfer net and data into the GPU\nif opt.is_gpu:\n net = net.cuda()\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n cudnn.benchmark = True\nelse:\n print('Training on CPU')", "Training on CPU\n" ], [ "# Loss function and optimizer\ncriterion = nn.CrossEntropyLoss()#N dim -> prob (softmax) -> CrossEntropyLoss()\noptimizer = optim.Adam(net.parameters(), lr=opt.lr, weight_decay=opt.wd)#a variant of SGD", "_____no_output_____" ], [ "for epoch in range(opt.epochs):\n\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs\n inputs, labels = data\n \n #if training on GPU, wrap the data into the cuda\n if opt.is_gpu:\n inputs = inputs.cuda()\n labels = labels.cuda()\n\n # wrap them in Variable\n inputs, labels = Variable(inputs), Variable(labels)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)#forward\n loss = criterion(outputs, labels[:, 0].long())\n loss.backward()#compute gradients\n optimizer.step()#descent\n\n # calculate loss\n running_loss += loss.data.item()\n\n # Normalizing the loss by the total number of train batches\n running_loss /= len(trainloader)\n\n # Calculate training/test set accuracy of the existing model\n train_accuracy = calculate_accuracy(trainloader, opt.is_gpu)\n test_accuracy = calculate_accuracy(testloader, opt.is_gpu)\n\n print(\"Iteration: {0} | Loss: {1} | Training accuracy: {2}% | Test accuracy: {3}%\".format(\n epoch+1, running_loss, train_accuracy, test_accuracy))\n \n", "_____no_output_____" ], [ "loss, loss.requires_grad", "_____no_output_____" ], [ "outputs", "_____no_output_____" ], [ "labels[:, 0].long()", "_____no_output_____" ] ], [ [ "# Without Pytorch", "_____no_output_____" ] ], [ [ "import h5py\nimport numpy as np\nimport argparse\n\ndef sigmoid(x):\n \"\"\"\n define scale function\n \"\"\"\n return np.exp(x)/(1.0+np.exp(x))\n\ndef RELU(x):\n return np.np.maximum(x,0)\n\ndef reluDerivative(x):\n return np.array([reluDerivativeSingleElement(xi) for xi in x])\n\ndef reluDerivativeSingleElement(xi):\n if xi > 0:\n return 1\n elif xi <= 0:\n return 0\n \ndef compute_loss(Y,V):\n L_sum = np.sum(np.multiply(Y, np.log(V)))\n m = 
Y.shape[1]\n L = -(1./m) * L_sum\n return L\n \n\ndef feed_forward(X, params):\n tempt={}\n tempt[\"Z\"]=np.matmul(params[\"W\"], X) + params[\"b1\"]\n tempt[\"H\"]=sigmoid(tempt[\"Z\"])\n #tempt[\"H\"]=RELU(tempt[\"Z\"])\n tempt[\"U\"]=np.matmul(params[\"C\"], tempt[\"H\"]) + params[\"b2\"]\n tempt[\"V\"]=np.exp(tempt[\"U\"]) / np.sum(np.exp(tempt[\"U\"]), axis=0)\n return tempt\n\ndef back_propagate(X, Y, params, tempt, m_batch):\n # X is m*n matrix\n # Y is m*1 matrix\n # tempt is the value in each neural cell\n dU=tempt[\"V\"]-Y # the loss of output layer\n dC=(1. / m_batch) * np.matmul(dU, tempt[\"H\"].T)\n db2=(1. / m_batch) * np.sum(dU, axis=1, keepdims=True)\n dH=np.matmul(params[\"C\"].T, dU)\n dZ = dH * sigmoid(tempt[\"Z\"]) * (1 - sigmoid(tempt[\"Z\"]))\n #dZ=dH*reluDerivative(tempt[\"Z\"])\n dW = (1. / m_batch) * np.matmul(dZ, X.T)\n db1 = (1. / m_batch) * np.sum(dZ, axis=1, keepdims=True)\n grads={\"dW\":dW, \"db1\":db1, \"dC\":dC, \"db2\":db2}\n return grads\n\n#hyperparameters\nepochs=10\nbatch_size=1\nbatchs=np.int32(60000/batch_size)\nLR=0.01\ndh=100#number of hidden nodes\n\n#getting 60000 samples of training data and 10000 samples of testing data\nf=h5py.File('MNISTdata.hdf5','r')\nx_test_set=np.float32(f['x_test'][:])\ny_test_set=np.int32(np.array(f['y_test'][:,0])).reshape(-1,1)\nx_train_set=np.float32(f['x_train'][:])\ny_train_set=np.int32(np.array(f['y_train'][:,0])).reshape(-1,1)\nf.close()\nX=np.vstack((x_train_set,x_test_set))\nY=np.vstack((y_train_set,y_test_set))\nnum_samples=Y.shape[0]\nY=Y.reshape(1,num_samples)\nY_new = np.eye(10)[Y.astype('int32')]\nY_new = Y_new.T.reshape(10, num_samples)\nX_train, X_test=X[:60000].T, X[60000:].T\nY_train, Y_test=Y_new[:,:60000], Y_new[:,60000:]\n\n#building fully connected neural network with one hidden layer\n#initialization of parameters\nparams={\"b1\":np.zeros((dh,1)),\n \"W\":np.random.randn(dh,784)*np.sqrt(1. / 784),\n \"b2\":np.zeros((10,1)),\n \"C\":np.random.randn(10,dh)*np.sqrt(1. 
/ dh)}\n\n\n#training the network\nfor num_epoches in range(epochs):\n if (num_epoches > 5):\n LR = 0.001\n if (num_epoches > 10):\n LR = 0.0001\n if (num_epoches > 15):\n LR = 0.00001\n #shuffle the training data\n shuffle_index=np.random.permutation(X_train.shape[1])\n X_train= X_train[:, shuffle_index]\n Y_train=Y_train[:, shuffle_index]\n \n for num_batch in range(batchs):\n left_index=num_batch*batch_size\n right_index=min(left_index+batch_size,x_train_set.shape[0]-1)\n m_batch=right_index-left_index\n X=X_train[:,left_index:right_index]\n Y=Y_train[:,left_index:right_index]\n\n tempt=feed_forward(X, params)\n grads = back_propagate(X, Y, params, tempt, 1)\n\n #gradient descent\n params[\"W\"] = params[\"W\"] - LR * grads[\"dW\"]\n params[\"b1\"] = params[\"b1\"] - LR * grads[\"db1\"]\n params[\"C\"] = params[\"C\"] - LR * grads[\"dC\"]\n params[\"b2\"] = params[\"b2\"] - LR * grads[\"db2\"]\n \n #compute loss on training data\n tempt = feed_forward(X_train, params)\n train_loss = compute_loss(Y_train, tempt[\"V\"])\n #compute loss on test set\n tempt=feed_forward(X_test, params)\n test_loss = compute_loss(Y_test, tempt[\"V\"])\n total_correct=0\n for n in range(Y_test.shape[1]):\n p = tempt[\"V\"][:,n]\n prediction = np.argmax(p)\n if prediction == np.argmax(Y_test[:,n]):\n total_correct+=1\n accuracy = np.float32(total_correct) / (Y_test.shape[1])\n #print(params)\n print(\"Epoch {}: training loss = {}, test loss = {}, accuracy={}\".format(\n num_epoches + 1, train_loss, test_loss, accuracy))", "Epoch 1: training loss = 0.23334259795546275, test loss = 0.23107303718807204, accuracy=0.9319\nEpoch 2: training loss = 0.17363187679587486, test loss = 0.17460703908888603, accuracy=0.9449\nEpoch 3: training loss = 0.12975675211847817, test loss = 0.13803599922775564, accuracy=0.9597\nEpoch 4: training loss = 0.10486907887581996, test loss = 0.11517843963481662, accuracy=0.9643\n" ] ], [ [ "# ML Model with JD Data", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\nfrom scipy import stats", "_____no_output_____" ], [ "#read/write data from/to local files\nprefix_path = 'JD_data/'\n# 'skus' table\nskus = pd.read_csv(prefix_path + 'JD_sku_data.csv')\n# 'users' table\nusers = pd.read_csv(prefix_path + 'JD_user_data.csv')\n# 'clicks' table\nclicks = pd.read_csv(prefix_path + 'JD_click_data.csv')\n# 'orders' table\norders = pd.read_csv(prefix_path + 'JD_order_data.csv')\n# 'delivery' table\ndelivery = pd.read_csv(prefix_path + 'JD_delivery_data.csv')\n# 'inventory' table\ninventory = pd.read_csv(prefix_path + 'JD_inventory_data.csv')\n# 'network' table\nnetwork = pd.read_csv(prefix_path + 'JD_network_data.csv')", "_____no_output_____" ], [ "orders['order_date'] = pd.to_datetime(orders['order_date']) \norders['weekday'] = orders['order_date'].dt.dayofweek\ndf_temp = orders[['weekday','final_unit_price']]\n#Add dummy variables\ndf_temp1 = pd.get_dummies(df_temp['weekday'], prefix='weekday')\ncols_to_keep = ['final_unit_price']\ndf_temp = df_temp[cols_to_keep].join(df_temp1.iloc[:,0:])#not df_temp1.ix[:,0:], consider the gender case\ndf_temp['intercept'] = 1\ntrain_cols_ = df_temp.columns[1:]#can write ['x1', 'x2'] manually\ntrain_df = df_temp[train_cols_]", "_____no_output_____" ], [ "opt2 = parser.parse_args()", "_____no_output_____" ], [ "trainset_JD = torch.utils.data.TensorDataset(torch.Tensor(train_df.values), torch.Tensor(df_temp['final_unit_price'].values)) # create your datset\ntrainloader_JD = 
torch.utils.data.DataLoader(\n trainset_JD, batch_size=opt2.batch_size_train, shuffle=True)", "_____no_output_____" ], [ "class FNN_JD(nn.Module):\n \"\"\"FNN.\"\"\"\n\n def __init__(self):\n \"\"\"FNN Builder.\"\"\"\n super(FNN_JD, self).__init__()\n \n self.fc_layer = nn.Sequential(\n nn.Linear(8, 4),\n nn.ReLU(inplace=True),\n nn.Linear(4, 1)\n )\n #self.fc_layer = nn.Sequential(\n # nn.Linear(8, 4),\n # nn.ReLU(inplace=True),\n # nn.Linear(4, 2),\n # nn.ReLU(inplace=True),\n # nn.Linear(2, 1)\n #)\n\n\n def forward(self, x):\n \"\"\"Perform forward.\"\"\"\n x = self.fc_layer(x)\n return x", "_____no_output_____" ], [ "# create the FNN instance\nnet_JD = FNN_JD()\n# For training on GPU, transfer net and data into the GPU\nif opt2.is_gpu:\n net_JD = net.cuda()\n net_JD = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n cudnn.benchmark = True\nelse:\n print('Training on CPU')", "Training on CPU\n" ], [ "# Loss function and optimizer\ncriterion_JD = nn.MSELoss()\noptimizer_JD = optim.Adam(net_JD.parameters(), lr=opt2.lr, weight_decay=opt2.wd)", "_____no_output_____" ], [ "train_df", "_____no_output_____" ], [ "for epoch in range(opt2.epochs):\n\n running_loss = 0.0\n for i, data in enumerate(trainloader_JD, 0):\n # get the inputs\n inputs, prices = data\n \n #if training on GPU, wrap the data into the cuda\n if opt2.is_gpu:\n inputs = inputs.cuda()\n prices = prices.cuda()\n\n # wrap them in Variable\n inputs, prices = Variable(inputs), Variable(prices)\n\n # zero the parameter gradients\n optimizer_JD.zero_grad()\n\n # forward + backward + optimize\n outputs = net_JD(inputs)\n loss = criterion_JD(outputs[:,0], prices)\n loss.backward()\n\n\n optimizer_JD.step()\n\n # calculate loss\n running_loss += loss.data.item()\n\n # Normalizing the loss by the total number of train batches\n #running_loss /= len(trainloader)\n\n # Calculate training/test set accuracy of the existing model\n #train_accuracy = calculate_accuracy(trainloader, opt.is_gpu)\n\n print(\"Iteration: {0} | Loss: {1}\".format(\n epoch+1, running_loss))\n \n\n", "_____no_output_____" ], [ "#sum of squared error\nopt2.batch_size_train * 197859128", "_____no_output_____" ] ], [ [ "## Ways to improve accuracy:\n### 1. hyperparameter tuning: different algorithm and learning rate - SGD, different loss function, batch size\n### 2. different network structures, different activiation layer\n### 3. more features/inputs", "_____no_output_____" ], [ "# Compare with Linear Regression", "_____no_output_____" ] ], [ [ "import statsmodels.api as sm\ndf_temp = orders[['weekday','final_unit_price']]\n#Add dummy variables\ndf_temp1 = pd.get_dummies(df_temp['weekday'], prefix='weekday')\ncols_to_keep = ['final_unit_price']\ndf_temp = df_temp[cols_to_keep].join(df_temp1.iloc[:,1:])#not df_temp1.ix[:,0:], consider the gender case\ndf_temp['intercept'] = 1\ntrain_cols_ = df_temp.columns[1:]#can write ['x1', 'x2'] manually\ntrain_df = df_temp[train_cols_]\nlinear_model = sm.OLS(df_temp['final_unit_price'], train_df)\nres = linear_model.fit()\nprint(res.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: final_unit_price R-squared: 0.000\nModel: OLS Adj. R-squared: 0.000\nMethod: Least Squares F-statistic: 37.65\nDate: Sat, 22 Aug 2020 Prob (F-statistic): 5.94e-46\nTime: 07:32:01 Log-Likelihood: -3.1613e+06\nNo. 
Observations: 549989 AIC: 6.323e+06\nDf Residuals: 549982 BIC: 6.323e+06\nDf Model: 6 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nweekday_1 1.7547 0.416 4.218 0.000 0.939 2.570\nweekday_2 0.7403 0.405 1.829 0.067 -0.053 1.534\nweekday_3 -1.2450 0.372 -3.344 0.001 -1.975 -0.515\nweekday_4 -1.2111 0.395 -3.063 0.002 -1.986 -0.436\nweekday_5 3.0500 0.403 7.563 0.000 2.260 3.840\nweekday_6 1.5055 0.429 3.511 0.000 0.665 2.346\nintercept 71.1017 0.298 238.376 0.000 70.517 71.686\n==============================================================================\nOmnibus: 1500889.055 Durbin-Watson: 1.923\nProb(Omnibus): 0.000 Jarque-Bera (JB): 406614359100.909\nSkew: 33.204 Prob(JB): 0.00\nKurtosis: 4214.783 Cond. No. 8.65\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "res.params", "_____no_output_____" ], [ "coef = res.params.values\nx = train_df.values\ny = df_temp['final_unit_price']\nloss = 0\nfor i in range(len(y)):\n predict = np.dot(coef, x[i])\n loss += (predict - y[i])**2", "_____no_output_____" ], [ "loss", "_____no_output_____" ], [ "# 8*4 + 4*1\n# 7", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4af55f323920a2cbd041c804b1e0197903cf476f
824
ipynb
Jupyter Notebook
.ipynb_checkpoints/Untitled-checkpoint.ipynb
DarkDisasters/winglets_python
4b8a13ef8aefae6ffc0e59a87aa5131036bd2dff
[ "MIT" ]
2
2021-12-28T07:46:35.000Z
2022-01-13T19:44:44.000Z
.ipynb_checkpoints/Untitled-checkpoint.ipynb
DarkDisasters/winglets_python
4b8a13ef8aefae6ffc0e59a87aa5131036bd2dff
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Untitled-checkpoint.ipynb
DarkDisasters/winglets_python
4b8a13ef8aefae6ffc0e59a87aa5131036bd2dff
[ "MIT" ]
1
2021-05-27T10:58:55.000Z
2021-05-27T10:58:55.000Z
17.166667
56
0.470874
[ [ [ "dict = {'1': 'd', '100':'c', '2':'a', '11':'e'}\ndict.keys()\nmax(dict.keys())", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
4af56e798350b0dcb62d4a3bf4b82b80ca3bb417
18,546
ipynb
Jupyter Notebook
GradientBoostingJulia.ipynb
vikashranjan/gradient-boosting
f129ac64e2d12219a1c56650739f6087606146ac
[ "MIT" ]
null
null
null
GradientBoostingJulia.ipynb
vikashranjan/gradient-boosting
f129ac64e2d12219a1c56650739f6087606146ac
[ "MIT" ]
null
null
null
GradientBoostingJulia.ipynb
vikashranjan/gradient-boosting
f129ac64e2d12219a1c56650739f6087606146ac
[ "MIT" ]
null
null
null
59.442308
4,932
0.466516
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4af584550114449cdaa1f47b406d05c5a45ed0e6
387,595
ipynb
Jupyter Notebook
slides/2022-04-13-quadrature.ipynb
cu-numcomp/spring22
f4c1f9287bff2c10645809e65c21829064493a66
[ "MIT" ]
null
null
null
slides/2022-04-13-quadrature.ipynb
cu-numcomp/spring22
f4c1f9287bff2c10645809e65c21829064493a66
[ "MIT" ]
null
null
null
slides/2022-04-13-quadrature.ipynb
cu-numcomp/spring22
f4c1f9287bff2c10645809e65c21829064493a66
[ "MIT" ]
2
2022-02-09T21:05:12.000Z
2022-03-11T20:34:46.000Z
212.730516
15,563
0.679761
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4af58bad2196eb289429f031c036b13cfa4a81da
19,085
ipynb
Jupyter Notebook
notes/J4-List_Comprehensions.ipynb
agill8781/python
7207b168c1aa3a5cc978812d22a5a2633f8b7a7f
[ "CC0-1.0" ]
28
2019-03-01T23:42:41.000Z
2022-03-29T01:01:00.000Z
notes/J4-List_Comprehensions.ipynb
agill8781/python
7207b168c1aa3a5cc978812d22a5a2633f8b7a7f
[ "CC0-1.0" ]
1
2019-04-18T18:29:42.000Z
2019-04-18T18:29:42.000Z
notes/J4-List_Comprehensions.ipynb
agill8781/python
7207b168c1aa3a5cc978812d22a5a2633f8b7a7f
[ "CC0-1.0" ]
43
2018-12-12T20:11:01.000Z
2022-03-29T01:45:22.000Z
30.197785
420
0.587582
[ [ [ "# List, Set, and Dictionary Comprehensions", "_____no_output_____" ], [ "In our prior session we discussed a variety of loop patterns. \n\nOne of the most common patterns that we encounter in practice is the need to iterate through a list of values, transform the elements of the list using some operations, filter out the results, and return back a new list of values.\n\n", "_____no_output_____" ], [ "## Example", "_____no_output_____" ], [ "Let's examine again our example with the NBA teams and franchise names:", "_____no_output_____" ] ], [ [ "nba_teams = [\n \"Atlanta Hawks\", \"Boston Celtics\", \"Brooklyn Nets\", \"Charlotte Hornets\",\n \"Chicago Bulls\", \"Cleveland Cavaliers\", \"Dallas Mavericks\",\n \"Denver Nuggets\", \"Detroit Pistons\", \"Golden State Warriors\",\n \"Houston Rockets\", \"Indiana Pacers\", \"LA Clippers\", \"Los Angeles Lakers\",\n \"Memphis Grizzlies\", \"Miami Heat\", \"Milwaukee Bucks\",\n \"Minnesota Timberwolves\", \"New Orleans Pelicans\", \"New York Knicks\",\n \"Oklahoma City Thunder\", \"Orlando Magic\", \"Philadelphia 76ers\",\n \"Phoenix Suns\", \"Portland Trail Blazers\", \"Sacramento Kings\",\n \"San Antonio Spurs\", \"Toronto Raptors\", \"Utah Jazz\", \"Washington Wizards\"\n]\nprint(\"The list contains\", len(nba_teams), \"teams\")", "_____no_output_____" ], [ "franchise_names = [] # We create an empty list\nfor team in nba_teams: # We iterate over all elements of the list\n # Do some operation on the list element \"team\"\n # and get back the result \"franchise\"\n franchise = team.split()[-1] \n # Append the \"franchise\" element in the list that we created before the loop\n franchise_names.append(franchise)", "_____no_output_____" ] ], [ [ "And below we re-write the code above as a **list comprehension**. ", "_____no_output_____" ] ], [ [ "franchise_names = [ team.split()[-1] for team in nba_teams ] ", "_____no_output_____" ] ], [ [ "In other words, list comprehensions give us the ability to write a very common loop pattern as a one-liner. However, it is not just about brevity; when we see code that uses a list comprehension we understand quickly that the code is processing one list to create another, and the various elements are together in a very specific order. Such a clarity is not guaranteed with a loop, as loops may have many uses.\n\n", "_____no_output_____" ], [ "## Defining List Comprehensions", "_____no_output_____" ], [ "The syntax of list comprehensions is based on the way mathematicians define sets and lists, a syntax that leaves it clear what the contents should be. \n\nFor example `S` is a set of the square of all integer numbers from 0 to 9. In math notation, we write:\n\n+ `S = {x² : x in {0 ... 9}}`\n\nPython's list comprehensions give a very natural way to write statements just like these. It may look strange early on, but it becomes a very natural and concise way of creating lists, without having to write for-loops.\n\nLet's see again the comparison with for loops:", "_____no_output_____" ] ], [ [ "# This code below will create a list with the squares\n# of the numbers from 0 to 9 \nS = [] # we create an empty list\nfor i in range(10): # We iterate over all numbers from 0 to 9\n S.append(i*i) # We add in the list the square of the number i\nprint(S )# we print(the list)", "_____no_output_____" ], [ "S = [i*i for i in range(10)]\nprint(S)", "_____no_output_____" ] ], [ [ "Let's do one more example. 
The `V` is the powers of 2 from $2^0$ until $2^{12}$:\n\n+ `V = (1, 2, 4, 8, ..., 2¹²)`\n", "_____no_output_____" ] ], [ [ "V=[] # Create a list\nfor i in range(13): # Change i to be from 0 to 12\n V.append(2**i) # Add 2**i in the new list\nprint(V)", "_____no_output_____" ], [ "# And rewritten as a list comprehension:\nV = [2**i for i in range(13)]\nprint(V)", "_____no_output_____" ] ], [ [ "Again notice the structure:\n```python\nnewlist = []\nfor i in somelist:\n x = do_something_with(i)\n newlist.append(x)\n```\ngets rewritten as\n```python\nnewlist = [do_something_with(i) for i in somelist]\n```", "_____no_output_____" ], [ "## The *if* statement within a list comprehension\n\n", "_____no_output_____" ], [ "Now let's consider the following case. We want to process the list of NBA teams, and keep in a list the teams that have a franchise name that contains a given substring. \n\nIn the example below, we will try to find all the teams that start with the letter `B`.\n", "_____no_output_____" ] ], [ [ "nba_teams = [\n \"Atlanta Hawks\", \"Boston Celtics\", \"Brooklyn Nets\", \"Charlotte Hornets\",\n \"Chicago Bulls\", \"Cleveland Cavaliers\", \"Dallas Mavericks\",\n \"Denver Nuggets\", \"Detroit Pistons\", \"Golden State Warriors\",\n \"Houston Rockets\", \"Indiana Pacers\", \"LA Clippers\", \"Los Angeles Lakers\",\n \"Memphis Grizzlies\", \"Miami Heat\", \"Milwaukee Bucks\",\n \"Minnesota Timberwolves\", \"New Orleans Pelicans\", \"New York Knicks\",\n \"Oklahoma City Thunder\", \"Orlando Magic\", \"Philadelphia 76ers\",\n \"Phoenix Suns\", \"Portland Trail Blazers\", \"Sacramento Kings\",\n \"San Antonio Spurs\", \"Toronto Raptors\", \"Utah Jazz\", \"Washington Wizards\"\n]", "_____no_output_____" ], [ "franchise_names = []\nlook_for = 'B' #looking\nfor team in nba_teams:\n franchise = team.split()[-1]\n if franchise.startswith(look_for):\n franchise_names.append(franchise)\nprint(franchise_names)", "_____no_output_____" ] ], [ [ "This pattern, where we do not add *all* the elements in the resulting list is also very common. List comprehensions allow such patterns to be also expressed as list comprehensions", "_____no_output_____" ] ], [ [ "look_for = 'B'\nfranchise_names = [team.split()[-1] for team in nba_teams if team.split()[-1].startswith(look_for)]", "_____no_output_____" ], [ "print(franchise_names)", "_____no_output_____" ], [ "# Alternatively, you can even break the lines within a comprehension\n# This may help with readability\nfranchise_names = [team.split()[-1] \n for team in nba_teams \n if team.split()[-1].startswith(look_for)]", "_____no_output_____" ], [ "print(franchise_names)", "_____no_output_____" ] ], [ [ "Here is another example, with a list comprehension. We have `S` is a set of the square of all integer numbers from 0 to 9, and we define `M` to be all the elements in `S` that are even. In math notation:\n\n+ `S = {x² : x in {0 ... 9}}`\n+ `M = {x | x in S and x even}`\n\nNow let's write the above as list comprehensions. 
**Note the list comprehension for deriving M uses a \"if statement\" to filter out those values that aren't of interest**, restricting to only the even squares.", "_____no_output_____" ] ], [ [ "S = [i*i for i in range(10)]\nprint(S)", "_____no_output_____" ], [ "M = []\nfor i in S: # iterate through all elements in S\n if i%2 == 0: # if i is an event number\n M.append(i) # ..add it to the list\nprint(M)", "_____no_output_____" ], [ "M = [x for x in S if x%2 == 0]\nprint(M)", "_____no_output_____" ] ], [ [ "These are simple examples, using numerical compuation. Let's see a more \"practical\" use: In the following operation we transform a string into an list of values, a more complex operation: ", "_____no_output_____" ] ], [ [ "sentence = 'The quick brown fox jumps over the lazy dog'\nwords = [(w.upper(), w.lower(), len(w)) for w in sentence.split()]\nwords", "_____no_output_____" ] ], [ [ "So, what the code does here? It takes as input the string `sentence`, creates a list of words, and for each word it creates a tuple, with the word in uppercase, lowercase, together with the length of the word.", "_____no_output_____" ], [ "## Set and Dictionary Comprehensions", "_____no_output_____" ], [ "In addition to _list_ comprehensions, we also have the same principle for sets and dictionaries. We can create sets and dictionaries in the same way, but now we do not use square brackets to surround the comprehension, but use braces instead. ", "_____no_output_____" ] ], [ [ "# Creating a set instead of a list.\nS = {i*i for i in range(10)}\nS", "_____no_output_____" ], [ "# Dictionary comprehension, where team name becomes the key, and franchise name the value\nteams_franchise = {team:team.split()[-1] for team in nba_teams}\nteams_franchise", "_____no_output_____" ], [ "# Dictionary comprehension, where team name becomes the key, and franchise name the value\nwords = {w:len(w) for w in sentence.split()}\nwords", "_____no_output_____" ] ], [ [ "## Exercise", "_____no_output_____" ], [ "You are given the sentence 'The quick brown fox jumps over the lazy dog', ", "_____no_output_____" ] ], [ [ "sentence = 'The quick brown fox jumps over the lazy dog'", "_____no_output_____" ] ], [ [ "\n**Question 1**: List each word and its length from the string 'The quick brown fox jumps over the lazy dog', conditioned on the length of the word being four characters and above\n\n**Question 2**: List only words with the letter o in them\n", "_____no_output_____" ] ], [ [ "# List each word and its length from the string \n# 'The quick brown fox jumps over the lazy dog', \n# conditioned on the length of the word being four characters and above\n", "_____no_output_____" ] ], [ [ "### Solution", "_____no_output_____" ] ], [ [ "[ (word, len(word)) for word in sentence.split() if len(word)>=4]", "_____no_output_____" ], [ "# List only words with the letter o in them\n", "_____no_output_____" ] ], [ [ "### Solution", "_____no_output_____" ] ], [ [ "[ word for word in sentence.split() if 'o' in word]", "_____no_output_____" ] ], [ [ "## Exercise", "_____no_output_____" ], [ "We will work now on a more challenging exercise. This will not only require the use of comprehensions, but will also ask you to put together things that we learned earlier in the course, especially when we studied strings.\n\n**Question 1**: You are given the `wsj` article below. Write a list comprehension for getting the words that appear more than once. 
\n * Use the `.split()` command for splitting, without passing a parameter.\n * When counting words, case does not matter (i.e., YAHOO is the same as Yahoo).\n\n**Question 2**: Find all the *characters* in the article that are not letters or numbers. You can use the isdigit() and isalpha() functions, which work on strings. (e.g, `\"Panos\".isalpha()` and `\"1234\".isdigit()` return True) ", "_____no_output_____" ] ], [ [ "wsj = \"\"\"\nYahoo Inc. disclosed a massive security breach by a “state-sponsored actor” affecting at least 500 million users, potentially the largest such data breach on record and the latest hurdle for the beaten-down internet company as it works through the sale of its core business.\nYahoo said certain user account information—including names, email addresses, telephone numbers, dates of birth, hashed passwords and, in some cases, encrypted or unencrypted security questions and answers—was stolen from the company’s network in late 2014 by what it believes is a state-sponsored actor.\nYahoo said it is notifying potentially affected users and has taken steps to secure their accounts by invalidating unencrypted security questions and answers so they can’t be used to access an account and asking potentially affected users to change their passwords.\nYahoo recommended users who haven’t changed their passwords since 2014 do so. It also encouraged users change their passwords as well as security questions and answers for any other accounts on which they use the same or similar information used for their Yahoo account.\nThe company, which is working with law enforcement, said the continuing investigation indicates that stolen information didn't include unprotected passwords, payment-card data or bank account information.\nWith 500 million user accounts affected, this is the largest-ever publicly disclosed data breach, according to Paul Stephens, director of policy and advocacy with Privacy Rights Clearing House, a not-for-profit group that compiles information on data breaches.\nNo evidence has been found to suggest the state-sponsored actor is currently in Yahoo’s network, and Yahoo didn’t name the country it suspected was involved. In August, a hacker called “Peace” appeared in online forums, offering to sell 200 million of the company’s usernames and passwords for about $1,900 in total. Peace had previously sold data taken from breaches at Myspace and LinkedIn Corp.\n\"\"\"", "_____no_output_____" ], [ "# getting the words that appear more than once\n", "_____no_output_____" ] ], [ [ "### Solution", "_____no_output_____" ] ], [ [ "words = wsj.lower().split()\nrecurring = [w for w in words if words.count(w)>1]\nprint(recurring)", "_____no_output_____" ], [ "print(sorted(set(recurring)))", "_____no_output_____" ], [ "# Find all the *characters* in the article that are not letters or numbers", "_____no_output_____" ] ], [ [ "### Solution", "_____no_output_____" ] ], [ [ "# Let's use a set comprehension here, to eliminate duplicates\nnonalphanumeric = {c for c in wsj if not c.isdigit() and not c.isalpha()}\nprint(nonalphanumeric)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4af5a42bfd13355511d4c483cd9b45cbd0ead466
7,115
ipynb
Jupyter Notebook
Jupyter notebooks/Circuitos Eletricos I - Semana 10.ipynb
Willh-AM/ElectricCircuits
32dc2cd79498f2819967b747a792b7db2822f8bc
[ "MIT" ]
9
2021-05-19T18:36:53.000Z
2022-01-18T16:30:17.000Z
Jupyter notebooks/Circuitos Eletricos I - Semana 10.ipynb
Willh-AM/ElectricCircuits
32dc2cd79498f2819967b747a792b7db2822f8bc
[ "MIT" ]
null
null
null
Jupyter notebooks/Circuitos Eletricos I - Semana 10.ipynb
Willh-AM/ElectricCircuits
32dc2cd79498f2819967b747a792b7db2822f8bc
[ "MIT" ]
10
2021-06-25T12:52:40.000Z
2022-03-11T14:25:48.000Z
22.587302
113
0.475193
[ [ [ "# *Circuitos Elétricos I - Semana 10*", "_____no_output_____" ], [ "### Problema 1\n \n(Problema 7.19 - Nilsson) Para o circuito abaixo, pede-se:\n\n<img src=\"./figures/J13C1.png\" width=\"400\">\n\na) Determine a tensão $v_0(t)$ sobre o indutor de $48\\;mH$ para $t\\geq0$.\\\nb) Determine a corrente $i_0(t)$ sobre o indutor de $48\\;mH$ para $t\\geq0$.\\\nc) Determine a energia consumida pelo resistor de $2.5\\;k\\Omega$ no intervalo $0\\leq t \\leq\\infty$.\n\nLink para a simulação do circuito: https://tinyurl.com/yj69udn8", "_____no_output_____" ] ], [ [ "# valores das indutâncias\nL1 = 20e-3\nL2 = 80e-3\nL3 = 48e-3\n\n# valores iniciais das correntes\ni1_0 = 5e-3\ni2_0 = 5e-3\ni3_0 = 0", "_____no_output_____" ], [ "# indutância equivalente\nLeq1 = (L2*L3)/(L2+L3)\nLeq = L1 + Leq1\n\nprint('Leq = ', Leq/1e-3, ' mH')", "Leq = 50.0 mH\n" ], [ "R = 2.5e3\n\n# constante de tempo\nτ = Leq/R\n\nprint('τ = ', τ, ' s')", "τ = 2e-05 s\n" ], [ "import sympy as sp\n\niL_inf = 0\niL_0 = i1_0\n\n# define as variável tempo \nt = sp.symbols('t')\n\n# define i(t)\niL = iL_inf + (iL_0 - iL_inf)*sp.exp(-t/τ)\n\nprint('Corrente no indutor equivalente:')\nprint('iL(t) = ', iL/1e-3 , ' mA')", "Corrente no indutor equivalente:\niL(t) = 5.0*exp(-50000.0*t) mA\n" ], [ "# calcula v0\nv0 = Leq1*sp.diff(iL,t)\n\nprint('v0(t) = ', v0 , ' V')", "v0(t) = -7.5*exp(-50000.0*t) V\n" ], [ "# correntes nos indutores em função da tensão aplicada aos terminais\ni1 = iL\ni2 = (1/L2)*sp.integrate(v0, (t, 0, t)) + i2_0\ni3 = (1/L3)*sp.integrate(v0, (t, 0, t)) + i3_0\n\nprint('Correntes nos indutores:')\nprint('i1(t) = ', i1/1e-3 , ' mA')\nprint('i2(t) = ', i2/1e-3 , ' mA')\nprint('i3(t) = ', i3/1e-3 , ' mA')", "Correntes nos indutores:\ni1(t) = 5.0*exp(-50000.0*t) mA\ni2(t) = 3.125 + 1.875*exp(-50000.0*t) mA\ni3(t) = -3.125 + 3.125*exp(-50000.0*t) mA\n" ], [ "# calculando os valores de energia em t=0\nE1_0 = (1/2)*L1*(i1.evalf(subs={t:0}))**2\nE2_0 = (1/2)*L2*(i2.evalf(subs={t:0}))**2\nE3_0 = (1/2)*L3*(i3.evalf(subs={t:0}))**2\n\nprint('Energia inicial armazenada nos indutores:')\nprint('E1(0) = %.2f μJ' %(E1_0/1e-6))\nprint('E2(0) = %.2f μJ' %(E2_0/1e-6))\nprint('E3(0) = %.2f μJ' %(E3_0/1e-6))", "Energia inicial armazenada nos indutores:\nE1(0) = 0.25 μJ\nE2(0) = 1.00 μJ\nE3(0) = 0.00 μJ\n" ], [ "# calculando os valores de energia em t =oo\nE1_inf = (1/2)*L1*(i1.evalf(subs={t:100}))**2\nE2_inf = (1/2)*L2*(i2.evalf(subs={t:100}))**2\nE3_inf = (1/2)*L3*(i3.evalf(subs={t:100}))**2\n\nprint('Energia final armazenada nos indutores:')\nprint('E1(oo) = %.2f μJ' %(E1_inf/1e-6))\nprint('E2(oo) = %.2f μJ' %(E2_inf/1e-6))\nprint('E3(oo) = %.2f μJ' %(E3_inf/1e-6))", "Energia final armazenada nos indutores:\nE1(oo) = 0.00 μJ\nE2(oo) = 0.39 μJ\nE3(oo) = 0.23 μJ\n" ], [ "# calculando a variação de energia nos indutores\n\nΔE = (E1_inf-E1_0) + (E2_inf-E2_0) + (E3_inf-E3_0)\n\nprint('Variação da energia armazenada nos indutores:')\nprint('ΔE = %.2f μJ' %(ΔE/1e-6))", "Variação da energia armazenada nos indutores:\nΔE = -0.63 μJ\n" ], [ "# define tensão sobre o resistor vR(t)\nvR = R*i1 \n\n# potência consumida pelo resistor\np = vR*i1\n\n# energia consumida pelo resistor\nE = sp.integrate(p, (t, 0, sp.oo))\nprint('Energia consumida pelo resistor:')\nprint('E = %.2f μJ' %(E/1e-6))", "Energia consumida pelo resistor:\nE = 0.63 μJ\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af5a5101f98fe27fca41b856f3dbc14be03dee3
40,053
ipynb
Jupyter Notebook
Data_analysis/SF Salaries Exercise.ipynb
suyogdahal/Data-Science
230fd55ff8fb9799507a875c413623234985479c
[ "MIT" ]
null
null
null
Data_analysis/SF Salaries Exercise.ipynb
suyogdahal/Data-Science
230fd55ff8fb9799507a875c413623234985479c
[ "MIT" ]
null
null
null
Data_analysis/SF Salaries Exercise.ipynb
suyogdahal/Data-Science
230fd55ff8fb9799507a875c413623234985479c
[ "MIT" ]
null
null
null
44.751955
17,140
0.659651
[ [ [ "___\n\n<a href='http://www.pieriandata.com'> <img src='../../Pierian_Data_Logo.png' /></a>\n___", "_____no_output_____" ], [ "# SF Salaries Exercise \n\nWelcome to a quick exercise for you to practice your pandas skills! We will be using the [SF Salaries Dataset](https://www.kaggle.com/kaggle/sf-salaries) from Kaggle! Just follow along and complete the tasks outlined in bold below. The tasks will get harder and harder as you go along.", "_____no_output_____" ], [ "** Import pandas as pd.**", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "** Read Salaries.csv as a dataframe called sal.**", "_____no_output_____" ] ], [ [ "sal = pd.read_csv('data/Salaries.csv')", "_____no_output_____" ] ], [ [ "** Check the head of the DataFrame. **", "_____no_output_____" ] ], [ [ "sal.head()", "_____no_output_____" ] ], [ [ "** Use the .info() method to find out how many entries there are.**", "_____no_output_____" ] ], [ [ "sal.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 148654 entries, 0 to 148653\nData columns (total 13 columns):\nId 148654 non-null int64\nEmployeeName 148654 non-null object\nJobTitle 148654 non-null object\nBasePay 148045 non-null float64\nOvertimePay 148650 non-null float64\nOtherPay 148650 non-null float64\nBenefits 112491 non-null float64\nTotalPay 148654 non-null float64\nTotalPayBenefits 148654 non-null float64\nYear 148654 non-null int64\nNotes 0 non-null float64\nAgency 148654 non-null object\nStatus 0 non-null float64\ndtypes: float64(8), int64(2), object(3)\nmemory usage: 14.7+ MB\n" ] ], [ [ "**What is the average BasePay ?**", "_____no_output_____" ] ], [ [ "sal['BasePay'].mean()", "_____no_output_____" ] ], [ [ "** What is the highest amount of OvertimePay in the dataset ? **", "_____no_output_____" ] ], [ [ "sal['OvertimePay'].max()", "_____no_output_____" ] ], [ [ "** What is the job title of JOSEPH DRISCOLL ? Note: Use all caps, otherwise you may get an answer that doesn't match up (there is also a lowercase Joseph Driscoll). **", "_____no_output_____" ] ], [ [ "sal[sal['EmployeeName'] == 'JOSEPH DRISCOLL']['JobTitle']", "_____no_output_____" ] ], [ [ "** How much does JOSEPH DRISCOLL make (including benefits)? **", "_____no_output_____" ] ], [ [ "sal[sal['EmployeeName'] == 'JOSEPH DRISCOLL']['TotalPayBenefits']", "_____no_output_____" ] ], [ [ "** What is the name of highest paid person (including benefits)?**", "_____no_output_____" ] ], [ [ "sal[sal[\"TotalPayBenefits\"]==sal[\"TotalPayBenefits\"].max()]", "_____no_output_____" ] ], [ [ "** What is the name of lowest paid person (including benefits)? Do you notice something strange about how much he or she is paid?**", "_____no_output_____" ] ], [ [ "sal[sal[\"TotalPayBenefits\"]==sal[\"TotalPayBenefits\"].min()]", "_____no_output_____" ] ], [ [ "** What was the average (mean) BasePay of all employees per year? (2011-2014) ? **", "_____no_output_____" ] ], [ [ "sal.groupby('Year').mean()['BasePay']", "_____no_output_____" ] ], [ [ "** How many unique job titles are there? **", "_____no_output_____" ] ], [ [ "sal['JobTitle'].nunique()", "_____no_output_____" ] ], [ [ "** What are the top 5 most common jobs? **", "_____no_output_____" ] ], [ [ "sal['JobTitle'].value_counts().sort_values(ascending=False).head()", "_____no_output_____" ] ], [ [ "** How many Job Titles were represented by only one person in 2013? (e.g. Job Titles with only one occurence in 2013?) 
**", "_____no_output_____" ] ], [ [ "(sal[sal['Year']==2013]['JobTitle'].value_counts()==1).sum()\n", "_____no_output_____" ] ], [ [ "** How many people have the word Chief in their job title? (This is pretty tricky) **", "_____no_output_____" ] ], [ [ "def check(x):\n '''To convert the jobtitles into lower case,split it and check if it has 'chief' in it '''\n x=x.lower().split()\n if 'chief' in x:\n return True\n else:\n return False\n ", "_____no_output_____" ], [ "a=list()\nfor i in sal['JobTitle']:\n a.append(check(i))\nsum(a)", "_____no_output_____" ] ], [ [ "** Bonus: Is there a correlation between length of the Job Title string and Salary? **", "_____no_output_____" ] ], [ [ "sal['len'] = sal['JobTitle'].apply(len)", "_____no_output_____" ], [ "sal[['len','TotalPayBenefits']].corr()", "_____no_output_____" ] ], [ [ "**Plotting**", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\n%matplotlib inline\nplt.scatter(sal['len'],sal['TotalPayBenefits'],color='R')\nplt.xlabel('Length of JobTitles')\nplt.ylabel('TotalPayBenefits')", "_____no_output_____" ] ], [ [ "# Great Job!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4af5ae9bbcca24c83a24e0381eb4d414a6c19a34
21,552
ipynb
Jupyter Notebook
03_Grouping/Alcohol_Consumption/Exercise.ipynb
ZhuoqunWang0120/pandas_exercises
e3aabe63757ed98aef4bc4cf6b0ae295e98079bc
[ "BSD-3-Clause" ]
null
null
null
03_Grouping/Alcohol_Consumption/Exercise.ipynb
ZhuoqunWang0120/pandas_exercises
e3aabe63757ed98aef4bc4cf6b0ae295e98079bc
[ "BSD-3-Clause" ]
null
null
null
03_Grouping/Alcohol_Consumption/Exercise.ipynb
ZhuoqunWang0120/pandas_exercises
e3aabe63757ed98aef4bc4cf6b0ae295e98079bc
[ "BSD-3-Clause" ]
null
null
null
29.362398
133
0.32874
[ [ [ "# Ex - GroupBy", "_____no_output_____" ], [ "### Introduction:\n\nGroupBy can be summarized as Split-Apply-Combine.\n\nSpecial thanks to: https://github.com/justmarkham for sharing the dataset and materials.\n\nCheck out this [Diagram](http://i.imgur.com/yjNkiwL.png) \n### Step 1. Import the necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/drinks.csv). ", "_____no_output_____" ], [ "### Step 3. Assign it to a variable called drinks.", "_____no_output_____" ] ], [ [ "url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/drinks.csv'\ndrinks = pd.read_csv(url, sep = ',')\ndrinks.head(10)", "_____no_output_____" ] ], [ [ "### Step 4. Which continent drinks more beer on average?", "_____no_output_____" ] ], [ [ "drinks.groupby('continent').beer_servings.mean().sort_values(ascending = False)", "_____no_output_____" ] ], [ [ "### Step 5. For each continent print the statistics for wine consumption.", "_____no_output_____" ] ], [ [ "drinks.groupby('continent').wine_servings.describe()", "_____no_output_____" ] ], [ [ "### Step 6. Print the mean alcohol consumption per continent for every column", "_____no_output_____" ] ], [ [ "drinks.groupby('continent').mean()", "_____no_output_____" ] ], [ [ "### Step 7. Print the median alcohol consumption per continent for every column", "_____no_output_____" ] ], [ [ "drinks.groupby('continent').median()", "_____no_output_____" ] ], [ [ "### Step 8. Print the mean, min and max values for spirit consumption.\n#### This time output a DataFrame", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4af5ba84c849dfd00f491e26a1b97012e3539e27
294,256
ipynb
Jupyter Notebook
Chapter03/Exercise3.07/Exercise03_07.ipynb
PacktWorkshops/The-Reinforcement-Learning-Workshop
04e8c72bc9e46d66846b748c074b26a1b724fae0
[ "MIT" ]
24
2020-04-08T01:57:02.000Z
2022-03-24T18:36:14.000Z
Chapter03/Exercise3.07/Exercise03_07.ipynb
PacktWorkshops/The-Reinforcement-Learning-Workshop
04e8c72bc9e46d66846b748c074b26a1b724fae0
[ "MIT" ]
10
2020-03-24T19:49:14.000Z
2022-03-12T00:33:01.000Z
Chapter03/Exercise3.07/Exercise03_07.ipynb
PacktWorkshops/The-Reinforcement-Learning-Workshop
04e8c72bc9e46d66846b748c074b26a1b724fae0
[ "MIT" ]
32
2020-04-08T12:07:11.000Z
2022-03-25T15:49:10.000Z
84.483491
193
0.653166
[ [ [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom IPython import display\nfrom matplotlib import pyplot as plt\nfrom scipy.ndimage.filters import gaussian_filter1d\nimport pandas as pd\nimport numpy as np\nimport datetime\n\nimport tensorflow as tf\n\n!rm -rf ./logs/ \n\n# Load the TensorBoard notebook extension\n%load_ext tensorboard", "_____no_output_____" ], [ "higgs_path = tf.keras.utils.get_file('HIGGSSmall.csv.gz', 'https://github.com/PacktWorkshops/The-Reinforcement-Learning-Workshop/blob/master/Chapter03/Dataset/HIGGSSmall.csv.gz?raw=true')", "_____no_output_____" ], [ "N_TEST = int(1e3)\nN_VALIDATION = int(1e3)\nN_TRAIN = int(1e4)\nBUFFER_SIZE = int(N_TRAIN)\nBATCH_SIZE = 500\nSTEPS_PER_EPOCH = N_TRAIN//BATCH_SIZE\n\nN_FEATURES = 28\n\nds = tf.data.experimental.CsvDataset(higgs_path,[float(),]*(N_FEATURES+1), compression_type=\"GZIP\")\n\ndef pack_row(*row):\n label = row[0]\n features = tf.stack(row[1:],1)\n return features, label\n\npacked_ds = ds.batch(N_TRAIN).map(pack_row).unbatch()", "_____no_output_____" ], [ "validate_ds = packed_ds.take(N_VALIDATION).cache()\ntest_ds = packed_ds.skip(N_VALIDATION).take(N_TEST).cache()\ntrain_ds = packed_ds.skip(N_VALIDATION+N_TEST).take(N_TRAIN).cache()\n\ntest_ds = test_ds.batch(BATCH_SIZE)\nvalidate_ds = validate_ds.batch(BATCH_SIZE)\ntrain_ds = train_ds.shuffle(BUFFER_SIZE).repeat().batch(BATCH_SIZE)", "_____no_output_____" ], [ "lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(\n 0.001,\n decay_steps=STEPS_PER_EPOCH*1000,\n decay_rate=1,\n staircase=False)", "_____no_output_____" ], [ "log_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\ndef compile_and_fit(model, name, max_epochs=3000):\n \n optimizer = tf.keras.optimizers.Adam(lr_schedule)\n \n model.compile(optimizer=optimizer,\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=[\n tf.keras.losses.BinaryCrossentropy(\n from_logits=True, name='binary_crossentropy'),\n 'accuracy'])\n\n model.summary()\n\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, profile_batch=0)\n \n history = model.fit(train_ds,\n steps_per_epoch = STEPS_PER_EPOCH,\n epochs=max_epochs,\n validation_data=validate_ds,\n callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_binary_crossentropy', patience=200),\n tensorboard_callback],\n verbose=2)\n return history", "_____no_output_____" ], [ "regularization_model = tf.keras.Sequential([\n tf.keras.layers.Dense(512, kernel_regularizer=tf.keras.regularizers.l2(0.0001),\n activation='elu', input_shape=(N_FEATURES,)),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(512, kernel_regularizer=tf.keras.regularizers.l2(0.0001),\n activation='elu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(512, kernel_regularizer=tf.keras.regularizers.l2(0.0001),\n activation='elu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(512, kernel_regularizer=tf.keras.regularizers.l2(0.0001),\n activation='elu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(1)\n])\n\ncompile_and_fit(regularization_model, \"regularizers/regularization\", max_epochs=9000)", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 512) 14848 \n_________________________________________________________________\ndropout (Dropout) (None, 512) 0 
\n_________________________________________________________________\ndense_1 (Dense) (None, 512) 262656 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 512) 262656 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 512) 262656 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 1) 513 \n=================================================================\nTotal params: 803,329\nTrainable params: 803,329\nNon-trainable params: 0\n_________________________________________________________________\nTrain for 20 steps\nEpoch 1/9000\n20/20 - 3s - loss: 0.9489 - binary_crossentropy: 0.7904 - accuracy: 0.5092 - val_loss: 0.8285 - val_binary_crossentropy: 0.6707 - val_accuracy: 0.4850\nEpoch 2/9000\n20/20 - 1s - loss: 0.9025 - binary_crossentropy: 0.7454 - accuracy: 0.5135 - val_loss: 0.8213 - val_binary_crossentropy: 0.6651 - val_accuracy: 0.5110\nEpoch 3/9000\n20/20 - 0s - loss: 0.8834 - binary_crossentropy: 0.7280 - accuracy: 0.5225 - val_loss: 0.8154 - val_binary_crossentropy: 0.6610 - val_accuracy: 0.5120\nEpoch 4/9000\n20/20 - 0s - loss: 0.8602 - binary_crossentropy: 0.7067 - accuracy: 0.5365 - val_loss: 0.8075 - val_binary_crossentropy: 0.6550 - val_accuracy: 0.5290\nEpoch 5/9000\n20/20 - 1s - loss: 0.8518 - binary_crossentropy: 0.7001 - accuracy: 0.5423 - val_loss: 0.8002 - val_binary_crossentropy: 0.6495 - val_accuracy: 0.5660\nEpoch 6/9000\n20/20 - 0s - loss: 0.8402 - binary_crossentropy: 0.6904 - accuracy: 0.5528 - val_loss: 0.8016 - val_binary_crossentropy: 0.6527 - val_accuracy: 0.5830\nEpoch 7/9000\n20/20 - 0s - loss: 0.8290 - binary_crossentropy: 0.6811 - accuracy: 0.5529 - val_loss: 0.7915 - val_binary_crossentropy: 0.6447 - val_accuracy: 0.5510\nEpoch 8/9000\n20/20 - 1s - loss: 0.8231 - binary_crossentropy: 0.6771 - accuracy: 0.5498 - val_loss: 0.7883 - val_binary_crossentropy: 0.6434 - val_accuracy: 0.5630\nEpoch 9/9000\n20/20 - 0s - loss: 0.8179 - binary_crossentropy: 0.6739 - accuracy: 0.5584 - val_loss: 0.7901 - val_binary_crossentropy: 0.6472 - val_accuracy: 0.5710\nEpoch 10/9000\n20/20 - 0s - loss: 0.8074 - binary_crossentropy: 0.6653 - accuracy: 0.5662 - val_loss: 0.7834 - val_binary_crossentropy: 0.6424 - val_accuracy: 0.5430\nEpoch 11/9000\n20/20 - 0s - loss: 0.8047 - binary_crossentropy: 0.6646 - accuracy: 0.5597 - val_loss: 0.7763 - val_binary_crossentropy: 0.6372 - val_accuracy: 0.5720\nEpoch 12/9000\n20/20 - 1s - loss: 0.7945 - binary_crossentropy: 0.6564 - accuracy: 0.5723 - val_loss: 0.7812 - val_binary_crossentropy: 0.6440 - val_accuracy: 0.6060\nEpoch 13/9000\n20/20 - 0s - loss: 0.7945 - binary_crossentropy: 0.6583 - accuracy: 0.5698 - val_loss: 0.7711 - val_binary_crossentropy: 0.6359 - val_accuracy: 0.5910\nEpoch 14/9000\n20/20 - 0s - loss: 0.7920 - binary_crossentropy: 0.6577 - accuracy: 0.5655 - val_loss: 0.7679 - val_binary_crossentropy: 0.6347 - val_accuracy: 0.5770\nEpoch 15/9000\n20/20 - 1s - loss: 0.7880 - binary_crossentropy: 0.6558 - accuracy: 0.5677 - val_loss: 0.7710 - val_binary_crossentropy: 0.6398 - val_accuracy: 0.5980\nEpoch 16/9000\n20/20 - 0s - loss: 0.7855 - binary_crossentropy: 0.6552 - accuracy: 0.5750 - val_loss: 
0.7619 - val_binary_crossentropy: 0.6326 - val_accuracy: 0.5840\nEpoch 17/9000\n20/20 - 0s - loss: 0.7779 - binary_crossentropy: 0.6495 - accuracy: 0.5725 - val_loss: 0.7613 - val_binary_crossentropy: 0.6340 - val_accuracy: 0.5650\nEpoch 18/9000\n20/20 - 1s - loss: 0.7779 - binary_crossentropy: 0.6516 - accuracy: 0.5698 - val_loss: 0.7490 - val_binary_crossentropy: 0.6237 - val_accuracy: 0.5940\nEpoch 19/9000\n20/20 - 0s - loss: 0.7691 - binary_crossentropy: 0.6448 - accuracy: 0.5830 - val_loss: 0.7539 - val_binary_crossentropy: 0.6306 - val_accuracy: 0.6150\nEpoch 20/9000\n20/20 - 0s - loss: 0.7650 - binary_crossentropy: 0.6427 - accuracy: 0.5912 - val_loss: 0.7425 - val_binary_crossentropy: 0.6212 - val_accuracy: 0.5860\nEpoch 21/9000\n20/20 - 1s - loss: 0.7591 - binary_crossentropy: 0.6387 - accuracy: 0.5868 - val_loss: 0.7427 - val_binary_crossentropy: 0.6233 - val_accuracy: 0.6090\nEpoch 22/9000\n20/20 - 0s - loss: 0.7555 - binary_crossentropy: 0.6370 - accuracy: 0.5977 - val_loss: 0.7336 - val_binary_crossentropy: 0.6161 - val_accuracy: 0.5820\nEpoch 23/9000\n20/20 - 0s - loss: 0.7566 - binary_crossentropy: 0.6401 - accuracy: 0.5921 - val_loss: 0.7363 - val_binary_crossentropy: 0.6208 - val_accuracy: 0.6130\nEpoch 24/9000\n20/20 - 1s - loss: 0.7531 - binary_crossentropy: 0.6384 - accuracy: 0.6009 - val_loss: 0.7319 - val_binary_crossentropy: 0.6183 - val_accuracy: 0.5940\nEpoch 25/9000\n20/20 - 0s - loss: 0.7492 - binary_crossentropy: 0.6366 - accuracy: 0.5832 - val_loss: 0.7259 - val_binary_crossentropy: 0.6142 - val_accuracy: 0.6090\nEpoch 26/9000\n20/20 - 0s - loss: 0.7445 - binary_crossentropy: 0.6337 - accuracy: 0.6044 - val_loss: 0.7221 - val_binary_crossentropy: 0.6123 - val_accuracy: 0.6040\nEpoch 27/9000\n20/20 - 1s - loss: 0.7441 - binary_crossentropy: 0.6353 - accuracy: 0.6038 - val_loss: 0.7244 - val_binary_crossentropy: 0.6166 - val_accuracy: 0.6000\nEpoch 28/9000\n20/20 - 0s - loss: 0.7387 - binary_crossentropy: 0.6318 - accuracy: 0.6058 - val_loss: 0.7218 - val_binary_crossentropy: 0.6159 - val_accuracy: 0.5950\nEpoch 29/9000\n20/20 - 0s - loss: 0.7359 - binary_crossentropy: 0.6308 - accuracy: 0.6045 - val_loss: 0.7102 - val_binary_crossentropy: 0.6062 - val_accuracy: 0.6170\nEpoch 30/9000\n20/20 - 1s - loss: 0.7340 - binary_crossentropy: 0.6308 - accuracy: 0.6088 - val_loss: 0.7085 - val_binary_crossentropy: 0.6062 - val_accuracy: 0.6080\nEpoch 31/9000\n20/20 - 0s - loss: 0.7324 - binary_crossentropy: 0.6311 - accuracy: 0.6044 - val_loss: 0.7051 - val_binary_crossentropy: 0.6048 - val_accuracy: 0.6320\nEpoch 32/9000\n20/20 - 0s - loss: 0.7275 - binary_crossentropy: 0.6280 - accuracy: 0.6113 - val_loss: 0.7011 - val_binary_crossentropy: 0.6026 - val_accuracy: 0.6030\nEpoch 33/9000\n20/20 - 1s - loss: 0.7263 - binary_crossentropy: 0.6286 - accuracy: 0.6156 - val_loss: 0.7026 - val_binary_crossentropy: 0.6060 - val_accuracy: 0.6120\nEpoch 34/9000\n20/20 - 0s - loss: 0.7224 - binary_crossentropy: 0.6265 - accuracy: 0.6158 - val_loss: 0.6961 - val_binary_crossentropy: 0.6012 - val_accuracy: 0.6100\nEpoch 35/9000\n20/20 - 0s - loss: 0.7182 - binary_crossentropy: 0.6241 - accuracy: 0.6168 - val_loss: 0.6958 - val_binary_crossentropy: 0.6026 - val_accuracy: 0.6130\nEpoch 36/9000\n20/20 - 0s - loss: 0.7175 - binary_crossentropy: 0.6252 - accuracy: 0.6159 - val_loss: 0.6937 - val_binary_crossentropy: 0.6022 - val_accuracy: 0.6420\nEpoch 37/9000\n20/20 - 0s - loss: 0.7143 - binary_crossentropy: 0.6236 - accuracy: 0.6138 - val_loss: 0.6910 - val_binary_crossentropy: 0.6013 - 
val_accuracy: 0.6680\nEpoch 38/9000\n20/20 - 0s - loss: 0.7162 - binary_crossentropy: 0.6272 - accuracy: 0.6191 - val_loss: 0.6959 - val_binary_crossentropy: 0.6080 - val_accuracy: 0.5820\nEpoch 39/9000\n20/20 - 0s - loss: 0.7138 - binary_crossentropy: 0.6266 - accuracy: 0.6077 - val_loss: 0.6868 - val_binary_crossentropy: 0.6005 - val_accuracy: 0.6020\nEpoch 40/9000\n20/20 - 0s - loss: 0.7080 - binary_crossentropy: 0.6225 - accuracy: 0.6191 - val_loss: 0.6843 - val_binary_crossentropy: 0.5996 - val_accuracy: 0.6170\nEpoch 41/9000\n20/20 - 0s - loss: 0.7034 - binary_crossentropy: 0.6195 - accuracy: 0.6267 - val_loss: 0.6861 - val_binary_crossentropy: 0.6031 - val_accuracy: 0.6330\n" ], [ "test_accuracy = tf.keras.metrics.Accuracy()\n\nfor (features, labels) in test_ds:\n logits = regularization_model(features)\n probabilities = tf.keras.activations.sigmoid(logits)\n predictions = 1*(probabilities.numpy() > 0.5)\n test_accuracy(predictions, labels)\n regularization_model_accuracy = test_accuracy.result()\n\nprint(\"Test set accuracy: {:.3%}\".format(regularization_model_accuracy))", "Test set accuracy: 69.300%\n" ], [ "%tensorboard --logdir logs/fit", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af5c074e50101726050bd02e43d823dcf57e327
574
ipynb
Jupyter Notebook
_build/jupyter_execute/chapters/hmm/hmm_sampling.ipynb
ssm-jax/ssm-book
f3bfa29a1c474b7dc85792a563df0f29736a44c6
[ "MIT" ]
10
2022-03-22T21:28:03.000Z
2022-03-29T17:42:06.000Z
_build/jupyter_execute/chapters/hmm/hmm_sampling.ipynb
ssm-jax/ssm-book
f3bfa29a1c474b7dc85792a563df0f29736a44c6
[ "MIT" ]
null
null
null
_build/jupyter_execute/chapters/hmm/hmm_sampling.ipynb
ssm-jax/ssm-book
f3bfa29a1c474b7dc85792a563df0f29736a44c6
[ "MIT" ]
1
2022-03-23T02:15:23.000Z
2022-03-23T02:15:23.000Z
17.9375
57
0.547038
[ [ [ "# Forwards-filtering backwards-sampling algorithm\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
4af5d213bc3c58be1e1604639b787cb95ed00d0e
33,133
ipynb
Jupyter Notebook
04-pipelineBasedTransformer.ipynb
satish860/sentiment_analyser
46f6127b0e699c710e7ba556c90fe752cd0a12cd
[ "Apache-2.0" ]
null
null
null
04-pipelineBasedTransformer.ipynb
satish860/sentiment_analyser
46f6127b0e699c710e7ba556c90fe752cd0a12cd
[ "Apache-2.0" ]
null
null
null
04-pipelineBasedTransformer.ipynb
satish860/sentiment_analyser
46f6127b0e699c710e7ba556c90fe752cd0a12cd
[ "Apache-2.0" ]
1
2022-02-05T06:30:23.000Z
2022-02-05T06:30:23.000Z
102.578947
23,584
0.826849
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv('data/small_corpus.csv')\ndf['reviews']= df['reviews'].astype(str)", "_____no_output_____" ], [ "from transformers import pipeline\nclassifier = pipeline('sentiment-analysis')", "_____no_output_____" ], [ "def classify(item):\n output = classifier(item)[0]\n label = output['label']\n score = output['score']\n return ','.join([label,str(score)])", "_____no_output_____" ], [ "df['label_score'] = df['reviews'].apply(lambda x : classify(x[:512]))", "_____no_output_____" ], [ "def prediction_to_class(label_score,threshold):\n val = label_score.split(',')\n label = val[0]\n score = float(val[1])\n if label == \"NEGATIVE\" and score > threshold:\n return 0\n elif label == \"POSITIVE\" and score > threshold:\n return 2\n else:\n return 1", "_____no_output_____" ], [ "df['predicted'] = df['label_score'].apply(lambda x : prediction_to_class(x,0.75))", "_____no_output_____" ] ], [ [ "# Results", "_____no_output_____" ] ], [ [ "def score_to_Target(value):\n if value >= 5:\n return 2\n if value <= 4 and value >= 2:\n return 1\n else:\n return 0", "_____no_output_____" ], [ "df['rating_classes'] = df['ratings'].apply(lambda x:score_to_Target(x))", "_____no_output_____" ], [ "df['predicted'] = df['label_score'].apply(lambda x : prediction_to_class(x,0.99))", "_____no_output_____" ], [ "rating_classes = list(df[\"rating_classes\"])\npredicated_values = list(df[\"predicted\"])\ntarget_names = [\"negative\", \"neutral\", \"positive\"]\nfrom sklearn.metrics import classification_report\nprint(classification_report(rating_classes, predicated_values, target_names=target_names))", " precision recall f1-score support\n\n negative 0.64 0.82 0.72 1500\n neutral 0.49 0.30 0.37 1500\n positive 0.71 0.78 0.74 1500\n\n accuracy 0.63 4500\n macro avg 0.61 0.63 0.61 4500\nweighted avg 0.61 0.63 0.61 4500\n\n" ], [ "import altair as alt\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nx, y = np.meshgrid(range(0, 3), range(0, 3))\ncm=confusion_matrix(rating_classes, predicated_values, labels=[0, 1, 2])", "_____no_output_____" ], [ "import numpy as np\n\n\ndef plot_confusion_matrix(cm,\n target_names,\n title='Confusion matrix',\n cmap=None,\n normalize=True):\n \"\"\"\n given a sklearn confusion matrix (cm), make a nice plot\n\n Arguments\n ---------\n cm: confusion matrix from sklearn.metrics.confusion_matrix\n\n target_names: given classification classes such as [0, 1, 2]\n the class names, for example: ['high', 'medium', 'low']\n\n title: the text to display at the top of the matrix\n\n cmap: the gradient of the values displayed from matplotlib.pyplot.cm\n see http://matplotlib.org/examples/color/colormaps_reference.html\n plt.get_cmap('jet') or plt.cm.Blues\n\n normalize: If False, plot the raw numbers\n If True, plot the proportions\n\n Usage\n -----\n plot_confusion_matrix(cm = cm, # confusion matrix created by\n # sklearn.metrics.confusion_matrix\n normalize = True, # show proportions\n target_names = y_labels_vals, # list of names of the classes\n title = best_estimator_name) # title of graph\n\n Citiation\n ---------\n http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n import itertools\n\n accuracy = np.trace(cm) / float(np.sum(cm))\n misclass = 1 - accuracy\n\n if cmap is None:\n cmap = plt.get_cmap('Blues')\n\n plt.figure(figsize=(8, 6))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n 
plt.colorbar()\n\n if target_names is not None:\n tick_marks = np.arange(len(target_names))\n plt.xticks(tick_marks, target_names, rotation=45)\n plt.yticks(tick_marks, target_names)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n else:\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label\\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))\n plt.show()", "_____no_output_____" ], [ "plot_confusion_matrix(cm = cm,\n normalize = False,\n target_names = [\"negative\", \"neutral\", \"positive\"],\n title = \"Confusion Matrix\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
4af5dd40bd19956df0f3d292af8395674ca0965b
169,073
ipynb
Jupyter Notebook
notebooks/cobweb-models.ipynb
davidrpugh/sfi-complexity-mooc
b92044f9804076a50ed15ad8863e4bdbcb242727
[ "MIT" ]
11
2016-09-24T20:33:28.000Z
2021-01-11T21:28:16.000Z
notebooks/cobweb-models.ipynb
davidrpugh/sfi-complexity-mooc
b92044f9804076a50ed15ad8863e4bdbcb242727
[ "MIT" ]
14
2016-02-10T15:06:23.000Z
2016-02-23T13:34:38.000Z
notebooks/cobweb-models.ipynb
davidrpugh/sfi-complexity-mooc
b92044f9804076a50ed15ad8863e4bdbcb242727
[ "MIT" ]
10
2016-02-13T09:58:20.000Z
2020-08-17T15:49:37.000Z
390.468822
62,640
0.91299
[ [ [ "<h1 align=center>The Cobweb Model</h1>\n\nPresentation follows <a href=\"http://www.parisschoolofeconomics.eu/docs/guesnerie-roger/hommes94.pdf\">Hommes, <em>JEBO 1994</em></a>. Let $p_t$ denote the <em>observed price</em> of goods and $p_t^e$ the <em>expected price</em> of goods in period $t$. Similarly, let $q_t^d$ denote the <em>quantity demanded</em> of all goods in period $t$ and $q_t^s$ the <em>quantity supplied</em> of all goods in period $t$.\n\n\\begin{align}\n q_t^d =& D(p_t) \\tag{1} \\\\\n q_t^s =& S(p_t^e) \\tag{2} \\\\\n q_t^d =& q_t^s \\tag{3} \\\\\n p_t^e =& p_{t-1}^e + w\\big(p_{t-1} - p_{t-1}^e\\big) = (1 - w)p_{t-1}^e + w p_{t-1} \\tag{4}\n\\end{align}\n\nEquation 1 says that the quantity demanded of goods in period $t$ is some function of the <em>observed price</em> in period $t$. Equation 2, meanwhile, states that the quantity of goods supplied in period $t$ is a function of the <em>expected price</em> in period $t$. Equation 3 is a market clearing equilibrium condition. Finally, equation 4 is an adaptive expectation formation rule that specifies how goods producers form their expectations about the price of goods in period $t$ as a function of past prices.\n\nCombine the equations as follows. Note that equation 3 implies that...\n\n$$ D(p_t) = q_t^d = q_t^s = S(p_t^e) $$\n\n...and therefore, assuming the demand function $D$ is invertible, we can write the observed price of goods in period $t$ as...\n\n$$ p_t = D^{-1}\\big(S(p_t^e)\\big). \\tag{5}$$\n\nSubstituting equation 5 into equation 4 we arrive at the following difference equation\n\n$$ p_{t+1}^e = w D^{-1}\\big(S(p_t^e)\\big) + (1 - w)p_t^e. \\tag{7}$$", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "%load_ext autoreload", "_____no_output_____" ], [ "%autoreload 2", "_____no_output_____" ], [ "import functools\n\nimport ipywidgets\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import optimize\nimport seaborn as sns\n\nimport cobweb", "_____no_output_____" ], [ "def observed_price(D_inverse, S, expected_price, **params):\n \"\"\"The observed price of goods in a particular period.\"\"\"\n actual_price = D_inverse(S(expected_price, **params), **params)\n return actual_price\n\n\ndef adaptive_expectations(D_inverse, S, expected_price, w, **params):\n \"\"\"An adaptive expectations price forecasting rule.\"\"\"\n actual_price = observed_price(D_inverse, S, expected_price, **params)\n price_forecast = w * actual_price + (1 - w) * expected_price\n return price_forecast\n", "_____no_output_____" ] ], [ [ "<h2> Non-linear supply functions </h2>\n\nWhen thinking about supply it helps to start with the following considerations...\n<ol>\n <li> ...when prices are low, the quantity supplied increases slowly because of fixed costs of production (think startup costs, etc).\n <li> ...when prices are high, supply also increases slowly because of capacity constraints.\n</ol>\n\nThese considerations motivate our focus on \"S-shaped\" supply functions...\n\n$$ S_{\\gamma}(p_t^e) = -tan^{-1}(-\\gamma \\bar{p}) + tan^{-1}(\\gamma (p_t^e - \\bar{p})). 
\\tag{10}$$\n\nThe parameter $0 < \\gamma < \\infty$ controls the \"steepness\" of the supply function.", "_____no_output_____" ] ], [ [ "def quantity_supply(expected_price, gamma, p_bar, **params):\n \"\"\"The quantity of goods supplied in period t given the epxected price.\"\"\"\n return -np.arctan(-gamma * p_bar) + np.arctan(gamma * (expected_price - p_bar))", "_____no_output_____" ] ], [ [ "<h3> Exploring supply shocks </h3>\n\nInteractively change the value of $\\gamma$ to see the impact on the shape of the supply function.", "_____no_output_____" ] ], [ [ "ipywidgets.interact?", "_____no_output_____" ], [ "interactive_quantity_supply_plot = ipywidgets.interact(cobweb.quantity_supply_plot,\n S=ipywidgets.fixed(quantity_supply),\n gamma=cobweb.gamma_float_slider,\n p_bar=cobweb.p_bar_float_slider)\n", "_____no_output_____" ] ], [ [ "<h2> Special case: Linear demand functions </h2>\n\nSuppose the the quantity demanded of goods is a simple, decresing linear function of the observed price.\n\n$$ q_t^d = D(p_t) = a - b p_t \\implies p_t = D^{-1}(q_t^d) = \\frac{a}{b} - \\frac{1}{b}q_t^d \\tag{11} $$\n\n...where $-\\infty < a < \\infty$ and $0 < b < \\infty$. ", "_____no_output_____" ] ], [ [ "def quantity_demand(observed_price, a, b):\n \"\"\"The quantity demand of goods in period t given the price.\"\"\"\n quantity = a - b * observed_price\n return quantity\n\n\ndef inverse_demand(quantity_demand, a, b, **params):\n \"\"\"The price of goods in period t given the quantity demanded.\"\"\"\n price = (a / b) - (1 / b) * quantity_demand\n return price", "_____no_output_____" ] ], [ [ "<h3> Exploring demand shocks </h3>\n\nInteractively change the values of $a$ and $b$ to get a feel for how they impact demand. Shocks to $a$ shift the entire demand curve; shocks to $b$ change the slope of the demand curve (higher $b$ implies greater sensitivity to price; lower $b$ implies less sensitivity to price).", "_____no_output_____" ] ], [ [ "interactive_quantity_demand_plot = ipywidgets.interact(cobweb.quantity_demand_plot,\n D=ipywidgets.fixed(quantity_demand),\n a=cobweb.a_float_slider,\n b=cobweb.b_float_slider)\n", "_____no_output_____" ] ], [ [ "<h2> Supply and demand </h2>\n\nMarket clearing equilibrium price, $p^*$, satisfies...\n\n$$ D(p_t) = S(p_t^e). $$\n\nReally this is also an equilibrium in beliefs because we also require that $p_t = p_t^e$!", "_____no_output_____" ] ], [ [ "interactive_supply_demand_plot = ipywidgets.interact(cobweb.supply_demand_plot,\n D=ipywidgets.fixed(quantity_demand),\n S=ipywidgets.fixed(quantity_supply),\n a=cobweb.a_float_slider,\n b=cobweb.b_float_slider,\n gamma=cobweb.gamma_float_slider,\n p_bar=cobweb.p_bar_float_slider)\n", "_____no_output_____" ] ], [ [ "<h2> Analyzing dynamics of the model via simulation... </h2>\n\nModel has no closed form solution (i.e., we can not solve for a function that describes $p_t^e$ as a function of time and model parameters). BUT, we can simulate equation 7 above to better understand the dynamics of the model...", "_____no_output_____" ], [ "We can simulate our model and plot time series for different parameter values. Questions for discussion...\n\n<ol>\n <li> Can you find a two-cycle? What does this mean?</li>\n <li> Can you find higher cycles? Perhaps a four-cycle? Maybe even a three-cycle?</li>\n <li> Do simulations with similar initial conditions converge or diverge over time? </li> \n</ol> \n\nCan we relate these things to other SFI MOOCS on non-linear dynamics and chaos? 
Surely yes!", "_____no_output_____" ] ], [ [ "model = functools.partial(adaptive_expectations, inverse_demand, quantity_supply)\ninteractive_time_series_plot = ipywidgets.interact(cobweb.time_series_plot,\n F=ipywidgets.fixed(model),\n X0=cobweb.initial_expected_price_slider,\n T=cobweb.T_int_slider,\n a=cobweb.a_float_slider,\n b=cobweb.b_float_slider,\n w=cobweb.w_float_slider,\n gamma=cobweb.gamma_float_slider,\n p_bar=cobweb.p_bar_float_slider)", "_____no_output_____" ] ], [ [ "<h2> Forecast errors </h2>\n\nHow do we measure forecast error? What does the distribution of forecast errors look like for different parameters? Could an agent learn to avoid chaos? Specifically, suppose an agent learned to tune the value of $w$ in order to minimize its mean forecast error. Would this eliminate chaotic dynamics?", "_____no_output_____" ] ], [ [ "interactive_forecast_error_plot = ipywidgets.interact(cobweb.forecast_error_plot,\n D_inverse=ipywidgets.fixed(inverse_demand),\n S=ipywidgets.fixed(quantity_supply),\n F=ipywidgets.fixed(model),\n X0=cobweb.initial_expected_price_slider,\n T=cobweb.T_int_slider,\n a=cobweb.a_float_slider,\n b=cobweb.b_float_slider,\n w=cobweb.w_float_slider,\n gamma=cobweb.gamma_float_slider,\n p_bar=cobweb.p_bar_float_slider)", "_____no_output_____" ] ], [ [ "<h2> Other things of possible interest? </h2>\n\nImpulse response functions?\nCompare constrast model predictions for rational expectations, naive expectations, adaptive expectations. Depending on what Cars might have in mind, we could also add other expectation formation rules from his more recent work and have students analyze those...", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4af5de932aa66f41f8e21f75d98c439a285959f8
729,205
ipynb
Jupyter Notebook
stoned-selfies-main/stoned_selfies_tut.ipynb
hanmingcr/selfies_ML
75157ea8d01bd007c7cdbc63afaac83c302f46f5
[ "MIT" ]
null
null
null
stoned-selfies-main/stoned_selfies_tut.ipynb
hanmingcr/selfies_ML
75157ea8d01bd007c7cdbc63afaac83c302f46f5
[ "MIT" ]
null
null
null
stoned-selfies-main/stoned_selfies_tut.ipynb
hanmingcr/selfies_ML
75157ea8d01bd007c7cdbc63afaac83c302f46f5
[ "MIT" ]
null
null
null
484.84375
135,796
0.933262
[ [ [ "# Efficient Interpolation & Exploration with STONED SELFIES", "_____no_output_____" ], [ "\n### By: AkshatKumar Nigam, Robert Pollice, Mario Krenn, Gabriel dos Passos Gomes, and Alan Aspuru-Guzik\n\nPaper Link: https://doi.org/10.26434/chemrxiv.13383266.v2 \\\nPaper Github: https://github.com/aspuru-guzik-group/stoned-selfies \n\n<img src=\"https://github.com/aspuru-guzik-group/stoned-selfies/blob/main/readme_docs/fig_main_algo.png?raw=True\" width=\"900\" />\n", "_____no_output_____" ], [ "# Experiment Imports / Functions: ", "_____no_output_____" ] ], [ [ "import time \nimport selfies\nimport rdkit\nimport random\nimport numpy as np\nimport random\nfrom rdkit import Chem\nfrom selfies import encoder, decoder\nfrom rdkit.Chem import MolFromSmiles as smi2mol\nfrom rdkit.Chem import AllChem\nfrom rdkit.DataStructs.cDataStructs import TanimotoSimilarity\nfrom rdkit.Chem import Mol\nfrom rdkit.Chem.AtomPairs.Sheridan import GetBPFingerprint, GetBTFingerprint\nfrom rdkit.Chem.Pharm2D import Generate, Gobbi_Pharm2D\nfrom rdkit.Chem import Draw\n\nfrom rdkit.Chem import MolToSmiles as mol2smi\nfrom rdkit import RDLogger\nRDLogger.DisableLog('rdApp.*')\n\ndef randomize_smiles(mol):\n '''Returns a random (dearomatized) SMILES given an rdkit mol object of a molecule.\n Parameters:\n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)\n \n Returns:\n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)\n '''\n if not mol:\n return None\n\n Chem.Kekulize(mol)\n return rdkit.Chem.MolToSmiles(mol, canonical=False, doRandom=True, isomericSmiles=False, kekuleSmiles=True) \n\n\ndef sanitize_smiles(smi):\n '''Return a canonical smile representation of smi\n \n Parameters:\n smi (string) : smile string to be canonicalized \n \n Returns:\n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)\n smi_canon (string) : Canonicalized smile representation of smi (None if invalid smile string smi)\n conversion_successful (bool): True/False to indicate if conversion was successful \n '''\n try:\n mol = smi2mol(smi, sanitize=True)\n smi_canon = mol2smi(mol, isomericSmiles=False, canonical=True)\n return (mol, smi_canon, True)\n except:\n return (None, None, False)\n \n\ndef get_selfie_chars(selfie):\n '''Obtain a list of all selfie characters in string selfie\n \n Parameters: \n selfie (string) : A selfie string - representing a molecule \n \n Example: \n >>> get_selfie_chars('[C][=C][C][=C][C][=C][Ring1][Branch1_1]')\n ['[C]', '[=C]', '[C]', '[=C]', '[C]', '[=C]', '[Ring1]', '[Branch1_1]']\n \n Returns:\n chars_selfie: list of selfie characters present in molecule selfie\n '''\n chars_selfie = [] # A list of all SELFIE sybols from string selfie\n while selfie != '':\n chars_selfie.append(selfie[selfie.find('['): selfie.find(']')+1])\n selfie = selfie[selfie.find(']')+1:]\n return chars_selfie\n\n\nclass _FingerprintCalculator:\n ''' Calculate the fingerprint for a molecule, given the fingerprint type\n Parameters: \n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)\n fp_type (string) :Fingerprint type (choices: AP/PHCO/BPF,BTF,PAT,ECFP4,ECFP6,FCFP4,FCFP6) \n Returns:\n RDKit fingerprint object\n '''\n\n def get_fingerprint(self, mol: Mol, fp_type: str):\n method_name = 'get_' + fp_type\n method = getattr(self, method_name)\n if method is None:\n raise Exception(f'{fp_type} is not a supported fingerprint type.')\n return method(mol)\n\n def get_AP(self, mol: Mol):\n return 
AllChem.GetAtomPairFingerprint(mol, maxLength=10)\n\n def get_PHCO(self, mol: Mol):\n return Generate.Gen2DFingerprint(mol, Gobbi_Pharm2D.factory)\n\n def get_BPF(self, mol: Mol):\n return GetBPFingerprint(mol)\n\n def get_BTF(self, mol: Mol):\n return GetBTFingerprint(mol)\n\n def get_PATH(self, mol: Mol):\n return AllChem.RDKFingerprint(mol)\n\n def get_ECFP4(self, mol: Mol):\n return AllChem.GetMorganFingerprint(mol, 2)\n\n def get_ECFP6(self, mol: Mol):\n return AllChem.GetMorganFingerprint(mol, 3)\n\n def get_FCFP4(self, mol: Mol):\n return AllChem.GetMorganFingerprint(mol, 2, useFeatures=True)\n\n def get_FCFP6(self, mol: Mol):\n return AllChem.GetMorganFingerprint(mol, 3, useFeatures=True)\n\n\ndef get_fingerprint(mol: Mol, fp_type: str):\n ''' Fingerprint getter method. Fingerprint is returned after using object of \n class '_FingerprintCalculator'\n \n Parameters: \n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)\n fp_type (string) :Fingerprint type (choices: AP/PHCO/BPF,BTF,PAT,ECFP4,ECFP6,FCFP4,FCFP6) \n Returns:\n RDKit fingerprint object\n \n '''\n return _FingerprintCalculator().get_fingerprint(mol=mol, fp_type=fp_type)\n\ndef mutate_selfie(selfie, max_molecules_len, write_fail_cases=False):\n '''Return a mutated selfie string (only one mutation on slefie is performed)\n \n Mutations are done until a valid molecule is obtained \n Rules of mutation: With a 33.3% propbabily, either: \n 1. Add a random SELFIE character in the string\n 2. Replace a random SELFIE character with another\n 3. Delete a random character\n \n Parameters:\n selfie (string) : SELFIE string to be mutated \n max_molecules_len (int) : Mutations of SELFIE string are allowed up to this length\n write_fail_cases (bool) : If true, failed mutations are recorded in \"selfie_failure_cases.txt\"\n \n Returns:\n selfie_mutated (string) : Mutated SELFIE string\n smiles_canon (string) : canonical smile of mutated SELFIE string\n '''\n valid=False\n fail_counter = 0\n chars_selfie = get_selfie_chars(selfie)\n \n while not valid:\n fail_counter += 1\n \n alphabet = list(selfies.get_semantic_robust_alphabet()) # 34 SELFIE characters \n\n choice_ls = [1, 2, 3] # 1=Insert; 2=Replace; 3=Delete\n random_choice = np.random.choice(choice_ls, 1)[0]\n \n # Insert a character in a Random Location\n if random_choice == 1: \n random_index = np.random.randint(len(chars_selfie)+1)\n random_character = np.random.choice(alphabet, size=1)[0]\n \n selfie_mutated_chars = chars_selfie[:random_index] + [random_character] + chars_selfie[random_index:]\n\n # Replace a random character \n elif random_choice == 2: \n random_index = np.random.randint(len(chars_selfie))\n random_character = np.random.choice(alphabet, size=1)[0]\n if random_index == 0:\n selfie_mutated_chars = [random_character] + chars_selfie[random_index+1:]\n else:\n selfie_mutated_chars = chars_selfie[:random_index] + [random_character] + chars_selfie[random_index+1:]\n \n # Delete a random character\n elif random_choice == 3: \n random_index = np.random.randint(len(chars_selfie))\n if random_index == 0:\n selfie_mutated_chars = chars_selfie[random_index+1:]\n else:\n selfie_mutated_chars = chars_selfie[:random_index] + chars_selfie[random_index+1:]\n \n else: \n raise Exception('Invalid Operation trying to be performed')\n\n selfie_mutated = \"\".join(x for x in selfie_mutated_chars)\n sf = \"\".join(x for x in chars_selfie)\n \n try:\n smiles = decoder(selfie_mutated)\n mol, smiles_canon, done = sanitize_smiles(smiles)\n if 
len(selfie_mutated_chars) > max_molecules_len or smiles_canon==\"\":\n done = False\n if done:\n valid = True\n else:\n valid = False\n except:\n valid=False\n if fail_counter > 1 and write_fail_cases == True:\n f = open(\"selfie_failure_cases.txt\", \"a+\")\n f.write('Tried to mutate SELFIE: '+str(sf)+' To Obtain: '+str(selfie_mutated) + '\\n')\n f.close()\n \n return (selfie_mutated, smiles_canon)\n\ndef get_mutated_SELFIES(selfies_ls, num_mutations): \n ''' Mutate all the SELFIES in 'selfies_ls' 'num_mutations' number of times. \n \n Parameters:\n selfies_ls (list) : A list of SELFIES \n num_mutations (int) : number of mutations to perform on each SELFIES within 'selfies_ls'\n \n Returns:\n selfies_ls (list) : A list of mutated SELFIES\n \n '''\n for _ in range(num_mutations): \n selfie_ls_mut_ls = []\n for str_ in selfies_ls: \n \n str_chars = get_selfie_chars(str_)\n max_molecules_len = len(str_chars) + num_mutations\n \n selfie_mutated, _ = mutate_selfie(str_, max_molecules_len)\n selfie_ls_mut_ls.append(selfie_mutated)\n \n selfies_ls = selfie_ls_mut_ls.copy()\n return selfies_ls\n\n\ndef get_fp_scores(smiles_back, target_smi, fp_type): \n '''Calculate the Tanimoto fingerprint (using fp_type fingerint) similarity between a list \n of SMILES and a known target structure (target_smi). \n \n Parameters:\n smiles_back (list) : A list of valid SMILES strings \n target_smi (string) : A valid SMILES string. Each smile in 'smiles_back' will be compared to this stucture\n fp_type (string) : Type of fingerprint (choices: AP/PHCO/BPF,BTF,PAT,ECFP4,ECFP6,FCFP4,FCFP6) \n \n Returns: \n smiles_back_scores (list of floats) : List of fingerprint similarities\n '''\n smiles_back_scores = []\n target = Chem.MolFromSmiles(target_smi)\n\n fp_target = get_fingerprint(target, fp_type)\n\n for item in smiles_back: \n mol = Chem.MolFromSmiles(item)\n fp_mol = get_fingerprint(mol, fp_type)\n score = TanimotoSimilarity(fp_mol, fp_target)\n smiles_back_scores.append(score)\n return smiles_back_scores\n", "_____no_output_____" ] ], [ [ "# Fomation of Local Chemical Subspaces: \n\nThe task here is to generate multiple molecules in the chemical subspace of a structure. We consider the molecule Celecoxib from the paper. We consider 3 experiments: \n1. Generating the chemical subspace of Celecoxib, without restrictions, \n3. Generating the chemical subspace of Celecoxib while filtering out un-synthetic structures, and\n2. Generating the chemical subspace of Celecoxib while preserving a specific substructure.\n", "_____no_output_____" ], [ "### 1. Generating the chemical subspace of Celecoxib, without any restrictions", "_____no_output_____" ] ], [ [ "smi = 'CC1=CC=C(C=C1)C2=CC(=NN2C3=CC=C(C=C3)S(=O)(=O)N)C(F)(F)F' # Celecoxib\nfp_type = 'ECFP4'\n\n\ntotal_time = time.time()\n# num_random_samples = 50000 # For a more exhaustive search! 
\nnum_random_samples = 1000 \nnum_mutation_ls = [1, 2, 3, 4, 5]\n\nmol = Chem.MolFromSmiles(smi)\nif mol == None: \n raise Exception('Invalid starting structure encountered')\n\nstart_time = time.time()\nrandomized_smile_orderings = [randomize_smiles(mol) for _ in range(num_random_samples)]\n\n# Convert all the molecules to SELFIES\nselfies_ls = [encoder(x) for x in randomized_smile_orderings]\nprint('Randomized molecules (in SELFIES) time: ', time.time()-start_time)\n\n\nall_smiles_collect = []\nall_smiles_collect_broken = []\n\nstart_time = time.time()\nfor num_mutations in num_mutation_ls: \n # Mutate the SELFIES: \n selfies_mut = get_mutated_SELFIES(selfies_ls.copy(), num_mutations=num_mutations)\n\n # Convert back to SMILES: \n smiles_back = [decoder(x) for x in selfies_mut]\n all_smiles_collect = all_smiles_collect + smiles_back\n all_smiles_collect_broken.append(smiles_back)\n\n\nprint('Mutation obtainment time (back to smiles): ', time.time()-start_time)\n\n\n# Work on: all_smiles_collect\nstart_time = time.time()\ncanon_smi_ls = []\nfor item in all_smiles_collect: \n mol, smi_canon, did_convert = sanitize_smiles(item)\n if mol == None or smi_canon == '' or did_convert == False: \n raise Exception('Invalid smile string found')\n canon_smi_ls.append(smi_canon)\ncanon_smi_ls = list(set(canon_smi_ls))\nprint('Unique mutated structure obtainment time: ', time.time()-start_time)\n\nstart_time = time.time()\ncanon_smi_ls_scores = get_fp_scores(canon_smi_ls, target_smi=smi, fp_type=fp_type)\nprint('Fingerprint calculation time: ', time.time()-start_time)\nprint('Total time: ', time.time()-total_time)\n\n# Molecules with fingerprint similarity > 0.8\nindices_thresh_8 = [i for i,x in enumerate(canon_smi_ls_scores) if x > 0.8]\nmols_8 = [Chem.MolFromSmiles(canon_smi_ls[idx]) for idx in indices_thresh_8]\n\n# Molecules with fingerprint similarity > 0.6\nindices_thresh_6 = [i for i,x in enumerate(canon_smi_ls_scores) if x > 0.6 and x < 0.8]\nmols_6 = [Chem.MolFromSmiles(canon_smi_ls[idx]) for idx in indices_thresh_6]\n\n# Molecules with fingerprint similarity > 0.4\nindices_thresh_4 = [i for i,x in enumerate(canon_smi_ls_scores) if x > 0.4 and x < 0.6]\nmols_4 = [Chem.MolFromSmiles(canon_smi_ls[idx]) for idx in indices_thresh_4]", "Randomized molecules (in SELFIES) time: 0.09882092475891113\nMutation obtainment time (back to smiles): 6.560421466827393\nUnique mutated structure obtainment time: 0.7343411445617676\nFingerprint calculation time: 0.6410195827484131\nTotal time: 8.035475969314575\n" ] ], [ [ "### Visualizing Molecules with Similarity > 0.8", "_____no_output_____" ] ], [ [ "img=Draw.MolsToGridImage(mols_8[:8],molsPerRow=4,subImgSize=(200,200)) \nimg", "_____no_output_____" ] ], [ [ "### Visualizing Molecules with Similarity > 0.6 & Less than 0.8", "_____no_output_____" ] ], [ [ "img=Draw.MolsToGridImage(mols_6[:8],molsPerRow=4,subImgSize=(200,200)) \nimg", "_____no_output_____" ] ], [ [ "### Visualizing Molecules with Similarity > 0.4 Less than 0.6", "_____no_output_____" ] ], [ [ "img=Draw.MolsToGridImage(mols_4[:8],molsPerRow=4,subImgSize=(200,200)) \nimg", "_____no_output_____" ] ], [ [ "### 2. Generating the chemical subspace of Celecoxib while filtering out un-synthetic structures.\n\nFor this example, we make use of SYBA to filter out the most synthetic structures: \n1. Code: https://github.com/lich-uct/syba\n2. 
Paper: https://jcheminf.biomedcentral.com/articles/10.1186/s13321-020-00439-2\n\n", "_____no_output_____" ] ], [ [ "from syba.syba import SybaClassifier\nsyba = SybaClassifier()\nsyba.fitDefaultScore()\n\nsyba_scores = []\nfor item in canon_smi_ls: \n syba_scores.append(syba.predict(smi=item))\n \nA = np.argsort(syba_scores)\nsmi_arranged = [canon_smi_ls[i] for i in A]\nsmi_arranged = smi_arranged[-20:]\n\nmols_ = [Chem.MolFromSmiles(x) for x in smi_arranged]\n\nimg=Draw.MolsToGridImage(mols_,molsPerRow=4,subImgSize=(200,200)) \nimg", "_____no_output_____" ] ], [ [ "### 3. Generating the chemical subspace of Celecoxib while preserving a specific substructure", "_____no_output_____" ], [ "We will preserve the structure marked in red for Celecoxib: \n\n<img src=\"https://github.com/aspuru-guzik-group/stoned-selfies/blob/main/data/struct_pres.png?raw=True\" width=\"250\" />\n\nWe write a function with RDKit that can detect the highlighted structure ( substructure_preserver ). While performing mutations with SELFIES, we check if the function returns True (has the substructure). Else, the algorithm is asked to retry/perform a different mutation. Have a look at the specific line: \n```\nif len(selfie_mutated_chars) > max_molecules_len or smiles_canon==\"\" or substructure_preserver(mol)==False:\n```", "_____no_output_____" ] ], [ [ "def substructure_preserver(mol):\n \"\"\"\n Check for substructure violates\n Return True: contains a substructure violation\n Return False: No substructure violation\n \"\"\" \n \n if mol.HasSubstructMatch(rdkit.Chem.MolFromSmarts('NS(=O)(=O)c1ccc(-n2cccn2)cc1')) == True:\n return True # The has substructure! \n else: \n return False # Molecule does not have substructure!\n\ndef mutate_selfie(selfie, max_molecules_len, write_fail_cases=False):\n '''Return a mutated selfie string (only one mutation on slefie is performed)\n \n Mutations are done until a valid molecule is obtained \n Rules of mutation: With a 33.3% propbabily, either: \n 1. Add a random SELFIE character in the string\n 2. Replace a random SELFIE character with another\n 3. 
Delete a random character\n \n Parameters:\n selfie (string) : SELFIE string to be mutated \n max_molecules_len (int) : Mutations of SELFIE string are allowed up to this length\n write_fail_cases (bool) : If true, failed mutations are recorded in \"selfie_failure_cases.txt\"\n \n Returns:\n selfie_mutated (string) : Mutated SELFIE string\n smiles_canon (string) : canonical smile of mutated SELFIE string\n '''\n valid=False\n fail_counter = 0\n chars_selfie = get_selfie_chars(selfie)\n \n while not valid:\n fail_counter += 1\n \n alphabet = list(selfies.get_semantic_robust_alphabet()) # 34 SELFIE characters \n\n choice_ls = [1, 2, 3] # 1=Insert; 2=Replace; 3=Delete\n random_choice = np.random.choice(choice_ls, 1)[0]\n \n # Insert a character in a Random Location\n if random_choice == 1: \n random_index = np.random.randint(len(chars_selfie)+1)\n random_character = np.random.choice(alphabet, size=1)[0]\n \n selfie_mutated_chars = chars_selfie[:random_index] + [random_character] + chars_selfie[random_index:]\n\n # Replace a random character \n elif random_choice == 2: \n random_index = np.random.randint(len(chars_selfie))\n random_character = np.random.choice(alphabet, size=1)[0]\n if random_index == 0:\n selfie_mutated_chars = [random_character] + chars_selfie[random_index+1:]\n else:\n selfie_mutated_chars = chars_selfie[:random_index] + [random_character] + chars_selfie[random_index+1:]\n \n # Delete a random character\n elif random_choice == 3: \n random_index = np.random.randint(len(chars_selfie))\n if random_index == 0:\n selfie_mutated_chars = chars_selfie[random_index+1:]\n else:\n selfie_mutated_chars = chars_selfie[:random_index] + chars_selfie[random_index+1:]\n \n else: \n raise Exception('Invalid Operation trying to be performed')\n\n selfie_mutated = \"\".join(x for x in selfie_mutated_chars)\n sf = \"\".join(x for x in chars_selfie)\n \n try:\n smiles = decoder(selfie_mutated)\n mol, smiles_canon, done = sanitize_smiles(smiles)\n if len(selfie_mutated_chars) > max_molecules_len or smiles_canon==\"\" or substructure_preserver(mol)==False:\n done = False\n if done:\n valid = True\n else:\n valid = False\n except:\n valid=False\n if fail_counter > 1 and write_fail_cases == True:\n f = open(\"selfie_failure_cases.txt\", \"a+\")\n f.write('Tried to mutate SELFIE: '+str(sf)+' To Obtain: '+str(selfie_mutated) + '\\n')\n f.close()\n \n return (selfie_mutated, smiles_canon)", "_____no_output_____" ], [ "smi = 'CC1=CC=C(C=C1)C2=CC(=NN2C3=CC=C(C=C3)S(=O)(=O)N)C(F)(F)F' # Celecoxib\nfp_type = 'ECFP4'\n\n\ntotal_time = time.time()\n# num_random_samples = 50000 # For a more exhaustive search! 
\nnum_random_samples = 100 \nnum_mutation_ls = [1, 2, 3, 4, 5]\n\nmol = Chem.MolFromSmiles(smi)\nif mol == None: \n raise Exception('Invalid starting structure encountered')\n\nstart_time = time.time()\nrandomized_smile_orderings = [randomize_smiles(mol) for _ in range(num_random_samples)]\n\n# Convert all the molecules to SELFIES\nselfies_ls = [encoder(x) for x in randomized_smile_orderings]\nprint('Randomized molecules (in SELFIES) time: ', time.time()-start_time)\n\n\nall_smiles_collect = []\nall_smiles_collect_broken = []\n\nstart_time = time.time()\nfor num_mutations in num_mutation_ls: \n # Mutate the SELFIES: \n selfies_mut = get_mutated_SELFIES(selfies_ls.copy(), num_mutations=num_mutations)\n\n # Convert back to SMILES: \n smiles_back = [decoder(x) for x in selfies_mut]\n all_smiles_collect = all_smiles_collect + smiles_back\n all_smiles_collect_broken.append(smiles_back)\n\n\nprint('Mutation obtainment time (back to smiles): ', time.time()-start_time)\n\n\n# Work on: all_smiles_collect\nstart_time = time.time()\ncanon_smi_ls = []\nfor item in all_smiles_collect: \n mol, smi_canon, did_convert = sanitize_smiles(item)\n if mol == None or smi_canon == '' or did_convert == False: \n raise Exception('Invalid smile string found')\n canon_smi_ls.append(smi_canon)\ncanon_smi_ls = list(set(canon_smi_ls))\nprint('Unique mutated structure obtainment time: ', time.time()-start_time)\n\nstart_time = time.time()\ncanon_smi_ls_scores = get_fp_scores(canon_smi_ls, target_smi=smi, fp_type=fp_type)\nprint('Fingerprint calculation time: ', time.time()-start_time)\nprint('Total time: ', time.time()-total_time)\n\n# Molecules with fingerprint similarity > 0.8\nindices_thresh_8 = [i for i,x in enumerate(canon_smi_ls_scores) if x > 0.8]\nmols_8 = [Chem.MolFromSmiles(canon_smi_ls[idx]) for idx in indices_thresh_8]\n\n# Molecules with fingerprint similarity > 0.6\nindices_thresh_6 = [i for i,x in enumerate(canon_smi_ls_scores) if x > 0.6 and x < 0.8]\nmols_6 = [Chem.MolFromSmiles(canon_smi_ls[idx]) for idx in indices_thresh_6]\n\n# Molecules with fingerprint similarity > 0.4\nindices_thresh_4 = [i for i,x in enumerate(canon_smi_ls_scores) if x > 0.4 and x < 0.6]\nmols_4 = [Chem.MolFromSmiles(canon_smi_ls[idx]) for idx in indices_thresh_4]", "Randomized molecules (in SELFIES) time: 0.010784387588500977\nMutation obtainment time (back to smiles): 3.724590301513672\nUnique mutated structure obtainment time: 0.09048938751220703\nFingerprint calculation time: 0.06728267669677734\nTotal time: 3.8942043781280518\n" ], [ "img=Draw.MolsToGridImage(mols_8[:8],molsPerRow=4,subImgSize=(200,200)) \nimg", "_____no_output_____" ], [ "img=Draw.MolsToGridImage(mols_6[:8],molsPerRow=4,subImgSize=(200,200)) \nimg", "_____no_output_____" ], [ "img=Draw.MolsToGridImage(mols_4[:8],molsPerRow=4,subImgSize=(200,200)) \nimg\n", "_____no_output_____" ] ], [ [ "# Chemical Path Formation: \n## Imports for Chemical Path Formation", "_____no_output_____" ] ], [ [ "import os\nimport numpy as np \nimport random\nfrom random import randrange\nimport matplotlib.pyplot as plt\nimport rdkit\nfrom rdkit.Chem import MolFromSmiles as smi2mol\nfrom rdkit.Chem import MolToSmiles as mol2smi\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.DataStructs.cDataStructs import TanimotoSimilarity\nfrom selfies import encoder, decoder \n\n\nimport seaborn as sns\nimport selfies\nimport random\nimport numpy as np\nfrom selfies import encoder, decoder\n\nfrom rdkit import Chem\nfrom rdkit.Chem import Descriptors\nfrom rdkit.Chem import 
AllChem\nfrom rdkit.DataStructs.cDataStructs import TanimotoSimilarity\n\nfrom rdkit import RDLogger\nRDLogger.DisableLog('rdApp.*')\n\ndef get_ECFP4(mol):\n ''' Return rdkit ECFP4 fingerprint object for mol\n\n Parameters: \n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object \n\n Returns: \n rdkit ECFP4 fingerprint object for mol\n '''\n return AllChem.GetMorganFingerprint(mol, 2)\n\ndef sanitize_smiles(smi):\n '''Return a canonical smile representation of smi\n \n Parameters:\n smi (string) : smile string to be canonicalized \n \n Returns:\n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)\n smi_canon (string) : Canonicalized smile representation of smi (None if invalid smile string smi)\n conversion_successful (bool): True/False to indicate if conversion was successful \n '''\n try:\n mol = smi2mol(smi, sanitize=True)\n smi_canon = mol2smi(mol, isomericSmiles=False, canonical=True)\n return (mol, smi_canon, True)\n except:\n return (None, None, False)\n\ndef get_fp_scores(smiles_back, target_smi): \n '''Calculate the Tanimoto fingerprint (ECFP4 fingerint) similarity between a list \n of SMILES and a known target structure (target_smi). \n \n Parameters:\n smiles_back (list) : A list of valid SMILES strings \n target_smi (string) : A valid SMILES string. Each smile in 'smiles_back' will be compared to this stucture\n \n Returns: \n smiles_back_scores (list of floats) : List of fingerprint similarities\n '''\n smiles_back_scores = []\n target = Chem.MolFromSmiles(target_smi)\n fp_target = get_ECFP4(target)\n for item in smiles_back: \n mol = Chem.MolFromSmiles(item)\n fp_mol = get_ECFP4(mol)\n score = TanimotoSimilarity(fp_mol, fp_target)\n smiles_back_scores.append(score)\n return smiles_back_scores\n\n\ndef get_selfie_chars(selfie):\n '''Obtain a list of all selfie characters in string selfie\n \n Parameters: \n selfie (string) : A selfie string - representing a molecule \n \n Example: \n >>> get_selfie_chars('[C][=C][C][=C][C][=C][Ring1][Branch1_1]')\n ['[C]', '[=C]', '[C]', '[=C]', '[C]', '[=C]', '[Ring1]', '[Branch1_1]']\n \n Returns:\n chars_selfie: list of selfie characters present in molecule selfie\n '''\n chars_selfie = [] # A list of all SELFIE sybols from string selfie\n while selfie != '':\n chars_selfie.append(selfie[selfie.find('['): selfie.find(']')+1])\n selfie = selfie[selfie.find(']')+1:]\n return chars_selfie\n\n\ndef randomize_smiles(mol):\n '''Returns a random (dearomatized) SMILES given an rdkit mol object of a molecule.\n\n Parameters:\n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)\n \n Returns:\n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)\n '''\n if not mol:\n return None\n\n Chem.Kekulize(mol)\n \n return rdkit.Chem.MolToSmiles(mol, canonical=False, doRandom=True, isomericSmiles=False, kekuleSmiles=True)\n\n\ndef get_random_smiles(smi, num_random_samples): \n ''' Obtain 'num_random_samples' non-unique SMILES orderings of smi\n \n Parameters:\n smi (string) : Input SMILES string (needs to be a valid molecule)\n num_random_samples (int): Number fo unique different SMILES orderings to form \n \n Returns:\n randomized_smile_orderings (list) : list of SMILES strings\n '''\n mol = Chem.MolFromSmiles(smi)\n if mol == None: \n raise Exception('Invalid starting structure encountered')\n randomized_smile_orderings = [randomize_smiles(mol) for _ in range(num_random_samples)]\n randomized_smile_orderings = list(set(randomized_smile_orderings)) # Only consider unique 
SMILE strings\n return randomized_smile_orderings\n\n\ndef obtain_path(starting_smile, target_smile, filter_path=False): \n ''' Obtain a path/chemical path from starting_smile to target_smile\n \n Parameters:\n starting_smile (string) : SMILES string (needs to be a valid molecule)\n target_smile (int) : SMILES string (needs to be a valid molecule)\n filter_path (bool) : If True, a chemical path is returned, else only a path\n \n Returns:\n path_smiles (list) : A list of smiles in path between starting_smile & target_smile\n path_fp_scores (list of floats) : Fingerprint similarity to 'target_smile' for each smiles in path_smiles\n smiles_path (list) : A list of smiles in CHEMICAL path between starting_smile & target_smile (if filter_path==False, then empty)\n filtered_path_score (list of floats): Fingerprint similarity to 'target_smile' for each smiles in smiles_path (if filter_path==False, then empty)\n '''\n starting_selfie = encoder(starting_smile)\n target_selfie = encoder(target_smile)\n \n starting_selfie_chars = get_selfie_chars(starting_selfie)\n target_selfie_chars = get_selfie_chars(target_selfie)\n \n # Pad the smaller string\n if len(starting_selfie_chars) < len(target_selfie_chars): \n for _ in range(len(target_selfie_chars)-len(starting_selfie_chars)):\n starting_selfie_chars.append(' ')\n else: \n for _ in range(len(starting_selfie_chars)-len(target_selfie_chars)):\n target_selfie_chars.append(' ')\n \n indices_diff = [i for i in range(len(starting_selfie_chars)) if starting_selfie_chars[i] != target_selfie_chars[i]]\n path = {}\n path[0] = starting_selfie_chars\n \n for iter_ in range(len(indices_diff)): \n idx = np.random.choice(indices_diff, 1)[0] # Index to be operated on\n indices_diff.remove(idx) # Remove that index\n \n # Select the last member of path: \n path_member = path[iter_].copy()\n \n # Mutate that character to the correct value: \n path_member[idx] = target_selfie_chars[idx]\n path[iter_+1] = path_member.copy()\n \n # Collapse path to make them into SELFIE strings\n paths_selfies = []\n for i in range(len(path)):\n selfie_str = ''.join(x for x in path[i])\n paths_selfies.append(selfie_str.replace(' ', ''))\n \n if paths_selfies[-1] != target_selfie: \n raise Exception(\"Unable to discover target structure!\")\n \n # Obtain similarity scores, and only choose the increasing members: \n path_smiles = [decoder(x) for x in paths_selfies]\n path_fp_scores = []\n filtered_path_score = []\n smiles_path = []\n \n if filter_path: \n path_fp_scores = get_fp_scores(path_smiles, target_smile)\n \n filtered_path_score = []\n smiles_path = []\n for i in range(1, len(path_fp_scores)-1): \n if i == 1: \n filtered_path_score.append(path_fp_scores[1])\n smiles_path.append(path_smiles[i])\n continue\n if filtered_path_score[-1] < path_fp_scores[i]:\n filtered_path_score.append(path_fp_scores[i])\n smiles_path.append(path_smiles[i])\n\n return path_smiles, path_fp_scores, smiles_path, filtered_path_score\n\n\ndef get_compr_paths(starting_smile, target_smile, num_tries, num_random_samples, collect_bidirectional):\n ''' Obtaining multiple paths/chemical paths from starting_smile to target_smile. 
\n \n Parameters:\n starting_smile (string) : SMILES string (needs to be a valid molecule)\n target_smile (int) : SMILES string (needs to be a valid molecule)\n num_tries (int) : Number of path/chemical path attempts between the exact same smiles\n num_random_samples (int) : Number of different SMILES string orderings to conside for starting_smile & target_smile \n collect_bidirectional (bool): If true, forms paths from target_smiles-> target_smiles (doubles number of paths)\n \n Returns:\n smiles_paths_dir1 (list): list paths containing smiles in path between starting_smile -> target_smile\n smiles_paths_dir2 (list): list paths containing smiles in path between target_smile -> starting_smile\n '''\n starting_smile_rand_ord = get_random_smiles(starting_smile, num_random_samples=num_random_samples)\n target_smile_rand_ord = get_random_smiles(target_smile, num_random_samples=num_random_samples)\n \n smiles_paths_dir1 = [] # All paths from starting_smile -> target_smile\n for smi_start in starting_smile_rand_ord: \n for smi_target in target_smile_rand_ord: \n \n if Chem.MolFromSmiles(smi_start) == None or Chem.MolFromSmiles(smi_target) == None: \n raise Exception('Invalid structures')\n \n for _ in range(num_tries): \n path, _, _, _ = obtain_path(smi_start, smi_target, filter_path=True)\n smiles_paths_dir1.append(path)\n \n smiles_paths_dir2 = [] # All paths from starting_smile -> target_smile\n if collect_bidirectional == True: \n starting_smile_rand_ord = get_random_smiles(target_smile, num_random_samples=num_random_samples)\n target_smile_rand_ord = get_random_smiles(starting_smile, num_random_samples=num_random_samples)\n \n for smi_start in starting_smile_rand_ord: \n for smi_target in target_smile_rand_ord: \n \n if Chem.MolFromSmiles(smi_start) == None or Chem.MolFromSmiles(smi_target) == None: \n raise Exception('Invalid structures')\n \n for _ in range(num_tries): \n path, _, _, _ = obtain_path(smi_start, smi_target, filter_path=True)\n smiles_paths_dir2.append(path)\n \n return smiles_paths_dir1, smiles_paths_dir2\n\n\n", "_____no_output_____" ] ], [ [ "## Analyzing QED & LogP values for a Chemical Path between Tadalafil & Sildenafil: \nThe get_compr_paths() function generates multiple chemical paths between two input SMILES. 
", "_____no_output_____" ] ], [ [ "def get_ECFP4(mol):\n return AllChem.GetMorganFingerprint(mol, 2)\n\ndef get_fp_scores(smiles_back, target_smi): \n smiles_back_scores = []\n target = Chem.MolFromSmiles(target_smi)\n fp_target = get_ECFP4(target)\n for item in smiles_back: \n mol = Chem.MolFromSmiles(item)\n fp_mol = get_ECFP4(mol)\n score = TanimotoSimilarity(fp_mol, fp_target)\n smiles_back_scores.append(score)\n return smiles_back_scores\n\n\ndef get_logP(mol):\n '''Calculate logP of a molecule \n \n Parameters:\n mol (rdkit.Chem.rdchem.Mol) : RdKit mol object, for which logP is to calculates\n \n Returns:\n float : logP of molecule (mol)\n '''\n return Descriptors.MolLogP(mol)\n\ndef get_selfie_chars(selfie):\n '''Obtain a list of all selfie characters in string selfie\n \n Parameters: \n selfie (string) : A selfie string - representing a molecule \n \n Example: \n >>> get_selfie_chars('[C][=C][C][=C][C][=C][Ring1][Branch1_1]')\n ['[C]', '[=C]', '[C]', '[=C]', '[C]', '[=C]', '[Ring1]', '[Branch1_1]']\n \n Returns:\n chars_selfie: list of selfie characters present in molecule selfie\n '''\n chars_selfie = [] # A list of all SELFIE sybols from string selfie\n while selfie != '':\n chars_selfie.append(selfie[selfie.find('['): selfie.find(']')+1])\n selfie = selfie[selfie.find(']')+1:]\n return chars_selfie\n\n\n\n\nstarting_smile = 'CN1CC(=O)N2C(C1=O)CC3=C(C2C4=CC5=C(C=C4)OCO5)NC6=CC=CC=C36' # Tadalafil\ntarget_smile = 'CCCC1=NN(C2=C1N=C(NC2=O)C3=C(C=CC(=C3)S(=O)(=O)N4CCN(CC4)C)OCC)C' # Sildenafil \n\nmol_starting = Chem.MolFromSmiles(starting_smile)\nmol_target = Chem.MolFromSmiles(target_smile)\n\nqed_starting = Chem.QED.qed(mol_starting)\nqed_target = Chem.QED.qed(mol_target)\n\nlogP_starting = get_logP(mol_starting)\nlogP_target = get_logP(mol_target)\n\n\nscores_start_1 = get_fp_scores([starting_smile], starting_smile) # similarity to target\nscores_target_1 = get_fp_scores([starting_smile], target_smile) # similarity to starting structure\ndata = np.array([scores_target_1, scores_start_1])\navg_score_1 = np.average(data, axis=0)\nbetter_score_1 = avg_score_1 - (np.abs(data[0] - data[1])) \nbetter_score_1 = ((1/9) * better_score_1**3) - ((7/9) * better_score_1**2) + ((19/12) * better_score_1)\n\n\nscores_start_2 = get_fp_scores([target_smile], starting_smile) # similarity to target\nscores_target_2 = get_fp_scores([target_smile], target_smile) # similarity to starting structure\ndata = np.array([scores_target_2, scores_start_2])\navg_score_2 = np.average(data, axis=0)\nbetter_score_2 = avg_score_2 - (np.abs(data[0] - data[1])) \nbetter_score_2 = ((1/9) * better_score_2**3) - ((7/9) * better_score_2**2) + ((19/12) * better_score_2)\n\n\nprint('Starting logP:{} QED:{}'.format(logP_starting, qed_starting))\nprint('Target logP:{} QED:{}'.format(logP_target, qed_target))\n\n \nnum_tries = 2 \nnum_random_samples = 2 \ncollect_bidirectional = True # Doubles the number of paths: source->target & target->source\n\nprint('Initiating path collection')\nsmiles_paths_dir1, smiles_paths_dir2 = get_compr_paths(starting_smile, target_smile, num_tries, num_random_samples, collect_bidirectional)\nprint('Path collection complete')\n\n\n# Find the median molecule & plot: \nall_smiles_dir_1 = [item for sublist in smiles_paths_dir1 for item in sublist] # all the smile string of dir1\nall_smiles_dir_2 = [item for sublist in smiles_paths_dir2 for item in sublist] # all the smile string of dir2\n\nall_smiles = all_smiles_dir_1 + all_smiles_dir_2\nlogP_path = [get_logP(Chem.MolFromSmiles(x)) for x in 
all_smiles]\nQED_path = [Chem.QED.qed(Chem.MolFromSmiles(x)) for x in all_smiles]\n \nscores_start = get_fp_scores(all_smiles, starting_smile) # similarity to target\nscores_target = get_fp_scores(all_smiles, target_smile) # similarity to starting structure\ndata = np.array([scores_target, scores_start])\navg_score = np.average(data, axis=0)\nbetter_score = avg_score - (np.abs(data[0] - data[1])) \nbetter_score = ((1/9) * better_score**3) - ((7/9) * better_score**2) + ((19/12) * better_score)\n\n\n\n\n# Filter based on better score: \napply_score_threshold = False \nif apply_score_threshold: \n indices_threshold = []\n for i in range(len(better_score)): \n if better_score[i] >= -20: # 0.2 = if required, Threshold! \n indices_threshold.append(i)\n \n all_smiles = [all_smiles[i] for i in indices_threshold]\n logP_path = [get_logP(Chem.MolFromSmiles(x)) for x in all_smiles]\n QED_path = [Chem.QED.qed(Chem.MolFromSmiles(x)) for x in all_smiles]\n\n scores_start = get_fp_scores(all_smiles, starting_smile) # similarity to target\n scores_target = get_fp_scores(all_smiles, target_smile) # similarity to starting structure\n data = np.array([scores_target, scores_start])\n avg_score = np.average(data, axis=0)\n better_score = avg_score - (np.abs(data[0] - data[1])) \n better_score = ((1/9) * better_score**3) - ((7/9) * better_score**2) + ((19/12) * better_score)\n\n\n\nprint('Min {} Max {}'.format(min(better_score), max(better_score)))\n# raise Exception('get vmax value')\n\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots()\ncm = plt.cm.get_cmap('viridis')\nsc = ax.scatter(logP_path, QED_path, c=better_score.tolist(), cmap=cm, s=13) \nclb = plt.colorbar(sc)\n\nsc = ax.plot([logP_starting, logP_target], [qed_starting, qed_target], 'o', c='black', markersize=7, linewidth=3) # TARGETS \n\nclb.set_label('Joint Similarity', fontsize=10)\nax.set_xlabel('LogP', fontsize=10)\nax.set_ylabel('QED', fontsize=10)\nplt.xlim([-4, 8])\n\nax.grid(True)\nfig.tight_layout()\n\nplt.show()\n\nalphabet = list(selfies.get_semantic_robust_alphabet()) # 34 SELFIE characters \nmax_len_random_struct = max([len(get_selfie_chars(encoder(starting_smile))), len(get_selfie_chars(encoder(target_smile)))])\nmin_len_random_struct = min([len(get_selfie_chars(encoder(starting_smile))), len(get_selfie_chars(encoder(target_smile)))])\nnum_samples = len(logP_path)\nrandom_selfies = []\n\nfor _ in range(num_samples): \n selfie = ''\n\n for i in range(random.randint(min_len_random_struct, max_len_random_struct)): # max_molecules_len = max random selfie string length \n selfie = selfie + np.random.choice(alphabet, size=1)[0]\n random_selfies.append(selfie)\n \nrandom_smiles = [decoder(x) for x in random_selfies]\nscores_start_rnd = get_fp_scores(random_smiles, starting_smile) # similarity to target\nscores_target_rnd = get_fp_scores(random_smiles, target_smile) # similarity to starting structure\ndata_rnd = np.array([scores_target_rnd, scores_start_rnd])\navg_score_rnd = np.average(data_rnd, axis=0)\nbetter_score_random = avg_score_rnd - (np.abs(data_rnd[0] - data_rnd[1])) \n\nbetter_score_random = ((1/9) * better_score_random**3) - ((7/9) * better_score_random**2) + ((19/12) * better_score_random)\n\nlogP_path_random = [get_logP(Chem.MolFromSmiles(x)) for x in random_smiles]\nQED_path_random = [Chem.QED.qed(Chem.MolFromSmiles(x)) for x in random_smiles]\n\n# DISTRIBUTION PLOTS! 
\nA = sns.kdeplot(logP_path_random, bw_method=0.2, label=\"Random SELFIES\")\nA = sns.kdeplot(logP_path, bw_method=0.2, label=\"Chemical path\", color='yellowgreen')\n\nplt.axvline(logP_starting, 0, 1.0, c='black') # vertical line\nplt.axvline(logP_target, 0, 1.0, c='black') # vertical line\n\n\nA.set_xlabel('LogP', fontsize=10)\nA.set_ylabel('Density', fontsize=10)\nplt.xlim([-4, 8])\nplt.legend()\n# plt.savefig('./final_saved/logP_distrb.svg', dpi=500)\nplt.show()\n\n\nB = sns.kdeplot(QED_path_random, bw=.2, label=\"Random SELFIES\")\nB = sns.kdeplot(QED_path, bw=.2, label=\"Chemical path\", color='yellowgreen')\nplt.axvline(qed_starting, 0, 1.0, c='black') # vertical line\nplt.axvline(qed_target, 0, 1.0, c='black') # vertical line\nB.set_xlabel('QED', fontsize=10)\nB.set_ylabel('Density', fontsize=10)\nplt.xlim([0, 1])\nplt.legend()\nplt.show()", "Starting logP:2.2113000000000005 QED:0.6925587527204076\nTarget logP:1.6109 QED:0.5534046105187581\nInitiating path collection\nPath collection complete\nMin -0.42633599999999994 Max 0.22690870640151978\n" ] ], [ [ "# Median Molecule Formation: \n", "_____no_output_____" ], [ "### Imports for Obtaining median molecules between two input SMILES", "_____no_output_____" ] ], [ [ "def get_ECFP4(mol):\n return AllChem.GetMorganFingerprint(mol, 2)\n\ndef get_fp_scores(smiles_back, target_smi): \n smiles_back_scores = []\n target = Chem.MolFromSmiles(target_smi)\n fp_target = get_ECFP4(target)\n for item in smiles_back: \n mol = Chem.MolFromSmiles(item)\n fp_mol = get_ECFP4(mol)\n score = TanimotoSimilarity(fp_mol, fp_target)\n smiles_back_scores.append(score)\n return smiles_back_scores\n\n\n\ndef sanitize_smiles(smi):\n try:\n mol = smi2mol(smi, sanitize=True)\n smi_canon = mol2smi(mol, isomericSmiles=False, canonical=True)\n return (mol, smi_canon, True)\n except:\n return (None, None, False)\n \n\ndef get_median_mols(starting_smile, target_smile, num_tries, num_random_samples, collect_bidirectional, num_top_iter): \n \n smiles_paths_dir1, smiles_paths_dir2 = get_compr_paths(starting_smile, target_smile, num_tries, num_random_samples, collect_bidirectional)\n \n # Find the median molecule & plot: \n all_smiles_dir_1 = [item for sublist in smiles_paths_dir1 for item in sublist] # all the smile string of dir1\n all_smiles_dir_2 = [item for sublist in smiles_paths_dir2 for item in sublist] # all the smile string of dir2\n \n all_smiles = [] # Collection of valid smile strings \n for smi in all_smiles_dir_1 + all_smiles_dir_2: \n if Chem.MolFromSmiles(smi) != None: \n mol, smi_canon, _ = sanitize_smiles(smi)\n all_smiles.append(smi_canon)\n\n all_smiles = list(set(all_smiles))\n\n scores_start = get_fp_scores(all_smiles, starting_smile) # similarity to target\n scores_target = get_fp_scores(all_smiles, target_smile) # similarity to starting structure\n data = np.array([scores_target, scores_start])\n avg_score = np.average(data, axis=0)\n better_score = avg_score - (np.abs(data[0] - data[1])) \n better_score = ((1/9) * better_score**3) - ((7/9) * better_score**2) + ((19/12) * better_score)\n \n best_idx = better_score.argsort()[-num_top_iter:][::-1]\n best_smi = [all_smiles[i] for i in best_idx]\n best_scores = [better_score[i] for i in best_idx]\n\n return best_smi, best_scores ", "_____no_output_____" ] ], [ [ "## Obtain the best median molecules (i.e. 
possessing high joint similarity) for Tadalafil & Sildenafil", "_____no_output_____" ] ], [ [ "num_tries = 6\nnum_random_samples = 6\ncollect_bidirectional = True # Doubles the number of paths: source->target & target->source\napply_filter = False\nnum_top_iter = 12 # Number of molecules that are selected after each iteration\n\nsmi_1 = 'CN1CC(=O)N2C(C1=O)CC3=C(C2C4=CC5=C(C=C4)OCO5)NC6=CC=CC=C36' # Tadalafil\nsmi_2 = 'CCCC1=NN(C2=C1N=C(NC2=O)C3=C(C=CC(=C3)S(=O)(=O)N4CCN(CC4)C)OCC)C' # Sildenafil \n\n# SMILES, joint-sim score\nsmiles_, best_scores = get_median_mols(smi_1, smi_2, num_tries, num_random_samples, collect_bidirectional, num_top_iter)\n", "_____no_output_____" ], [ "mol_ = [Chem.MolFromSmiles(x) for x in smiles_]\n\nimg=Draw.MolsToGridImage(mol_[:40],molsPerRow=4,subImgSize=(200,200)) \nimg\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4af5edd15dbeaaf25f31224b10481375706f9c16
7,536
ipynb
Jupyter Notebook
notebooks/tensor01/03. EagerExecution.ipynb
mmercan/study_python
1170116d57aecf331b4d8d5dd78ac1c6abe5003a
[ "MIT" ]
null
null
null
notebooks/tensor01/03. EagerExecution.ipynb
mmercan/study_python
1170116d57aecf331b4d8d5dd78ac1c6abe5003a
[ "MIT" ]
null
null
null
notebooks/tensor01/03. EagerExecution.ipynb
mmercan/study_python
1170116d57aecf331b4d8d5dd78ac1c6abe5003a
[ "MIT" ]
null
null
null
18.607407
101
0.436438
[ [ [ "import tensorflow as tf", "_____no_output_____" ], [ "tf.executing_eagerly()", "_____no_output_____" ], [ "x = [[10.]]\n\nres = tf.matmul(x, x)\n\nres", "_____no_output_____" ], [ "a = tf.constant([[10, 20],\n [30, 40]])\n\na", "_____no_output_____" ], [ "b = tf.add(a, 2)\n\nprint(b)", "tf.Tensor(\n[[12 22]\n [32 42]], shape=(2, 2), dtype=int32)\n" ], [ "print(a * b)", "tf.Tensor(\n[[200 231]\n [264 299]], shape=(2, 2), dtype=int32)\n" ], [ "m = tf.Variable([4.0, 5.0, 6.0], tf.float32, name='m')\n\nc = tf.Variable([1.0, 1.0, 1.0], tf.float32, name='c')", "_____no_output_____" ], [ "m", "_____no_output_____" ], [ "c", "_____no_output_____" ], [ "x = tf.Variable([100.0, 100.0, 100.0], tf.float32, name='x')\n\nx", "_____no_output_____" ], [ "y = m * x + c\n\ny", "_____no_output_____" ] ], [ [ "### Dynamic Control Flow", "_____no_output_____" ] ], [ [ "def tensorflow(max_num):\n \n counter = tf.constant(0)\n max_num = tf.constant(max_num)\n \n for num in range(0, max_num.numpy() + 1):\n num = tf.constant(num)\n \n if int(num % 3) == 0 and int(num % 5) == 0:\n print('Divisible by 3 and 5: ', num.numpy())\n \n elif int(num % 3) == 0:\n print('Divisible by 3: ', num.numpy())\n \n elif int(num % 5) == 0:\n print('Divisible by 5: ', num.numpy())\n \n else:\n print(num.numpy())\n \n counter += 1", "_____no_output_____" ], [ "tensorflow(15)", "Divisible by 3 and 5: 0\n1\n2\nDivisible by 3: 3\n4\nDivisible by 5: 5\nDivisible by 3: 6\n7\n8\nDivisible by 3: 9\nDivisible by 5: 10\n11\nDivisible by 3: 12\n13\n14\nDivisible by 3 and 5: 15\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4af5fbefae363fdc406a445558e65e148252c2c9
41,602
ipynb
Jupyter Notebook
module_3/Module_suppl_notebooks/PYTHON_11_Feature_engineering.ipynb
EugeniaVoytik/Skillfactory_Data_Science_Degree
3d0cf5b2a02693af8d6c2cbffb7636ebd7299d50
[ "Apache-2.0" ]
null
null
null
module_3/Module_suppl_notebooks/PYTHON_11_Feature_engineering.ipynb
EugeniaVoytik/Skillfactory_Data_Science_Degree
3d0cf5b2a02693af8d6c2cbffb7636ebd7299d50
[ "Apache-2.0" ]
null
null
null
module_3/Module_suppl_notebooks/PYTHON_11_Feature_engineering.ipynb
EugeniaVoytik/Skillfactory_Data_Science_Degree
3d0cf5b2a02693af8d6c2cbffb7636ebd7299d50
[ "Apache-2.0" ]
null
null
null
27.902079
4,984
0.492428
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "log = pd.read_csv('../data/log.xls', header=None, names=['user_id', 'time', 'bet', 'win'])\nlog.time =log.time.str.replace('[', '')\nlog.time = pd.to_datetime(log.time)\nlog.head()", "_____no_output_____" ], [ "users = pd.read_csv('../data/users.xls', encoding=\"koi8-r\", sep='\\t', names=['user_id', 'email', 'geo'])\nusers.head()", "_____no_output_____" ], [ "sum(log.time.isna())", "_____no_output_____" ], [ "log_backup = log.copy()\nlog_backup.dropna(axis=1).shape", "_____no_output_____" ], [ "log_backup = log.copy()\nlog_backup.dropna(axis=0).shape", "_____no_output_____" ], [ "log_backup = log.copy()\nfor col in ['user_id', 'time']:\n if sum(log_backup[col].isna()) > 0:\n log_backup.drop(col, axis=1, inplace=True)\nlog_backup.shape", "_____no_output_____" ], [ "log_backup = log.copy()\nlog_backup.drop_duplicates(subset=['user_id', 'time']).shape", "_____no_output_____" ], [ "log.time.max().hour", "_____no_output_____" ], [ "# log = pd.read_csv('../data/log.xls', header=None, names=['user_id', 'time', 'bet', 'win'])\n# log = log.dropna() \n# log.columns = ['user_id', 'time', 'bet', 'win'] \n# log['time'] = log['time'].apply(lambda x: x[1:]) \n# log['time'] = pd.to_datetime(log['time']) \n# log['time'] = log['time'].apply(lambda x: x.minute)\n# log.time.head()", "_____no_output_____" ], [ "log['time'].dt.minute.value_counts().index[0]", "_____no_output_____" ], [ "log['time'].dt.month.value_counts(ascending=True).index[0]", "_____no_output_____" ], [ "log.time.dt.dayofweek.value_counts()[[5, 6]].sum()", "_____no_output_____" ], [ "def time_to_daytime(value):\n if 0 <= value < 6:\n return 'night'\n elif 6 <= value < 12:\n return 'morning'\n elif 12 <= value < 18:\n return 'afternoon'\n elif 18 <= value < 24:\n return 'evening'", "_____no_output_____" ], [ "log_backup = log.copy()\nlog_backup.dropna(axis=0, subset=['time'], inplace=True)\nlog_backup['time_of_day'] = log_backup.time.dt.hour.apply(time_to_daytime)\nlog_backup.time_of_day.value_counts().index[-1]", "_____no_output_____" ], [ "log.bet.fillna(0).value_counts()[0]", "_____no_output_____" ], [ "log.bet[0]", "_____no_output_____" ], [ "import math\n\ndef fill_win(win, bet):\n if not math.isnan(win):\n return win\n else:\n if math.isnan(bet):\n return 0\n else:\n return -bet", "_____no_output_____" ], [ "log['win'] = log.apply(lambda row: fill_win(row.win, row.bet), axis=1)", "_____no_output_____" ], [ "log[log.win < 0].shape[0]", "_____no_output_____" ], [ "def calculate_sum_win(win, bet):\n if win < 0:\n return win\n else:\n return win - bet\n \nlog['net'] = log.apply(lambda row: calculate_sum_win(row.win, row.bet), axis=1)", "_____no_output_____" ], [ "round(log[log.win > 0].net.mean(), 0)", "_____no_output_____" ], [ "round(log[log.win > 0].net.median(), 0)", "_____no_output_____" ], [ "net_above_0 = log[log.win > 0]\nnet_above_0.net.plot('box')", "_____no_output_____" ], [ "log.bet.mean(skipna=True)", "_____no_output_____" ], [ "log['bet'].dropna().mean()", "_____no_output_____" ], [ "log.bet.sum() / log.bet.dropna().shape[0]", "_____no_output_____" ], [ "np.mean(log.bet)", "_____no_output_____" ], [ "log.bet.mean()", "_____no_output_____" ], [ "log_backup = log.copy()\nlog_backup[['win', 'bet']] = log_backup[['win', 'bet']].fillna(0)\nlog_backup['net'] = log_backup.win - log_backup.bet", "_____no_output_____" ], [ "log_backup[log_backup.bet > 0].shape[0] * 100 / len(log_backup)", "_____no_output_____" ], [ "log_backup[log_backup.bet > 0].net.mean()", 
"_____no_output_____" ], [ "log_backup[log_backup.net < 0].net.mean()", "_____no_output_____" ], [ "print(f\"% of loses = {log_backup[log_backup.net < 0].shape[0] * 100 / len(log_backup)}\")\nprint(f\"% of wins = {log_backup[log_backup.net > 0].shape[0] * 100 / len(log_backup)}\")", "% of loses = 34.7\n% of wins = 13.8\n" ], [ "log_backup = pd.read_csv('log.csv', header=None, names=['user_id', 'time', 'bet', 'win'])\nmin_bet = log_backup.bet.min()\nmin_bet_amount = log_backup[log_backup.bet == min_bet].shape[0]", "_____no_output_____" ], [ "log = pd.read_csv('../data/log.xls', header=None, names=['user_id', 'time', 'bet', 'win'])\nlog.time =log.time.str.replace('[', '')\nlog.time = pd.to_datetime(log.time)\nlog.head()", "_____no_output_____" ], [ "users = pd.read_csv('../data/users.xls', encoding=\"koi8-r\", sep='\\t', names=['user_id', 'email', 'geo'])\nusers.head()", "_____no_output_____" ], [ "# Приведем признак user_id к одному формату в обоих датасетах \nusers.user_id = users.user_id.apply(lambda x: x.lower()) \n# Избавимся от ошибок в user_id \nlog = log[log.user_id != '#error'] \nlog.user_id = log.user_id.str.split(' - ').apply(lambda x: x[1])", "_____no_output_____" ], [ "df = pd.merge(users, log, on='user_id')", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.groupby('user_id').win.median().median() ", "_____no_output_____" ], [ "df[['win', 'bet']] = df[['win', 'bet']].fillna(0)\ndf['net'] = df.apply(lambda row: calculate_sum_win(row.win, row.bet), axis=1)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.groupby('user_id').net.sum().median()", "_____no_output_____" ], [ "no_bets_per_person = []\nfor user in df.user_id.unique():\n data = df[df.user_id == user]\n no_bets = len(data[data.bet == 0])\n if no_bets < len(data):\n no_bets_per_person.append(no_bets)\nnp.mean(no_bets_per_person)", "_____no_output_____" ], [ "# another solution\n# после преобразований объединили данные из двух файлов\nusers_log = pd.merge(log, users, on ='user_id')\n# создаем два датафрейма для подсчета случаев нулевой ставки\ngroup = users_log[users_log.bet==0].groupby('user_id').bet.count() \ngroup_not_null = users_log[users_log.bet>0].groupby('user_id').bet.count()\n# и объединяем по user_id\njoined=pd.merge(group, group_not_null, on=['user_id'])\n# Оставим только те строки, в которых у посетителя была хотя бы одна ставка\njoined = joined[joined['bet_y']>0]\n# И посчитаем среднее количество приходов без ставки\njoined['bet_x'].sum()/len(joined)", "_____no_output_____" ], [ "group = df[df.bet==0].groupby('user_id').bet.count() \ngroup_not_null = df[df.bet>0].groupby('user_id').bet.count()\njoined=pd.merge(group, group_not_null, on=['user_id'])", "_____no_output_____" ], [ "joined.index", "_____no_output_____" ], [ "time = []\ndf.sort_values('user_id', inplace=True)\nfor user in df.user_id.unique():\n data = df[df.user_id == user]\n no_bets = len(data[data.bet == 0])\n if no_bets < len(data):\n min_time_zero_bets = data[data.bet == 0].time.min()\n min_time_norm_bets = data[data.bet != 0].time.min()\n if min_time_norm_bets > min_time_zero_bets:\n time.append(min_time_norm_bets - min_time_zero_bets)\n else: \n time.append(pd.Timedelta(0))\nnp.mean(time)", "_____no_output_____" ], [ "mean_bets_per_city = df[df.bet > 0].groupby('geo').bet.mean()\nmean_bets_per_city.max() / mean_bets_per_city.min()", "_____no_output_____" ], [ "# log = pd.read_csv(\"../data/log.xls\", header=None, names=['user_id','time','bet','win'])\n# users = pd.read_csv(\"../data/users.xls\", 
encoding='KOI8-R', sep='\\t', names=['user_id','email','geo'])\n\n# users.user_id = users.user_id.apply(lambda x: x.lower()) \n# log = log[log.user_id != '#error'] \n# log.user_id = log.user_id.str.split(' - ').apply(lambda x: x[1]) \n\n# df = pd.merge(log, users, on ='user_id')\n\n# sample2 = df.groupby('geo').user_id.count()", "_____no_output_____" ], [ "df['time'].dt.minute.value_counts()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af5ff850670c5e5f8176f44586b70f9956eaf62
235,934
ipynb
Jupyter Notebook
notebooks/2-Conflations.ipynb
voytekresearch/BandRatios
ce06cdd3066c45730bff7f48e82835bd4c0e0fdc
[ "MIT" ]
6
2021-03-29T20:58:32.000Z
2022-03-20T10:38:35.000Z
notebooks/2-Conflations.ipynb
voytekresearch/BandRatios
ce06cdd3066c45730bff7f48e82835bd4c0e0fdc
[ "MIT" ]
1
2020-01-11T05:30:38.000Z
2020-01-11T05:30:39.000Z
notebooks/2-Conflations.ipynb
voytekresearch/BandRatios
ce06cdd3066c45730bff7f48e82835bd4c0e0fdc
[ "MIT" ]
7
2020-03-18T13:22:38.000Z
2022-02-06T12:05:11.000Z
369.223787
103,968
0.928874
[ [ [ "# Band Ratios Conflations\n\nThis notebook steps through how band ratio measures are underdetermined. \n\nBy 'underdetermined', we mean that the same value, or same change in value between measures, can arise from different underlying causes. \n\nThis shows that band ratios are a non-specific measure.\n\nAs an example case, we use the theta-beta ratio.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom fooof import FOOOF\nfrom fooof.sim import gen_power_spectrum\nfrom fooof.plts.spectra import (plot_spectrum, plot_spectra,\n plot_spectrum_shading, plot_spectra_shading)", "_____no_output_____" ], [ "# Import custom project code\nimport sys\nsys.path.append('../bratios')\nfrom ratios import calc_band_ratio\nfrom paths import FIGS_PATHS as fp", "_____no_output_____" ], [ "# Settings\nSAVE_FIG = False\nPLOT_TITLES = True # Whether to plot titles on each axis", "_____no_output_____" ], [ "# Plot settings\nshade_color = '#0365C0'", "_____no_output_____" ], [ "# Band Settings\ntheta_band = [4, 8]\nbeta_band = [20, 30]\n\n# Set up index helpers\ncf_ind = 0\npw_ind = 1\nbw_ind = 2", "_____no_output_____" ], [ "# Simulated power spectra settings\nfreq_range = [1, 35]\nfreq_res = 0.1\nnlv = 0\n\n# Define default aperiodic values\nap_def = [0, 1]\n\n# Define default periodic values\ntheta_def = [6, 0.4, 1]\nalpha_def = [10, 0.5, 0.75]\nbeta_def = [25, 0.3, 1.5]", "_____no_output_____" ] ], [ [ "## Comparing Band Ratio Values\n\nFirst, let's consider a hypothetical investigation comparing band ratio measures between two groups.\n\nThe typical interpretation of finding a difference between measured band ratios would be that there is a difference in the relative powers of the oscillation bands used in the calculation of the band ratio. That is to say, the change in ratio could come from a change in two things (the power of the low band, and/or the power of the high band). 
\n\nHere, we will show that there are actually many more ways in which one could measure this difference.\n\nA numerically identically change in theta / beta ratio can be obtained from:\n\n#### Periodic Changes\n- a change in theta power\n- a change in theta bandwidth\n- a change in beta center frequency\n- a change in beta power\n- a change in beta bandwidth\n\n#### Aperiodic Changes\n- a change in aperiodic exponent\n - with or without oscillations present\n \nNote that the specific values in the simulations below have been tuned to create numerically identical changes in measured band ratio.", "_____no_output_____" ] ], [ [ "# Create a baseline PSD, with oscillations, to compare to\nfreqs, ps_base = gen_power_spectrum(freq_range, ap_def,\n [theta_def, alpha_def, beta_def],\n nlv, freq_res)", "_____no_output_____" ] ], [ [ "### Periodic Changes", "_____no_output_____" ] ], [ [ "## CF\n# Change in center frequency - high band\nbeta_cf = beta_def.copy(); beta_cf[cf_ind] = 19.388\nfreqs, ps_be_cf = gen_power_spectrum(freq_range, ap_def,\n [theta_def, alpha_def, beta_cf],\n nlv, freq_res)\n\n## PW\n# Changes in oscillation power - low band\ntheta_pw = theta_def.copy(); theta_pw[pw_ind] = 0.5041\nfreqs, ps_th_pw = gen_power_spectrum(freq_range, ap_def,\n [theta_pw, alpha_def, beta_def],\n nlv, freq_res)\n# Changes in oscillation power - high band\nbeta_pw = beta_def.copy(); beta_pw[pw_ind] = 0.1403\nfreqs, ps_be_pw = gen_power_spectrum(freq_range, ap_def,\n [theta_def, alpha_def, beta_pw],\n nlv, freq_res)\n\n## BW\n# Changes in oscillation bandwidth - low band\ntheta_bw = theta_def.copy(); theta_bw[bw_ind] = 1.61\nfreqs, ps_th_bw = gen_power_spectrum(freq_range, ap_def,\n [theta_bw, alpha_def, beta_def],\n nlv, freq_res)\n# Changes in oscillation bandwidth - high band\nbeta_bw = beta_def.copy(); beta_bw[bw_ind] = 0.609\nfreqs, ps_be_bw = gen_power_spectrum(freq_range, ap_def,\n [theta_def, alpha_def, beta_bw],\n nlv, freq_res)", "_____no_output_____" ], [ "# Changes in other band - center frequency\nalpha_cf = alpha_def.copy(); alpha_cf[cf_ind] = 8.212\nfreqs, ps_al_cf = gen_power_spectrum(freq_range, ap_def,\n [theta_def, alpha_cf, beta_def],\n nlv, freq_res)\n\n# Changes in other band - bandwidth\nalpha_bw = alpha_def.copy(); alpha_bw[bw_ind] = 1.8845\nfreqs, ps_al_bw = gen_power_spectrum(freq_range, ap_def,\n [theta_def, alpha_bw, beta_def],\n nlv, freq_res)", "_____no_output_____" ], [ "# Collect all the power spectra together\nspectra_data = {'Theta Frequency' : None,\n 'Theta Power' : ps_th_pw,\n 'Theta Bandwidth' : ps_th_bw,\n 'Alpha Frequency' : ps_al_cf,\n 'Alpha Power' : None,\n 'Alpha Bandwidth' : ps_al_bw,\n 'Beta Frequency' : ps_be_cf,\n 'Beta Power' : ps_be_pw,\n 'Beta Bandwidth' : ps_be_bw}", "_____no_output_____" ], [ "# Calcualte theta beta ratio of the baseline power spectrum\nbase_br = calc_band_ratio(freqs, ps_base, theta_band, beta_band)", "_____no_output_____" ], [ "# Calculate changes in theta / beta ratios\ndiffreqs = {}\nfor label, spectra in spectra_data.items():\n if np.all(spectra):\n comp_br = calc_band_ratio(freqs, spectra, theta_band, beta_band)\n diffreqs[label] = base_br - comp_br", "_____no_output_____" ], [ "# Check the computed ratio values of each spectrum\nprint('TBR of base spectrum is: {:1.3f}'.format(base_br))\nprint('TBR of comp spectrum is: {:1.3f}'.format(comp_br))", "TBR of base spectrum is: 5.737\nTBR of comp spectrum is: 6.737\n" ], [ "# Check TBR difference measures from periodic changes\nfor label, diff in diffreqs.items():\n print('TBR 
difference from {:20} is \\t {:1.3f}'.format(label, diff))", "TBR difference from Theta Power is \t -1.000\nTBR difference from Theta Bandwidth is \t -1.000\nTBR difference from Alpha Frequency is \t -1.000\nTBR difference from Alpha Bandwidth is \t -1.000\nTBR difference from Beta Frequency is \t -1.000\nTBR difference from Beta Power is \t -1.000\nTBR difference from Beta Bandwidth is \t -1.000\n" ], [ "# Create figure of periodic changes\ntitle_settings = {'fontsize': 16, 'fontweight': 'bold'}\nfig, ax = plt.subplots(3, 3, figsize=(15, 14))\n\nfor axis, (title, data) in zip(ax.flatten(), spectra_data.items()):\n \n if not np.all(data): continue\n \n plot_spectra_shading(freqs, [ps_base, data], [theta_band, beta_band],\n shade_colors=shade_color,\n log_freqs=False, log_powers=True, ax=axis)\n \n if PLOT_TITLES:\n axis.set_title(title, **title_settings)\n \n axis.set_xlim([0, 35])\n axis.set_ylim([-1.75, 0])\n\n axis.xaxis.label.set_visible(False)\n axis.yaxis.label.set_visible(False)\n \n# Turn off empty axes\nax[0, 0].axis('off')\nax[1, 1].axis('off')\n \nfig.subplots_adjust(hspace=.3)\nfig.subplots_adjust(wspace=.3)\n\nif SAVE_FIG: plt.savefig(fp.make_file_path(fp.demo, 'Underdetermined-Periodic', 'pdf'))", "_____no_output_____" ] ], [ [ "Each panel above plots two PSDs, where the blue curve is the same reference power spectrum plotted in all panels, and the orange is a unique comparison spectrum. \n\nThe difference between TBR from the blue and orange curve is the same (see cell above) across each panel. \n\nThis shows that multiple spectral parameters could change to arrive at identical differences in a ratio measure.", "_____no_output_____" ], [ "#### Periodic Notes\n\nNote that for a given change (or direction of change) in theta / beta ratio (TBR), there is only one center frequency change that could do it. \n\nThis is true for the case, as is simulated, in which the 'baseline' spectrum has oscillations entirely within band ranges. In this example, the change is a relative increase in 'theta', and there is no way to increase relative theta by changing theta CF alone. This is due to the choice of comparison spectrum, and in another scenario, theta CF could also change measured ratio measures.", "_____no_output_____" ], [ "### Aperiodic Changes\n\nThe same change in ratio can also be driven from changes in aperiodic properties. 
\n\nThis can happen with or without oscillations even being present.", "_____no_output_____" ] ], [ [ "# Change in aperiodic exponent\nap_shift = [0.13, 1.1099]\nfreqs, ps_ap_ex = gen_power_spectrum(freq_range, ap_shift,\n [theta_def, alpha_def, beta_def],\n nlv, freq_res)", "_____no_output_____" ], [ "# Use a new base and transformation, without any oscillations\nfreqs, ps_new_base = gen_power_spectrum(freq_range, ap_def, [],\n nlv, freq_res)\nap_shift = [0.13, 1.1417]\nfreqs, ps_new_apch = gen_power_spectrum(freq_range, ap_shift, [],\n nlv, freq_res)", "_____no_output_____" ], [ "# Calculate the differences in ratio from baseline spectra\nd_ap_osc = base_br - calc_band_ratio(freqs, ps_ap_ex, theta_band, beta_band)\nd_ap_no_osc = calc_band_ratio(freqs, ps_new_base, theta_band, beta_band) - \\\n calc_band_ratio(freqs, ps_new_apch, theta_band, beta_band)", "_____no_output_____" ], [ "# Check TBR difference measures from aperiodic changes\nbase_text = 'TBR difference from the aperiodic component '\nprint(base_text + 'with oscillations is \\t {:1.3f}'.format(d_ap_osc))\nprint(base_text + 'without oscillations is \\t {:1.3f}'.format(d_ap_no_osc))", "TBR difference from the aperiodic component with oscillations is \t -1.000\nTBR difference from the aperiodic component without oscillations is \t -1.000\n" ], [ "# Collect together components to plot\nap_bases = [ps_base, ps_new_base]\nap_diffs = [ps_ap_ex, ps_new_apch]", "_____no_output_____" ], [ "# Create aperiodic differences figure\nfig, ax = plt.subplots(2, 1, figsize=(5, 9))\n\nfor ps_base, ps_diff, axis in zip(ap_bases, ap_diffs, ax.flatten()):\n \n plot_spectra_shading(freqs, [ps_base, ps_diff], [theta_band, beta_band],\n shade_colors=shade_color,\n log_freqs=False, log_powers=True, ax=axis)\n \n if PLOT_TITLES:\n axis.set_title('Aperiodic Exponent', **title_settings)\n \n # Plot Aesthetics\n axis.set_xlim([0, 35])\n axis.set_ylim([-1.75, 0])\n axis.xaxis.label.set_visible(False)\n axis.yaxis.label.set_visible(False)\n \nfig.subplots_adjust(wspace=.3)\n\nif SAVE_FIG: plt.savefig(fp.make_file_path(fp.demo, 'Underdetermined-Aperiodic', 'pdf'))", "_____no_output_____" ] ], [ [ "#### Conclusions\n\nIn this example, we have explored changes to measured band ratios by varying different spectral parameters.\n\nGiven an observed change in a BandRatio measure, there is no way to tell what has actually changed.\n\nVariations in multiple spectral parameters can lead to the exact same change in ratio measure.\n\nThere is no reason to think the change even reflects oscillatory activity, given that aperiodic shifts can drive this effect. \n\nIn this notebook, we simulated variations in one parameter at a time, but in practice, all of these changes could happen together. \n\nIn subsequent notebooks, we will further characterize these findings by simulating changes in each parameter, to estimate how impactful different parameters are to ratio measures, as well as by simulating concurrent changes in multiple parameters, to explore the interaction between changes.", "_____no_output_____" ], [ "## Same Ratio, Different Spectra\n\nSo far we have seen how multiple possible changes in power spectra can lead to the same measured difference in band ratio measures across power spectra.\n\nWhat if we calculate band ratio measures and find that they are the same? Can we infer that the analyzed power spectra are in some ways equivalent? \n\nNext, let's examine if and how different power spectra can have the same band ratio value. 
", "_____no_output_____" ] ], [ [ "# Create a collection of spectra with different properties, with the same measured ratio value\nfreqs, ps1 = gen_power_spectrum(freq_range, [0, 0.9059],\n [theta_def, alpha_def, beta_def],\n nlv, freq_res)\nfreqs, ps2 = gen_power_spectrum(freq_range, [0, 0.9059],\n [[6, 0.5, 2], alpha_def, [25, 0.3544, 5]],\n nlv, freq_res)\nfreqs, ps3 = gen_power_spectrum(freq_range, [0.25, 1.2029],\n [[6, 0.10, 1], alpha_def, beta_def],\n nlv, freq_res)\nfreqs, ps4 = gen_power_spectrum(freq_range, [0.25, 1.2029],\n [theta_def, alpha_def, [25, 0.66444, 1.5]],\n nlv, freq_res)", "_____no_output_____" ], [ "# Collect the generated spectra together\nspectra_list = [ps1, ps2, ps3, ps4]", "_____no_output_____" ], [ "# Calculate the ratio value for each spectrum\nfor spectrum in spectra_list:\n print('Ratio value:\\t {:1.3f}'.format(calc_band_ratio(freqs, ps1, theta_band, beta_band)))", "Ratio value:\t 5.000\nRatio value:\t 5.000\nRatio value:\t 5.000\nRatio value:\t 5.000\n" ], [ "# Plot all the power spectra together\nplot_spectra_shading(freqs, spectra_list, [theta_band, beta_band],\n shade_colors=shade_color, linewidth=3,\n log_freqs=False, log_powers=True)\nif SAVE_FIG: plt.savefig(fp.make_file_path(fp.demo, 'EquivalentRatioSpectra', 'pdf'))", "_____no_output_____" ] ], [ [ "In the plot above, we can see four different power spectra. \n\nHowever, each of these power spectra has the exact same measured theta / beta ratio value. \n\nThus we can conclude that measuring the same band ratio value for different power spectra should not be taken to imply that they are in any way equivalent.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
4af620b97a25881e00ab82e5b516720d46e0a998
34,003
ipynb
Jupyter Notebook
Week_3_Pandas_and_Matplotlib/Lab_Assignment/Python_Data_Analysis_Lab_Assignment.ipynb
m-matute/DS311-Technologies-in-Data-Analytic
09b61de9ddebb484a25366b9f0350989f96e94b1
[ "MIT" ]
1
2022-02-22T02:55:44.000Z
2022-02-22T02:55:44.000Z
Week_3_Pandas_and_Matplotlib/Lab_Assignment/Python_Data_Analysis_Lab_Assignment.ipynb
m-matute/DS311-Technologies-in-Data-Analytic
09b61de9ddebb484a25366b9f0350989f96e94b1
[ "MIT" ]
null
null
null
Week_3_Pandas_and_Matplotlib/Lab_Assignment/Python_Data_Analysis_Lab_Assignment.ipynb
m-matute/DS311-Technologies-in-Data-Analytic
09b61de9ddebb484a25366b9f0350989f96e94b1
[ "MIT" ]
3
2022-02-06T06:30:52.000Z
2022-02-26T04:54:54.000Z
31.542672
457
0.607299
[ [ [ "# Python Data Analysis\n\n## Introduction\nIn this lab, we'll make use of everything we've learned about pandas, data cleaning, and simple data analysis. In order to complete this lab, you'll have to import, clean, combine, reshape, and visualize data to answer questions provided, as well as your own questions!\n\n## Objectives\nYou will be able to:\n- Practice opening and inspecting the contents of CSVs using pandas dataframes\n- Practice identifying and handling missing values\n- Practice identifying and handling invalid values\n- Practice cleaning text data by removing whitespace and fixing typos\n- Practice joining multiple dataframes", "_____no_output_____" ], [ "## Your Task: Clean the Superheroes Dataset with Pandas\n\n", "_____no_output_____" ], [ "### Data Understanding\nIn this lab, we'll work with a version of the comprehensive Superheroes Dataset, which can be found on [Kaggle](https://www.kaggle.com/claudiodavi/superhero-set/data) and was originally scraped from [SuperHeroDb](https://www.superherodb.com/). We have modified the structure and contents of the dataset somewhat for the purposes of this lab. Note that this data was collected in June 2017, so it may not reflect the most up-to-date superhero lore.\n\nThe data is contained in two separate CSV files:\n\n1. `heroes_information.csv`: each record represents a superhero, with attributes of that superhero (e.g. eye color). Height is measured in centimeters, and weight is measured in pounds.\n2. `super_hero_powers.csv`: each record represents a superpower, then has True/False values representing whether each superhero has that power\n\n### Business Understanding\n\nThe business questions you have been provided are:\n\n1. What is the distribution of superheroes by publisher?\n2. What is the relationship between height and number of superpowers? And does this differ based on gender?\n3. What are the 5 most common superpowers in Marvel Comics vs. DC Comics?\n\nThis lab also simulates something you are likely to encounter at some point or another in your career in data science: someone has given you access to a dataset, as well as a few questions, and has told you to \"find something interesting\".\n\nSo, in addition to completing the basic data cleaning tasks and the aggregation and reshaping tasks needed to answer the provided questions, you will also need to formulate a question of your own and perform any additional cleaning/aggregation/reshaping that is needed to answer it.", "_____no_output_____" ], [ "### Requirements\n\n#### 1. Load the Data with Pandas\n\nCreate a dataframes `heroes_df` and `powers_df` that represent the two CSV files. Use pandas methods to inspect the shape and other attributes of these dataframes.\n\n#### 2. Perform Data Cleaning Required to Answer First Question\n\nThe first question is: *What is the distribution of superheroes by publisher?*\n\nIn order to answer this question, you will need to:\n\n* Identify and handle missing values\n* Identify and handle text data requiring cleaning\n\n#### 3. Perform Data Aggregation and Cleaning Required to Answer Second Question\n\nThe second question is: *What is the relationship between height and number of superpowers? And does this differ based on gender?*\n\nIn order to answer this question, you will need to:\n\n* Join the dataframes together\n* Identify and handle invalid values\n\n#### 4. Perform Data Aggregation Required to Answer Third Question\n\nThe third question is: *What are the 5 most common superpowers in Marvel Comics vs. 
DC Comics?*\n\nThis should not require any additional data cleaning or joining of tables, but it will require some additional aggregation.\n\n#### 5. Formulate and Answer Your Own Question\n\nThis part is fairly open-ended. Think of a question that can be answered with the available data, and perform any cleaning or aggregation required to answer that question.", "_____no_output_____" ], [ "## 1. Load the Data with Pandas\n\nIn the cell below, we:\n\n* Import and alias `pandas` as `pd`\n* Import and alias `numpy` as `np`\n* Import and alias `seaborn` as `sns`\n* Import and alias `matplotlib.pyplot` as `plt`\n* Set Matplotlib visualizations to display inline in the notebook", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Superheroes\n\nIn the cell below, load `heroes_information.csv` as `heroes_df`:", "_____no_output_____" ] ], [ [ "# Your code here\n\nheroes_df.head()", "_____no_output_____" ] ], [ [ "It looks like that CSV came with an index column, resulting in an extra column called `Unnamed: 0`. We don't need that column, so write code to get rid of it below.\n\nThere are two ways to do this:\n\n1. Re-load with `read_csv`, and specify the parameter `index_col=0`\n2. Drop the column `Unnamed: 0` with `axis=1`", "_____no_output_____" ] ], [ [ "# Your code here\n\nheroes_df.head()", "_____no_output_____" ] ], [ [ "The following code checks that the dataframe was loaded correctly.", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\n# There should be 734 rows\nassert heroes_df.shape[0] == 734\n\n# There should be 10 columns. If this fails, make sure you got rid of\n# the extra index column\nassert heroes_df.shape[1] == 10\n\n# These should be the columns\nassert list(heroes_df.columns) == ['name', 'Gender', 'Eye color', 'Race',\n 'Hair color', 'Height', 'Publisher', 'Skin color', 'Alignment', 'Weight']", "_____no_output_____" ] ], [ [ "Now you want to get familiar with the data. This step includes:\n\n* Understanding the dimensionality of your dataset\n* Investigating what type of data it contains, and the data types used to store it\n* Discovering how missing values are encoded, and how many there are\n* Getting a feel for what information it does and doesn't contain\n\nIn the cell below, inspect the overall shape of the dataframe:", "_____no_output_____" ] ], [ [ "# Your code here", "_____no_output_____" ] ], [ [ "Now let's look at the info printout:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\nheroes_df.info()", "_____no_output_____" ] ], [ [ "In the cell below, interpret that information. Do the data types line up with what we expect? Are there any missing values?", "_____no_output_____" ] ], [ [ "# Replace None with appropriate text\n\"\"\"\nNone\n\"\"\"", "_____no_output_____" ] ], [ [ "### Superpowers\n\nNow, repeat the same process with `super_hero_powers.csv`. Name the dataframe `powers_df`. 
This time, make sure you use `index_col=0` when opening the CSV because the index contains important information.", "_____no_output_____" ] ], [ [ "# Your code here (create more cells as needed)", "_____no_output_____" ] ], [ [ "The following code will check if it was loaded correctly:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\n# There should be 167 rows, 667 columns\nassert powers_df.shape == (167, 667)\n\n# The first column should be '3-D Man'\nassert powers_df.columns[0] == '3-D Man'\n\n# The last column should be 'Zoom'\nassert powers_df.columns[-1] == 'Zoom'\n\n# The first index should be 'Agility'\nassert powers_df.index[0] == 'Agility'\n\n# The last index should be 'Omniscient'\nassert powers_df.index[-1] == 'Omniscient'", "_____no_output_____" ] ], [ [ "## 2. Perform Data Cleaning Required to Answer First Question\n\nRecall that the first question is: *What is the distribution of superheroes by publisher?*\n\nTo answer this question, we will only need to use `heroes_df`, which contains the `Publisher` column.\n\n### Identifying and Handling Missing Values\n\nAs you likely noted above, the `Publisher` column is missing some values. Let's take a look at some samples with and without missing publisher values:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\nhas_publisher_sample = heroes_df[heroes_df[\"Publisher\"].notna()].sample(5, random_state=1)\nhas_publisher_sample", "_____no_output_____" ], [ "# Run this cell without changes\nmissing_publisher_sample = heroes_df[heroes_df[\"Publisher\"].isna()].sample(5, random_state=1)\nmissing_publisher_sample", "_____no_output_____" ] ], [ [ "What do we want to do about these missing values?\n\nRecall that there are two general strategies for dealing with missing values:\n\n1. Fill in missing values (either using another value from the column, e.g. the mean or mode, or using some other value like \"Unknown\")\n2. Drop rows with missing values\n\nWrite your answer below, and explain how it relates to the information we have:", "_____no_output_____" ] ], [ [ "# Replace None with appropriate text\n\"\"\"\nNone\n\"\"\"", "_____no_output_____" ] ], [ [ "Now, implement your chosen strategy using code. (You can also check the solution branch for the answer to the question above if you're really not sure.)", "_____no_output_____" ] ], [ [ "# Your code here", "_____no_output_____" ] ], [ [ "Now there should be no missing values in the publisher column:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\nassert heroes_df[\"Publisher\"].isna().sum() == 0", "_____no_output_____" ] ], [ [ "### Identifying and Handling Text Data Requiring Cleaning\n\nThe overall field of natural language processing (NLP) is quite broad, and we're not going to get into any advanced text processing, but it's useful to be able to clean up minor issues in text data.\n\nLet's take a look at the counts of heroes grouped by publisher:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\nheroes_df[\"Publisher\"].value_counts()", "_____no_output_____" ] ], [ [ "There are two cases where we appear to have data entry issues, and publishers that should be encoded the same have not been. 
In other words, there are four categories present that really should be counted as two categories (and you do not need specific comic book knowledge to be able to identify them).\n\nIdentify those two cases below:", "_____no_output_____" ] ], [ [ "# Replace None with appropriate text\n\"\"\"\nNone\n\"\"\"", "_____no_output_____" ] ], [ [ "Now, write some code to handle these cases. If you're not sure where to start, look at the pandas documentation for [replacing values](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.replace.html) and [stripping off whitespace](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.strip.html).", "_____no_output_____" ] ], [ [ "# Your code here", "_____no_output_____" ] ], [ [ "Check your work below:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\nheroes_df[\"Publisher\"].value_counts()", "_____no_output_____" ] ], [ [ "### Answering the Question\n\nNow we should be able to answer *What is the distribution of superheroes by publisher?*\n\nIf your data cleaning was done correctly, this code should work without any further changes:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\n# Set up plots\nfig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 5))\n\n# Create variables for easier reuse\nvalue_counts = heroes_df[\"Publisher\"].value_counts()\ntop_5_counts = value_counts.iloc[:5]\n\n# Plot data\nax1.bar(value_counts.index, value_counts.values)\nax2.bar(top_5_counts.index, top_5_counts.values)\n\n# Customize appearance\nax1.tick_params(axis=\"x\", labelrotation=90)\nax2.tick_params(axis=\"x\", labelrotation=45)\nax1.set_ylabel(\"Count of Superheroes\")\nax2.set_ylabel(\"Count of Superheroes\")\nax1.set_title(\"Distribution of Superheroes by Publisher\")\nax2.set_title(\"Top 5 Publishers by Count of Superheroes\");", "_____no_output_____" ] ], [ [ "## 3. Perform Data Aggregation and Cleaning Required to Answer Second Question\n\nRecall that the second question is: *What is the relationship between height and number of superpowers? And does this differ based on gender?*\n\nUnlike the previous question, we won't be able to answer this with just `heroes_df`, since information about height is contained in `heroes_df`, while information about superpowers is contained in `powers_df`.\n\n### Joining the Dataframes Together\n\nFirst, identify the shared key between `heroes_df` and `powers_df`. (Shared key meaning, the values you want to join on.) Let's look at them again:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\nheroes_df", "_____no_output_____" ], [ "# Run this cell without changes\npowers_df", "_____no_output_____" ] ], [ [ "In the cell below, identify the shared key, and your strategy for joining the data (e.g. what will one record represent after you join, will you do a left/right/inner/outer join):", "_____no_output_____" ] ], [ [ "# Replace None with appropriate text\n\"\"\"\nNone\n\"\"\"", "_____no_output_____" ] ], [ [ "In the cell below, create a new dataframe called `heroes_and_powers_df` that contains the joined data. 
You can look at the above answer in the solution branch if you're not sure where to start.\n\n***Hint:*** Note that the `.join` method requires that the two dataframes share an index ([documentation here](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.join.html)) whereas the `.merge` method can join using any columns ([documentation here](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.merge.html)). It is up to you which one you want to use.", "_____no_output_____" ] ], [ [ "# Your code here (create more cells as needed)", "_____no_output_____" ] ], [ [ "Run the code below to check your work:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\n# Confirms you have created a dataframe with the specified name\nassert type(heroes_and_powers_df) == pd.DataFrame\n\n# Confirms you have the right number of rows\nassert heroes_and_powers_df.shape[0] == 647\n\n# Confirms you have the necessary columns\n# (If you modified the value of powers_df along the way, you might need to\n# modify this test. We are checking that all of the powers are present as\n# columns.)\nassert [power in heroes_and_powers_df.columns for power in powers_df.index]\n# (If you modified the value of heroes_df along the way, you mgith need to \n# modify this as well. We are checking that all of the attribute columns from\n# heroes_df are present as columns in the joined df)\nassert [attribute in heroes_and_powers_df.columns for attribute in heroes_df.columns]", "_____no_output_____" ] ], [ [ "Now that we have created a joined dataframe, we can aggregate the number of superpowers by superhero. This code is written for you:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\n# Note: we can use sum() with True and False values and they will\n# automatically be cast to 1s and 0s\nheroes_and_powers_df[\"Power Count\"] = sum([heroes_and_powers_df[power_name] for power_name in powers_df.index])\nheroes_and_powers_df", "_____no_output_____" ] ], [ [ "### Answering the Question\n\nNow we can plot the height vs. the count of powers:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\nfig, ax = plt.subplots(figsize=(16, 8))\n\nax.scatter(\n x=heroes_and_powers_df[\"Height\"],\n y=heroes_and_powers_df[\"Power Count\"],\n alpha=0.3\n)\n\nax.set_xlabel(\"Height (cm)\")\nax.set_ylabel(\"Number of Superpowers\")\nax.set_title(\"Height vs. Power Count\");", "_____no_output_____" ] ], [ [ "Hmm...what is that stack of values off below zero? What is a \"negative\" height?", "_____no_output_____" ], [ "### Identifying and Handling Invalid values\n\nOne of the trickier tasks in data cleaning is identifying invalid or impossible values. In these cases, you have to apply your domain knowledge rather than any particular computational technique. For example, if you were looking at data containing dates of past home sales, and one of those dates was 100 years in the future, pandas wouldn't flag that as an issue, but you as a data scientist should be able to identify it.\n\nIn this case, we are looking at heights, which are 1-dimensional, positive numbers. 
In theory we could have a very tiny height close to 0 cm because the hero is microscopic, but it does not make sense that we would have a height below zero.\n\nLet's take a look at a sample of those negative heights:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\nheroes_and_powers_df[heroes_and_powers_df[\"Height\"] < 0].sample(5, random_state=1)", "_____no_output_____" ] ], [ [ "It looks like not only are those heights negative, those weights are negative also, and all of them are set to exactly -99.0.\n\nIt seems like this data source probably filled in -99.0 as the height or weight whenever it was unknown, instead of just leaving it as NaN.\n\nDepending on the purpose of the analysis, maybe this would be a useful piece of information, but for our current question, let's go ahead and drop the records where the height is -99.0. We'll make a new temporary dataframe to make sure we don't accidentally delete anything that will be needed in a future question.", "_____no_output_____" ] ], [ [ "# Run this cell without changes\nquestion_2_df = heroes_and_powers_df[heroes_and_powers_df[\"Height\"] != -99.0].copy()\nquestion_2_df", "_____no_output_____" ] ], [ [ "### Answering the Question, Again\n\nNow we can redo that plot without those negative heights:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\nfig, ax = plt.subplots(figsize=(16, 8))\n\nax.scatter(\n x=question_2_df[\"Height\"],\n y=question_2_df[\"Power Count\"],\n alpha=0.3\n)\n\nax.set_xlabel(\"Height (cm)\")\nax.set_ylabel(\"Number of Superpowers\")\nax.set_title(\"Height vs. Power Count\");", "_____no_output_____" ] ], [ [ "Ok, that makes more sense. It looks like there is not much of a relationship between height and number of superpowers.\n\nNow we can go on to answering the second half of question 2: *And does this differ based on gender?*\n\nTo indicate multiple categories within a scatter plot, we can use color to add a third dimension:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\nfig, ax = plt.subplots(figsize=(16, 8))\n\n# Select subsets\nquestion_2_male = question_2_df[question_2_df[\"Gender\"] == \"Male\"]\nquestion_2_female = question_2_df[question_2_df[\"Gender\"] == \"Female\"]\nquestion_2_other = question_2_df[(question_2_df[\"Gender\"] != \"Male\") & (question_2_df[\"Gender\"] != \"Female\")]\n\n# Plot data with different colors\nax.scatter(\n x=question_2_male[\"Height\"],\n y=question_2_male[\"Power Count\"],\n alpha=0.5,\n color=\"cyan\",\n label=\"Male\"\n)\nax.scatter(\n x=question_2_female[\"Height\"],\n y=question_2_female[\"Power Count\"],\n alpha=0.5,\n color=\"gray\",\n label=\"Female\"\n)\nax.scatter(\n x=question_2_other[\"Height\"],\n y=question_2_other[\"Power Count\"],\n alpha=0.5,\n color=\"yellow\",\n label=\"Other\"\n)\n\n# Customize appearance\nax.set_xlabel(\"Height (cm)\")\nax.set_ylabel(\"Number of Superpowers\")\nax.set_title(\"Height vs. Power Count\")\nax.legend();", "_____no_output_____" ] ], [ [ "It appears that there is still no clear relationship between count of powers and height, regardless of gender. We do however note that \"Male\" is the most common gender, and that male superheroes tend to be taller, on average.", "_____no_output_____" ], [ "## 4. Perform Data Aggregation Required to Answer Third Question\n\nRecall that the third question is: *What are the 5 most common superpowers in Marvel Comics vs. 
DC Comics?*\n\nWe'll need to keep using `heroes_and_powers_df` since we require information from both `heroes_df` and `powers_df`.\n\nYour resulting `question_3_df` should contain aggregated data, with columns `Superpower Name`, `Marvel Comics` (containing the count of occurrences in Marvel Comics), and `DC Comics` (containing the count of occurrences in DC Comics). Each row should represent a superpower.\n\nIn other words, `question_3_df` should look like this:\n\n![question 3 df](images/question_3.png)\n\nDon't worry if the rows or columns are in a different order, all that matters is that you have the right rows and columns with all the data.\n\n***Hint:*** refer to the [documentation for `.groupby`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html) and treat each publisher as a group.", "_____no_output_____" ] ], [ [ "# Your code here (create more cells as needed)", "_____no_output_____" ] ], [ [ "The code below checks that you have the correct dataframe structure:", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\n# Checking that you made a dataframe called question_3_df\nassert type(question_3_df) == pd.DataFrame\n\n# Checking the shape\nassert question_3_df.shape == (167, 3)\n\n# Checking the column names\nassert sorted(list(question_3_df.columns)) == ['DC Comics', 'Marvel Comics', 'Superpower Name']", "_____no_output_____" ] ], [ [ "### Answering the Question\n\nThe code below uses the dataframe you created to find and plot the most common superpowers in Marvel Comics and DC Comics.", "_____no_output_____" ] ], [ [ "# Run this cell without changes\n\nmarvel_most_common = question_3_df.drop(\"DC Comics\", axis=1)\nmarvel_most_common = marvel_most_common.sort_values(by=\"Marvel Comics\", ascending=False)[:5]\nmarvel_most_common", "_____no_output_____" ], [ "# Run this cell without changes\n\ndc_most_common = question_3_df.drop(\"Marvel Comics\", axis=1)\ndc_most_common = dc_most_common.sort_values(by=\"DC Comics\", ascending=False)[:5]\ndc_most_common", "_____no_output_____" ], [ "# Run this cell without changes\n\nfig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15, 5))\n\nax1.bar(\n x=marvel_most_common[\"Superpower Name\"],\n height=marvel_most_common[\"Marvel Comics\"]\n)\nax2.bar(\n x=dc_most_common[\"Superpower Name\"],\n height=dc_most_common[\"DC Comics\"]\n)\n\nax1.set_ylabel(\"Count of Superheroes\")\nax2.set_ylabel(\"Count of Superheroes\")\nax1.set_title(\"Frequency of Top Superpowers in Marvel Comics\")\nax2.set_title(\"Frequency of Top Superpowers in DC Comics\");", "_____no_output_____" ] ], [ [ "It looks like super strength is the most popular power in both Marvel Comics and DC Comics. Overall, the top 5 powers are fairly similar — 4 out of 5 overlap, although Marvel contains agility whereas DC contains flight.", "_____no_output_____" ], [ "## 5. Formulate and Answer Your Own Question\n\nFor the remainder of this lab, you'll be focusing on coming up with and answering your own question, just like we did above. Your question should not be overly simple, and should require both descriptive statistics and data visualization to answer. In case you're unsure of what questions to ask, some sample questions have been provided below.\n\nPick one of the following questions to investigate and answer, or come up with one of your own!\n\n* Which powers have the highest chance of co-occurring in a hero (e.g. 
super strength and flight)?\n* What is the distribution of skin colors amongst alien heroes?\n* How are eye color and hair color related in this dataset?\n\nExplain your question below:", "_____no_output_____" ] ], [ [ "# Replace None with appropriate text:\n\"\"\"\nNone\n\"\"\"", "_____no_output_____" ] ], [ [ "Some sample cells have been provided to give you room to work. Feel free to create more cells as needed.\n\nBe sure to include thoughtful, well-labeled visualizations to back up your analysis!", "_____no_output_____" ], [ "## Summary\n\nIn this lab, you demonstrated your mastery of using pandas to clean and aggregate data in order to answer several business questions. This included identifying and handling missing values, text requiring preprocessing, and invalid values. You also performed aggregation and reshaping tasks such as transposing, joining, and grouping data. Great job, there was a lot here!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4af62237eece87b6a0a1312becb099f78e680daf
42,938
ipynb
Jupyter Notebook
aoc25.ipynb
deleter8/aoc2019
109a46c313398a2feafdc704a9e15978ecb6aa2d
[ "Unlicense" ]
null
null
null
aoc25.ipynb
deleter8/aoc2019
109a46c313398a2feafdc704a9e15978ecb6aa2d
[ "Unlicense" ]
null
null
null
aoc25.ipynb
deleter8/aoc2019
109a46c313398a2feafdc704a9e15978ecb6aa2d
[ "Unlicense" ]
null
null
null
38.099379
17,563
0.500978
[ [ [ "import math\n\ndef init_computer(code, inputs):\n return {\n 'mem': code.copy(),\n 'mem_size': len(code),\n 'extend_mem' : {},\n 'inst': 0,\n 'rel': 0,\n 'inputs': inputs.copy(),\n 'outputs': [],\n 'halt': False,\n 'needs_input': False\n }\n\ndef read_mem(computer, pos):\n if(pos >= computer['mem_size']):\n if(pos in computer['extend_mem']):\n return computer['extend_mem'][pos]\n else:\n return 0\n else:\n return computer['mem'][pos]\n\ndef write_mem(computer, pos, val):\n if(pos < 0):\n print(\"invalid mem pos %i\" % pos)\n return\n if(pos >= computer['mem_size']):\n computer['extend_mem'][pos] = val\n else:\n computer['mem'][pos] = val\n\n\ndef run(computer):\n code_size = len(computer['mem'])\n i = computer['inst']\n outputs = []\n op_info = {1:3, 2:3, 3:1, 4:1, 5:2, 6:2, 7:3, 8:3, 9:1, 99:0}\n computer['needs_input'] = False\n while(True):\n op = read_mem(computer, i)\n opcode = op % 100\n if(not(opcode in op_info)):\n print(\"error unknown opcode %i\" % (opcode))\n computer['needs_input'] = False\n break\n a0 = -1\n a1 = -1\n a2 = -1\n jump = False\n if(op_info[opcode] > 0):\n p_mode = (math.floor(op / 100) % 10)\n if( p_mode == 0 ):\n #position mode (pointer)\n a0 = read_mem(computer, i + 1)\n elif( p_mode == 1 ):\n #immediate mode (value)\n a0 = i + 1\n elif( p_mode == 2 ):\n #relative mode\n a0 = read_mem(computer, i + 1) + computer['rel']\n if(op_info[opcode] > 1):\n p_mode = (math.floor(op / 1000) % 10)\n if( p_mode == 0 ):\n #position mode (pointer)\n a1 = read_mem(computer, i + 2)\n elif( p_mode == 1 ):\n #immediate mode (value)\n a1 = i + 2\n elif( p_mode == 2 ):\n #relative mode\n a1 = read_mem(computer, i + 2) + computer['rel']\n if(op_info[opcode] > 2):\n p_mode = (math.floor(op / 10000) % 10)\n if( p_mode == 0 ):\n #position mode (pointer)\n a2 = read_mem(computer, i + 3)\n elif( p_mode == 1 ):\n #immediate mode (value)\n a2 = i + 3\n elif( p_mode == 2 ):\n #relative mode\n a2 = read_mem(computer, i + 3) + computer['rel']\n if(opcode == 1):\n #add op\n write_mem(computer, a2, read_mem(computer, a0) + read_mem(computer, a1))\n elif(opcode == 2):\n #mult op\n write_mem(computer, a2, read_mem(computer, a0) * read_mem(computer, a1))\n elif(opcode == 3):\n #read op\n if(len(computer['inputs']) == 0):\n computer['needs_input'] = True\n break\n write_mem(computer, a0, computer['inputs'][0])\n computer['inputs'] = computer['inputs'][1:]\n elif(opcode == 4):\n outputs.append(read_mem(computer, a0))\n elif(opcode == 5):\n #jump if true op\n if(read_mem(computer, a0) != 0):\n jump = True\n i = read_mem(computer, a1)\n elif(opcode == 6):\n #jump if false op\n if(read_mem(computer, a0) == 0):\n jump = True\n i = read_mem(computer, a1)\n elif(opcode == 7):\n #check less than op\n write_mem(computer, a2, 1 if(read_mem(computer, a0) < read_mem(computer, a1)) else 0)\n elif(opcode == 8):\n #check equals op\n write_mem(computer, a2, 1 if(read_mem(computer, a0) == read_mem(computer, a1)) else 0)\n elif(opcode == 9):\n #change relative param op\n computer['rel'] = computer['rel'] + read_mem(computer, a0)\n elif(opcode == 99):\n #halt op\n computer['halt'] = True\n computer['needs_input'] = False\n break\n if(not(jump)):\n i = i + op_info[opcode] + 1\n if(i >= code_size):\n print('exiting b/c end of code reached')\n computer['needs_input'] = False\n computer['outputs'] = outputs\n computer['inst'] = i\n \n return computer\n", "_____no_output_____" ], [ "code = 
[109,4797,21101,3124,0,1,21101,13,0,0,1105,1,1424,21101,0,166,1,21102,1,24,0,1105,1,1234,21102,1,31,0,1105,1,1984,1105,1,13,6,4,3,2,52,51,21,4,28,56,55,3,19,-9,-10,47,89,88,90,90,6,77,73,85,71,1,76,68,63,65,22,-27,70,76,81,87,5,105,105,107,108,95,4,97,92,109,109,5,110,105,110,108,95,4,115,96,109,109,13,-3,59,101,85,92,97,13,84,80,92,78,34,-15,26,-16,46,88,72,79,84,0,72,76,-3,85,74,79,75,-8,64,68,75,57,65,70,64,66,72,8,-41,32,-22,56,77,82,-4,60,76,62,70,-2,74,-11,55,52,68,67,73,56,60,52,-20,44,56,66,-24,48,58,42,49,54,-16,-53,10,0,56,99,96,95,82,94,83,45,-9,23,-13,61,85,88,74,71,82,73,79,73,89,67,65,-4,62,73,70,69,56,68,57,2,-35,24,-14,64,85,90,4,70,67,79,7,83,-2,68,75,-5,78,65,57,75,-10,76,53,76,0,-37,31,-21,57,78,83,-3,64,74,72,0,76,-9,73,58,57,-13,70,57,49,67,-18,54,64,48,55,-23,48,44,56,42,-14,-51,14,-4,74,95,100,14,97,77,86,79,9,92,79,75,5,27,-17,61,82,87,1,68,78,76,4,80,-5,66,58,78,60,-10,73,60,52,70,-15,57,67,51,58,-6,-43,14,-4,74,95,100,14,81,94,90,90,9,92,79,75,5,60,-50,23,42,38,-32,38,39,30,42,47,-38,30,36,28,25,41,38,34,31,18,23,29,19,33,-52,20,29,-55,27,27,27,8,15,-61,22,16,-64,24,13,18,-54,-69,-70,-14,7,12,-74,-8,-11,1,-71,5,-80,-4,-3,3,-15,-84,-85,-109,29,-19,59,80,85,-1,82,62,71,64,-6,77,64,60,-10,62,66,57,59,63,57,67,51,-19,56,58,57,57,-10,-47,44,-34,39,58,54,-16,60,61,57,64,48,56,-23,52,40,60,38,-28,44,53,-31,55,32,55,-35,48,42,41,-39,32,38,42,-42,-44,12,33,38,-48,28,19,25,32,-52,-76,-77,59,-49,13,55,-30,42,51,-33,49,50,32,31,31,39,36,48,-42,24,35,32,34,29,21,35,19,25,37,-53,14,10,26,18,-57,-59,-3,18,23,-63,1,17,3,-67,1,-4,14,-2,6,-73,-8,14,-76,-12,-78,-40,2,4,-13,-82,-106,-107,35,-25,53,74,79,0,74,60,-10,65,53,72,64,52,56,52,50,-19,53,57,62,56,-24,58,54,38,39,40,-29,-31,2,56,35,-34,-58,-59,138,-128,-74,-108,-33,-31,-26,-44,-101,-114,-33,-37,-51,-39,-35,-47,-54,-122,-37,-45,-52,-59,-58,-128,-46,-65,-42,-49,-133,-132,-102,-60,-68,-56,-55,-139,-141,-106,-61,-65,-72,-78,-64,-148,-70,-72,-151,-68,-81,-81,-72,-156,-74,-86,-86,-80,-161,-97,-81,-95,-165,-94,-98,-103,-83,-97,-102,-90,-173,-90,-103,-111,-99,-178,-95,-108,-112,-182,-115,-115,-101,-117,-120,-104,-120,-122,-191,-106,-128,-118,-110,-127,-196,-196,-199,-135,-123,-134,-203,-115,-126,-121,-207,-143,-127,-141,-211,-143,-139,-145,-148,-132,-148,-150,-219,-154,-156,-155,-148,-224,-141,-147,-227,-144,-157,-161,-231,-165,-161,-165,-168,-161,-157,-159,-166,-162,-157,-228,-265,138,-128,-74,-108,-33,-31,-26,-44,-101,-114,-33,-37,-51,-39,-35,-47,-54,-122,-37,-45,-52,-59,-58,-128,-46,-65,-42,-49,-133,-132,-102,-60,-68,-56,-55,-139,-141,-106,-61,-65,-72,-78,-64,-148,-70,-72,-151,-68,-81,-81,-72,-156,-74,-86,-86,-80,-161,-97,-81,-95,-165,-90,-94,-97,-97,-86,-102,-90,-173,-90,-103,-111,-99,-178,-95,-108,-112,-182,-115,-115,-101,-117,-120,-104,-120,-122,-191,-106,-128,-118,-110,-127,-196,-196,-199,-135,-123,-134,-203,-115,-126,-121,-207,-143,-127,-141,-211,-143,-139,-145,-148,-132,-148,-150,-219,-154,-156,-155,-148,-224,-141,-147,-227,-144,-157,-161,-231,-165,-161,-165,-168,-161,-157,-159,-166,-162,-157,-228,-265,263,-253,-199,-233,-158,-156,-151,-169,-226,-239,-158,-162,-176,-164,-160,-172,-179,-247,-162,-170,-177,-184,-183,-253,-171,-190,-167,-174,-258,-257,-227,-183,-197,-187,-175,-182,-193,-184,-268,-202,-191,-194,-192,-197,-205,-191,-207,-276,-278,-222,-201,-196,-282,-206,-219,-196,-286,-207,-206,-210,-223,-222,-223,-225,-280,-293,-296,-232,-220,-231,-300,-212,-223,-218,-304,-236,-228,-223,-239,-227,-310,-227,-240,-244,-314,-248,-237,-250,-243,-239,-247,-237,-308,-345,-273,-260,-248,-243,-263,-329,-252,-252,-248,-260,-267,-266,-253,
-337,-249,-260,-255,-259,-342,-260,-267,-280,-270,-271,-348,-281,-268,-272,-279,-285,-342,-355,-280,-278,-279,-284,-277,-361,-282,-278,-274,-275,-290,-298,-300,-369,-300,-292,-290,-373,-309,-375,-299,-298,-301,-310,-302,-297,-370,-383,-302,-316,-321,-311,-315,-299,-321,-308,-392,-306,-322,-330,-312,-397,-326,-334,-317,-401,-330,-338,-324,-325,-337,-329,-339,-341,-398,-411,-347,-335,-346,-415,-334,-352,-350,-346,-341,-338,-422,-334,-345,-340,-344,-427,-345,-357,-357,-351,-432,-365,-361,-353,-367,-370,-354,-363,-351,-427,-464,-441,-397,-373,-434,-447,-376,-380,-374,-375,-373,-452,-454,-398,-377,-372,-458,-376,-388,-382,-377,-387,-396,-465,-400,-398,-468,-404,-404,-395,-403,-473,-390,-396,-476,-406,-409,-395,-480,-408,-404,-483,-418,-396,-486,-403,-399,-409,-417,-413,-421,-493,37,-5,73,71,-8,75,62,58,-12,62,55,74,64,48,50,-19,45,63,-22,61,48,44,-26,50,37,44,48,-31,33,40,48,41,43,30,37,-25,-38,-63,0,0,109,7,21101,0,0,-2,22208,-2,-5,-1,1205,-1,1169,22202,-2,-4,1,22201,1,-6,1,21202,-2,1,2,21101,0,1162,0,2106,0,-3,21201,-2,1,-2,1106,0,1136,109,-7,2106,0,0,109,6,2102,1,-5,1181,21002,0,1,-2,21102,0,1,-3,21201,-5,1,-5,22208,-3,-2,-1,1205,-1,1229,2201,-5,-3,1204,21001,0,0,1,22101,0,-3,2,21201,-2,0,3,21102,1222,1,0,2106,0,-4,21201,-3,1,-3,1105,1,1192,109,-6,2105,1,0,109,2,21201,-1,0,1,21101,0,1256,2,21101,0,1251,0,1106,0,1174,109,-2,2106,0,0,109,5,22201,-4,-3,-1,22201,-2,-1,-1,204,-1,109,-5,2106,0,0,109,3,2101,0,-2,1280,1006,0,1303,104,45,104,32,1201,-1,66,1291,21001,0,0,1,21101,0,1301,0,1106,0,1234,104,10,109,-3,2106,0,0,0,0,109,2,2102,1,-1,1309,1102,0,1,1308,21101,4601,0,1,21101,13,0,2,21102,1,4,3,21102,1353,1,4,21102,1,1343,0,1106,0,1130,21001,1308,0,-1,109,-2,2105,1,0,59,109,3,1202,-2,1,1360,20008,0,1309,-1,1206,-1,1419,1005,1308,1398,1101,0,1,1308,21008,1309,-1,-1,1206,-1,1387,21101,0,106,1,1105,1,1391,21102,1,92,1,21102,1,1398,0,1105,1,1234,104,45,104,32,1201,-2,1,1408,20101,0,0,1,21101,1417,0,0,1106,0,1234,104,10,109,-3,2106,0,0,109,3,1201,-2,0,1128,21102,34,1,1,21101,0,1441,0,1106,0,1234,1001,1128,0,1447,20101,0,0,1,21102,1,1456,0,1105,1,1234,21102,41,1,1,21102,1,1467,0,1106,0,1234,1001,1128,1,1473,20101,0,0,1,21102,1,1482,0,1106,0,1234,21101,0,46,1,21101,1493,0,0,1105,1,1234,21001,1128,3,1,21101,0,4,2,21102,1,1,3,21102,1273,1,4,21101,1516,0,0,1105,1,1130,20102,1,1128,1,21101,1527,0,0,1106,0,1310,1001,1128,2,1532,21002,0,1,-1,1206,-1,1545,21101,0,1545,0,2106,0,-1,109,-3,2106,0,0,109,0,99,109,2,1102,0,1,1550,21101,4601,0,1,21102,13,1,2,21101,4,0,3,21102,1664,1,4,21102,1582,1,0,1105,1,1130,2,2486,1352,1551,1101,0,0,1552,20102,1,1550,1,21102,1,33,2,21101,1702,0,3,21101,0,1609,0,1105,1,2722,21007,1552,0,-1,1205,-1,1630,20107,0,1552,-1,1205,-1,1637,21101,1630,0,0,1106,0,1752,21102,548,1,1,1105,1,1641,21102,687,1,1,21102,1648,1,0,1106,0,1234,21102,4457,1,1,21102,1659,1,0,1105,1,1424,109,-2,2106,0,0,109,4,21202,-2,-1,-2,2101,0,-3,1675,21008,0,-1,-1,1206,-1,1697,1201,-3,2,1687,20101,-27,0,-3,22201,-3,-2,-3,2001,1550,-3,1550,109,-4,2105,1,0,109,5,21008,1552,0,-1,1206,-1,1747,1201,-3,1901,1716,21002,0,1,-2,1205,-4,1736,20207,-2,1551,-1,1205,-1,1747,1102,1,-1,1552,1106,0,1747,22007,1551,-2,-1,1205,-1,1747,1101,1,0,1552,109,-5,2105,1,0,109,1,21102,826,1,1,21101,0,1765,0,1106,0,1234,20101,0,1550,1,21102,1776,1,0,1106,0,2863,21102,1,1090,1,21101,1787,0,0,1105,1,1234,99,1106,0,1787,109,-1,2106,0,0,109,1,21102,512,1,1,21101,1809,0,0,1106,0,1234,99,1105,1,1809,109,-1,2105,1,0,109,1,1101,0,1,1129,109,-1,2105,1,0,109,1,21102,1,377,1,21101,0,1842,0,1105,1,1234,1105,1,1831,109,-1,2105,1,0,109,1,21101,0,407,1,21101,
1863,0,0,1105,1,1234,99,1106,0,1863,109,-1,2106,0,0,109,1,21101,0,452,1,21102,1,1885,0,1105,1,1234,99,1106,0,1885,109,-1,2105,1,0,1941,1947,1953,1958,1965,1972,1978,2623,2514,3150,2746,2962,2854,2944,3059,3164,2869,2566,2522,3029,2456,2878,2436,3145,3193,2644,2715,2828,2566,2960,3035,2621,2719,2648,2890,2605,2797,2544,2890,2837,2281,2468,2418,2450,2487,2125,2505,5,95,108,104,104,23,5,96,91,108,108,1,4,101,105,112,3,6,104,104,106,107,94,-1,6,109,104,109,107,94,-1,5,111,91,100,93,23,5,114,95,108,108,1,109,3,21102,1993,1,0,1105,1,2634,1006,1129,2010,21101,316,0,1,21101,2007,0,0,1105,1,1234,1106,0,2076,21102,1,0,-1,1201,-1,1894,2020,20101,0,0,1,21101,0,0,2,21101,0,0,3,21101,0,2037,0,1105,1,2525,1206,1,2054,1201,-1,1934,2050,21102,2051,1,0,106,0,0,1106,0,2076,21201,-1,1,-1,21207,-1,7,-2,1205,-2,2014,21102,177,1,1,21102,1,2076,0,1106,0,1234,109,-3,2106,0,0,109,3,2001,1128,-2,2089,20101,0,0,-1,1205,-1,2108,21101,0,201,1,21102,1,2105,0,1105,1,1234,1105,1,2119,22102,1,-1,1,21102,2119,1,0,1105,1,1424,109,-3,2106,0,0,0,109,1,1102,1,0,2124,21102,1,4601,1,21102,13,1,2,21102,4,1,3,21101,0,2173,4,21102,2154,1,0,1106,0,1130,1005,2124,2168,21102,1,226,1,21102,1,2168,0,1105,1,1234,109,-1,2105,1,0,109,3,1005,2124,2275,1201,-2,0,2183,20008,0,1128,-1,1206,-1,2275,1201,-2,1,2194,21002,0,1,-1,22101,0,-1,1,21101,0,5,2,21102,1,1,3,21101,2216,0,0,1105,1,2525,1206,1,2275,21102,1,258,1,21101,2230,0,0,1106,0,1234,21201,-1,0,1,21101,0,2241,0,1106,0,1234,104,46,104,10,1102,1,1,2124,1201,-2,0,2256,1102,1,-1,0,1201,-2,3,2262,21002,0,1,-1,1206,-1,2275,21101,2275,0,0,2105,1,-1,109,-3,2106,0,0,0,109,1,1101,0,0,2280,21101,0,4601,1,21101,13,0,2,21101,0,4,3,21102,2329,1,4,21101,2310,0,0,1105,1,1130,1005,2280,2324,21102,1,273,1,21101,0,2324,0,1105,1,1234,109,-1,2106,0,0,109,3,1005,2280,2413,1201,-2,0,2339,21008,0,-1,-1,1206,-1,2413,1201,-2,1,2350,21001,0,0,-1,21202,-1,1,1,21101,0,5,2,21101,1,0,3,21102,2372,1,0,1106,0,2525,1206,1,2413,21102,1,301,1,21101,2386,0,0,1105,1,1234,21202,-1,1,1,21101,0,2397,0,1106,0,1234,104,46,104,10,1101,1,0,2280,1201,-2,0,2412,1002,1128,1,0,109,-3,2106,0,0,109,1,21101,-1,0,1,21101,0,2431,0,1105,1,1310,1205,1,2445,21102,133,1,1,21101,2445,0,0,1105,1,1234,109,-1,2105,1,0,109,1,21101,3,0,1,21102,2463,1,0,1105,1,2081,109,-1,2106,0,0,109,1,21101,0,4,1,21101,2481,0,0,1106,0,2081,109,-1,2106,0,0,53,109,1,21102,1,5,1,21102,2500,1,0,1105,1,2081,109,-1,2106,0,0,109,1,21101,6,0,1,21102,1,2518,0,1105,1,2081,109,-1,2106,0,0,0,0,109,5,2101,0,-3,2523,1101,0,1,2524,21201,-4,0,1,21101,0,2585,2,21102,2550,1,0,1106,0,1174,1206,-2,2576,1202,-4,1,2558,2001,0,-3,2566,101,3094,2566,2566,21008,0,-1,-1,1205,-1,2576,1101,0,0,2524,20101,0,2524,-4,109,-5,2105,1,0,109,5,22201,-4,-3,-4,22201,-4,-2,-4,21208,-4,10,-1,1206,-1,2606,21101,-1,0,-4,201,-3,2523,2615,1001,2615,3094,2615,21001,0,0,-1,22208,-4,-1,-1,1205,-1,2629,1101,0,0,2524,109,-5,2106,0,0,109,4,21101,0,3094,1,21102,1,30,2,21101,0,1,3,21102,1,2706,4,21102,1,2659,0,1105,1,1130,21102,0,1,-3,203,-2,21208,-2,10,-1,1205,-1,2701,21207,-2,0,-1,1205,-1,2663,21207,-3,29,-1,1206,-1,2663,2101,3094,-3,2693,1201,-2,0,0,21201,-3,1,-3,1105,1,2663,109,-4,2106,0,0,109,2,2101,0,-1,2715,1102,-1,1,0,109,-2,2106,0,0,0,109,5,2101,0,-2,2721,21207,-4,0,-1,1206,-1,2739,21102,1,0,-4,22101,0,-4,1,22101,0,-3,2,21102,1,1,3,21102,2758,1,0,1105,1,2763,109,-5,2105,1,0,109,6,21207,-4,1,-1,1206,-1,2786,22207,-5,-3,-1,1206,-1,2786,21202,-5,1,-5,1105,1,2858,21202,-5,1,1,21201,-4,-1,2,21202,-3,2,3,21102,2805,1,0,1106,0,2763,22101,0,1,-5,21102,1,1,-2,22207,-5,-3,-1,1206,-1,2824,21101,0,0,-2,22202,-3,-2,-3,2210
7,0,-4,-1,1206,-1,2850,21201,-2,0,1,21201,-4,-1,2,21102,2850,1,0,105,1,2721,21202,-3,-1,-3,22201,-5,-3,-5,109,-6,2106,0,0,109,3,21208,-2,0,-1,1205,-1,2902,21207,-2,0,-1,1205,-1,2882,1105,1,2888,104,45,21202,-2,-1,-2,21202,-2,1,1,21102,1,2899,0,1105,1,2909,1106,0,2904,104,48,109,-3,2106,0,0,109,4,21201,-3,0,1,21101,0,10,2,21102,2926,1,0,1106,0,3010,22102,1,1,-2,21201,2,0,-1,1206,-2,2948,22102,1,-2,1,21102,2948,1,0,1105,1,2909,22101,48,-1,-1,204,-1,109,-4,2106,0,0,1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768,65536,131072,262144,524288,1048576,2097152,4194304,8388608,16777216,33554432,67108864,134217728,268435456,536870912,1073741824,2147483648,4294967296,8589934592,17179869184,34359738368,68719476736,137438953472,274877906944,549755813888,1099511627776,2199023255552,4398046511104,8796093022208,17592186044416,35184372088832,70368744177664,140737488355328,281474976710656,562949953421312,1125899906842624,109,8,21102,0,1,-4,21101,0,0,-3,21102,51,1,-2,21201,-2,-1,-2,1201,-2,2959,3033,21001,0,0,-1,21202,-3,2,-3,22207,-7,-1,-5,1205,-5,3059,21201,-3,1,-3,22102,-1,-1,-5,22201,-7,-5,-7,22207,-3,-6,-5,1205,-5,3078,22102,-1,-6,-5,22201,-3,-5,-3,22201,-1,-4,-4,1205,-2,3024,21201,-4,0,-7,21202,-3,1,-6,109,-8,2106,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3131,3143,0,3348,3252,3390,0,11,61,105,95,94,17,50,97,83,78,79,83,108,-19,2,7,-79,-9,-2,2,-83,-11,-7,-86,-3,-16,-7,-11,-6,-21,-21,-94,-30,-96,-25,-19,-23,-31,-101,-29,-25,-104,-21,-34,-38,-108,-39,-34,-32,-33,-31,-114,-43,-47,-35,-49,-105,-120,-69,-43,-123,-49,-56,-57,-47,-128,-40,-51,-46,-50,-133,-51,-63,-63,-57,-138,-69,-58,-62,-65,-143,-79,-69,-63,-68,-148,-79,-68,-82,-83,-63,-81,-77,-85,-145,-158,-75,-88,-92,-162,-91,-85,-89,-97,-167,-96,-104,-87,-171,-106,-104,-105,-97,-176,-94,-109,-114,-104,-112,-114,-169,3259,3268,0,0,3609,3832,3124,8,59,102,104,103,93,87,97,99,79,5,24,20,-50,26,17,31,11,21,-56,30,7,17,16,22,-62,2,14,3,-66,17,4,0,-70,6,-3,11,-9,1,-76,-7,-2,0,-1,1,-82,-18,-2,-16,-86,-4,-12,-16,-19,-19,-8,-17,-5,-95,-28,-24,-28,-29,-31,-19,-33,-25,-20,-105,-39,-28,-32,-30,-28,-28,-98,-113,-67,-33,-116,-52,-36,-50,-120,-37,-50,-54,-35,-94,3355,3363,0,3445,0,3124,0,7,68,97,107,89,93,89,97,26,43,91,73,85,91,85,72,72,76,68,3,78,-6,63,74,60,59,79,57,0,54,67,57,52,50,-5,3397,3404,0,3124,0,0,3517,6,59,107,91,88,90,90,40,38,70,68,58,-12,66,56,-15,68,55,51,-19,47,44,44,50,54,44,58,56,-28,54,39,38,45,-33,50,44,-36,35,27,47,29,-41,38,36,43,24,36,-33,3452,3461,0,0,3882,3348,0,8,72,88,105,104,85,90,87,100,55,29,48,44,63,-20,54,40,-30,34,-32,43,39,49,48,39,31,-39,44,46,31,40,40,44,-46,18,30,19,-50,32,32,12,28,29,17,21,13,-59,24,18,-62,13,15,14,9,-67,-3,7,6,-71,-7,3,-1,0,-7,-63,3524,3532,0,0,3390,0,3698,7,65,89,99,98,108,85,108,76,8,27,27,36,-48,16,32,18,13,-53,18,10,27,-57,8,10,9,17,-62,16,16,19,7,10,5,21,-1,-3,-72,-3,5,7,-76,6,1,-2,-11,3,-10,-10,-6,-14,-59,-87,1,-10,-5,-84,-10,-24,-94,-21,-11,-14,-14,-99,-22,-22,-18,-103,-23,-20,-33,-23,-39,-109,-27,-26,-30,-44,-114,-28,-44,-52,-34,-105,3616,3625,0,0,3763,0,3252,8,75,96,89,96,20,53,83,106,72,11,44,38,37,35,37,38,36,-48,17,29,33,20,-53,-4,14,12,-44,-12,20,23,8,6,-63,-14,4,7,11,0,0,-1,11,-72,4,-5,-7,-3,-10,-5,-1,-11,-81,-17,-5,-16,-85,-4,-18,-17,-4,-14,-26,-10,-93,-12,-26,-23,-19,-30,-30,-31,-19,-102,-26,-35,-37,-33,-40,-35,-31,-41,-97,3705,3728,0,0,3517,0,0,22,65,74,90,87,6,41,86,76,88,70,0,44,63,70,74,79,63,71,57,69,57,58,34,39,81,-4,60,74,73,61,56,72,72,-12,71,65,-15,50,52,-18,68,59,61,53,50,54,46,-26,51,51,53,47,34,44,43,55,-21,3770,3791,0,4062,0,0,3609,20,51,84,80,93,8,62,
88,70,84,83,75,79,71,-1,33,66,74,79,63,75,40,32,70,77,-11,57,63,69,54,-16,51,61,-19,69,58,63,-23,63,57,39,53,-28,51,52,38,51,36,44,49,47,-37,41,39,-40,43,30,26,-44,26,33,-16,3839,3847,0,3252,3998,0,0,7,76,108,88,88,97,89,102,34,48,66,69,73,62,62,61,73,3,72,61,77,55,53,-2,-17,34,53,49,68,-15,59,45,-25,39,49,48,-29,39,46,48,51,55,-21,3889,3912,0,0,3941,0,3445,22,50,88,92,7,41,77,83,70,81,77,65,83,67,-3,34,74,79,71,76,56,63,67,28,55,82,79,70,72,78,85,9,-4,68,78,0,75,-9,73,73,61,63,62,-15,71,62,64,56,53,57,49,-9,3948,3962,0,0,0,0,3882,13,54,100,86,103,15,63,98,77,93,94,78,90,90,35,49,68,64,-6,59,61,59,73,-11,53,69,55,-15,49,59,58,-19,64,58,57,-23,59,52,39,49,48,-29,40,48,50,-33,55,44,49,-23,4005,4013,0,0,4309,0,3832,7,76,108,102,104,86,91,88,48,36,55,51,-19,46,58,66,46,59,-25,48,58,55,55,-30,36,47,45,50,30,37,41,-38,38,39,41,27,-43,22,34,42,22,35,-35,-50,-51,-2,16,13,30,26,26,15,27,9,15,27,-49,4069,4081,0,4133,0,3763,0,11,58,98,90,91,95,85,84,96,86,90,82,51,38,59,64,-22,60,45,44,-26,38,-28,58,42,42,52,36,32,44,29,45,30,-39,47,32,42,29,-44,35,30,18,30,34,-50,19,27,29,-54,-4,24,25,15,19,11,7,20,16,9,3,-66,19,-50,-55,4140,4151,0,0,4229,4062,0,10,68,86,106,92,89,82,100,88,93,91,77,6,38,18,36,36,33,-25,-52,-2,30,27,9,21,10,10,8,-47,-62,-15,12,4,-1,16,1,-69,13,14,8,7,2,14,-76,0,-9,-14,3,4,0,-14,-7,-16,-8,-3,-5,-89,-20,-9,-13,-16,-94,-25,-23,-27,-14,-10,-100,-18,-18,-38,-22,-22,-106,-23,-29,-109,-28,-42,-45,-48,-38,-42,-50,-35,-53,-35,-51,-107,4236,4248,0,4384,0,0,4133,11,68,86,102,87,99,102,80,98,92,94,100,60,24,43,39,51,37,-33,31,47,33,-37,27,-39,30,28,45,-43,40,24,30,22,35,18,29,29,17,30,-27,-55,28,15,11,30,-53,21,7,-63,1,11,10,-67,-2,10,6,13,-3,-5,-74,-7,3,10,0,-67,-80,3,-10,-4,1,-14,-14,-73,4316,4328,0,0,0,0,3998,11,72,87,92,87,95,83,84,14,57,77,77,55,34,55,60,-26,56,41,40,-30,38,54,40,34,34,42,30,31,-39,32,28,40,26,-44,34,24,-47,32,33,29,33,27,31,35,25,13,-57,22,20,16,28,15,6,18,-65,2,2,15,4,1,7,-72,14,5,7,-1,-63,4391,4400,0,4457,0,4229,0,8,64,102,98,100,88,88,85,92,56,27,54,51,42,51,49,39,-31,51,36,35,42,47,-37,46,40,-40,31,23,43,25,-45,30,22,22,35,-50,22,32,-53,25,23,-56,27,14,10,-60,-22,11,2,14,19,-66,-28,14,4,-2,-71,11,-4,10,9,-3,1,-7,-65,4464,4484,0,0,0,4384,4556,19,64,81,78,95,91,81,91,95,5,39,75,71,68,75,79,77,70,74,79,71,2,38,-41,42,29,25,-45,32,22,40,35,-50,31,27,26,23,-43,-56,8,-58,21,22,8,21,20,21,17,3,-54,15,0,8,12,1,11,-1,11,-7,-77,-8,-3,-1,-2,0,-83,3,-12,-10,-11,-88,-3,-21,-9,-19,-23,-5,-95,-7,-18,-13,-17,-100,-28,-34,-34,-26,-21,-33,-23,-19,-95,4563,4588,1553,0,4457,0,0,24,56,89,75,88,87,88,84,70,13,50,67,75,79,68,78,66,78,60,-10,27,64,66,65,67,12,53,97,83,93,105,105,87,91,83,25,24,23,3252,4653,2075,0,3998,4662,28,1850,3390,4674,29,1829,3698,4688,16777246,0,4309,4699,31,1872,3348,4707,32,1796,4384,4718,97,0,3609,4728,1073741858,0,3941,4737,2097187,0,3517,4742,37,0,3763,4752,32805,0,4229,4764,65574,0,3882,4777,39,1818,8,103,105,100,86,97,88,96,101,11,98,99,95,102,86,94,15,90,78,98,76,13,92,96,87,89,93,87,97,81,11,86,88,87,87,10,91,86,103,103,87,99,16,84,85,84,7,105,96,102,106,100,98,102,10,91,104,87,84,98,86,16,95,93,81,9,95,111,101,89,101,85,102,82,84,8,96,102,98,100,91,101,83,94,4,95,92,101,94,9,93,107,90,96,19,85,86,92,91,11,89,85,101,93,17,93,80,98,97,81,93,12,95,95,87,90,94,15,80,92,96,95,86,78,19,84,85,76,88,93,8,76,82,74,71,87,84,80,77,64,69,75,65,79]\ncomputer = init_computer(code, [])\nrun(computer)\nmapstr = ''.join(chr(i) for i in computer['outputs'][:-1])\n", "_____no_output_____" ], [ "print(mapstr)\ncomputer['outputs'] = []", "\n\n\n== Hull Breach ==\nYou got in 
through a hole in the floor here. To keep your ship from also freezing, the hole has been sealed.\n\nDoors here lead:\n- north\n- east\n- south\n\nCommand?\n" ], [ "def run_command(computer, command):\n computer['inputs'] = [ord(c) for c in command + '\\n']\n run(computer)\n print(''.join(chr(i) for i in computer['outputs'][:-1]))\n computer['outputs'] = []", "_____no_output_____" ], [ "run_command(computer, 'north')", "\n\n\n== Kitchen ==\nEverything's freeze-dried.\n\nDoors here lead:\n- north\n- south\n\nItems here:\n- escape pod\n\nCommand?\n" ], [ "run_command(computer, 'north')", "\n\n\n== Passages ==\nThey're a little twisty and starting to look all alike.\n\nDoors here lead:\n- east\n- south\n\nCommand?\n" ], [ "run_command(computer, 'east')", "\n\n\n== Hot Chocolate Fountain ==\nSomehow, it's still working.\n\nDoors here lead:\n- east\n- west\n\nItems here:\n- giant electromagnet\n\nCommand?\n" ], [ "run_command(computer, 'east')", "\n\n\n== Crew Quarters ==\nThe beds are all too small for you.\n\nDoors here lead:\n- west\n\nItems here:\n- cake\n\nCommand?\n" ], [ "run_command(computer, 'take cake')", "\nYou take the cake.\n\nCommand?\n" ], [ "run_command(computer, 'west')", "\n\n\n== Hot Chocolate Fountain ==\nSomehow, it's still working.\n\nDoors here lead:\n- east\n- west\n\nItems here:\n- giant electromagnet\n\nCommand?\n" ], [ "run_command(computer, 'west')", "\n\n\n== Passages ==\nThey're a little twisty and starting to look all alike.\n\nDoors here lead:\n- east\n- south\n\nCommand?\n" ], [ "run_command(computer, 'south')", "\n\n\n== Kitchen ==\nEverything's freeze-dried.\n\nDoors here lead:\n- north\n- south\n\nItems here:\n- escape pod\n\nCommand?\n" ], [ "run_command(computer, 'south')", "\n\n\n== Hull Breach ==\nYou got in through a hole in the floor here. To keep your ship from also freezing, the hole has been sealed.\n\nDoors here lead:\n- north\n- east\n- south\n\nCommand?\n" ], [ "run_command(computer, 'east')", "\n\n\n== Corridor ==\nThe metal walls and the metal floor are slightly different colors. Or are they?\n\nDoors here lead:\n- east\n- south\n- west\n\nItems here:\n- ornament\n\nCommand?\n" ], [ "run_command(computer, 'take ornament')", "\nYou take the ornament.\n\nCommand?\n" ], [ "run_command(computer, 'east')", "\n\n\n== Sick Bay ==\nSupports both Red-Nosed Reindeer medicine and regular reindeer medicine.\n\nDoors here lead:\n- east\n- west\n\nItems here:\n- hologram\n\nCommand?\n" ], [ "run_command(computer, 'take hologram')", "\nYou take the hologram.\n\nCommand?\n" ], [ "run_command(computer, 'east')", "\n\n\n== Gift Wrapping Center ==\nHow else do you wrap presents on the go?\n\nDoors here lead:\n- north\n- west\n\nItems here:\n- dark matter\n\nCommand?\n" ], [ "run_command(computer, 'take dark matter')", "\nYou take the dark matter.\n\nCommand?\n" ], [ "run_command(computer, 'north')", "\n\n\n== Engineering ==\nYou see a whiteboard with plans for Springdroid v2.\n\nDoors here lead:\n- north\n- south\n\nCommand?\n" ], [ "run_command(computer, 'north')", "\n\n\n== Navigation ==\nStatus: Stranded. 
Please supply measurements from fifty stars to recalibrate.\n\nDoors here lead:\n- east\n- south\n\nCommand?\n" ], [ "run_command(computer, 'east')", "\n\n\n== Observatory ==\nThere are a few telescopes; they're all bolted down, though.\n\nDoors here lead:\n- north\n- west\n\nItems here:\n- klein bottle\n\nCommand?\n" ], [ "run_command(computer, 'take klein bottle')", "\nYou take the klein bottle.\n\nCommand?\n" ], [ "run_command(computer, 'north')", "\n\n\n== Holodeck ==\nSomeone seems to have left it on the Giant Grid setting.\n\nDoors here lead:\n- north\n- south\n\nItems here:\n- hypercube\n\nCommand?\n" ], [ "run_command(computer, 'take hypercube')", "\nYou take the hypercube.\n\nCommand?\n" ], [ "run_command(computer, 'north')", "\n\n\n== Security Checkpoint ==\nIn the next room, a pressure-sensitive floor will verify your identity.\n\nDoors here lead:\n- south\n- west\n\nCommand?\n" ], [ "run_command(computer, 'west')", "\n\n\n== Pressure-Sensitive Floor ==\nAnalyzing...\n\nDoors here lead:\n- east\n\nA loud, robotic voice says \"Alert! Droids on this ship are heavier than the detected value!\" and you are ejected back to the checkpoint.\n\n\n\n== Security Checkpoint ==\nIn the next room, a pressure-sensitive floor will verify your identity.\n\nDoors here lead:\n- south\n- west\n\nCommand?\n" ], [ "[ord(c) for c in 'passwordPASSWORD']", "_____no_output_____" ], [ "''.join(chr(i-1) for i in [103,105,100,86,97,88,96,101,11,98,99,95,102,86,94,15,90,78,98,76,13,92,96,87,89,93,87,97,81,11,86,88,87,87,10,91,86,103,103,87,99,16,84,85,84,7,105,96,102,106,100,98,102,10,91,104,87,84,98,86,16,95,93,81,9,95,111,101,89,101,85,102,82,84,8,96,102,98,100,91,101,83,94,4,95,92,101,94,9,93,107,90,96,19,85,86,92,91,11,89,85,101,93,17,93,80,98,97,81,93,12,95,95,87,90,94,15,80,92,96,95,86,78,19,84,85,76,88,93,8,76,82,74,71,87,84,80,77,64,69,75,65,79])", "_____no_output_____" ], [ "computer['extend_mem']", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af6359df545f6f8bd464bceaf7b65168745eb52
4,055
ipynb
Jupyter Notebook
python/general/jupyter/jupyter_data_science_model_notebook_template.ipynb
ChristopherHaydenTodd/ctodd-templates
e98fe838c67f0667fb58a974af65b9c078658a41
[ "MIT" ]
null
null
null
python/general/jupyter/jupyter_data_science_model_notebook_template.ipynb
ChristopherHaydenTodd/ctodd-templates
e98fe838c67f0667fb58a974af65b9c078658a41
[ "MIT" ]
null
null
null
python/general/jupyter/jupyter_data_science_model_notebook_template.ipynb
ChristopherHaydenTodd/ctodd-templates
e98fe838c67f0667fb58a974af65b9c078658a41
[ "MIT" ]
null
null
null
22.403315
104
0.558076
[ [ [ "# Jupyter Data Science Notebook Template", "_____no_output_____" ] ], [ [ "Title : jupyter_data_science_model_notebook_template\nAuthor : Christopher Todd\nCreated_at : 2018-01-01\nUpdated_at : 2018-01-01\nDescription: Jupyter Data Science Notebook Template", "_____no_output_____" ], [ "Purpose:\n Imput the Purpose of the Template Here\n\nSteps:\n - Step 1\n - Step 2\n - Step 3\n - Step 4", "_____no_output_____" ] ], [ [ "# Load the libraries\n\nLoad All Python libraries required to complete tasks", "_____no_output_____" ] ], [ [ "# Python Library Imports\n\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n# import statsmodels.api as sm\nimport sklearn\n\nfrom scipy import stats\nfrom pandas import Series, DataFrame\nfrom pylab import rcParams\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import metrics \nfrom sklearn.metrics import roc_curve, auc,\\\n mean_squared_error, r2_score, classification_report,\\\n accuracy_score, precision_score, recall_score,\\\n confusion_matrix, precision_recall_fscore_support\nimport statsmodels.formula.api as sm\nfrom statsmodels.stats.outliers_influence import\\\n variance_inflation_factor ", "_____no_output_____" ], [ "cd ../", "/Users/ctodd200/Development/Personal Github/ctodd_python_data_science\n" ], [ "# Custom Python Library Imports\n\nfrom config import config\nCONFIGS = config.Config.get(env='prod', caller_info=False)", "_____no_output_____" ] ], [ [ "# Prepare Data\n\nPrepare the Datasets with the following actions:\n - Load into Memory\n - Describe the Data (Look for Patterns)\n - Clean Data", "_____no_output_____" ], [ "# Split Data (Test/Train)\n\nSplit data into X and Y values for train and test datasets", "_____no_output_____" ], [ "# Check Model Accuracy\n\nUtilize the test/OOT datasets to determine the accuracy, percision, recall, and error of the model", "_____no_output_____" ] ] ]
[ "markdown", "raw", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "raw", "raw" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
4af63eec2f6dfe38c9d9a3945bfe352a0cdd22fb
787,808
ipynb
Jupyter Notebook
GradCAM.ipynb
SebastianQuispeNaola/PruebaDeConcepto
52da536c834955750711ab1f132801f47cd3e7be
[ "MIT" ]
null
null
null
GradCAM.ipynb
SebastianQuispeNaola/PruebaDeConcepto
52da536c834955750711ab1f132801f47cd3e7be
[ "MIT" ]
null
null
null
GradCAM.ipynb
SebastianQuispeNaola/PruebaDeConcepto
52da536c834955750711ab1f132801f47cd3e7be
[ "MIT" ]
null
null
null
3,597.296804
780,288
0.964155
[ [ [ "# Gradient Class Activation Map", "_____no_output_____" ] ], [ [ "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport os\nimport pandas as pd\nfrom pocovidnet.evaluate_covid19 import Evaluator\nfrom pocovidnet.grad_cam import GradCAM\nfrom pocovidnet.cam import get_class_activation_map\nfrom pocovidnet.model import get_vgg16_model", "_____no_output_____" ], [ "def convolve_faster(img, kernel):\n \"\"\"\n Convolve a 2d img with a kernel, storing the output in the cell\n corresponding the the left or right upper corner\n :param img: 2d numpy array\n :param kernel: kernel (must have equal size and width)\n :param neg: if neg=0, store in upper left corner, if neg=1,\n store in upper right corner\n :return convolved image of same size\n \"\"\"\n k_size = len(kernel)\n # a = np.pad(img, ((0, k_size-1), (0, k_size-1)))\n padded = np.pad(img, ((k_size//2, k_size//2), (k_size//2, k_size//2)))\n\n s = kernel.shape + tuple(np.subtract(padded.shape, kernel.shape) + 1)\n strd = np.lib.stride_tricks.as_strided\n subM = strd(padded, shape=s, strides=padded.strides * 2)\n return np.einsum('ij,ijkl->kl', kernel, subM)", "_____no_output_____" ], [ "path_crossval = \"./image_cross_val\"\nweights_dir = \"./trained_models/model_0\"\ngt_dict = {\"Reg\":2, \"Pne\":1, \"pne\":1, \"Cov\":0}\n\ngradcam = GradCAM()\n\nall_predictions = []\nheatmap_points, predicted, gt_class, overlays, fnames = [], [], [], [], []\n\nfor fold in range(5):\n # load weights of the respective fold model\n print(\"NEW FOLD\", fold)\n # make sure the variable is cleared\n evaluator = None\n # load weights\n evaluator = Evaluator(weights_dir=\"./trained_models/model_16/val\", ensemble=False, split=fold, model_id=\"vgg_16\", num_classes=3)\n # get all names belonging to this fold\n all_images_arr = []\n gt, name = [], []\n for mod in [\"covid\", \"pneumonia\", \"regular\"]:\n for f in os.listdir(os.path.join(path_crossval, \"split\"+str(fold), mod)):\n if f[0]!=\".\":\n img_loaded = cv2.imread(os.path.join(path_crossval, \"split\"+str(fold), mod, f))\n img_preprocc = evaluator.preprocess(img_loaded)[0]\n gt.append(gt_dict[f[:3]])\n all_images_arr.append(img_preprocc)\n name.append(f)\n all_images_arr = np.array(all_images_arr)\n # predicciones\n print(\"process all images in fold\", fold, \"with shape\", all_images_arr.shape)\n fold_preds = evaluator.models[0].predict(all_images_arr)\n class_idx_per_img = np.argmax(fold_preds, axis=1)\n all_predictions.append(fold_preds)\n \n # heatmap \n for i, img in enumerate(all_images_arr):\n overlay, heatmap = gradcam.explain(img, evaluator.models[0], gt[i], return_map=True, image_weight=1, layer_name=\"block5_conv3\", zeroing=0.65, heatmap_weight=0.25) \n \n overlays.append(overlay.astype(int))\n # convolve with big kernel\n convolved_overlay = convolve_faster(heatmap, np.ones((19,19)))\n x_coord, y_coord = divmod(np.argmax(convolved_overlay.flatten()), len(convolved_overlay[0]))\n \n heatmap_points.append([x_coord, y_coord])\n predicted.append(class_idx_per_img[i])\n gt_class.append(gt[i])\n fnames.append(name[i]) ", "NEW FOLD 0\nModel restored. Class mappings are ['covid', 'pneumonia', 'regular']\nprocess all images in fold 0 with shape (269, 224, 224, 3)\nNEW FOLD 1\nModel restored. Class mappings are ['covid', 'pneumonia', 'regular']\nprocess all images in fold 1 with shape (350, 224, 224, 3)\nNEW FOLD 2\nModel restored. 
Class mappings are ['covid', 'pneumonia', 'regular']\nprocess all images in fold 2 with shape (231, 224, 224, 3)\nNEW FOLD 3\nModel restored. Class mappings are ['covid', 'pneumonia', 'regular']\nprocess all images in fold 3 with shape (305, 224, 224, 3)\nNEW FOLD 4\nModel restored. Class mappings are ['covid', 'pneumonia', 'regular']\nprocess all images in fold 4 with shape (234, 224, 224, 3)\n" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nw=12\nh=15\nfig=plt.figure(figsize=(15, 12))\n#fig.tight_layout(h_pad=20)\ncolumns = 4\nrows = 3\nitem_img = [overlays[4], overlays[7], overlays[16], overlays[97],\n overlays[778], overlays[1041], overlays[1061], overlays[1333],\n overlays[819], overlays[849], overlays[843], overlays[1075]]\n\nitem_img_fp = []\n\nfig, big_axes = plt.subplots(figsize=(15, 12), nrows=3, ncols=1, sharey=True)\nclasses_sp=['saludables', 'con neumonía', 'con COVID-19']\nfor idx, big_ax in enumerate(big_axes, start=1):\n big_ax.set_title(\"Grad-CAM en LUS de pacientes %s \\n\\n\\n\" % classes_sp[idx - 1], fontsize=16, pad=-30)\n big_ax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)\n \n big_ax._frameon = False\nfor i in range(1, columns*rows +1):\n img = item_img[i-1]\n fig.add_subplot(rows, columns, i)\n fig.tight_layout(h_pad=1)\n plt.imshow(img)\n \nplt.show()\n\nfig.savefig(\"grad-cam-img.pdf\",bbox_inches='tight')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
4af650351785bac212b3ed54a2c9f445f0ebc584
31,433
ipynb
Jupyter Notebook
FinalProject_SpeechRecognition/src/notebooks/04 Training - DS-CNN .ipynb
lev1khachatryan/ASDS_DL
ca00ce7b4cfb722f9bce545820cdb661ff8b643e
[ "MIT" ]
3
2019-12-14T04:48:44.000Z
2020-02-13T18:35:17.000Z
FinalProject_SpeechRecognition/src/notebooks/04 Training - DS-CNN .ipynb
lev1khachatryan/ASDS_DL
ca00ce7b4cfb722f9bce545820cdb661ff8b643e
[ "MIT" ]
1
2020-01-10T08:21:59.000Z
2020-01-10T08:21:59.000Z
FinalProject_SpeechRecognition/src/notebooks/04 Training - DS-CNN .ipynb
lev1khachatryan/ASDS_DL
ca00ce7b4cfb722f9bce545820cdb661ff8b643e
[ "MIT" ]
null
null
null
82.718421
2,437
0.681545
[ [ [ "Derived from https://arxiv.org/pdf/1711.07128.pdf\n", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport sys\nimport os\nimport tensorflow as tf\n\n# sys.path.append(\"../libs\")\nsys.path.insert(1, '../')\n\nfrom libs import input_data\nfrom libs import models\nfrom libs import trainer\nfrom libs import freeze", "WARNING: Logging before flag parsing goes to stderr.\nW0121 21:36:26.453793 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:13: The name tf.logging.set_verbosity is deprecated. Please use tf.compat.v1.logging.set_verbosity instead.\n\nW0121 21:36:26.456787 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:13: The name tf.logging.INFO is deprecated. Please use tf.compat.v1.logging.INFO instead.\n\n" ], [ "flags=tf.app.flags", "_____no_output_____" ], [ "flags=tf.app.flags\n#Important Directories\nflags.DEFINE_string('data_dir','..\\\\..\\\\_inputs\\\\raw','Train Data Folder')\nflags.DEFINE_string('summaries_dir','..\\\\..\\\\summaries','Summaries Folder')\nflags.DEFINE_string('train_dir','..\\\\..\\\\logs&checkpoint','Directory to write event logs and checkpoint')\nflags.DEFINE_string('models_dir','..\\\\..\\\\models','Models Folder')\n#Task Specific Parameters\nflags.DEFINE_string('wanted_words','yes,no,up,down,left,right,on,off,stop,go','Wanted Words')\nflags.DEFINE_float('validation_percentage',10,'Validation Percentage')\nflags.DEFINE_float('testing_percentage',10,'Testing Percentage')\nflags.DEFINE_integer('sample_rate',16000,'Sample Rate')\nflags.DEFINE_integer('clip_duration_ms',1000,'Clip Duration in ms')\nflags.DEFINE_float('window_size_ms',40,'How long each spectogram timeslice is')\nflags.DEFINE_float('window_stride_ms',20.0,'How far to move in time between frequency windows.')\nflags.DEFINE_integer('dct_coefficient_count',40,'How many bins to use for the MFCC fingerprint')\nflags.DEFINE_float('time_shift_ms',100.0,'Range to randomly shift the training audio by in time.')\n\nFLAGS=flags.FLAGS", "_____no_output_____" ], [ "model_architecture='ds_cnn'\nstart_checkpoint=None\nlogging_interval=10\neval_step_interval=1000\nsave_step_interval=1\nsilence_percentage=10.0\nunknown_percentage=10.0\nbackground_frequency=0.8\nbackground_volume=0.3\nlearning_rate='0.0005,0.0001,0.00002' #Always seperated by comma, trains with each of the learning rate for the given number of iterations\ntrain_steps='1000,1000,1000' #Declare the training steps for which the learning rates will be used\nbatch_size=256\nmodel_size_info=[5, 64, 10, 4, 2, 2, 64, 3, 3, 1, 1, 64, 3, 3, 1, 1, 64, 3, 3, 1, 1, 64, 3, 3, 1, 1]", "_____no_output_____" ], [ "remaining_args = FLAGS([sys.argv[0]] + [flag for flag in sys.argv if flag.startswith(\"--\")])\nassert(remaining_args == [sys.argv[0]])", "_____no_output_____" ], [ "train_dir=os.path.join(FLAGS.data_dir,'train','audio')", "_____no_output_____" ], [ "model_settings = models.prepare_model_settings(\n len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),\n FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,\n FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)\naudio_processor = input_data.AudioProcessor(\n train_dir, silence_percentage, unknown_percentage,\n FLAGS.wanted_words.split(','), FLAGS.validation_percentage,\n FLAGS.testing_percentage, model_settings,use_silence_folder=True)", "..\\..\\_inputs\\raw\\train\\audio\\*\\*.wav\n" ], [ "def get_train_data(args):\n sess=args\n time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)\n 
train_fingerprints, train_ground_truth = audio_processor.get_data(\n batch_size, 0, model_settings,background_frequency,\n background_volume, time_shift_samples, 'training', sess)\n return train_fingerprints,train_ground_truth\n\ndef get_val_data(args):\n '''\n Input: (sess,offset)\n '''\n sess,i=args\n validation_fingerprints, validation_ground_truth = (\n audio_processor.get_data(batch_size, i, model_settings, 0.0,\n 0.0, 0, 'validation', sess))\n return validation_fingerprints,validation_ground_truth", "_____no_output_____" ], [ "# def get_test_data(args):\n# '''\n# Input: (sess,offset)\n# '''\n# sess,i=args\n# test_fingerprints, test_ground_truth = audio_processor.get_data(\n# batch_size, i, model_settings, 0.0, 0.0, 0, 'testing', sess)\n# return test_fingerprints,test_ground_truth", "_____no_output_____" ], [ "def main(_):\n sess=tf.InteractiveSession()\n # Placeholders\n fingerprint_size = model_settings['fingerprint_size']\n label_count = model_settings['label_count']\n fingerprint_input = tf.placeholder(\n tf.float32, [None, fingerprint_size], name='fingerprint_input')\n ground_truth_input = tf.placeholder(\n tf.float32, [None, label_count], name='groundtruth_input')\n set_size = audio_processor.set_size('validation')\n label_count = model_settings['label_count']\n \n # Create Model\n \n logits, dropout_prob = models.create_model(\n fingerprint_input,\n model_settings,\n model_architecture,\n model_size_info=model_size_info,\n is_training=True)\n #Start Training\n extra_args=(dropout_prob,label_count,batch_size,set_size)\n trainer.train(sess,logits,fingerprint_input,ground_truth_input,get_train_data,\n get_val_data,train_steps,learning_rate,eval_step_interval, logging_interval=logging_interval,\n start_checkpoint=start_checkpoint,checkpoint_interval=save_step_interval,\n model_name=model_architecture,train_dir=FLAGS.train_dir,\n summaries_dir=FLAGS.summaries_dir,args=extra_args)", "_____no_output_____" ], [ "tf.app.run(main=main)", "W0121 21:37:19.374380 11212 deprecation_wrapper.py:119] From ..\\libs\\models.py:602: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.\n\nW0121 21:37:20.228034 11212 deprecation.py:323] From ..\\libs\\trainer.py:56: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\n\nFuture major versions of TensorFlow will allow gradients to flow\ninto the labels input on backprop by default.\n\nSee `tf.nn.softmax_cross_entropy_with_logits_v2`.\n\nW0121 21:37:20.247946 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:57: The name tf.summary.scalar is deprecated. Please use tf.compat.v1.summary.scalar instead.\n\nW0121 21:37:20.250938 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:62: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead.\n\nW0121 21:37:20.650902 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:67: The name tf.confusion_matrix is deprecated. Please use tf.math.confusion_matrix instead.\n\nW0121 21:37:20.722713 11212 deprecation.py:323] From ..\\libs\\trainer.py:70: get_or_create_global_step (from tensorflow.contrib.framework.python.ops.variables) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease switch to tf.train.get_or_create_global_step\nW0121 21:37:20.726666 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:71: The name tf.assign is deprecated. 
Please use tf.compat.v1.assign instead.\n\nW0121 21:37:20.728660 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:72: The name tf.train.Saver is deprecated. Please use tf.compat.v1.train.Saver instead.\n\nW0121 21:37:20.808479 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:73: The name tf.summary.merge_all is deprecated. Please use tf.compat.v1.summary.merge_all instead.\n\nW0121 21:37:20.810441 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:75: The name tf.summary.FileWriter is deprecated. Please use tf.compat.v1.summary.FileWriter instead.\n\nW0121 21:37:21.058775 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:87: The name tf.logging.info is deprecated. Please use tf.compat.v1.logging.info instead.\n\nI0121 21:37:21.059775 11212 trainer.py:87] Training from step: 1 \nW0121 21:37:21.059775 11212 deprecation_wrapper.py:119] From ..\\libs\\trainer.py:91: The name tf.train.write_graph is deprecated. Please use tf.io.write_graph instead.\n\nI0121 21:37:27.965297 11212 trainer.py:171] Saving to \"..\\..\\logs&checkpoint\\ds_cnn\\ckpt-1\"\nI0121 21:37:35.248434 11212 trainer.py:171] Saving to \"..\\..\\logs&checkpoint\\ds_cnn\\ckpt-2\"\nI0121 21:37:42.901761 11212 trainer.py:171] Saving to \"..\\..\\logs&checkpoint\\ds_cnn\\ckpt-3\"\nI0121 21:37:50.203076 11212 trainer.py:171] Saving to \"..\\..\\logs&checkpoint\\ds_cnn\\ckpt-4\"\nI0121 21:37:56.923125 11212 trainer.py:171] Saving to \"..\\..\\logs&checkpoint\\ds_cnn\\ckpt-5\"\nI0121 21:38:03.512285 11212 trainer.py:171] Saving to \"..\\..\\logs&checkpoint\\ds_cnn\\ckpt-6\"\nW0121 21:38:03.558171 11212 deprecation.py:323] From C:\\ProgramData\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\training\\saver.py:960: remove_checkpoint (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse standard file APIs to delete files with this prefix.\nI0121 21:38:10.175363 11212 trainer.py:171] Saving to \"..\\..\\logs&checkpoint\\ds_cnn\\ckpt-7\"\nI0121 21:38:16.533807 11212 trainer.py:171] Saving to \"..\\..\\logs&checkpoint\\ds_cnn\\ckpt-8\"\nI0121 21:38:23.337042 11212 trainer.py:171] Saving to \"..\\..\\logs&checkpoint\\ds_cnn\\ckpt-9\"\n" ], [ "# save_checkpoint='..\\\\..\\\\logs&checkpoint\\\\ds_cnn\\\\ckpt-899'", "_____no_output_____" ], [ "# save_path=os.path.join(FLAGS.models_dir,model_architecture,'%s.pb'%os.path.basename(save_checkpoint))\n# freeze.freeze_graph(FLAGS,model_architecture,save_checkpoint,save_path,model_size_info=model_size_info)", "_____no_output_____" ], [ "# save_path=os.path.join(FLAGS.models_dir,model_architecture,'%s-small-batched.pb'%os.path.basename(save_checkpoint))\n# freeze.freeze_graph(FLAGS,model_architecture,save_checkpoint,save_path,batched=True,model_size_info=model_size_info)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af6579a8b132dc0ba1ddbfc761203d0d74148f7
16,863
ipynb
Jupyter Notebook
Importing the large data sets to psql and computing their metrics.ipynb
DhineshVijayakumar/amazon-review-hackondata2017
a89687c240210b2f682654cb88be553c165af91e
[ "MIT" ]
null
null
null
Importing the large data sets to psql and computing their metrics.ipynb
DhineshVijayakumar/amazon-review-hackondata2017
a89687c240210b2f682654cb88be553c165af91e
[ "MIT" ]
null
null
null
Importing the large data sets to psql and computing their metrics.ipynb
DhineshVijayakumar/amazon-review-hackondata2017
a89687c240210b2f682654cb88be553c165af91e
[ "MIT" ]
null
null
null
31.816981
915
0.508035
[ [ [ "# Importing the large datasets to a postgresql server and computing their metrics\n\nIt is not possible to load the larger data sets in the memory of a local machine therefeore an alternative is to import them to a psql table and query them from there. By adding the right indices this can make the queries fast enough. After this import one can extract some basic statistics using sql and also export smaller portions of the data which can be handled by spark or pandas on a local machine.", "_____no_output_____" ], [ "## Helper functions", "_____no_output_____" ] ], [ [ "import timeit\ndef stopwatch(function):\n start_time = timeit.default_timer()\n result = function()\n print('Elapsed time: %i sec' % int(timeit.default_timer() - start_time))\n return result", "_____no_output_____" ] ], [ [ "## Unzipping the data and converting it to csv format\n\nUnfortunately psql does not support an import of record json files therefore we need to convert the data sets to csv. We use here the command line tool [json2csv](https://github.com/jehiah/json2csv).\n\n**WARNING:** The following two commands will run for a while, especially the second one. You can expect approximately **1 minute per GB** of unzipped data.", "_____no_output_____" ] ], [ [ "start_time = timeit.default_timer()\n\n!ls ./data/large-datasets/*.gz | grep -Po '.*(?=.gz)' | xargs -I {} gunzip {}.gz\n\nprint('Elapsed time: %i sec' % int(timeit.default_timer() - start_time))", "Elapsed time: 178 sec\n" ], [ "start_time = timeit.default_timer()\n\n!ls ./data/large-datasets/*.json | xargs sed -i 's/|/?/g;s/\\u0000/?/g'\n\nprint('Elapsed time: %i sec' % int(timeit.default_timer() - start_time))", "Elapsed time: 275 sec\n" ], [ "start_time = timeit.default_timer()\n\n!ls ./data/large-datasets/*.json | grep -Po '.*(?=.json)' | xargs -I {} json2csv -p -d '|' -k asin,helpful,overall,reviewText,reviewTime,reviewerID,reviewerName,summary,unixReviewTime -i {}.json -o {}.csv\n!rm ./data/large-datasets/*.json\n\nprint('Elapsed time: %i sec' % int(timeit.default_timer() - start_time))", "2017/08/28 00:16:16 ERROR Decoding JSON at line 8789077: invalid character '?' in string escape code\n{\"reviewerID\": \"AWGWD8R8PLWH3\", \"asin\": \"B00JL1H75A\", \"reviewerName\": \"Kim\", \"helpful\": [0, 0], \"reviewText\": \"Love this series! I even like Elijah. He has been through a lot and it sounds like his father is some kind of crazy tyrant or despot. I'm thinking along the lines of Saddom Hussein. He really sounds evil. I like Elijah and Natalie together and really hope she can turn him around. I do think there is a chance for them. I just got the 4 th book and now I have to decide if I can wait til the box set comes out. Great Book, and it is hot. He is a little finish and takes over Natalie's life and is very controlling. But as their relationship sort of grows, you get little glimps\\?s of something deeper and better for them. You should read this book. 
It is really good.\", \"overall\": 5.0, \"summary\": \"Wow\", \"unixReviewTime\": 1403827200, \"reviewTime\": \"06 27, 2014\"}\nElapsed time: 735 sec\n" ] ], [ [ "## Importing the data in psql\n\nTo import the data in psql we create a table with the appropriate shape and import form the csv files generated above.", "_____no_output_____" ], [ "### Some preparation to run psql transactions and queries in python", "_____no_output_____" ] ], [ [ "import psycopg2 as pg\nimport pandas as pd\n\ndb_conf = { \n 'user': 'mariosk',\n 'database': 'amazon_reviews'\n}\n\nconnection_factory = lambda: pg.connect(user=db_conf['user'], database=db_conf['database'])\n\ndef transaction(*statements):\n try:\n connection = connection_factory()\n cursor = connection.cursor()\n for statement in statements:\n cursor.execute(statement)\n connection.commit()\n cursor.close()\n except pg.DatabaseError as error:\n print(error)\n finally:\n if connection is not None:\n connection.close()\n \ndef query(statement):\n try:\n connection = connection_factory()\n cursor = connection.cursor()\n cursor.execute(statement)\n \n header = [ description[0] for description in cursor.description ]\n rows = cursor.fetchall()\n \n cursor.close()\n return pd.DataFrame.from_records(rows, columns=header)\n except (Exception, pg.DatabaseError) as error:\n print(error)\n return None\n finally:\n if connection is not None:\n connection.close()", "_____no_output_____" ] ], [ [ "### Creating tables for with indices for the large datasets", "_____no_output_____" ] ], [ [ "import re\n\ntable_names = [ re.search('reviews_(.*)_5.csv', filename).group(1) \n for filename \n in sorted(os.listdir('./data/large-datasets'))\n if not filename.endswith('json') ]", "_____no_output_____" ], [ "def create_table(table_name):\n transaction(\n 'create table %s (asin text, helpful text, overall double precision, reviewText text, reviewTime text, reviewerID text, reviewerName text, summary text, unixReviewTime int);' % table_name,\n 'create index {0}_asin ON {0} (asin);'.format(table_name),\n 'create index {0}_overall ON {0} (overall);'.format(table_name),\n 'create index {0}_reviewerID ON {0} (reviewerID);'.format(table_name),\n 'create index {0}_unixReviewTime ON {0} (unixReviewTime);'.format(table_name))\n\nfor table_name in table_names:\n create_table(table_name)", "_____no_output_____" ] ], [ [ "### Importing the datasets to psql\n\n**WARNING:** The following command will take long time to complete. 
Estimate ~1 minute for each GB of csv data.", "_____no_output_____" ] ], [ [ "start_time = timeit.default_timer()\n\n!ls ./data/large-datasets | grep -Po '(?<=reviews_).*(?=_5.csv)' | xargs -I {} psql -U mariosk -d amazon_reviews -c \"\\copy {} from './data/large-datasets/reviews_{}_5.csv' with (format csv, delimiter '|', header true);\"\n\nprint('Elapsed time: %i sec' % int(timeit.default_timer() - start_time))", "COPY 8898040\nCOPY 1097592\nCOPY 1689188\nCOPY 1697533\nElapsed time: 871 sec\n" ] ], [ [ "## Querying the metrics", "_____no_output_____" ] ], [ [ "def average_reviews_per_product(table_name):\n return (query('''\n with distinct_products as (select count(distinct asin) as products from {0}),\n reviews_count as (select cast(count(*) as double precision) as reviews from {0})\n select reviews / products as reviews_per_product\n from distinct_products cross join reviews_count\n '''.format(table_name))\n .rename(index={0: table_name.replace('_', ' ')}))", "_____no_output_____" ], [ "def average_reviews_per_reviewer(table_name):\n return (query('''\n with distinct_reviewers as (select count(distinct reviewerID) as reviewers from {0}),\n reviews_count as (select cast(count(*) as double precision) as reviews from {0})\n select reviews / reviewers as reviews_per_reviewer\n from distinct_reviewers cross join reviews_count\n '''.format(table_name))\n .rename(index={ 0: table_name.replace('_', ' ')}))", "_____no_output_____" ], [ "def percentages_per_rating(table_name):\n return (query('''\n with rating_counts as (select overall, count(overall) as rating_count from {0} group by overall),\n reviews_count as (select cast(count(*) as double precision) as reviews from {0})\n select cast(overall as int) as dataset_name, rating_count / reviews as row\n from rating_counts cross join reviews_count\n '''.format(table_name))\n .set_index('dataset_name')\n .sort_index()\n .transpose()\n .rename(index={'row': table_name.replace('_', ' ')}))", "_____no_output_____" ], [ "def number_of_reviews(table_name):\n return (query('''\n select count(*) as number_of_reviews from {0}\n '''.format(table_name))\n .rename(index={ 0: table_name.replace('_', ' ') }))", "_____no_output_____" ], [ "def all_metrics(table_name):\n print(table_name)\n \n return pd.concat(\n [ f(table_name) \n for f\n in [ percentages_per_rating, number_of_reviews, average_reviews_per_product, average_reviews_per_reviewer ]], \n axis=1)", "_____no_output_____" ], [ "metrics = stopwatch(lambda: pd.concat([ all_metrics(table) for table in table_names ]))", "Books\nCDs_and_Vinyl\nElectronics\nMovies_and_TV\nElapsed time: 146 sec\n" ], [ "metrics.index.name = 'dataset_name'\nmetrics.to_csv('./metadata/large-datasets-evaluation-metrics.csv')", "_____no_output_____" ], [ "metrics", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af67474e47f4e947d728df1042fb72769757303
3,018
ipynb
Jupyter Notebook
notebook/procs-grpc.ipynb
samlet/stack
47db17fd4fdab264032f224dca31a4bb1d19b754
[ "Apache-2.0" ]
3
2020-01-11T13:55:38.000Z
2020-08-25T22:34:15.000Z
notebook/procs-grpc.ipynb
samlet/stack
47db17fd4fdab264032f224dca31a4bb1d19b754
[ "Apache-2.0" ]
null
null
null
notebook/procs-grpc.ipynb
samlet/stack
47db17fd4fdab264032f224dca31a4bb1d19b754
[ "Apache-2.0" ]
1
2021-01-01T05:21:44.000Z
2021-01-01T05:21:44.000Z
22.029197
92
0.550696
[ [ [ "import sys\n\nfrom google.protobuf.json_format import MessageToJson\nfrom client_wrapper import ServiceClient\n\nimport nlpserv_pb2 as nlp_messages\nimport nlpserv_pb2_grpc as nlp_service\n\nclient=ServiceClient(nlp_service, 'NlpProcsStub', 'localhost', 10052)", "_____no_output_____" ], [ "from utils import dump\nrequest = nlp_messages.NlTokenizerRequest(text=nlp_messages.NlText(text=\"这里是北京\"))\nresponse = client.Tokenizer(request)\n# print(response)\nfor t in response.tokens:\n # print(MessageToJson(resp))\n print(t.text, t.label, t.length)", "这里 r 2\n是 v 1\n北京 ns 2\n" ], [ "from client_wrapper import ServiceClient\nimport hello_pb2_grpc\nimport hello_pb2\nfrom google.protobuf.empty_pb2 import Empty\nc=ServiceClient(hello_pb2_grpc, 'HelloServiceStub', 'localhost', 50051)\nc.SayHello(Empty())", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
4af6849585532826ebc60000f800a262e2b8d913
19,344
ipynb
Jupyter Notebook
docs/source/example.ipynb
skizunov/nbconvert
16d696b2eb9c1969cb0f3205931aaa779302042d
[ "BSD-3-Clause-Clear" ]
1
2019-09-03T01:16:03.000Z
2019-09-03T01:16:03.000Z
docs/source/example.ipynb
skizunov/nbconvert
16d696b2eb9c1969cb0f3205931aaa779302042d
[ "BSD-3-Clause-Clear" ]
3
2020-03-24T17:31:36.000Z
2021-02-02T22:09:23.000Z
docs/source/example.ipynb
skizunov/nbconvert
16d696b2eb9c1969cb0f3205931aaa779302042d
[ "BSD-3-Clause-Clear" ]
1
2021-10-19T21:26:23.000Z
2021-10-19T21:26:23.000Z
132.493151
16,850
0.895782
[ [ [ "# Example notebook", "_____no_output_____" ], [ "### Markdown cells\n\nThis is an example notebook that can be converted with `nbconvert` to different formats. This is an example of a markdown cell.", "_____no_output_____" ], [ "### LaTeX Equations\n\nHere is an equation:\n\n$$\ny = \\sin(x)\n$$", "_____no_output_____" ], [ "### Code cells", "_____no_output_____" ] ], [ [ "print(\"This is a code cell that produces some output\")", "This is a code cell that produces some output\n" ] ], [ [ "### Inline figures", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nplt.ion()\n\nx = np.linspace(0, 2 * np.pi, 100)\ny = np.sin(x)\nplt.plot(x, y)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4af686ae90d5bf211666b1a2be2971811778876d
80,057
ipynb
Jupyter Notebook
shark_tank_india.ipynb
Ava100rav/shark-tank-india
1ffb8f236b1e082f088c3d6c518b119c665d70e4
[ "MIT" ]
null
null
null
shark_tank_india.ipynb
Ava100rav/shark-tank-india
1ffb8f236b1e082f088c3d6c518b119c665d70e4
[ "MIT" ]
null
null
null
shark_tank_india.ipynb
Ava100rav/shark-tank-india
1ffb8f236b1e082f088c3d6c518b119c665d70e4
[ "MIT" ]
null
null
null
48.024595
4,961
0.407934
[ [ [ "<a href=\"https://colab.research.google.com/github/Ava100rav/shark-tank-india/blob/main/shark_tank_india.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "from google.colab import drive", "_____no_output_____" ], [ "drive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt \nimport matplotlib.style ", "_____no_output_____" ], [ "pd.set_option('display.max_columns',None)\npd.set_option('display.max_rows',None)", "_____no_output_____" ], [ "df=pd.read_csv('/content/drive/MyDrive/shark-tank-india/Shark-Tank-India-Dataset.csv')\ndf.head(2)", "_____no_output_____" ], [ "df.tail(2)", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 117 entries, 0 to 116\nData columns (total 28 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 episode_number 117 non-null int64 \n 1 pitch_number 117 non-null int64 \n 2 brand_name 117 non-null object \n 3 idea 117 non-null object \n 4 deal 117 non-null int64 \n 5 pitcher_ask_amount 117 non-null float64\n 6 ask_equity 117 non-null float64\n 7 ask_valuation 117 non-null float64\n 8 deal_amount 117 non-null float64\n 9 deal_equity 117 non-null float64\n 10 deal_valuation 117 non-null float64\n 11 ashneer_present 117 non-null int64 \n 12 anupam_present 117 non-null int64 \n 13 aman_present 117 non-null int64 \n 14 namita_present 117 non-null int64 \n 15 vineeta_present 117 non-null int64 \n 16 peyush_present 117 non-null int64 \n 17 ghazal_present 117 non-null int64 \n 18 ashneer_deal 117 non-null int64 \n 19 anupam_deal 117 non-null int64 \n 20 aman_deal 117 non-null int64 \n 21 namita_deal 117 non-null int64 \n 22 vineeta_deal 117 non-null int64 \n 23 peyush_deal 117 non-null int64 \n 24 ghazal_deal 117 non-null int64 \n 25 total_sharks_invested 117 non-null int64 \n 26 amount_per_shark 117 non-null float64\n 27 equity_per_shark 117 non-null float64\ndtypes: float64(8), int64(18), object(2)\nmemory usage: 25.7+ KB\n" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "df['deal'].value_counts()", "_____no_output_____" ], [ "sns.countplot(df['deal'])", "/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. 
From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "df.drop(['episode_number','pitch_number','brand_name'],axis=1,inplace=True)", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df['idea'].unique", "_____no_output_____" ], [ "df.drop('idea',axis=1,inplace=True)", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "print(df.corr())", " deal pitcher_ask_amount ask_equity \\\ndeal 1.000000 -0.106927 -0.076438 \npitcher_ask_amount -0.106927 1.000000 0.470198 \nask_equity -0.076438 0.470198 1.000000 \nask_valuation -0.151695 0.911174 0.260603 \ndeal_amount 0.736002 -0.080719 -0.177955 \ndeal_equity 0.609043 -0.067233 0.288341 \ndeal_valuation 0.409138 -0.045988 -0.310331 \nashneer_present -0.020728 0.040759 -0.047404 \nanupam_present NaN NaN NaN \naman_present -0.020728 0.040759 -0.047404 \nnamita_present 0.065484 0.029396 0.030743 \nvineeta_present -0.161874 -0.104309 0.068224 \npeyush_present 0.084100 0.052044 -0.065365 \nghazal_present -0.059761 -0.049727 0.035071 \nashneer_deal 0.373509 -0.044099 -0.088782 \nanupam_deal 0.454369 -0.050348 0.013584 \naman_deal 0.461369 -0.053685 -0.140932 \nnamita_deal 0.430422 -0.045850 0.027449 \nvineeta_deal 0.342997 -0.036575 0.096837 \npeyush_deal 0.489898 -0.052946 -0.026575 \nghazal_deal 0.225630 -0.024308 0.071451 \ntotal_sharks_invested 0.759342 -0.084288 -0.028387 \namount_per_shark 0.653882 -0.071281 -0.165141 \nequity_per_shark 0.461046 -0.050873 0.238074 \n\n ask_valuation deal_amount deal_equity \\\ndeal -0.151695 0.736002 0.609043 \npitcher_ask_amount 0.911174 -0.080719 -0.067233 \nask_equity 0.260603 -0.177955 0.288341 \nask_valuation 1.000000 -0.090163 -0.155565 \ndeal_amount -0.090163 1.000000 0.370487 \ndeal_equity -0.155565 0.370487 1.000000 \ndeal_valuation 0.017869 0.636411 -0.082694 \nashneer_present 0.053817 -0.026851 -0.093570 \nanupam_present NaN NaN NaN \naman_present 0.053817 -0.026851 -0.093570 \nnamita_present 0.042539 0.065587 -0.065976 \nvineeta_present -0.032090 -0.082982 0.014315 \npeyush_present -0.050228 -0.004062 0.088750 \nghazal_present -0.060577 -0.057153 0.062931 \nashneer_deal -0.052650 0.392853 0.127167 \nanupam_deal -0.095488 0.335148 0.267897 \naman_deal -0.085926 0.482279 0.150735 \nnamita_deal -0.075664 0.443929 0.136270 \nvineeta_deal -0.068814 0.315109 0.359560 \npeyush_deal -0.089131 0.384539 0.399927 \nghazal_deal -0.050633 0.188542 0.188478 \ntotal_sharks_invested -0.140668 0.695950 0.432880 \namount_per_shark -0.066631 0.793392 0.359031 \nequity_per_shark -0.118541 0.239212 0.897922 \n\n deal_valuation ashneer_present anupam_present \\\ndeal 0.409138 -0.020728 NaN \npitcher_ask_amount -0.045988 0.040759 NaN \nask_equity -0.310331 -0.047404 NaN \nask_valuation 0.017869 0.053817 NaN \ndeal_amount 0.636411 -0.026851 NaN \ndeal_equity -0.082694 -0.093570 NaN \ndeal_valuation 1.000000 0.018973 NaN \nashneer_present 0.018973 1.000000 NaN \nanupam_present NaN NaN NaN \naman_present 0.018973 1.000000 NaN \nnamita_present 0.073284 -0.141843 NaN \nvineeta_present 0.008492 -0.387059 NaN \npeyush_present -0.138948 -0.252768 NaN \nghazal_present -0.082732 -0.823754 NaN \nashneer_deal 0.206179 0.205939 NaN \nanupam_deal 0.219510 -0.005886 NaN \naman_deal 0.360149 0.246972 NaN \nnamita_deal 0.178616 -0.025346 NaN \nvineeta_deal -0.001066 -0.108422 NaN \npeyush_deal 0.138179 -0.143856 NaN \nghazal_deal -0.002742 -0.475201 NaN 
\ntotal_sharks_invested 0.319298 -0.026656 NaN \namount_per_shark 0.581751 -0.049810 NaN \nequity_per_shark -0.080662 -0.102568 NaN \n\n aman_present namita_present vineeta_present \\\ndeal -0.020728 0.065484 -0.161874 \npitcher_ask_amount 0.040759 0.029396 -0.104309 \nask_equity -0.047404 0.030743 0.068224 \nask_valuation 0.053817 0.042539 -0.032090 \ndeal_amount -0.026851 0.065587 -0.082982 \ndeal_equity -0.093570 -0.065976 0.014315 \ndeal_valuation 0.018973 0.073284 0.008492 \nashneer_present 1.000000 -0.141843 -0.387059 \nanupam_present NaN NaN NaN \naman_present 1.000000 -0.141843 -0.387059 \nnamita_present -0.141843 1.000000 -0.283176 \nvineeta_present -0.387059 -0.283176 1.000000 \npeyush_present -0.252768 -0.184927 -0.504627 \nghazal_present -0.823754 0.172191 0.469871 \nashneer_deal 0.205939 -0.001957 -0.127833 \nanupam_deal -0.005886 0.018596 -0.108356 \naman_deal 0.246972 0.043412 -0.234098 \nnamita_deal -0.025346 0.155022 -0.150433 \nvineeta_deal -0.108422 -0.051660 0.337100 \npeyush_deal -0.143856 0.037427 -0.254899 \nghazal_deal -0.475201 0.081264 0.221751 \ntotal_sharks_invested -0.026656 0.073785 -0.137834 \namount_per_shark -0.049810 0.066861 -0.048368 \nequity_per_shark -0.102568 -0.114103 0.028488 \n\n peyush_present ghazal_present ashneer_deal \\\ndeal 0.084100 -0.059761 0.373509 \npitcher_ask_amount 0.052044 -0.049727 -0.044099 \nask_equity -0.065365 0.035071 -0.088782 \nask_valuation -0.050228 -0.060577 -0.052650 \ndeal_amount -0.004062 -0.057153 0.392853 \ndeal_equity 0.088750 0.062931 0.127167 \ndeal_valuation -0.138948 -0.082732 0.206179 \nashneer_present -0.252768 -0.823754 0.205939 \nanupam_present NaN NaN NaN \naman_present -0.252768 -0.823754 0.205939 \nnamita_present -0.184927 0.172191 -0.001957 \nvineeta_present -0.504627 0.469871 -0.127833 \npeyush_present 1.000000 0.306848 0.010581 \nghazal_present 0.306848 1.000000 -0.142857 \nashneer_deal 0.010581 -0.142857 1.000000 \nanupam_deal 0.095532 -0.016971 0.203653 \naman_deal -0.002776 -0.251628 0.311859 \nnamita_deal 0.073610 0.005846 0.116921 \nvineeta_deal -0.194332 0.163984 0.220354 \npeyush_deal 0.314426 0.048795 0.219578 \nghazal_deal 0.144814 0.471940 -0.024079 \ntotal_sharks_invested 0.122518 0.014639 0.573341 \namount_per_shark -0.037216 -0.049405 0.107673 \nequity_per_shark 0.083101 0.040184 -0.049699 \n\n anupam_deal aman_deal namita_deal vineeta_deal \\\ndeal 0.454369 0.461369 0.430422 0.342997 \npitcher_ask_amount -0.050348 -0.053685 -0.045850 -0.036575 \nask_equity 0.013584 -0.140932 0.027449 0.096837 \nask_valuation -0.095488 -0.085926 -0.075664 -0.068814 \ndeal_amount 0.335148 0.482279 0.443929 0.315109 \ndeal_equity 0.267897 0.150735 0.136270 0.359560 \ndeal_valuation 0.219510 0.360149 0.178616 -0.001066 \nashneer_present -0.005886 0.246972 -0.025346 -0.108422 \nanupam_present NaN NaN NaN NaN \naman_present -0.005886 0.246972 -0.025346 -0.108422 \nnamita_present 0.018596 0.043412 0.155022 -0.051660 \nvineeta_present -0.108356 -0.234098 -0.150433 0.337100 \npeyush_present 0.095532 -0.002776 0.073610 -0.194332 \nghazal_present -0.016971 -0.251628 0.005846 0.163984 \nashneer_deal 0.203653 0.311859 0.116921 0.220354 \nanupam_deal 1.000000 0.211158 0.134733 0.185069 \naman_deal 0.211158 1.000000 0.294019 0.024582 \nnamita_deal 0.134733 0.294019 1.000000 0.142601 \nvineeta_deal 0.185069 0.024582 0.142601 1.000000 \npeyush_deal 0.324617 0.120692 0.151757 -0.028006 \nghazal_deal 0.139591 -0.141494 0.247520 0.442232 \ntotal_sharks_invested 0.609817 0.549698 0.559506 0.464893 \namount_per_shark 0.088905 
0.323914 0.232283 0.061331 \nequity_per_shark 0.063640 0.007305 0.024287 0.113549 \n\n peyush_deal ghazal_deal total_sharks_invested \\\ndeal 0.489898 0.225630 0.759342 \npitcher_ask_amount -0.052946 -0.024308 -0.084288 \nask_equity -0.026575 0.071451 -0.028387 \nask_valuation -0.089131 -0.050633 -0.140668 \ndeal_amount 0.384539 0.188542 0.695950 \ndeal_equity 0.399927 0.188478 0.432880 \ndeal_valuation 0.138179 -0.002742 0.319298 \nashneer_present -0.143856 -0.475201 -0.026656 \nanupam_present NaN NaN NaN \naman_present -0.143856 -0.475201 -0.026656 \nnamita_present 0.037427 0.081264 0.073785 \nvineeta_present -0.254899 0.221751 -0.137834 \npeyush_present 0.314426 0.144814 0.122518 \nghazal_present 0.048795 0.471940 0.014639 \nashneer_deal 0.219578 -0.024079 0.573341 \nanupam_deal 0.324617 0.139591 0.609817 \naman_deal 0.120692 -0.141494 0.549698 \nnamita_deal 0.151757 0.247520 0.559506 \nvineeta_deal -0.028006 0.442232 0.464893 \npeyush_deal 1.000000 0.203965 0.560000 \nghazal_deal 0.203965 1.000000 0.394771 \ntotal_sharks_invested 0.560000 0.394771 1.000000 \namount_per_shark 0.198726 0.004715 0.293030 \nequity_per_shark 0.311324 0.025510 0.138429 \n\n amount_per_shark equity_per_shark \ndeal 0.653882 0.461046 \npitcher_ask_amount -0.071281 -0.050873 \nask_equity -0.165141 0.238074 \nask_valuation -0.066631 -0.118541 \ndeal_amount 0.793392 0.239212 \ndeal_equity 0.359031 0.897922 \ndeal_valuation 0.581751 -0.080662 \nashneer_present -0.049810 -0.102568 \nanupam_present NaN NaN \naman_present -0.049810 -0.102568 \nnamita_present 0.066861 -0.114103 \nvineeta_present -0.048368 0.028488 \npeyush_present -0.037216 0.083101 \nghazal_present -0.049405 0.040184 \nashneer_deal 0.107673 -0.049699 \nanupam_deal 0.088905 0.063640 \naman_deal 0.323914 0.007305 \nnamita_deal 0.232283 0.024287 \nvineeta_deal 0.061331 0.113549 \npeyush_deal 0.198726 0.311324 \nghazal_deal 0.004715 0.025510 \ntotal_sharks_invested 0.293030 0.138429 \namount_per_shark 1.000000 0.407074 \nequity_per_shark 0.407074 1.000000 \n" ], [ "\n# Copy all the predictor variables into X dataframe\nX = df.drop('deal', axis=1)\ny=df['deal']", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\n# Split X and y into training and test set in 65:35 ratio\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.35 , random_state=10)", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(X_train, y_train)\n", "/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_logistic.py:818: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,\n" ], [ "y_pred = classifier.predict(X_test)", "_____no_output_____" ], [ "from sklearn.metrics import classification_report, confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint (\"Confusion Matrix : \\n\", cm)\n", "Confusion Matrix : \n [[16 0]\n [ 3 22]]\n" ], [ "print (classification_report(y_test, y_pred))", " precision recall f1-score support\n\n 0 0.84 1.00 0.91 16\n 1 1.00 0.88 0.94 25\n\n accuracy 0.93 41\n macro avg 0.92 0.94 0.93 41\nweighted avg 0.94 0.93 0.93 41\n\n" ], [ "from sklearn.metrics import classification_report, confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint (\"Confusion Matrix : \\n\", cm)", "Confusion Matrix : \n [[16 0]\n [ 3 22]]\n" ], [ "from sklearn.metrics import accuracy_score\nprint (\"Accuracy : \", accuracy_score(y_test, y_pred))", "Accuracy : 0.926829268292683\n" ], [ "# Import necessary modules\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# Split into training and test set\nX_train, X_test, y_train, y_test = train_test_split(\n\t\t\tX, y, test_size = 0.2, random_state=42)\n\nknn = KNeighborsClassifier(n_neighbors=7)\n\nknn.fit(X_train, y_train)\n\n# Calculate the accuracy of the model\nprint(knn.score(X_test, y_test))\n", "0.875\n" ], [ "from sklearn.tree import DecisionTreeClassifier\n# Splitting the dataset into train and test\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 100)\n\n# Function to perform training with giniIndex.\ndef train_using_gini(X_train, X_test, y_train):\n \n # Creating the classifier object\n clf_gini = DecisionTreeClassifier(criterion = \"gini\",\n random_state = 100,max_depth=3, min_samples_leaf=5)\n \n # Performing training\n clf_gini.fit(X_train, y_train)\n return clf_gini\n\n# Function to perform training with entropy.\ndef tarin_using_entropy(X_train, X_test, y_train):\n \n # Decision tree with entropy\n clf_entropy = DecisionTreeClassifier(\n criterion = \"entropy\", random_state = 100,\n max_depth = 3, min_samples_leaf = 5)\n \n # Performing training\n clf_entropy.fit(X_train, y_train)\n return clf_entropy\n\n# Function to make predictions\ndef prediction(X_test, clf_object):\n \n # Predicton on test with giniIndex\n y_pred = clf_object.predict(X_test)\n print(\"Predicted values:\")\n print(y_pred)\n return y_pred\n\n# Function to calculate accuracy\ndef cal_accuracy(y_test, y_pred):\n \n print(\"Confusion Matrix: \",\n confusion_matrix(y_test, y_pred))\n \n print (\"Accuracy : \",\n accuracy_score(y_test,y_pred)*100)\n \n print(\"Report : \",\n classification_report(y_test, y_pred)) ", "_____no_output_____" ], [ "clf_gini = train_using_gini(X_train, X_test, y_train)\n", "_____no_output_____" ], [ "clf_entropy = tarin_using_entropy(X_train, X_test, y_train)", "_____no_output_____" ], [ " # Operational Phase\nprint(\"Results Using Gini Index:\")\n \n# Prediction using gini\ny_pred_gini = prediction(X_test, clf_gini)\ncal_accuracy(y_test, y_pred_gini)", "Results Using Gini Index:\nPredicted values:\n[1 0 1 1 0 0 1 1 1 1 1 0 1 1 1 0 0 1 1 0 1 0 1 0 1 1 0 1 1 0 1 1 0 0 0 1]\nConfusion Matrix: [[14 0]\n [ 0 22]]\nAccuracy : 100.0\nReport : precision recall f1-score support\n\n 0 
1.00 1.00 1.00 14\n 1 1.00 1.00 1.00 22\n\n accuracy 1.00 36\n macro avg 1.00 1.00 1.00 36\nweighted avg 1.00 1.00 1.00 36\n\n" ], [ "print(\"Results Using Entropy:\")\n# Prediction using entropy\ny_pred_entropy = prediction(X_test, clf_entropy)\ncal_accuracy(y_test, y_pred_entropy)", "Results Using Entropy:\nPredicted values:\n[1 0 1 1 0 0 1 1 1 1 1 0 1 1 1 0 0 1 1 0 1 0 1 0 1 1 0 1 1 0 1 1 0 0 0 1]\nConfusion Matrix: [[14 0]\n [ 0 22]]\nAccuracy : 100.0\nReport : precision recall f1-score support\n\n 0 1.00 1.00 1.00 14\n 1 1.00 1.00 1.00 22\n\n accuracy 1.00 36\n macro avg 1.00 1.00 1.00 36\nweighted avg 1.00 1.00 1.00 36\n\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af690ea6f1a4f02fcbdd57275d7943c5042ea3c
288,540
ipynb
Jupyter Notebook
_notebooks/2020-07-06-e-is-for-eda.ipynb
educatorsRlearners/an-a-z-of-machine-learning
dd699d3cde887094441005e0422b8669d8829575
[ "Apache-2.0" ]
2
2020-07-07T13:52:50.000Z
2021-04-05T12:49:04.000Z
_notebooks/2020-07-06-e-is-for-eda.ipynb
educatorsRlearners/an-a-z-of-machine-learning
dd699d3cde887094441005e0422b8669d8829575
[ "Apache-2.0" ]
2
2021-07-16T17:57:22.000Z
2021-10-29T18:10:39.000Z
_notebooks/2020-07-06-e-is-for-eda.ipynb
educatorsRlearners/an-a-z-of-machine-learning
dd699d3cde887094441005e0422b8669d8829575
[ "Apache-2.0" ]
null
null
null
128.468388
62,744
0.803327
[ [ [ "# \"E is for Exploratory Data Analysis: Categorical Data\"\n> What is Exploratory Data Analysis (EDA), why is it done, and how do we do it in Python?\n\n- toc: false \n- badges: True\n- comments: true\n- categories: [E]\n- hide: False\n- image: images/e-is-for-eda-text/alphabet-close-up-communication-conceptual-278887.jpg", "_____no_output_____" ], [ "## _What is **Exploratory Data Analysis(EDA)**?_\nWhile I answered these questions in the [last post](https://educatorsrlearners.github.io/an-a-z-of-machine-learning/e/2020/06/15/e-is-for-eda.html), since [all learning is repetition](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=224340), I'll do it again :grin: \n\nEDA is an ethos for how we scrutinize data including, but not limited to: \n- what we look for (i.e. shapes, trends, outliers) \n- the approaches we employ (i.e. [five-number summary](https://www.statisticshowto.com/how-to-find-a-five-number-summary-in-statistics/), visualizations)\n- and the decisions we reach{% fn 1 %} \n\n## _Why is it done?_\nTwo main reasons: \n\n1. If we collected the data ourselves, we need to know if our data suits our needs or if we need to collect more/different data. \n\n2. If we didn't collect the data ourselves, we need to interrogate the data to answer the \"5 W's\"\n- __What__ kind of data do we have (i.e. numeric, categorical)?\n- __When__ was the data collected? There could be more recent data which we could collect which would better inform our model.\n- __How__ much data do we have? Also, how was the data collected? \n- __Why__ was the data collected? The original motivation could highlight potential areas of bias in the data. \n- __Who__ collected the data? \n\nSome of these questions can't necessarily be answered by looking at the data alone which is fine because _[nothing comes from nothing](http://parmenides.me/nothing-comes-from-nothing/)_; someone will know the answers so all we have to do is know where to look and whom to ask. \n\n## _How do we do it in Python?_\nAs always, I'll follow the steps outlined in [_Hands-on Machine Learning with Scikit-Learn, Keras & TensorFlow_](https://github.com/ageron/handson-ml/blob/master/ml-project-checklist.md)\n\n### Step 1: Frame the Problem\n\"Given a set of features, can we determine how old someone needs to be to read a book?\" \n\n### Step 2: Get the Data\nWe'll be using the same dataset as in the [previous post](https://educatorsrlearners.github.io/an-a-z-of-machine-learning/e/2020/06/15/e-is-for-eda.html). \n\n### Step 3: Explore the Data to Gain Insights (i.e. EDA)\nAs always, import the essential libraries, then load the data. 
", "_____no_output_____" ] ], [ [ "#hide\nimport warnings; warnings.simplefilter('ignore')", "_____no_output_____" ], [ "#For data manipulation\nimport pandas as pd\nimport numpy as np\n\n#For visualization\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport missingno as msno \n\nurl = 'https://raw.githubusercontent.com/educatorsRlearners/book-maturity/master/csv/book_info_complete.csv'\n\ndf = pd.read_csv(url)", "_____no_output_____" ] ], [ [ "To review,\n\n***How much data do we have?*** ", "_____no_output_____" ] ], [ [ "df.shape", "_____no_output_____" ] ], [ [ "- 23 features\n- one target\n- 5,816 observations\n\n***What type of data do we have?*** ", "_____no_output_____" ] ], [ [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 5816 entries, 0 to 5815\nData columns (total 24 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 title 5816 non-null object \n 1 description 5816 non-null object \n 2 plot 5816 non-null object \n 3 csm_review 5816 non-null object \n 4 need_to_know 5816 non-null object \n 5 par_rating 2495 non-null float64\n 6 kids_rating 3026 non-null float64\n 7 csm_rating 5816 non-null int64 \n 8 Author 5468 non-null object \n 9 Genre 5816 non-null object \n 10 Topics 3868 non-null object \n 11 Book type 5816 non-null object \n 12 Publisher 5675 non-null object \n 13 Publication date 5816 non-null object \n 14 Publisher's recommended age(s) 4647 non-null object \n 15 Number of pages 5767 non-null float64\n 16 Available on 3534 non-null object \n 17 Last updated 5816 non-null object \n 18 Illustrator 2490 non-null object \n 19 Authors 348 non-null object \n 20 Awards 68 non-null object \n 21 Publishers 33 non-null object \n 22 Award 415 non-null object \n 23 Illustrators 61 non-null object \ndtypes: float64(3), int64(1), object(20)\nmemory usage: 1.1+ MB\n" ] ], [ [ "Looks like mostly categorical with some numeric. \n\nLets take a closer look. ", "_____no_output_____" ] ], [ [ "df.head().T", "_____no_output_____" ] ], [ [ "Again, I collected the data so I know the target is `csm_rating` which is the minimum age Common Sense Media (CSM) says a reader should be for the given book.\n\nAlso, we have essentially three types of features:\n- Numeric \n - `par_rating` : Ratings of the book by parents\n - `kids_rating` : Ratings of the book by children \n - :dart:`csm_rating` : Ratings of the books by Common Sense Media\n - `Number of pages` : Length of the book \n - `Publisher's recommended age(s)`: Self explanatory\n \n\n- Date\n - `Publication date` : When the book was published\n - `Last updated`: When the book's information was updated on the website\n\nwith the rest of the features being categorical and text; these features will be our focus for today. \n\n#### Step 3.1 Housekeeping \nClean the feature names to make inspection easier. {% fn 3 %}", "_____no_output_____" ] ], [ [ "df.columns", "_____no_output_____" ], [ "df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ] ], [ [ "Much better. \n\nNow lets subset the data frame so we only have the features of interest. 
\n\nGiven there are twice as many text features compared to non-text features, and the fact that I'm ~~lazy~~ efficient, I'll create a list of the features I ***don't*** want ", "_____no_output_____" ] ], [ [ "numeric = ['par_rating', 'kids_rating', 'csm_rating', 'number_of_pages', \"publisher's_recommended_ages\", \"publication_date\", \"last_updated\"]", "_____no_output_____" ] ], [ [ "and use it to keep the features I ***do*** want. ", "_____no_output_____" ] ], [ [ "df_strings = df.drop(df[numeric], axis=1)", "_____no_output_____" ] ], [ [ "_Voila!_", "_____no_output_____" ] ], [ [ "df_strings.head().T", "_____no_output_____" ] ], [ [ "Clearly, the non-numeric data falls into two groups:\n- text \n - `description`\n - `plot`\n - `csm_review`\n - `need_to_know`\n- categories \n - `author`/`authors`\n - `genre`\n - `award`/`awards`\n - etc.\n \nLooking at the output above, so many questions come to mind: \n\n1. How many missing values do we have?\n2. How long are the descriptions? \n3. What's the difference between `csm_review` and `need_to_know`?\n4. Similarly, what the difference between `description` and `plot`?\n5. How many different authors do we have in the dataset?\n6. How many types of books do we have? \n\nand I'm sure more will arise once we start. \n\nWhere to start? Lets answer the easiest questions first :grin:\n\n## Categories\n\n#### ***How many missing values do we have?*** \nA cursory glance at the output above indicates there are potentially a ton of missing values; lets inspect this hunch visually. ", "_____no_output_____" ] ], [ [ "msno.bar(df_strings, sort='descending');", "_____no_output_____" ] ], [ [ "Hunch confirmed: 10 the 17 columns are missing values with some being practically empty. \n\nTo get a precise count, we can use `sidetable`.{% fn 2 %} \n", "_____no_output_____" ] ], [ [ "import sidetable\n\ndf_strings.stb.missing(clip_0=True, style=True)", "_____no_output_____" ] ], [ [ "OK, we have lots of missing values and several columns which appear to be measuring similar features (i.e., authors, illustrators, publishers, awards) so lets inspect these features in pairs. \n\n### `author` and `authors` \nEvery book has an author, even if the author is \"[Anonymous](https://bookshop.org/a/9791/9781538718469),\" so then why do we essentially have two columns for the same thing? \n\n:thinking: `author` is for books with a single writer whereas `authors` is for books with multiple authors like [_Good Omens_](https://bookshop.org/a/9791/9780060853983).\n\nLet's test that theory. ", "_____no_output_____" ] ], [ [ "msno.matrix(df_strings.loc[:, ['author', 'authors']]);", "_____no_output_____" ] ], [ [ "*Bazinga!* \n\nWe have a perfect correlation between missing data for `author` and `authors` but lets' have a look just in case. ", "_____no_output_____" ] ], [ [ "df_strings.loc[df_strings['author'].isna() & df_strings[\"authors\"].notna(), ['title', 'author', 'authors']].head()", "_____no_output_____" ], [ "df_strings.loc[df_strings['author'].notna() & df_strings[\"authors\"].isna(), ['title', 'author', 'authors']].head()", "_____no_output_____" ], [ "df_strings.loc[df_strings['author'].notna() & df_strings[\"authors\"].notna(), ['title', 'author', 'authors']].head()", "_____no_output_____" ] ], [ [ "My curiosity is satiated.\n\nNow the question is how to successfully merge the two columns? 
\n\nWe could replace the `NaN` in `author` with the:\n- values in `authors`\n- word `multiple`\n- first author in `authors`\n- more/most popular of the authors in `authors`\n\nand I'm sure I could come up with even more if I thought about/Googled it but the key is to understand that no matter what we choose, it will have consequences when we build our model{% fn 3 %}. \n\nNext question which comes to mind is:\n\n:thinking: ***How many different authors are there?***", "_____no_output_____" ] ], [ [ "df_strings.loc[:, 'author'].nunique()", "_____no_output_____" ] ], [ [ "Wow! Nearly half our our observations contain a unique name meaning this feature has [high cardinality](https://www.kdnuggets.com/2016/08/include-high-cardinality-attributes-predictive-model.html).\n\n:thinking: ***Which authors are most represented in the data set?*** \n\nLets create a [frequency table](https://www.mathsteacher.com.au/year8/ch17_stat/03_freq/freq.htm) to find out. ", "_____no_output_____" ] ], [ [ "author_counts = df_strings.loc[:, [\"title\", 'author']].groupby('author').count().reset_index()\nauthor_counts.sort_values('title', ascending=False).head(10)", "_____no_output_____" ] ], [ [ "Given that I've scraped the data from a website focusing on children, teens, and young adults, the results above only make sense; authors like [Dr. Seuss](https://bookshop.org/contributors/dr-seuss), [Eoin Coifer](https://bookshop.org/contributors/eoin-colfer-20dba4fd-138e-477e-bca5-75b9fa9bfe2f), and [Lemony Snicket](https://bookshop.org/books?keywords=lemony+snicket) are famous children's authors whereas [Rick Riordan](https://bookshop.org/books?keywords=percy+jackson), [Walter Dean Myers](https://bookshop.org/books?keywords=Walter+Dean+Myers) occupy the teen/young adult space and [Neil Gaiman](https://bookshop.org/contributors/neil-gaiman) writes across ages. \n\n:thinking: ***How many authors are only represented once?*** \n\nThat's easy to check.", "_____no_output_____" ] ], [ [ "from matplotlib.ticker import FuncFormatter\n\nax = author_counts['title'].value_counts(normalize=True).nlargest(5).plot.barh()\nax.invert_yaxis();\n\n#Set the x-axis to a percentage\nax.xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{:.0%}'.format(x))) ", "_____no_output_____" ] ], [ [ "Wow! So approximately 60% of the authors have one title in our data set. \n\n**Why does that matter?**\n\nWhen it comes time to build our model we'll need to either [label encode](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html), [one-hot encode](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html), or [hash](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.FeatureHasher.html) this feature and whichever we decide to do will end up effecting the model profoundly due to the high [cardinality](https://pkghosh.wordpress.com/2017/10/09/combating-high-cardinality-features-in-supervised-machine-learning/) of this feature; however, we'll deal with all this another time :grin:. \n\n### `illustrator` and `illustrators`\n\nMissing values can be quite informative. \n\n:thinking: What types of books typically have illustrators? \n:bulb: Children's books! 
\n\nTherefore, if a book's entries for both `illustrator` and `illustrators` is blank, that *probably* means that book doesn't have illustrations which would mean it is *more likely* to be for older children.\n\nLet's test this theory in the simplest way I can think of :smile: ", "_____no_output_____" ] ], [ [ "#Has an illustrator\ndf.loc[df['illustrator'].notna() | df['illustrators'].notna(), ['csm_rating']].hist();", "_____no_output_____" ], [ "#Doesn't have an illustrator\ndf.loc[df['illustrators'].isna() & df[\"illustrator\"].isna(), ['csm_rating']].hist();", "_____no_output_____" ] ], [ [ ":bulb: *Who* the illustrator is doesn't matter as much as *whether* there is an illustrator.\n\nLooks like when I do some feature engineering I'll need to create a `has_illustrator` feature.", "_____no_output_____" ], [ "### `book_type` and `genre`\n\nThese two features should be relatively straightforward but we'll have a quick look anyway. \n\n`book_type` should be easy because, after a cursory inspection using `head` above, I'd expect to only see 'fiction' or 'non-fiction' but I'll double check. ", "_____no_output_____" ] ], [ [ "ax_book_type = df_strings['book_type'].value_counts().plot.barh();\nax_book_type.invert_yaxis()", "_____no_output_____" ] ], [ [ "Good! The only values I have are the ones I expected but the ratio is highly skewed. \n\n:thinking: What impact will this have on our model?\n\n`genre` (e.g. fantasy, romance, sci-fi) is a *far* broader topic than `booktype` but how many different genres are represented in the data set? ", "_____no_output_____" ] ], [ [ "df_strings['genre'].nunique()", "_____no_output_____" ] ], [ [ ":roll_eyes: Great\n\nWhat's the breakdown?", "_____no_output_____" ] ], [ [ "ax_genre = df_strings['genre'].value_counts().plot.barh();\nax_genre.invert_yaxis()", "_____no_output_____" ] ], [ [ "That's not super useful but what if I took 10 most common genres? ", "_____no_output_____" ] ], [ [ "ax_genre_10 = df_strings['genre'].value_counts(normalize=True).nlargest(10).plot.barh();\nax_genre_10.invert_yaxis()\n\n#Set the x axis to percentage\nax_genre_10.xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{:.0%}'.format(x))) ", "_____no_output_____" ] ], [ [ "Hmmm. Looks like approximately half the books fall into one of three genres. \n\n:bulb: To reduce dimensionality, recode any genre outside of the top 10 as 'other'.\n\nWill save that idea for the feature engineering stage. ", "_____no_output_____" ], [ "### `award` and `awards`\n\nSince certain awards (e.g. [The Caldecott Medal](https://cloviscenter.libguides.com/children/caldecott#:~:text=The%20Medal%20shall%20be%20awarded,the%20illustrations%20be%20original%20work.)) are only awarded to children's books whereas others, namely [The RITA Award](https://en.wikipedia.org/wiki/RITA_Award#Winners) is only for \"mature\" readers. \n\n:thinking: Will knowing if a work is an award winner provide insight? \n:thinking: Which awards are represented? ", "_____no_output_____" ] ], [ [ "award_ax = df_strings['award'].value_counts().plot.barh()\naward_ax.invert_yaxis();", "_____no_output_____" ], [ "awards_ax = df_strings['awards'].str.split(\",\").explode().str.strip().value_counts().plot.barh()\nawards_ax.invert_yaxis()", "_____no_output_____" ] ], [ [ "Hmmmmm. The Caldecott Medal is for picture books so that should mean the target readers are very young; however, we've already seen that \"picture books\" is the second most common value in `genre` so being a Caldecott Medal winner won't add much. 
Also, to be eligible for the other awards, a book needs to be aimed a t children 14 or below so that doesn't really tell us much either. \n\nConclusion: drop this feature. \n\nWhile I could keep going and analyze `publisher`, `publishers`, and `available_on`, I'd be using the exact same techniques as above so, instead, time to move on to...\n\n## Text\n\n### `description`, `plot`, `csm_review`, `need_to_know`\n\nNow for some REALLY fun stuff! \n\n:thinking: How long are each of these observations? \n\nTrying to be as efficient as possible, I'll: \n- make a list of the features I want", "_____no_output_____" ] ], [ [ "variables = ['description', 'plot', 'csm_review', 'need_to_know']", "_____no_output_____" ] ], [ [ "- write a function to:\n - convert the text to lowercase \n - tokenize the text and remove [stop words](https://en.wikipedia.org/wiki/Stop_words)\n - identify the length of each feature", "_____no_output_____" ] ], [ [ "from nltk import word_tokenize\nfrom nltk.corpus import stopwords\n\nstop = stopwords.words('english')\n\ndef text_process(df, feature): \n df.loc[:, feature+'_tokens'] = df.loc[:, feature].apply(str.lower)\n df.loc[:, feature+'_tokens'] = df.loc[:, feature+'_tokens'].apply(lambda x: [item for item in x.split() if item not in stop])\n df.loc[:, feature+'_len'] = df.loc[:, feature+'_tokens'].apply(len) \n return df", "_____no_output_____" ] ], [ [ "- loop through the list of variables saving it to the data frame", "_____no_output_____" ] ], [ [ "for var in variables: \n df_text = text_process(df_strings, var)", "_____no_output_____" ], [ "df_text.iloc[:, -8:].head()", "_____no_output_____" ] ], [ [ ":thinking: `description` seems to be significantly shorter than the other three. \n\nLet's plot them to investigate. ", "_____no_output_____" ] ], [ [ "len_columns = df_text.columns.str.endswith('len')\ndf_text.loc[:,len_columns].hist();\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "Yep - `description` is significantly shorter but how do the other three compare. ", "_____no_output_____" ] ], [ [ "columns = ['plot_len', 'need_to_know_len', 'csm_review_len']\ndf_text[columns].plot.box()\nplt.xticks(rotation='vertical');", "_____no_output_____" ] ], [ [ "Hmmm. Lots of outliers for `csm_review` but, in general, the three features are of similar lengths.\n\n### Next Steps\n\nWhile I could create [word clouds](https://www.datacamp.com/community/tutorials/wordcloud-python) to visualize the most frequent words for each feature, or calculate the [sentiment](https://towardsdatascience.com/a-complete-exploratory-data-analysis-and-visualization-for-text-data-29fb1b96fb6a) of each feature, my stated goal is to identify how old someone should be to read a book and not whether a review is good or bad.\n\nTo that end, my curiosity about these features is satiated so I'm ready to move on to another chapter. ", "_____no_output_____" ], [ "## Summary\n\n - :ballot_box_with_check: numeric data \n - :ballot_box_with_check: categorical data\n - :black_square_button: images (book covers)\n\nTwo down; one to go! \n\nGoing forward, my key points to remember are: \n\n### What type of categorical data do I have?\nThere is a huge difference between ordered (i.e. \"bad\", \"good\", \"great\") and truly nominal data that has no order/ranking like different genres; just because ***I*** prefer science fiction to fantasy, it doesn't mean it actually ***is*** superior. \n\n### Are missing values really missing? 
\nSeveral of the features had missing values which were, in fact, not truly missing; for example, the `award` and `awards` features were mostly blank for a very good reason: the book didn't win one of the four awards recognized by Common Sense Media. \n\nIn conclusion, both of the points above can be summarized simply as \"be sure to get to know your data.\" \n\nHappy coding!", "_____no_output_____" ], [ "#### Footnotes\n{{ 'Adapted from [_Engineering Statistics Handbook_](https://www.itl.nist.gov/div898/handbook/eda/section1/eda11.htm)' | fndetail: 1 }} \n{{ 'Be sure to check out this excellent [post](https://beta.deepnote.com/article/sidetable-pandas-methods-you-didnt-know-you-needed) by Jeff Hale for more examples on how to use this package' | fndetail: 2 }} \n{{ 'See this post on [Smarter Ways to Encode Categorical Data](https://towardsdatascience.com/smarter-ways-to-encode-categorical-data-for-machine-learning-part-1-of-3-6dca2f71b159)' | fndetail: 3 }} \n{{ 'Big *Thank You* to [Chaim Gluck](https://medium.com/@chaimgluck1/working-with-pandas-fixing-messy-column-names-42a54a6659cd) for providing this tip' | fndetail: 4 }}", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
4af6b9d2552565e091096f159f190c0f54246ff0
562,753
ipynb
Jupyter Notebook
sequential/annotated_prtr.ipynb
mlpc-ucsd/PRTR
64970b88e204fbdba757303747fe7370540aa09a
[ "Apache-2.0" ]
115
2021-04-15T02:53:20.000Z
2022-03-30T06:19:40.000Z
sequential/annotated_prtr.ipynb
w-sugar/prtr
43ec7413712aad3e88eb54f76f39395e6fa66d48
[ "Apache-2.0" ]
12
2021-04-29T13:50:57.000Z
2022-03-27T15:30:46.000Z
sequential/annotated_prtr.ipynb
w-sugar/prtr
43ec7413712aad3e88eb54f76f39395e6fa66d48
[ "Apache-2.0" ]
25
2021-04-16T01:47:15.000Z
2022-03-31T08:28:03.000Z
1,833.071661
548,476
0.956249
[ [ [ "from IPython.display import Image\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math, random\nfrom scipy.optimize import linear_sum_assignment\nfrom utils import NestedTensor, nested_tensor_from_tensor_list, MLP\n \nImage(filename=\"figs/model.png\", retina=True)", "_____no_output_____" ] ], [ [ "This notebook provides a Pytorch implementation for the sequential variant of PRTR (Pose Regression TRansformers) in [Pose Recognition with Cascade Transformers](https://arxiv.org/abs/2104.06976). \n\nIt is intended to provide researchers interested in sequential PRTR with a concrete understanding that only code can deliver. It can also be used as a starting point for end-to-end top-down pose estimation research.", "_____no_output_____" ] ], [ [ "class PRTR_sequential(nn.Module):\n def __init__(self, backbone, transformer, transformer_kpt, level, x_res=10, y_res=10):\n super().__init__()\n self.backbone = backbone\n self.transformer = transformer\n hidden_dim = transformer.d_model\n self.class_embed = nn.Linear(hidden_dim, 2)\n self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)\n self.query_embed = nn.Embedding(100, hidden_dim)\n self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)\n self.transformer_kpt = transformer_kpt\n x_interpolate = torch.linspace(-1.25, 1.25, x_res, requires_grad=False).unsqueeze(0) # [1, x_res], ANNOT ?(1)\n y_interpolate = torch.linspace(-1.25, 1.25, y_res, requires_grad=False).unsqueeze(0) # [1, y_res]\n self.register_buffer(\"x_interpolate\", x_interpolate)\n self.register_buffer(\"y_interpolate\", y_interpolate)\n self.x_res = x_res\n self.y_res = y_res\n self.level = level\n mask = torch.zeros(1, y_res, x_res, requires_grad=False) # [1, y_res, x_res]\n self.register_buffer(\"mask\", mask)\n self.build_pe()", "_____no_output_____" ] ], [ [ "Class `PRTR_sequential` needs the following arguments:\n+ backbone: a customizable CNN backbone which returns a pyramid of feature maps with different spatial size\n+ transformer: a customizable Transformer for person detection (1st Transformer)\n+ transformer_kpt: a customizable Transformer for keypoint detection (2nd Transformer)\n+ level: from which layers of pyramid we will extract features\n+ x_res: the width of STN-cropped featrure map fed to 2nd Transformer\n+ y_res: the height of STN-cropped featrure map fed to 2nd Transformer\n\nSome annotations:\n1. 
For `x_interpolate` and `y_interpolate`, we use an extended eyesight of 125% to the orginal boudning box to provide more information from backbone to the 2nd Transformer.", "_____no_output_____" ] ], [ [ "def build_pe(self):\n # fixed sine pe\n not_mask = 1 - self.mask\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n\n eps = 1e-6; scale = 2 * math.pi # normalize?\n y_embed = y_embed / (y_embed[:, -1:, :] + eps) * scale\n x_embed = x_embed / (x_embed[:, :, -1:] + eps) * scale\n\n num_pos_feats = 128; temperature = 10000\n dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=self.mask.device)\n dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)\n\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n self.register_buffer(\"pe\", pos)\n\n # learnable pe\n self.row_embed = nn.Embedding(num_pos_feats, self.x_res)\n self.col_embed = nn.Embedding(num_pos_feats, self.y_res)\n nn.init.uniform_(self.row_embed.weight)\n nn.init.uniform_(self.col_embed.weight)\n\ndef get_leant_pe(self):\n y_embed = self.col_embed.weight.unsqueeze(-1).expand(-1, -1, self.x_res)\n x_embed = self.row_embed.weight.unsqueeze(1).expand(-1, self.y_res, -1)\n embed = torch.cat([y_embed, x_embed], dim=0).unsqueeze(0)\n return embed\n\nPRTR_sequential.build_pe = build_pe\nPRTR_sequential.get_leant_pe = get_leant_pe", "_____no_output_____" ] ], [ [ "Then we build positional embedding for the 2nd Transformer, which ensembles both fixed sinusoidal embedding and learnt embedding. 
\nFor each box containing person cropped from original image, we use the same positional embedding, irrelevent to where the box is.", "_____no_output_____" ] ], [ [ "def forward(self, samples):\n # the 1st Transformer, to detect person\n features, pos = self.backbone(samples)\n hs = self.transformer(self.input_proj(features[-1].tensors), features[-1].mask, self.query_embed.weight, pos[-1])[0][-1] # [B, person per image, f]\n logits = self.class_embed(hs) # [B, person per image, 2]\n bboxes = self.bbox_embed(hs).sigmoid() # [B, person per image, 4]\n outputs = {'pred_logits': logits, 'pred_boxes': bboxes}\n\n # some preperation for STN feature cropping\n person_per_image = hs.size(1)\n num_person = person_per_image * hs.size(0)\n heights, widths = samples.get_shape().unbind(-1) # [B] * 2\n rh = heights.repeat_interleave(person_per_image) # [person per image * B]\n rw = widths.repeat_interleave(person_per_image) # [person per image * B]\n srcs = [features[_].decompose()[0] for _ in self.level]\n cx, cy, w, h = bboxes.flatten(end_dim=1).unbind(-1) # [person per image * B] * 4\n cx, cy, w, h = cx * rw, cy * rh, w * rw, h * rh # ANNOT (1)\n\n # STN cropping\n y_grid = (h.unsqueeze(-1) @ self.y_interpolate + cy.unsqueeze(-1) * 2 - 1).unsqueeze(-1).unsqueeze(-1) # [person per image * B, y_res, 1, 1]\n x_grid = (w.unsqueeze(-1) @ self.x_interpolate + cx.unsqueeze(-1) * 2 - 1).unsqueeze(-1).unsqueeze(1) # [person per image * B, 1, x_res, 1]\n grid = torch.cat([x_grid.expand(-1, self.y_res, -1, -1), y_grid.expand(-1, -1, self.x_res, -1)], dim=-1)\n cropped_feature = []\n cropped_pos = []\n for j, l in enumerate(self.level):\n cropped_feature.append(F.grid_sample(srcs[j].expand(num_person, -1, -1, -1), grid, padding_mode=\"border\")) # [person per image * B, C, y_res, x_res]\n cropped_feature = torch.cat(cropped_feature, dim=1)\n cropped_pos.append(self.pe.expand(num_person, -1, -1, -1))\n cropped_pos.append(self.get_leant_pe().expand(num_person, -1, -1, -1))\n cropped_pos = torch.cat(cropped_pos, dim=1)\n mask = self.mask.bool().expand(num_person, -1, -1) # ANNOT (2)\n \n # 2nd Transformer\n coord, logtis = self.transformer_kpt(bboxes, cropped_feature, cropped_pos, mask) # [person per image * B, 17, 2]\n\n outputs[\"pred_kpt_coord\"] = coord.reshape(hs.size(0), -1, self.transformer_kpt.num_queries, 2)\n outputs[\"pred_kpt_logits\"] = logtis.reshape(hs.size(0), -1, self.transformer_kpt.num_queries, self.transformer_kpt.num_kpts + 1)\n return outputs\n\nPRTR_sequential.forward = forward", "_____no_output_____" ] ], [ [ "`forward` method takes in a `NestedTensor` and returns a dictionary of predictions, some annotations:\n1. Input `samples` and `features` are `NestedTensor`, which basically stacks a list of tensors of different shapes by their top-left corner and uses masks to denote valid positions. Thus when we need to crop person bounding boxes from the whole feature map, we need to scale boxes according to image size\n2. 
we always gives unmasked image to the 2nd Transformer, becasue all the persons are cropped to the same resolution", "_____no_output_____" ] ], [ [ "def infer(self, samples):\n self.eval()\n outputs = self(samples)\n out_logits, out_coord = outputs['pred_kpt_logits'], outputs['pred_kpt_coord']\n\n C_stacked = out_logits[..., 1:].transpose(2, 3).flatten(0, 1).detach().cpu().numpy() # [person per image * B, 17, num queries (for keypoint)]\n out_coord = out_coord.flatten(0, 1)\n coord_holder = []\n for b, C in enumerate(C_stacked):\n _, query_ind = linear_sum_assignment(-C)\n coord_holder.append(out_coord[b, query_ind.tolist()])\n matched_coord = torch.stack(coord_holder, dim=0).reshape(out_logits.size(0), out_logits.size(1), 17, -1)\n return matched_coord # [B, num queries, num kpts, 2]\n\nPRTR_sequential.infer = infer", "_____no_output_____" ] ], [ [ "`infer` takes the same input as `forward`, but instead of returning all keypoint queries for loss calculaiton, it leverages a Hungarian algorithm to select the 17 keytpoints as prediction. \nThe selection process can be thought of as a bipartite graph matching problem, graph constructed as below:\n+ for each query in 2nd Transformer a node is made, creating set Q\n+ for each keypoint type, a node is made, creating set K\n+ set Q and K are fully inter-connected, edge weight between $Q_i$ and $K_j$ are the _unnormalized logits_ of query $i$ classified as keypoint type $k$\n+ Q, K have no intra-connection, \n\nHungarian algorithm will find the matching between Q and K with highest edge weights, selected queries are returned as prediction. A minimal example with only 3 queries and 2 keypoint types are shown as below:\n\n![](figs/readout.png)", "_____no_output_____" ] ], [ [ "class DETR_kpts(nn.Module):\n def __init__(self, transformer, num_kpts, num_queries, input_dim):\n super().__init__()\n self.num_kpts = num_kpts\n self.num_queries = num_queries\n hidden_dim = transformer.d_model\n self.query_embed = nn.Embedding(num_queries, hidden_dim)\n self.input_proj = nn.Conv2d(input_dim, hidden_dim, kernel_size=1)\n self.transformer = transformer\n self.coord_predictor = MLP(hidden_dim, hidden_dim, 2, num_layers=3)\n self.class_predictor = nn.Linear(hidden_dim, num_kpts + 1)\n\n def forward(self, bboxes, features, pos, mask):\n src_proj = self.input_proj(features)\n j_embed = self.transformer(src_proj, mask, self.query_embed.weight, pos)[0][-1] # [B, num queries, hidden dim]\n j_coord_ = self.coord_predictor(j_embed).sigmoid()\n x, y = j_coord_.unbind(-1) # [B, Q] * 2\n x = (x * 1.25 - 0.625) * bboxes[:, 2].unsqueeze(-1) + bboxes[:, 0].unsqueeze(-1)\n y = (y * 1.25 - 0.625) * bboxes[:, 3].unsqueeze(-1) + bboxes[:, 1].unsqueeze(-1)\n x = x.clamp(0, 1)\n y = y.clamp(0, 1)\n j_coord = torch.stack([x, y], dim=-1)\n j_class = self.class_predictor(j_embed[-1]) # [B, J, c+1], logits\n return j_coord, j_class", "_____no_output_____" ] ], [ [ "Class `DETR_kpts` is the 2nd Transformer in PRTR and needs the following arguments:\n+ transformer: a customizable Transformer for keypoint detection (2nd Transformer)\n+ num_kpts: number of keypoint annotations per person of this dataset, e.g., COCO has 17 keypoints\n+ num_queries: query number, similar to DETR\n+ input_dim: image feature dimension from 1st Transformer\n\nIts `forward` takes in `bboxes` becasue we need to recover per-person prediction to whole image coordinates, `features`, `pos` and `mask` for Transformer input. 
\n`forward` returns predicted keypoint coordinates in [0, 1], relative to the whole image, together with each query's probability of belonging to each keypoint class, e.g. nose or left shoulder.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4af6ba2fc5eb2933bc23d565835f01eac97926a0
32,482
ipynb
Jupyter Notebook
notebooks/dev/dev_Baltic.ipynb
gmacgilchrist/wmt_bgc
5ccb6f5c59786809b95909144ff02ebf3781318f
[ "MIT" ]
1
2020-10-16T12:33:39.000Z
2020-10-16T12:33:39.000Z
notebooks/dev/dev_Baltic.ipynb
gmacgilchrist/wmt_bgc
5ccb6f5c59786809b95909144ff02ebf3781318f
[ "MIT" ]
1
2020-09-03T10:44:55.000Z
2020-09-03T20:23:48.000Z
notebooks/dev/dev_Baltic.ipynb
gmacgilchrist/wmt_bgc
5ccb6f5c59786809b95909144ff02ebf3781318f
[ "MIT" ]
1
2020-09-24T23:16:40.000Z
2020-09-24T23:16:40.000Z
161.60199
25,644
0.868881
[ [ [ "## Baltic test case configuration\nDiagnostics output to close heat, salt, thickness budgets, and derive watermass transformation. \nThis notebook is a working space to explore that output.", "_____no_output_____" ] ], [ [ "import xarray as xr\nimport numpy as np\nfrom xhistogram.xarray import histogram", "_____no_output_____" ], [ "### Data loading, grabbed from MOM6-analysis cookbook\n# Load data on native grid\nrootdir = '/archive/gam/MOM6-examples/ice_ocean_SIS2/Baltic_OM4_025/tutorial_wmt/'\ngridname = 'natv'\nprefix = '19000101.ocean_'\n# Diagnostics were saved into different files\nsuffixs = ['thck','heat','salt','surf','xtra']\nds = xr.Dataset()\nfor suffix in suffixs:\n filename = prefix+gridname+'_'+suffix+'*.nc'\n dsnow = xr.open_mfdataset(rootdir+filename)\n ds = xr.merge([ds,dsnow])\ngridname = '19000101.ocean_static.nc'\ngrid = xr.open_dataset(rootdir+gridname).squeeze()\n# Specify constants for the reference density and the specific heat capacity\nrho0 = 1035.\nCp = 3992.", "_____no_output_____" ], [ "# Specify the diffusive tendency terms\nprocesses=['boundary forcing','vertical diffusion','neutral diffusion',\n 'frazil ice','internal heat']\nterms = {}\nterms['heat'] = {'boundary forcing':'boundary_forcing_heat_tendency',\n 'vertical diffusion':'opottempdiff',\n 'neutral diffusion':'opottemppmdiff',\n 'frazil ice':'frazil_heat_tendency',\n 'internal heat':'internal_heat_heat_tendency'}\nterms['salt'] = {'boundary forcing':'boundary_forcing_salt_tendency',\n 'vertical diffusion':'osaltdiff',\n 'neutral diffusion':'osaltpmdiff',\n 'frazil ice':None,\n 'internal heat':None}\nterms['thck'] = {'boundary forcing':'boundary_forcing_h_tendency',\n 'vertical diffusion':None,\n 'neutral diffusion':None,\n 'frazil ice':None,\n 'internal heat':None}\n\ncolors = {'boundary forcing':'tab:blue',\n 'vertical diffusion':'tab:orange',\n 'neutral diffusion':'tab:green',\n 'frazil ice':'tab:red',\n 'internal heat':'tab:purple'}", "_____no_output_____" ] ], [ [ "***\n11/11/20 gmac \nIn equating the content tendency output by the model with the tendency of the materially conserved tracer (e.g. heat tendency and temperature), I think I am making an error by not accomodating changes in thickness. The product rule shows clearly that $h\\dot{\\lambda} \\neq \\dot{(h\\lambda)}$, and it is the LHS that we wish to have in the WMT expression. Here, try applying a correction for $\\lambda\\dot{h}$. \n*[But, look again carefully at MOM5_elements, Eq. 36.87, equates the two. There is no thickness rate of change on the LHS. This is true due to continuity, **except** in the presence of a surface volume flux. 
This is what is then explored in Section 36.8.6.]* ", "_____no_output_____" ] ], [ [ "G_prior = xr.Dataset()\nG = xr.Dataset()\nbudget = 'salt'\n\n# Specify the tracer, its range and bin widths (\\delta\\lambda) for the calculation\nif budget == 'heat':\n tracer = ds['temp']\n delta_l = 0.2\n lmin = -2\n lmax = 10\nelif budget == 'salt':\n tracer = ds['salt']\n delta_l = 0.2\n lmin = 2\n lmax = 36\nbins = np.arange(lmin,lmax,delta_l)\n\nfor process in processes:\n term = terms[budget][process]\n if term is not None:\n nanmask = np.isnan(ds[term])\n tendency = ds[term]\n if budget == 'heat':\n tendency /= Cp*rho0\n \n # Calculate G prior to thickness correction\n G_prior[process] = histogram(tracer.where(~nanmask).squeeze(),\n bins=[bins],\n dim=['xh','yh','zl'],\n weights=(\n rho0*(tendency\n )*grid['areacello']\n ).where(~nanmask).squeeze()\n )/np.diff(bins)\n \n # Accomodate thickness changes if nonzero\n term_thck = terms['thck'][process]\n if term_thck is not None:\n tendency -= tracer*ds[term_thck]\n \n G[process] = histogram(tracer.where(~nanmask).squeeze(),\n bins=[bins],\n dim=['xh','yh','zl'],\n weights=(\n rho0*(tendency\n )*grid['areacello']\n ).where(~nanmask).squeeze()\n )/np.diff(bins)", "_____no_output_____" ], [ "for process in G.data_vars:\n G_prior[process].plot(label=process,color=colors[process],linestyle=':')\n G[process].plot(label=process,color=colors[process])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4af6baf4681662bfa73d3a2ed413f3f84e4095ba
70,748
ipynb
Jupyter Notebook
shp.ipynb
Tripleler/NationalPark_Project
d614aa66299b0be9dd6ae4b00ed1c34f41a5d37f
[ "MIT" ]
null
null
null
shp.ipynb
Tripleler/NationalPark_Project
d614aa66299b0be9dd6ae4b00ed1c34f41a5d37f
[ "MIT" ]
null
null
null
shp.ipynb
Tripleler/NationalPark_Project
d614aa66299b0be9dd6ae4b00ed1c34f41a5d37f
[ "MIT" ]
null
null
null
41.763872
5,644
0.45279
[ [ [ "# !pip install pyshp\n# !pip install pyproj", "_____no_output_____" ], [ "import shapefile #the pyshp module : Should install pyshp module.\nimport pandas as pd\nfrom pyproj import Proj, transform #Should install pyproj module.", "_____no_output_____" ], [ "# read data (Copy all files from nodelink into nodelink folder: I made it.)\n# using old_data\nshp_path_node = 'C:/과제\\project/정밀관리도/정밀관리도(곽형진)/NLPRK_DPMM-YR201701/GSTN_PMNTN_PT.shp'\nsf_node = shapefile.Reader(shp_path_node)\n# shp_path_link = './...경로.../MOCT_LINK.shp'\n# sf_link = shapefile.Reader(shp_path_link)\n# construct pandas dataframe\n#grab the shapefile's field names\n# node\nfields_node = [x[0] for x in sf_node.fields][1:]\nrecords_node = sf_node.records()\nshps = [s.points for s in sf_node.shapes()] # node has coordinate data.\n# link\n# fields_link = [x[0] for x in sf_link.fields][1:]\n# records_link = sf_link.records()\n\n#write the records into a dataframe\n#node\nnode_dataframe = pd.DataFrame(columns=fields_node, data=records_node)\n#add the coordinate data to a column called \"coords\"\nnode_dataframe = node_dataframe.assign(coords=shps)\n# link\n# link_dataframe = pd.DataFrame(columns=fields_link, data=records_link)", "_____no_output_____" ], [ "node_dataframe.head()", "_____no_output_____" ], [ "node_dataframe.shape", "_____no_output_____" ], [ "node_dataframe.info()", "_____no_output_____" ], [ "node_dataframe.COURSE_ID", "_____no_output_____" ], [ "bookhansan=node_dataframe[node_dataframe.ID_CD.str[:4]=='1501']", "_____no_output_____" ], [ "bookhansan", "_____no_output_____" ], [ "shapefile.Writer(bookhansan, 'bookhansan.shp')", "_____no_output_____" ], [ "df=pd.read_csv('C:/Users/tripleler/Desktop/등산로.csv')\ndf.head()", "_____no_output_____" ], [ "df.COS_KOR_NM.unique()", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "import geopandas as gpd", "_____no_output_____" ], [ "bookhansan.head()", "_____no_output_____" ], [ "gdf=gpd.points_from_xy(bookhansan.LONGITUDE, bookhansan.LATITUDE)", "_____no_output_____" ], [ "gdf", "_____no_output_____" ], [ "import pyproj\nfrom fiona.crs import from_epsg", "_____no_output_____" ], [ "bookhansan.drop(['LONGITUDE','LATITUDE'], axis=1)", "_____no_output_____" ], [ "gdf = gpd.GeoDataFrame(bookhansan.drop(['LONGITUDE','LATITUDE'], axis=1), geometry=gdf, crs=from_epsg(4326))\n#df_cctv_gdf.info()\ngdf.head()", "C:\\anaconda\\lib\\site-packages\\pyproj\\crs\\crs.py:68: FutureWarning: '+init=<authority>:<code>' syntax is deprecated. '<authority>:<code>' is the preferred initialization method. When making the change, be mindful of axis order changes: https://pyproj4.github.io/pyproj/stable/gotchas.html#axis-order-changes-in-proj-6\n return _prepare_from_string(\" \".join(pjargs))\n" ], [ "gdf2 = gdf.to_crs(epsg=3857) #좌표계를 epsg 3857로 변환\ngdf2.plot(color='gray')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af6c5938ebca2d81984be80b0627cc2fec5b0cc
34,449
ipynb
Jupyter Notebook
123456789.ipynb
JongHyun2332/ipympl
718365341f524c986ab4e9e627eb1d53d124c710
[ "BSD-3-Clause" ]
null
null
null
123456789.ipynb
JongHyun2332/ipympl
718365341f524c986ab4e9e627eb1d53d124c710
[ "BSD-3-Clause" ]
null
null
null
123456789.ipynb
JongHyun2332/ipympl
718365341f524c986ab4e9e627eb1d53d124c710
[ "BSD-3-Clause" ]
null
null
null
35.187947
227
0.409068
[ [ [ "<a href=\"https://colab.research.google.com/github/JongHyun2332/ipympl/blob/master/123456789.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv('credit_cards_dataset.csv')", "_____no_output_____" ], [ "df.head(3)", "_____no_output_____" ], [ "df.tail(3)", "_____no_output_____" ], [ "import xgboost as xgb", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "X = df.drop(['default.payment.next.month'], axis = 1).values", "_____no_output_____" ], [ "X[:,-4:-1]", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "y = df['default.payment.next.month'].values", "_____no_output_____" ], [ "X_train, X_test, Y_train, Y_test = train_test_split(X, y, train_size = 0.7, random_state = 100)", "_____no_output_____" ], [ "X_train.shape", "_____no_output_____" ], [ "y = df['default.payment.next.month'].values", "_____no_output_____" ] ], [ [ "XGB Matrix 설정", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ], [ "dtrain = xgb.DMatrix(X_train, label=Y_train)", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "dtest = xgb.DMatrix(X_test, label=Y_test)", "_____no_output_____" ], [ "type(dtrain)", "_____no_output_____" ], [ "param = {'max_depth' : 10, 'eta' : 0.01, 'objective' : 'binary:logistic'}\nparam['nthread'] = 4\nparam['eval_metric'] = 'auc'", "_____no_output_____" ], [ "evallist = [(dtest,'eval'),(dtrain,'train')]", "_____no_output_____" ], [ "num_round = 200\nbst = xgb.train(param, dtrain, num_round, evallist)", "[0]\teval-auc:0.757028\ttrain-auc:0.816388\n[1]\teval-auc:0.765205\ttrain-auc:0.825584\n[2]\teval-auc:0.765845\ttrain-auc:0.828532\n[3]\teval-auc:0.765386\ttrain-auc:0.828403\n[4]\teval-auc:0.766596\ttrain-auc:0.829558\n[5]\teval-auc:0.766115\ttrain-auc:0.830237\n[6]\teval-auc:0.766968\ttrain-auc:0.830992\n[7]\teval-auc:0.766464\ttrain-auc:0.831067\n[8]\teval-auc:0.7671\ttrain-auc:0.831211\n[9]\teval-auc:0.767116\ttrain-auc:0.831453\n[10]\teval-auc:0.767181\ttrain-auc:0.832088\n[11]\teval-auc:0.767108\ttrain-auc:0.832493\n[12]\teval-auc:0.767423\ttrain-auc:0.832884\n[13]\teval-auc:0.767413\ttrain-auc:0.833082\n[14]\teval-auc:0.767837\ttrain-auc:0.833463\n[15]\teval-auc:0.768482\ttrain-auc:0.834105\n[16]\teval-auc:0.769166\ttrain-auc:0.836252\n[17]\teval-auc:0.769738\ttrain-auc:0.837551\n[18]\teval-auc:0.769788\ttrain-auc:0.837683\n[19]\teval-auc:0.76995\ttrain-auc:0.838515\n[20]\teval-auc:0.770344\ttrain-auc:0.838786\n[21]\teval-auc:0.770651\ttrain-auc:0.839631\n[22]\teval-auc:0.771436\ttrain-auc:0.841769\n[23]\teval-auc:0.771904\ttrain-auc:0.842282\n[24]\teval-auc:0.772444\ttrain-auc:0.843569\n[25]\teval-auc:0.772896\ttrain-auc:0.845051\n[26]\teval-auc:0.77312\ttrain-auc:0.845217\n[27]\teval-auc:0.773533\ttrain-auc:0.846242\n[28]\teval-auc:0.773884\ttrain-auc:0.846947\n[29]\teval-auc:0.774179\ttrain-auc:0.848363\n[30]\teval-auc:0.774421\ttrain-auc:0.848458\n[31]\teval-auc:0.774631\ttrain-auc:0.849587\n[32]\teval-auc:0.774884\ttrain-auc:0.850536\n[33]\teval-auc:0.775089\ttrain-auc:0.851377\n[34]\teval-auc:0.775265\ttrain-auc:0.852067\n[35]\teval-auc:0.775732\ttrain-auc:0.852778\n[36]\teval-auc:0.775753\ttrain-auc:0.853448\n[37]\teval-auc:0.775862\ttrain-auc:0.853953\n[38]\teval-auc:0.775915\ttrain-auc:0.854485\n[39]\teval-auc:0.776335\ttrain-auc:0.854971\n[40]\teval-auc:0.776286\ttrain-auc:0.855386\n[41]\teval-auc:0.776585\t
train-auc:0.855786\n[42]\teval-auc:0.776463\ttrain-auc:0.856264\n[43]\teval-auc:0.776716\ttrain-auc:0.8566\n[44]\teval-auc:0.776732\ttrain-auc:0.857025\n[45]\teval-auc:0.776844\ttrain-auc:0.857418\n[46]\teval-auc:0.776942\ttrain-auc:0.857761\n[47]\teval-auc:0.776792\ttrain-auc:0.858049\n[48]\teval-auc:0.77694\ttrain-auc:0.858358\n[49]\teval-auc:0.776996\ttrain-auc:0.858559\n[50]\teval-auc:0.776836\ttrain-auc:0.858953\n[51]\teval-auc:0.776859\ttrain-auc:0.859315\n[52]\teval-auc:0.776914\ttrain-auc:0.85954\n[53]\teval-auc:0.777007\ttrain-auc:0.859898\n[54]\teval-auc:0.777162\ttrain-auc:0.860099\n[55]\teval-auc:0.777241\ttrain-auc:0.860466\n[56]\teval-auc:0.777184\ttrain-auc:0.86086\n[57]\teval-auc:0.777207\ttrain-auc:0.861068\n[58]\teval-auc:0.777323\ttrain-auc:0.861539\n[59]\teval-auc:0.777349\ttrain-auc:0.861796\n[60]\teval-auc:0.777306\ttrain-auc:0.862056\n[61]\teval-auc:0.777443\ttrain-auc:0.86243\n[62]\teval-auc:0.777619\ttrain-auc:0.862619\n[63]\teval-auc:0.777707\ttrain-auc:0.863072\n[64]\teval-auc:0.777788\ttrain-auc:0.863509\n[65]\teval-auc:0.777955\ttrain-auc:0.86384\n[66]\teval-auc:0.777905\ttrain-auc:0.864052\n[67]\teval-auc:0.77793\ttrain-auc:0.864449\n[68]\teval-auc:0.777938\ttrain-auc:0.864834\n[69]\teval-auc:0.778028\ttrain-auc:0.865266\n[70]\teval-auc:0.778084\ttrain-auc:0.865694\n[71]\teval-auc:0.778081\ttrain-auc:0.866083\n[72]\teval-auc:0.778073\ttrain-auc:0.866395\n[73]\teval-auc:0.778118\ttrain-auc:0.866814\n[74]\teval-auc:0.778143\ttrain-auc:0.867119\n[75]\teval-auc:0.778071\ttrain-auc:0.867365\n[76]\teval-auc:0.778208\ttrain-auc:0.867833\n[77]\teval-auc:0.778249\ttrain-auc:0.868317\n[78]\teval-auc:0.778217\ttrain-auc:0.868682\n[79]\teval-auc:0.778253\ttrain-auc:0.869185\n[80]\teval-auc:0.778177\ttrain-auc:0.869396\n[81]\teval-auc:0.778141\ttrain-auc:0.869627\n[82]\teval-auc:0.778043\ttrain-auc:0.869907\n[83]\teval-auc:0.778121\ttrain-auc:0.870451\n[84]\teval-auc:0.778212\ttrain-auc:0.870849\n[85]\teval-auc:0.778327\ttrain-auc:0.871347\n[86]\teval-auc:0.77834\ttrain-auc:0.871597\n[87]\teval-auc:0.778364\ttrain-auc:0.87183\n[88]\teval-auc:0.778374\ttrain-auc:0.872283\n[89]\teval-auc:0.778462\ttrain-auc:0.872733\n[90]\teval-auc:0.77854\ttrain-auc:0.873056\n[91]\teval-auc:0.778571\ttrain-auc:0.873495\n[92]\teval-auc:0.778551\ttrain-auc:0.873919\n[93]\teval-auc:0.778617\ttrain-auc:0.874256\n[94]\teval-auc:0.778682\ttrain-auc:0.874646\n[95]\teval-auc:0.778715\ttrain-auc:0.875001\n[96]\teval-auc:0.778758\ttrain-auc:0.875712\n[97]\teval-auc:0.778759\ttrain-auc:0.876348\n[98]\teval-auc:0.778786\ttrain-auc:0.877005\n[99]\teval-auc:0.778746\ttrain-auc:0.877588\n[100]\teval-auc:0.778792\ttrain-auc:0.878168\n[101]\teval-auc:0.778697\ttrain-auc:0.878718\n[102]\teval-auc:0.778735\ttrain-auc:0.879371\n[103]\teval-auc:0.778721\ttrain-auc:0.88005\n[104]\teval-auc:0.778641\ttrain-auc:0.880731\n[105]\teval-auc:0.778601\ttrain-auc:0.881133\n[106]\teval-auc:0.778687\ttrain-auc:0.881676\n[107]\teval-auc:0.778641\ttrain-auc:0.88222\n[108]\teval-auc:0.778687\ttrain-auc:0.882608\n[109]\teval-auc:0.778703\ttrain-auc:0.883119\n[110]\teval-auc:0.778692\ttrain-auc:0.883635\n[111]\teval-auc:0.778726\ttrain-auc:0.884241\n[112]\teval-auc:0.778854\ttrain-auc:0.88484\n[113]\teval-auc:0.778934\ttrain-auc:0.885408\n[114]\teval-auc:0.778881\ttrain-auc:0.885908\n[115]\teval-auc:0.77885\ttrain-auc:0.886489\n[116]\teval-auc:0.778851\ttrain-auc:0.886883\n[117]\teval-auc:0.778769\ttrain-auc:0.887465\n[118]\teval-auc:0.778693\ttrain-auc:0.888124\n[119]\teval-auc:0.778639\ttrain-auc:0.889042\n[120]\teval-auc:0.77
8627\ttrain-auc:0.889446\n[121]\teval-auc:0.778588\ttrain-auc:0.890189\n[122]\teval-auc:0.778657\ttrain-auc:0.891023\n[123]\teval-auc:0.778587\ttrain-auc:0.891494\n[124]\teval-auc:0.778589\ttrain-auc:0.892305\n[125]\teval-auc:0.778612\ttrain-auc:0.893098\n[126]\teval-auc:0.778612\ttrain-auc:0.893553\n[127]\teval-auc:0.77864\ttrain-auc:0.894353\n[128]\teval-auc:0.778624\ttrain-auc:0.895005\n[129]\teval-auc:0.778547\ttrain-auc:0.895708\n[130]\teval-auc:0.778521\ttrain-auc:0.896133\n[131]\teval-auc:0.778541\ttrain-auc:0.896749\n[132]\teval-auc:0.778498\ttrain-auc:0.897363\n[133]\teval-auc:0.778431\ttrain-auc:0.897776\n[134]\teval-auc:0.778425\ttrain-auc:0.89833\n[135]\teval-auc:0.778385\ttrain-auc:0.898934\n[136]\teval-auc:0.778406\ttrain-auc:0.899308\n[137]\teval-auc:0.778314\ttrain-auc:0.899775\n[138]\teval-auc:0.778295\ttrain-auc:0.900126\n[139]\teval-auc:0.778201\ttrain-auc:0.900497\n[140]\teval-auc:0.778171\ttrain-auc:0.900844\n[141]\teval-auc:0.778135\ttrain-auc:0.901128\n[142]\teval-auc:0.778276\ttrain-auc:0.901547\n[143]\teval-auc:0.778265\ttrain-auc:0.902067\n[144]\teval-auc:0.778341\ttrain-auc:0.902461\n[145]\teval-auc:0.778307\ttrain-auc:0.902773\n[146]\teval-auc:0.7783\ttrain-auc:0.903261\n[147]\teval-auc:0.778316\ttrain-auc:0.9036\n[148]\teval-auc:0.778461\ttrain-auc:0.903919\n[149]\teval-auc:0.778562\ttrain-auc:0.904287\n[150]\teval-auc:0.778495\ttrain-auc:0.904627\n[151]\teval-auc:0.778496\ttrain-auc:0.904927\n[152]\teval-auc:0.778577\ttrain-auc:0.905252\n[153]\teval-auc:0.778673\ttrain-auc:0.905809\n[154]\teval-auc:0.778771\ttrain-auc:0.906299\n[155]\teval-auc:0.77881\ttrain-auc:0.906843\n[156]\teval-auc:0.778899\ttrain-auc:0.907272\n[157]\teval-auc:0.778965\ttrain-auc:0.907744\n[158]\teval-auc:0.77901\ttrain-auc:0.90814\n[159]\teval-auc:0.779063\ttrain-auc:0.908654\n[160]\teval-auc:0.779132\ttrain-auc:0.909041\n[161]\teval-auc:0.779098\ttrain-auc:0.909455\n[162]\teval-auc:0.779171\ttrain-auc:0.909904\n[163]\teval-auc:0.779244\ttrain-auc:0.910315\n[164]\teval-auc:0.779199\ttrain-auc:0.910734\n[165]\teval-auc:0.77922\ttrain-auc:0.911108\n[166]\teval-auc:0.779267\ttrain-auc:0.911564\n[167]\teval-auc:0.779309\ttrain-auc:0.911921\n[168]\teval-auc:0.779274\ttrain-auc:0.912264\n[169]\teval-auc:0.779327\ttrain-auc:0.912572\n[170]\teval-auc:0.779413\ttrain-auc:0.912954\n[171]\teval-auc:0.779465\ttrain-auc:0.913224\n[172]\teval-auc:0.779468\ttrain-auc:0.913591\n[173]\teval-auc:0.779384\ttrain-auc:0.914012\n[174]\teval-auc:0.779424\ttrain-auc:0.914281\n[175]\teval-auc:0.77945\ttrain-auc:0.914659\n[176]\teval-auc:0.779515\ttrain-auc:0.914941\n[177]\teval-auc:0.77957\ttrain-auc:0.9153\n[178]\teval-auc:0.779693\ttrain-auc:0.915636\n[179]\teval-auc:0.77971\ttrain-auc:0.915952\n[180]\teval-auc:0.779721\ttrain-auc:0.916264\n[181]\teval-auc:0.779846\ttrain-auc:0.916598\n[182]\teval-auc:0.779863\ttrain-auc:0.916909\n[183]\teval-auc:0.779873\ttrain-auc:0.917199\n[184]\teval-auc:0.779914\ttrain-auc:0.917444\n[185]\teval-auc:0.779903\ttrain-auc:0.917689\n[186]\teval-auc:0.77993\ttrain-auc:0.917976\n[187]\teval-auc:0.779873\ttrain-auc:0.918215\n[188]\teval-auc:0.779854\ttrain-auc:0.918487\n[189]\teval-auc:0.77986\ttrain-auc:0.9188\n[190]\teval-auc:0.779889\ttrain-auc:0.919197\n[191]\teval-auc:0.779921\ttrain-auc:0.919488\n[192]\teval-auc:0.77993\ttrain-auc:0.919906\n[193]\teval-auc:0.779903\ttrain-auc:0.920115\n[194]\teval-auc:0.779886\ttrain-auc:0.920387\n[195]\teval-auc:0.779907\ttrain-auc:0.920755\n[196]\teval-auc:0.779905\ttrain-auc:0.921054\n[197]\teval-auc:0.779885\ttrain-auc:0.921331\n[198]\t
eval-auc:0.779896\ttrain-auc:0.921678\n[199]\teval-auc:0.779932\ttrain-auc:0.921881\n" ], [ "bst.save_model('0001.model')", "_____no_output_____" ], [ "bst.load_model('./0001.model')", "_____no_output_____" ], [ "ypred = bst.predict(dtest)", "_____no_output_____" ], [ "ypred[:3]", "_____no_output_____" ], [ "import numpy as np\npredictions = np.round(ypred)", "_____no_output_____" ], [ "predictions", "_____no_output_____" ], [ "from sklearn.metrics import recall_score", "_____no_output_____" ], [ "recall_score(Y_test, predictions)", "_____no_output_____" ], [ "xgb.plot_importance(bst)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af6c7c31879965cdea24f1b38653c8d88b57fed
93,216
ipynb
Jupyter Notebook
Model_Performance.ipynb
Ninjaneer1/theworks
8c44a07fd114e793a19d677953c0037ebf857533
[ "MIT" ]
null
null
null
Model_Performance.ipynb
Ninjaneer1/theworks
8c44a07fd114e793a19d677953c0037ebf857533
[ "MIT" ]
null
null
null
Model_Performance.ipynb
Ninjaneer1/theworks
8c44a07fd114e793a19d677953c0037ebf857533
[ "MIT" ]
null
null
null
117.105528
21,012
0.857921
[ [ [ "## 1. Import necessary packages\n\nFor this exercise we need\n\n* pandas\n* train_test_split\n* LogisticRegression\n* pyplot from matplotlib\n* KNeighborsClassifier\n* LogisticRegressionClassifier\n* RandomForestClassifier\n* DummyClassifier", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, fbeta_score, classification_report\nfrom sklearn.metrics import roc_curve, precision_recall_curve, roc_auc_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## 2. Load and prepare the dataset\n", "_____no_output_____" ], [ "* Load the training data into a dataframe named df_train_data (this step is done for you).\n* Create binary classification problem - rename some class labels (this step done for you).\n* Create a dataframe of 9 features named X.\n* Create a data frame of labels named y.\n* Split the data into a training set and a test set.", "_____no_output_____" ] ], [ [ "url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/statlog/shuttle/shuttle.tst'\ndf = pd.read_csv(url, header=None, sep=' ')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.loc[df[9] != 4, 9] = 0\ndf.loc[df[9] == 4, 9] = 1\n\nX = df.drop([9], axis=1)\ny = df[9]\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nprint('There are {} training samples and {} test samples'.format(X_train.shape[0], X_test.shape[0]))", "There are 10875 training samples and 3625 test samples\n" ] ], [ [ "## Create the model", "_____no_output_____" ], [ "* Instantiate a Logistic Regression classifier with a lbfgs solver.\n* Fit the classifier to the data.", "_____no_output_____" ] ], [ [ "lr = LogisticRegression(solver='lbfgs', penalty='none', max_iter=1000)\nlr.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "## 4. Calculate Accuracy", "_____no_output_____" ] ], [ [ "lr.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "## 5. Dummy Classifier", "_____no_output_____" ] ], [ [ "dummy = DummyClassifier(strategy = 'uniform')\ndummy.fit(X_train, y_train)\ndummy.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "## 6. 
Confusion Matrix", "_____no_output_____" ] ], [ [ "y_pred = lr.predict(X_test)\nconfusion = confusion_matrix(y_test, y_pred)\nprint(confusion)", "[[2960 128]\n [ 506 31]]\n" ], [ "\ndef plot_confusion_matrix(cm,\n target_names,\n title='Confusion matrix',\n cmap=None,\n normalize=True):\n \"\"\"\n given a sklearn confusion matrix (cm), make a nice plot\n\n Arguments\n ---------\n cm: confusion matrix from sklearn.metrics.confusion_matrix\n\n target_names: given classification classes such as [0, 1, 2]\n the class names, for example: ['high', 'medium', 'low']\n\n title: the text to display at the top of the matrix\n\n cmap: the gradient of the values displayed from matplotlib.pyplot.cm\n see http://matplotlib.org/examples/color/colormaps_reference.html\n plt.get_cmap('jet') or plt.cm.Blues\n\n normalize: If False, plot the raw numbers\n If True, plot the proportions\n\n Usage\n -----\n plot_confusion_matrix(cm = cm, # confusion matrix created by\n # sklearn.metrics.confusion_matrix\n normalize = True, # show proportions\n target_names = y_labels_vals, # list of names of the classes\n title = best_estimator_name) # title of graph\n\n Citiation\n ---------\n http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n import itertools\n\n accuracy = np.trace(cm) / float(np.sum(cm))\n misclass = 1 - accuracy\n\n if cmap is None:\n cmap = plt.get_cmap('Blues')\n\n plt.figure(figsize=(8, 6))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n\n if target_names is not None:\n tick_marks = np.arange(len(target_names))\n plt.xticks(tick_marks, target_names, rotation=45)\n plt.yticks(tick_marks, target_names)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n else:\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n\n plt.tight_layout()\n plt.ylabel('Predicted label')\n plt.xlabel('True label\\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))\n plt.show()", "_____no_output_____" ] ], [ [ "## 7. Plot a nicer confusion matrix (Use the plt_confusion_matrix function)", "_____no_output_____" ] ], [ [ "plot_confusion_matrix(cm=confusion, target_names = ['Positive', 'Negative'], title = 'Confusion Matrix',normalize=False)", "_____no_output_____" ] ], [ [ "## 8. 
Calculate Metrics", "_____no_output_____" ] ], [ [ "accuracy = accuracy_score(y_test, y_pred)\nprecision = precision_score(y_test, y_pred)\nrecall = recall_score(y_test, y_pred)\nf1 = f1_score(y_test, y_pred)\nfbeta_precision = fbeta_score(y_test, y_pred, 0.5)\nfbeta_recall = fbeta_score(y_test, y_pred, 2)\n\nprint('Accuracy score: {}'.format(accuracy))\nprint('Precision score: {}'.format(precision))\nprint('Recall score: {}'.format(recall))\nprint('F1 score: {}'.format(f1))\nprint('Fbeta score favoring precision: {}'.format(fbeta_precision))\nprint('FBeta score favoring recall: {}'.format(fbeta_recall))", "Accuracy score: 0.825103448275862\nPrecision score: 0.1949685534591195\nRecall score: 0.05772811918063315\nF1 score: 0.08908045977011494\nFbeta score favoring precision: 0.1321398124467178\nFBeta score favoring recall: 0.06718682271348071\n" ] ], [ [ "## 9. Print a classification report", "_____no_output_____" ] ], [ [ "report = classification_report(y_test, y_pred, target_names=['Negative', 'Positive'])\nprint(report)", " precision recall f1-score support\n\n Negative 0.85 0.96 0.90 3088\n Positive 0.19 0.06 0.09 537\n\n accuracy 0.83 3625\n macro avg 0.52 0.51 0.50 3625\nweighted avg 0.76 0.83 0.78 3625\n\n" ] ], [ [ "## 10. Plot ROC Curve and AUC", "_____no_output_____" ] ], [ [ "probs = lr.predict_proba(X_test)[:, 1]\nfpr, tpr, thresholds = roc_curve(y_test, probs)\nfig = plt.figure(figsize = (6, 6))\nplt.plot([0, 1], [0, 1], 'k--')\nplt.plot(fpr, tpr)\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC curve for Logistic Regression Model')\nplt.show()\n\nauc = roc_auc_score(y_test, probs)\nprint('Area under the ROC curve: {:.3f}'.format(auc))", "_____no_output_____" ] ], [ [ "## 11. Plot Precision-Recall Curve", "_____no_output_____" ] ], [ [ "pres, rec, thresholds = precision_recall_curve(y_test, y_pred)\nfig = plt.figure(figsize = (6, 6))\nplt.plot(rec, pres)\nplt.xlabel('Recall')\nplt.ylabel('Precision')\nplt.title('Precision-Recall Curve')\nplt.show()", "_____no_output_____" ], [ "c_vals = np.arange(0.05, 1.5, 0.05)\ntest_accuracy = []\ntrain_accuracy = []\n\nfor c in c_vals:\n lr = LogisticRegression(solver='lbfgs', penalty='l2', C=c, max_iter=1000)\n lr.fit(X_train, y_train)\n test_accuracy.append(lr.score(X_test, y_test))\n train_accuracy.append(lr.score(X_train, y_train))\n\nfig = plt.figure(figsize=(8, 4))\nax1 = fig.add_subplot(1, 1, 1)\nax1.plot(c_vals, test_accuracy, '-g', label='Test Accuracy')\nax1.plot(c_vals, train_accuracy, '-b', label='Train Accuracy')\nax1.set(xlabel='C', ylabel='Accuracy')\nax1.set_title('Effect of C on Accuracy')\nax1.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "## 12. Cross Validation", "_____no_output_____" ] ], [ [ "clf = LogisticRegression(solver='lbfgs', max_iter=1000)\ncv_scores = cross_val_score(clf, X_train, y_train, cv = 5)\n\nprint('Accuracy scores for the 5 folds: ', cv_scores)\nprint('Mean cross validation score: {:.3f}'.format(np.mean(cv_scores)))", "Accuracy scores for the 5 folds: [0.81609195 0.83356322 0.82850575 0.82436782 0.82114943]\nMean cross validation score: 0.825\n" ] ], [ [ "## 13. Is this really linear?", "_____no_output_____" ] ], [ [ "knn = KNeighborsClassifier(n_neighbors=7)\n\n# Then fit the model\nknn.fit(X_train, y_train)\n\n# How well did we do\nknn_7_score = knn.score(X_test, y_test)\n\nprint('Accuracy of KNN (k = 7): {:.3f}'.format(knn_7_score))", "Accuracy of KNN (k = 7): 0.999\n" ] ], [ [ "## 14. 
Random Forest", "_____no_output_____" ] ], [ [ "rf = RandomForestClassifier(n_estimators = 22, random_state = 40)\n\nrf.fit(X_train,y_train)\n\nrf_score = rf.score(X_test, y_test)\n\nprint('Accuracy of Random Forest: {:.3f}'.format(rf_score))", "Accuracy of Random Forest: 1.000\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
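As a quick cross-check of the metrics reported in the shuttle notebook above: precision, recall, F1, and accuracy can all be recovered by hand from the printed 2x2 confusion matrix. The Python sketch below does that arithmetic with the counts from the notebook's own printout (rows are true labels, columns are predictions); it reproduces the reported values of roughly 0.195, 0.058, 0.089, and 0.825 and is included only as an illustration of what those metrics mean.

import numpy as np

# Counts copied from the notebook's printed confusion matrix.
cm = np.array([[2960, 128],
               [506, 31]])
tn, fp, fn, tp = cm.ravel()

precision = tp / (tp + fp)                            # 31 / 159
recall = tp / (tp + fn)                               # 31 / 537
f1 = 2 * precision * recall / (precision + recall)
accuracy = (tp + tn) / cm.sum()

print(precision, recall, f1, accuracy)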
4af6ceaf0b05295213f752340dc951d36597b41e
45,902
ipynb
Jupyter Notebook
BertCnnFinal.ipynb
cosminnedescu/Hate-Speech-Detection-in-Social-Media
693f707217d21179a877f561228e8f3c85fb3acc
[ "MIT" ]
null
null
null
BertCnnFinal.ipynb
cosminnedescu/Hate-Speech-Detection-in-Social-Media
693f707217d21179a877f561228e8f3c85fb3acc
[ "MIT" ]
null
null
null
BertCnnFinal.ipynb
cosminnedescu/Hate-Speech-Detection-in-Social-Media
693f707217d21179a877f561228e8f3c85fb3acc
[ "MIT" ]
1
2021-12-27T09:12:31.000Z
2021-12-27T09:12:31.000Z
54.257683
175
0.440112
[ [ [ "\"\"\"\n @Time : 15/12/2020 19:01\n @Author : Alaa Grable\n \"\"\"\n\n!pip install transformers==3.0.0\n!pip install emoji\nimport gc\n#import os\nimport emoji as emoji\nimport re\nimport string\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, accuracy_score\nfrom transformers import AutoModel\nfrom transformers import BertModel, BertTokenizer\n\nclass BERT_Arch(nn.Module):\n\n def __init__(self, bert):\n super(BERT_Arch, self).__init__()\n self.bert = BertModel.from_pretrained('bert-base-uncased')\n self.conv = nn.Conv2d(in_channels=13, out_channels=13, kernel_size=(3, 768), padding='valid')\n self.relu = nn.ReLU()\n # change the kernel size either to (3,1), e.g. 1D max pooling\n # or remove it altogether\n self.pool = nn.MaxPool2d(kernel_size=(3, 1), stride=1)\n self.dropout = nn.Dropout(0.1)\n # be careful here, this needs to be changed according to your max pooling\n # without pooling: 443, with 3x1 pooling: 416\n self.fc = nn.Linear(416, 3)\n self.flat = nn.Flatten()\n self.softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, sent_id, mask):\n _, _, all_layers = self.bert(sent_id, attention_mask=mask, output_hidden_states=True)\n # all_layers = [13, 32, 64, 768]\n x = torch.transpose(torch.cat(tuple([t.unsqueeze(0) for t in all_layers]), 0), 0, 1)\n del all_layers\n gc.collect()\n torch.cuda.empty_cache()\n x = self.pool(self.dropout(self.relu(self.conv(self.dropout(x)))))\n x = self.fc(self.dropout(self.flat(self.dropout(x))))\n return self.softmax(x)\n\n\ndef read_dataset():\n data = pd.read_csv(\"/content/BERT-CNN-Fine-Tuning-For-Hate-Speech-Detection-in-Online-Social-Media/labeled_data.csv\")\n data = data.drop(['count', 'hate_speech', 'offensive_language', 'neither'], axis=1)\n #data = data.loc[0:9599,:]\n print(len(data))\n return data['tweet'].tolist(), data['class']\n\n\ndef pre_process_dataset(values):\n new_values = list()\n # Emoticons\n emoticons = [':-)', ':)', '(:', '(-:', ':))', '((:', ':-D', ':D', 'X-D', 'XD', 'xD', 'xD', '<3', '</3', ':\\*',\n ';-)',\n ';)', ';-D', ';D', '(;', '(-;', ':-(', ':(', '(:', '(-:', ':,(', ':\\'(', ':\"(', ':((', ':D', '=D',\n '=)',\n '(=', '=(', ')=', '=-O', 'O-=', ':o', 'o:', 'O:', 'O:', ':-o', 'o-:', ':P', ':p', ':S', ':s', ':@',\n ':>',\n ':<', '^_^', '^.^', '>.>', 'T_T', 'T-T', '-.-', '*.*', '~.~', ':*', ':-*', 'xP', 'XP', 'XP', 'Xp',\n ':-|',\n ':->', ':-<', '$_$', '8-)', ':-P', ':-p', '=P', '=p', ':*)', '*-*', 'B-)', 'O.o', 'X-(', ')-X']\n\n for value in values:\n # Remove dots\n text = value.replace(\".\", \"\").lower()\n text = re.sub(r\"[^a-zA-Z?.!,¿]+\", \" \", text)\n users = re.findall(\"[@]\\w+\", text)\n for user in users:\n text = text.replace(user, \"<user>\")\n urls = re.findall(r'(https?://[^\\s]+)', text)\n if len(urls) != 0:\n for url in urls:\n text = text.replace(url, \"<url >\")\n for emo in text:\n if emo in emoji.UNICODE_EMOJI:\n text = text.replace(emo, \"<emoticon >\")\n for emo in emoticons:\n text = text.replace(emo, \"<emoticon >\")\n numbers = re.findall('[0-9]+', text)\n for number in numbers:\n text = text.replace(number, \"<number >\")\n text = text.replace('#', \"<hashtag >\")\n text = re.sub(r\"([?.!,¿])\", r\" \", text)\n text = \"\".join(l for l in text if l not in string.punctuation)\n text = re.sub(r'[\" \"]+', \" \", text)\n new_values.append(text)\n return new_values\n\n\ndef data_process(data, labels):\n input_ids = []\n attention_masks = []\n bert_tokenizer 
= BertTokenizer.from_pretrained(\"bert-base-uncased\")\n for sentence in data:\n bert_inp = bert_tokenizer.__call__(sentence, max_length=36,\n padding='max_length', pad_to_max_length=True,\n truncation=True, return_token_type_ids=False)\n\n input_ids.append(bert_inp['input_ids'])\n attention_masks.append(bert_inp['attention_mask'])\n #del bert_tokenizer\n #gc.collect()\n #torch.cuda.empty_cache()\n input_ids = np.asarray(input_ids)\n attention_masks = np.array(attention_masks)\n labels = np.array(labels)\n return input_ids, attention_masks, labels\n\n\ndef load_and_process():\n data, labels = read_dataset()\n num_of_labels = len(labels.unique())\n input_ids, attention_masks, labels = data_process(pre_process_dataset(data), labels)\n\n return input_ids, attention_masks, labels\n\n\n# function to train the model\ndef train():\n model.train()\n\n total_loss, total_accuracy = 0, 0\n\n # empty list to save model predictions\n total_preds = []\n\n # iterate over batches\n total = len(train_dataloader)\n for i, batch in enumerate(train_dataloader):\n\n step = i+1\n percent = \"{0:.2f}\".format(100 * (step / float(total)))\n lossp = \"{0:.2f}\".format(total_loss/(total*batch_size))\n filledLength = int(100 * step // total)\n bar = '█' * filledLength + '>' *(filledLength < 100) + '.' * (99 - filledLength)\n print(f'\\rBatch {step}/{total} |{bar}| {percent}% complete, loss={lossp}, accuracy={total_accuracy}', end='')\n\n # push the batch to gpu\n batch = [r.to(device) for r in batch]\n sent_id, mask, labels = batch\n del batch\n gc.collect()\n torch.cuda.empty_cache()\n # clear previously calculated gradients\n model.zero_grad()\n\n # get model predictions for the current batch\n #sent_id = torch.tensor(sent_id).to(device).long()\n preds = model(sent_id, mask)\n\n # compute the loss between actual and predicted values\n loss = cross_entropy(preds, labels)\n\n # add on to the total loss\n total_loss += float(loss.item())\n\n # backward pass to calculate the gradients\n loss.backward()\n\n # clip the the gradients to 1.0. It helps in preventing the exploding gradient problem\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n # update parameters\n optimizer.step()\n\n # model predictions are stored on GPU. So, push it to CPU\n #preds = preds.detach().cpu().numpy()\n\n # append the model predictions\n #total_preds.append(preds)\n total_preds.append(preds.detach().cpu().numpy())\n\n gc.collect()\n torch.cuda.empty_cache()\n\n # compute the training loss of the epoch\n avg_loss = total_loss / (len(train_dataloader)*batch_size)\n\n # predictions are in the form of (no. of batches, size of batch, no. of classes).\n # reshape the predictions in form of (number of samples, no. of classes)\n total_preds = np.concatenate(total_preds, axis=0)\n\n # returns the loss and predictions\n return avg_loss, total_preds\n\n\n# function for evaluating the model\ndef evaluate():\n print(\"\\n\\nEvaluating...\")\n\n # deactivate dropout layers\n model.eval()\n\n total_loss, total_accuracy = 0, 0\n\n # empty list to save the model predictions\n total_preds = []\n\n # iterate over batches\n total = len(val_dataloader)\n for i, batch in enumerate(val_dataloader):\n \n step = i+1\n percent = \"{0:.2f}\".format(100 * (step / float(total)))\n lossp = \"{0:.2f}\".format(total_loss/(total*batch_size))\n filledLength = int(100 * step // total)\n bar = '█' * filledLength + '>' * (filledLength < 100) + '.' 
* (99 - filledLength)\n print(f'\\rBatch {step}/{total} |{bar}| {percent}% complete, loss={lossp}, accuracy={total_accuracy}', end='')\n\n # push the batch to gpu\n batch = [t.to(device) for t in batch]\n\n sent_id, mask, labels = batch\n del batch\n gc.collect()\n torch.cuda.empty_cache()\n # deactivate autograd\n with torch.no_grad():\n\n # model predictions\n preds = model(sent_id, mask)\n\n # compute the validation loss between actual and predicted values\n loss = cross_entropy(preds, labels)\n\n total_loss += float(loss.item())\n #preds = preds.detach().cpu().numpy()\n\n #total_preds.append(preds)\n total_preds.append(preds.detach().cpu().numpy())\n\n gc.collect()\n torch.cuda.empty_cache()\n\n # compute the validation loss of the epoch\n avg_loss = total_loss / (len(val_dataloader)*batch_size)\n\n # reshape the predictions in form of (number of samples, no. of classes)\n total_preds = np.concatenate(total_preds, axis=0)\n\n return avg_loss, total_preds\n\n# Specify the GPU\n# Setting up the device for GPU usage\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint(device)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Load Data-set ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\ninput_ids, attention_masks, labels = load_and_process()\ndf = pd.DataFrame(list(zip(input_ids, attention_masks)), columns=['input_ids', 'attention_masks'])\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ class distribution ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\n# class = class label for majority of CF users. 0 - hate speech 1 - offensive language 2 - neither\n# ~~~~~~~~~~ Split train data-set into train, validation and test sets ~~~~~~~~~~#\ntrain_text, temp_text, train_labels, temp_labels = train_test_split(df, labels,\n random_state=2018, test_size=0.2, stratify=labels)\n\nval_text, test_text, val_labels, test_labels = train_test_split(temp_text, temp_labels,\n random_state=2018, test_size=0.5, stratify=temp_labels)\n\ndel temp_text\ngc.collect()\ntorch.cuda.empty_cache()\n\ntrain_count = len(train_labels)\ntest_count = len(test_labels)\nval_count = len(val_labels)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\n# ~~~~~~~~~~~~~~~~~~~~~ Import BERT Model and BERT Tokenizer ~~~~~~~~~~~~~~~~~~~~~#\n# import BERT-base pretrained model\nbert = AutoModel.from_pretrained('bert-base-uncased')\n# bert = AutoModel.from_pretrained('bert-base-uncased')\n# Load the BERT tokenizer\n#tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tokenization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n# for train set\ntrain_seq = torch.tensor(train_text['input_ids'].tolist())\ntrain_mask = torch.tensor(train_text['attention_masks'].tolist())\ntrain_y = torch.tensor(train_labels.tolist())\n\n# for validation set\nval_seq = torch.tensor(val_text['input_ids'].tolist())\nval_mask = torch.tensor(val_text['attention_masks'].tolist())\nval_y = torch.tensor(val_labels.tolist())\n\n# for test set\ntest_seq = torch.tensor(test_text['input_ids'].tolist())\ntest_mask = torch.tensor(test_text['attention_masks'].tolist())\ntest_y = torch.tensor(test_labels.tolist())\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create DataLoaders ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, 
SequentialSampler\n\n# define a batch size\nbatch_size = 32\n\n# wrap tensors\ntrain_data = TensorDataset(train_seq, train_mask, train_y)\n\n# sampler for sampling the data during training\ntrain_sampler = RandomSampler(train_data)\n\n# dataLoader for train set\ntrain_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)\n\n# wrap tensors\nval_data = TensorDataset(val_seq, val_mask, val_y)\n\n# sampler for sampling the data during training\nval_sampler = SequentialSampler(val_data)\n\n# dataLoader for validation set\nval_dataloader = DataLoader(val_data, sampler=val_sampler, batch_size=batch_size)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Freeze BERT Parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n# freeze all the parameters\nfor param in bert.parameters():\n param.requires_grad = False\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\n# pass the pre-trained BERT to our define architecture\nmodel = BERT_Arch(bert)\n# push the model to GPU\nmodel = model.to(device)\n\n# optimizer from hugging face transformers\nfrom transformers import AdamW\n\n# define the optimizer\noptimizer = AdamW(model.parameters(), lr=2e-5)\n\n#from sklearn.utils.class_weight import compute_class_weight\n\n# compute the class weights\n#class_wts = compute_class_weight('balanced', np.unique(train_labels), train_labels)\n\n#print(class_wts)\n\n# convert class weights to tensor\n#weights = torch.tensor(class_wts, dtype=torch.float)\n#weights = weights.to(device)\n\n# loss function\n#cross_entropy = nn.NLLLoss(weight=weights)\ncross_entropy = nn.NLLLoss()\n\n# set initial loss to infinite\nbest_valid_loss = float('inf')\n\n# empty lists to store training and validation loss of each epoch\n#train_losses = []\n#valid_losses = []\n\n#if os.path.isfile(\"/content/drive/MyDrive/saved_weights.pth\") == False:\n#if os.path.isfile(\"saved_weights.pth\") == False:\n # number of training epochs\nepochs = 3\ncurrent = 1\n# for each epoch\nwhile current <= epochs:\n\n print(f'\\nEpoch {current} / {epochs}:')\n\n # train model\n train_loss, _ = train()\n\n # evaluate model\n valid_loss, _ = evaluate()\n\n # save the best model\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n #torch.save(model.state_dict(), 'saved_weights.pth')\n\n # append training and validation loss\n #train_losses.append(train_loss)\n #valid_losses.append(valid_loss)\n\n print(f'\\n\\nTraining Loss: {train_loss:.3f}')\n print(f'Validation Loss: {valid_loss:.3f}')\n\n current = current + 1\n#else:\n #print(\"Got weights!\")\n # load weights of best model\n #model.load_state_dict(torch.load(\"saved_weights.pth\"))\n #model.load_state_dict(torch.load(\"/content/drive/MyDrive/saved_weights.pth\"), strict=False)\n\n# get predictions for test data\ngc.collect()\ntorch.cuda.empty_cache()\n\nwith torch.no_grad():\n preds = model(test_seq.to(device), test_mask.to(device))\n #preds = model(test_seq, test_mask)\n preds = preds.detach().cpu().numpy()\n\n\nprint(\"Performance:\")\n# model's performance\npreds = np.argmax(preds, axis=1)\nprint('Classification Report')\nprint(classification_report(test_y, preds))\n\nprint(\"Accuracy: \" + str(accuracy_score(test_y, preds)))", "Requirement already satisfied: transformers==3.0.0 in /usr/local/lib/python3.7/dist-packages (3.0.0)\nRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.0) (21.3)\nRequirement already satisfied: 
filelock in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.0) (3.4.0)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.0) (2019.12.20)\nRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.0) (4.62.3)\nRequirement already satisfied: sacremoses in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.0) (0.0.46)\nRequirement already satisfied: tokenizers==0.8.0-rc4 in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.0) (0.8.0rc4)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.0) (1.19.5)\nRequirement already satisfied: sentencepiece in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.0) (0.1.96)\nRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.0) (2.23.0)\nRequirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->transformers==3.0.0) (3.0.6)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==3.0.0) (2.10)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==3.0.0) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==3.0.0) (2021.10.8)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==3.0.0) (3.0.4)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==3.0.0) (1.1.0)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==3.0.0) (1.15.0)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==3.0.0) (7.1.2)\nRequirement already satisfied: emoji in /usr/local/lib/python3.7/dist-packages (1.6.1)\ncuda\n24783\n\nEpoch 1 / 3:\nBatch 620/620 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0\n\nEvaluating...\nBatch 78/78 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0\n\nTraining Loss: 0.011\nValidation Loss: 0.007\n\nEpoch 2 / 3:\nBatch 620/620 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0\n\nEvaluating...\nBatch 78/78 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0\n\nTraining Loss: 0.007\nValidation Loss: 0.008\n\nEpoch 3 / 3:\nBatch 620/620 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0\n\nEvaluating...\nBatch 78/78 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0\n\nTraining Loss: 0.006\nValidation Loss: 0.009\nPerformance:\nClassification Report\n precision recall f1-score support\n\n 0 0.58 0.41 0.48 143\n 1 0.94 0.96 0.95 1919\n 2 0.88 0.89 0.89 417\n\n accuracy 0.92 2479\n macro avg 0.80 0.76 0.77 2479\nweighted avg 0.91 
0.92 0.91 2479\n\nAccuracy: 0.918112141992739\n" ], [ "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmodel", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
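The BERT_Arch class above hard-codes nn.Linear(416, 3), with a comment that the figure changes if the 3x1 max pooling is altered. That 416 follows from simple shape arithmetic on the convolution and pooling layers; the sketch below checks it with random tensors standing in for the stacked BERT hidden states (13 layers, sequence length 36 from the tokenizer call, hidden size 768). It is a shape check under those assumptions, not a working model.

import torch
import torch.nn as nn

batch, layers, seq_len, hidden = 2, 13, 36, 768
x = torch.randn(batch, layers, seq_len, hidden)   # stand-in for the stacked BERT layers

conv = nn.Conv2d(in_channels=13, out_channels=13, kernel_size=(3, 768))  # default padding=0 is 'valid'
pool = nn.MaxPool2d(kernel_size=(3, 1), stride=1)

h = conv(x)             # -> [2, 13, 34, 1]: 36 - 3 + 1 rows, 768 - 768 + 1 column
h = pool(h)             # -> [2, 13, 32, 1]: 34 - 3 + 1 rows
flat = nn.Flatten()(h)  # -> [2, 416], i.e. 13 * 32 * 1 features per example
print(h.shape, flat.shape)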
4af6eb57109f22923a4570847dad52ac2821aba4
26,328
ipynb
Jupyter Notebook
notebooks/20-GEDI-look-at-processed-data.ipynb
Croydon-Brixton/gedi-biomass-mapping
bd6021a8515597d5ce14221afa47758803b4864a
[ "MIT" ]
null
null
null
notebooks/20-GEDI-look-at-processed-data.ipynb
Croydon-Brixton/gedi-biomass-mapping
bd6021a8515597d5ce14221afa47758803b4864a
[ "MIT" ]
null
null
null
notebooks/20-GEDI-look-at-processed-data.ipynb
Croydon-Brixton/gedi-biomass-mapping
bd6021a8515597d5ce14221afa47758803b4864a
[ "MIT" ]
null
null
null
35.578378
1,218
0.531829
[ [ [ "# Convenient jupyter setup\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "from src.constants import GEDI_L2A_PATH\nfrom src.utils.os import list_content\nfrom src.utils.download import download\nfrom tqdm.autonotebook import tqdm\nimport geopandas as gpd\nsave_dir = GEDI_L2A_PATH/ \"v002\" / \"amazon_basin\"", "<ipython-input-3-f1060345d3a8>:4: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n from tqdm.autonotebook import tqdm\n" ], [ "feather_files = list(save_dir.glob(\"*/*.feather\"))\nprint(f\"There are {len(feather_files)} feather files.\")", "There are 5193 feather files.\n" ] ], [ [ "## Count number of total shots", "_____no_output_____" ], [ "Takeaway: \n>It takes about 2-3 seconds to read a simple feather dataframe into geopandas. \nThis means in total it will take about 3-4h to read in all the data.\n\n> From a rough look at the first 100 samples, there will be about 500 Mio - 1 B shots over the Amazon. \n> Exact number: 452'202'228 (450 Mio.)", "_____no_output_____" ], [ "Note: if we just want to get the lenght, we can also read via pandas:", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nn_shots = 0\nfor feather in tqdm(feather_files):\n n_shots += len(pd.read_feather(feather, columns=[\"quality_flag\"]))\n \nprint(n_shots)", "_____no_output_____" ], [ "print(n_shots)", "452202228\n" ] ], [ [ "## Look at a sample of the dataset", "_____no_output_____" ] ], [ [ "feather_files[0].stat().st_size / 1024 / 1024 ", "_____no_output_____" ], [ "sample = gpd.read_feather(feather_files[0])\nsample.head()", "_____no_output_____" ] ], [ [ "## Upload to PostGIS database", "_____no_output_____" ] ], [ [ "from sqlalchemy import create_engine\nimport sqlalchemy as db\nfrom src.constants import DB_CONFIG\n\nengine = create_engine(DB_CONFIG, echo=False) \ngedi_l2a = db.Table(\"level_2a\", db.MetaData(), autoload=True, autoload_with=engine)", "<ipython-input-9-5dbaf001cc78>:6: SAWarning: Did not recognize type 'geometry' of column 'geometry'\n gedi_l2a = db.Table(\"level_2a\", db.MetaData(), autoload=True, autoload_with=engine)\n" ], [ "for i, feather_file in enumerate(tqdm(feather_files[479:])):\n try:\n print(i+479)\n sample = gpd.read_feather(feather_file)\n sample[sample.quality_flag == 1]\n sample.to_postgis(name=\"level_2a\", \n if_exists=\"append\", \n con=engine, \n index=False, \n index_label=\"shot_number\")\n except Exception as e:\n \n print(e)\n continue", "_____no_output_____" ] ], [ [ "## Load from PostGIS", "_____no_output_____" ], [ "### Runtime comparision after uploading only `feather_files[0]`", "_____no_output_____" ] ], [ [ "%%time\ndf = pd.read_sql(gedi_l2a.select(), con=engine) # reads only data, not geometry", "CPU times: user 4.92 s, sys: 318 ms, total: 5.24 s\nWall time: 10.6 s\n" ], [ "%%time\ndf = gpd.read_postgis(gedi_l2a.select(), con=engine, geom_col=\"geometry\") # reads geometry as well", "CPU times: user 6 s, sys: 360 ms, total: 6.36 s\nWall time: 11.2 s\n" ], [ "%%time\nsample = pd.read_feather(feather_files[0], columns=[\"granule_name\"]) # read from feather format (no geometry)", "CPU times: user 83.4 ms, sys: 7.15 ms, total: 90.5 ms\nWall time: 133 ms\n" ], [ "%%time\nsample = gpd.read_feather(feather_files[0], columns=[\"geometry\"]) # read only geometry column from feather format", "CPU times: user 714 ms, sys: 20.3 ms, total: 735 ms\nWall time: 737 ms\n" ], [ "%%time\nsample = gpd.read_feather(feather_files[0])", "CPU 
times: user 732 ms, sys: 104 ms, total: 836 ms\nWall time: 773 ms\n" ] ], [ [ "### Test out sql query", "_____no_output_____" ] ], [ [ "sql = \"SELECT * FROM gedi_l2a\"\ndf = gpd.read_postgis(sql, con=engine)", "2021-06-10 13:25:13,029 INFO sqlalchemy.engine.Engine select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where pg_catalog.pg_table_is_visible(c.oid) and relname=%(name)s\n2021-06-10 13:25:13,030 INFO sqlalchemy.engine.Engine [cached since 169.5s ago] {'name': 'SELECT * FROM gedi_l2a'}\n2021-06-10 13:25:13,036 INFO sqlalchemy.engine.Engine SELECT * FROM gedi_l2a\n2021-06-10 13:25:13,037 INFO sqlalchemy.engine.Engine [raw sql] {}\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
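The GEDI notebook above ends by pulling the whole level_2a table back with a bare SELECT *. For a table holding hundreds of millions of shots it is usually cheaper to push the quality and spatial filters into the SQL itself; the sketch below shows one possible pattern, reusing the notebook's `engine`, table name, and column names. The bounding-box coordinates are placeholders, and it assumes the geometries were written in EPSG:4326.

import geopandas as gpd

# Placeholder area of interest: min_lon, min_lat, max_lon, max_lat (not values from the data).
xmin, ymin, xmax, ymax = -70.0, -10.0, -69.0, -9.0

sql = f"""
    SELECT *
    FROM level_2a
    WHERE quality_flag = 1
      AND geometry && ST_MakeEnvelope({xmin}, {ymin}, {xmax}, {ymax}, 4326)
"""
subset = gpd.read_postgis(sql, con=engine, geom_col="geometry")
print(len(subset))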
4af6ec6bb6ddc202551ffd5c4f8b966272732901
5,942
ipynb
Jupyter Notebook
examples/notebook/sat/assignment_task_sizes_sat.ipynb
jdarlay/or-tools
a41cf1b50f9e777c273133840968cf50434f3bd5
[ "Apache-2.0" ]
1
2022-03-08T22:28:12.000Z
2022-03-08T22:28:12.000Z
examples/notebook/sat/assignment_task_sizes_sat.ipynb
jdarlay/or-tools
a41cf1b50f9e777c273133840968cf50434f3bd5
[ "Apache-2.0" ]
null
null
null
examples/notebook/sat/assignment_task_sizes_sat.ipynb
jdarlay/or-tools
a41cf1b50f9e777c273133840968cf50434f3bd5
[ "Apache-2.0" ]
null
null
null
33.011111
259
0.550151
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4af70ac17464eb7a0c02163e06a6d2001a6bd6c5
12,774
ipynb
Jupyter Notebook
IAyTC/Notas/primer-perceptron.ipynb
BenchHPZ/UG-Compu
fa3551a862ee04b59a5ba97a791f39a77ce2df60
[ "MIT" ]
null
null
null
IAyTC/Notas/primer-perceptron.ipynb
BenchHPZ/UG-Compu
fa3551a862ee04b59a5ba97a791f39a77ce2df60
[ "MIT" ]
null
null
null
IAyTC/Notas/primer-perceptron.ipynb
BenchHPZ/UG-Compu
fa3551a862ee04b59a5ba97a791f39a77ce2df60
[ "MIT" ]
null
null
null
75.585799
9,016
0.796853
[ [ [ "# Ejercicio de clase", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "# DAtos\nd1 = [(0.5, 1.0, 0),\n (1.2, 2.0, 0),\n (2.5, 3.2, 0),\n (3.0, 4.0, 0),\n (4.2, 5.6, 0)]\nd1 = np.array(d1)\n\nd2 = [(1.0, 3.5, 1),\n (1.7, 4.0, 1),\n (2.2, 4.6, 1),\n (3.5, 6.0, 1),\n (4.1, 6.5, 1)]\nd2 = np.array(d2)", "_____no_output_____" ], [ "\ndef condicion_fin(D, w):\n error = []\n \n for data in D:\n y = data[-2]\n x = np.append(data[0:-2], 1)\n \n sigma = 0\n for i in range(len(x)):\n sigma += x[i]*w[i]\n sigma += w[-1]\n error.append(abs(y - sigma))\n \n return np.mean(error)\n \n \n \n\ndef perceptron(D, nu, g, gp):\n n = len(D[0]) -1\n # pesos\n w = np.array(np.random.randint(-3000, 3001, n)/300)\n temp_w = []\n \n for _ in range(100000):\n temp_w.append(w)\n for data in D:\n y = data[-2]\n x = np.append(data[0:-2], 1)\n \n i_n = 0\n for i in range(n):\n i_n += w[i]*x[i]\n o = g(i_n)\n \n for k in range(n):\n w[i] = w[i] + nu*(y - o)* gp(i_n)*x[i]\n \n if condicion_fin(D, w) < 0.01:\n print(_)\n break\n \n plt.plot(D[:,0], D[:,0], 'o')\n \n aprox = lambda x: x*w[0] + w[1]\n paso = 10\n xs = np.linspace( min(D[:,0]), max(D[:,1]), paso, True)\n plt.plot(xs, [aprox(x) for x in xs])\n \n return w, temp_w", "_____no_output_____" ], [ "# Inputs\nnu = 0.001\ng = lambda x: 1/(1 - np.e**(-x))\ngp = lambda x: -np.e**x/(-1 + np.e**x)**2\n\n_, __ = perceptron(d1, nu, g, gp)\n_", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
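Two details in the class-exercise perceptron above are worth flagging: the inner update loop iterates over `k` but only ever writes `w[i]` with the stale index left over from the previous loop, and `condicion_fin` adds the bias weight a second time even though the appended constant input of 1 has already contributed it. The sketch below is a reconstruction of the training loop with those two points corrected, keeping the notebook's convention that `data[-2]` is the target and the remaining leading columns plus a constant 1 are the inputs; the activation `g` and its derivative `gp` are passed in unchanged, and the function name is new.

import numpy as np

def train_unit(D, nu, g, gp, epochs=100000, tol=0.01):
    # Delta-rule training of a single unit; target = row[-2], inputs = row[:-2] plus a bias of 1.
    D = np.asarray(D, dtype=float)
    n = D.shape[1] - 1                       # one weight per input plus one for the bias
    w = np.random.uniform(-10, 10, n)
    for _ in range(epochs):
        for row in D:
            y = row[-2]
            x = np.append(row[:-2], 1.0)     # bias folded in exactly once
            net = float(x @ w)
            o = g(net)
            w = w + nu * (y - o) * gp(net) * x   # update every weight, not just w[i]
        err = np.mean([abs(r[-2] - g(float(np.append(r[:-2], 1.0) @ w))) for r in D])
        if err < tol:                        # same stopping idea as condicion_fin
            break
    return w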
4af7219f5dbece8b0a8d07703121ffe7ce1e69a3
758,856
ipynb
Jupyter Notebook
code/ynacc/08 Threads/Preprocess LM Data Threads_last.ipynb
jfilter/masters-thesis
39a3d9b862444507982cc4ccd98b6809cab72d82
[ "MIT" ]
5
2019-04-24T19:45:07.000Z
2020-12-29T06:40:58.000Z
code/ynacc/08 Threads/Preprocess LM Data Threads_last.ipynb
jfilter/masters-thesis
39a3d9b862444507982cc4ccd98b6809cab72d82
[ "MIT" ]
2
2019-11-05T17:17:38.000Z
2019-11-05T17:17:39.000Z
code/ynacc/08 Threads/Preprocess LM Data Threads_last.ipynb
jfilter/masters-thesis
39a3d9b862444507982cc4ccd98b6809cab72d82
[ "MIT" ]
null
null
null
178.891089
1,038
0.501273
[ [ [ "from pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport swifter\n\nimport cleantext\n\npd.options.display.max_colwidth = 1000", "_____no_output_____" ], [ "OUT = Path('~/data/ynacc_proc/replicate/threads_last')\n\nBASE_PATH = Path('/mnt/data/datasets/ydata-ynacc-v1_0')\nANN1 = BASE_PATH/'ydata-ynacc-v1_0_expert_annotations.tsv'\nANN2 = BASE_PATH/'ydata-ynacc-v1_0_turk_annotations.tsv'\nUNL = BASE_PATH/'ydata-ynacc-v1_0_unlabeled_conversations.tsv'\nTRAIN_IDS = BASE_PATH/'ydata-ynacc-v1_0_train-ids.txt'", "_____no_output_____" ], [ "trainids = pd.read_csv(TRAIN_IDS, header=None)\ndf_an1 = pd.read_table(ANN1)\ndf_an1 = df_an1[df_an1['sdid'].isin(list(trainids[0]))]\ndf_an1 = df_an1[['sdid', 'text', 'commentindex']]\ndf_an1 = df_an1.drop_duplicates()\ndf_an1", "_____no_output_____" ], [ "df_an2 = pd.read_table(ANN2)\ndf_an2 = df_an2[df_an2['sdid'].isin(list(trainids[0]))]\ndf_an2 = df_an2[['sdid', 'text', 'commentindex']]\ndf_an2 = df_an2.drop_duplicates()\ndf_an2", "_____no_output_____" ], [ "df_notan = pd.read_csv(UNL, engine='python', sep='\\t', quoting=3, error_bad_lines=False)\ndf_notan = df_notan[['sdid', 'text', 'commentindex']]", "_____no_output_____" ], [ "# not needed anmoyre\n# df['text'] = df.apply(lambda x: 'xx_root_comment ' + x['text'] if pd.isnull(x['parentid']) else x['text'], axis=1)\n# df['parentid'] = df.apply(lambda x: x['commentid'] if pd.isnull(x['parentid']) else x['parentid'], axis=1)", "_____no_output_____" ], [ "df = pd.concat([df_an1, df_an2, df_notan])\n# clean up\ndf = df.dropna(subset=['text'])\ndf[\"commentindex\"] = pd.to_numeric(df[\"commentindex\"])\ndf", "_____no_output_____" ], [ "df['text'] = df['text'].swifter.apply(lambda x: cleantext.clean(x, lower=False, no_urls=True, no_emails=True, zero_digits=True))", "Pandas Apply: 100%|██████████| 238512/238512 [04:07<00:00, 965.60it/s] \n" ], [ "df = df.drop_duplicates()", "_____no_output_____" ], [ "# get list of all comennts per thread\nres = df.sort_values(by=['commentindex']).groupby('sdid').agg({'text': lambda x: list(x)}).reset_index()", "_____no_output_____" ], [ "res", "_____no_output_____" ], [ "# create all possible thread combinations\nnew_items = []\ndef create_threads(row):\n for i in range(1, len(row['text']) + 1):\n x = row['text'][:i]\n new = 'xx_thread_start ' + ' '.join([ 'xx_comment_start ' + (' xx_last ' + xx if xx == list(x)[-1] else xx) + ' xx_comment_end' for xx in list(x)]) + ' xx_thread_end'\n new_items.append({'text': new, 'sdid': row['sdid']})", "_____no_output_____" ], [ "for _, row in res.iterrows():\n create_threads(row)", "_____no_output_____" ], [ "final = pd.DataFrame(new_items)", "_____no_output_____" ], [ "final", "_____no_output_____" ], [ "# final['text'] = final['text'].swifter.apply(lambda x: clean(x, lower=False))", "_____no_output_____" ], [ "final.groupby('sdid').count()", "_____no_output_____" ], [ "final.shape", "_____no_output_____" ], [ "split_id = 130000\nfinal[\"sdid\"] = pd.to_numeric(final[\"sdid\"])\ntrain = final[final['sdid'] <= split_id][['text']]\nval = final[final['sdid'] > split_id][['text']]", "_____no_output_____" ], [ "train", "_____no_output_____" ], [ "val", "_____no_output_____" ], [ "Path('/home/group7/data/ynacc_proc/replicate/threads_last').mkdir(exist_ok=True) ", "_____no_output_____" ], [ "! 
ls /home/group7/data/ynacc_proc/replicate", "10000\t 30000\t\t40000_ner lmdata\t\t threads_last\r\n10000_ner 30000_ctx_ner\t50000\t lmdata_art_match tmp\r\n20000\t 30000_ctx_ner_fixed\t50000_ner lmmodels\r\n20000_ner 30000_ner\t\t60000\t lmmodels2\r\n20k\t 30000_threads\t60000_ner split\r\n20k_ner 40000\t\t60000_threads threads\r\n" ], [ "train.to_csv(OUT/'train.csv', index=False)", "_____no_output_____" ], [ "val.to_csv(OUT/'val.csv', index=False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
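The create_threads helper in the comment-threads notebook above flattens every prefix of a conversation into a single string with xx_thread_start / xx_comment_start / xx_last markers. The toy sketch below re-implements just that string assembly on two dummy comments so the resulting format is easy to see; spacing is normalized slightly relative to the original concatenation.

def build_thread(comments):
    # Wrap each comment, mark the final one with xx_last, then wrap the whole thread.
    parts = []
    for c in comments:
        body = 'xx_last ' + c if c == comments[-1] else c
        parts.append('xx_comment_start ' + body + ' xx_comment_end')
    return 'xx_thread_start ' + ' '.join(parts) + ' xx_thread_end'

print(build_thread(['first comment', 'a reply to it']))
# xx_thread_start xx_comment_start first comment xx_comment_end xx_comment_start xx_last a reply to it xx_comment_end xx_thread_end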
4af7374d60393d058c898958e82ccbadc3590719
438,530
ipynb
Jupyter Notebook
Lasso.ipynb
memphis-iis/datawhys-intern-notebooks-2021
d88a8225a2cae2f8fe86c8d72d8ae3bfd0b1a44b
[ "Apache-2.0" ]
null
null
null
Lasso.ipynb
memphis-iis/datawhys-intern-notebooks-2021
d88a8225a2cae2f8fe86c8d72d8ae3bfd0b1a44b
[ "Apache-2.0" ]
null
null
null
Lasso.ipynb
memphis-iis/datawhys-intern-notebooks-2021
d88a8225a2cae2f8fe86c8d72d8ae3bfd0b1a44b
[ "Apache-2.0" ]
null
null
null
314.583931
368,264
0.925341
[ [ [ "Copyright 2020 Natasha A. Sahr, Andrew M. Olney and made available under [CC BY-SA](https://creativecommons.org/licenses/by-sa/4.0) for text and [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) for code.\n", "_____no_output_____" ], [ "# Ridge and Lasso Regression: L1 and L2 penalization", "_____no_output_____" ], [ "## Regularization\n\nUp to this point, we've focused on relatively small numbers of predictors in our models.\nWhen we have datasets with large numbers of predictors, we need to think about new techniques to deal with the additional complexity.\nPart of the reason is that our highly manual methods no longer scale well.\nImagine if you had to make and evaluate plots for 1000 variables!\nThe other part is that once we have many variables, the chances of them interacting with each other in very complicated ways gets increasingly larger.\n\nWe talked about one \"bad\" kind of interaction before, multicolinearity.\nMulticolinearity occurs when two variables mostly measure the same thing.\nThe problem with multicolinearity in linear models is that the variables involved will no longer have unique solutions for their estimated coefficients.\nWhat this means in practice is that multicolinearity is a small, but manageable problem for small datasets, but multicolinearity becomes a very serious problem for large datasets, at least for linear models, which arguably are the most important models in science.\n\nToday we will talk about two methods that address the complexity of having many variables, including multicolinearity.\nBoth of these methods use a \"big idea\" in data science called **regularization**.\nThe idea behind regularization is that you **penalize** complex models in favor of simpler ones.\nThese simpler models use fewer variables, making them easier to understand.\nIf you penalization is set up in the right way, the simpler models can also avoid multicolinearity problems.\nToday we will focus on ridge and lasso regression, but it is important to remember that many other models use similar regularization techniques. \nOnce you know to look for it, you will start to see it everywhere!\n\n## What you will learn\n\nIn the sections that follow, you will learn about ridge and lasso regularization and how they can help us assess a large number of variables in for candidacy in regression models by penalizing variables that don't contribute a large effect in the variability of the outcome. We will study the following:\n\n- Ridge regression with the L2 penalty\n- Lasso regression with the L1 penalty\n- Assessing model accuracy\n- Comparing regularized models \n- Selection of the tuning parameter $\\lambda$\n\n## When to use regularization/penalization in regression\n\nRegularization is a general strategy that applies a penalty in the optimization of a regression model. 
With the correct tuning parameter selection, it will prevent overfitting a model to a particular dataset and improve the potential for generalization to new datasets.\nRegularization becomes particularly important in regression where there are large numbers of predictors because it can mitigate multicollinearity and cause shrinkage (for L2) or encourage sparsity (for L1) of the coefficients for variables that contribute less to the prediction of the outcome.", "_____no_output_____" ], [ "## Vanilla logistic regression\n\nLet's start by applying logistic regression to some breast cancer data.\nThis model will serve as a baseline for comparison to the ridge and lasso models that come later.\nWe're going to get the breast cancer data from `sklearn` instead of loading a CSV.\nLibraries like `sklearn` frequently come with their own datasets for demonstration purposes.\n\n### Load data\n\nThe [data](https://scikit-learn.org/stable/datasets/index.html#breast-cancer-dataset) consists of the following variables as mean, standard error, and \"worst\" (mean of three largest variables) collected by digital imagery of a biopsy.\n\n| Variable | Type | Description |\n|:-------|:-------|:-------|\n|radius | Ratio | mean of distances from center to points on the perimeter|\n|texture | Ratio | standard deviation of gray-scale values|\n|perimeter | Ratio | perimeter of cancer|\n|area | Ratio | area of cancer|\n|smoothness | Ratio | local variation in radius lengths|\n|compactness | Ratio | perimeter^2 / area - 1.0|\n|concavity | Ratio | severity of concave portions of the contour|\n|concave points | Ratio | number of concave portions of the contour|\n|symmetry | Ratio | symmetry of cancer|\n|fractal dimension | Ratio | \"coastline approximation\" - 1|\n| class | Nominal (binary) | malignant (1) or benign (0)\n\n<div style=\"text-align:center;font-size: smaller\">\n <b>Source:</b> This dataset was taken from the <a href=\"https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)\">UCI Machine Learning Repository library\n </a>\n</div>\n<br>\n\nWe want to predict the presence/absence of cancer, so it makes sense to use logistic regression rather than linear regression.\nRidge and lasso penalties work the same way in both kinds of regression.\n\nFirst, import libraries for dataframes and to load the dataset: \n\n- `import pandas as pd`\n- `import sklearn.datasets as datasets`", "_____no_output_____" ], [ "Next we need to do some conversion to put the `sklearn` data into a dataframe, which is the form we are most comfortable with:\n\n- Create variable `cancer_sklearn`\n- Set it to `with datasets do load_breast_cancer using`", "_____no_output_____" ], [ "The next step is to put this `sklearn` data into a dataframe:\n\n- Create variable `dataframe`\n- Set it to `with pd create DataFrame using` a list containing\n - `from cancer_sklearn get data`\n - freestyle `columns=` followed by `from cancer_sklearn get feature_names`", "_____no_output_____" ], [ "You can use this approach to put any `sklearn` format dataset into a dataframe.\n\nBecause this data is too \"easy\", we need to make it more complicated to really show the benefits of ridge and lasso.\nRemember that multicolinearity is bad? 
\nLet's make it artificially multicolinear by duplicating the columns in the dataframe so that we have four side by side:\n\n- Set `dataframe` to `with pd do concat using` a list containing\n - A list containing\n - `dataframe` \n - `dataframe` \n - `dataframe` \n - `dataframe` \n - freestyle `axis=1`\n \nThe axis tells it to stack horizontally rather than vertically.", "_____no_output_____" ], [ "`sklearn` stores the predictors and the target (outcome) variable separately, so we need to `assign` it to the dataframe:\n\n- Set `dataframe` to `with dataframe do assign using` a list containing\n - freestyle `Target=` followed by `from cancer_sklearn get target`\n- `dataframe` (to display)", "_____no_output_____" ], [ "As you can see, we're now working with 120 predictor variables instead of the original 30.\nIt's a lot more than we'd like to have to examine manually. \n\n### Explore data\n\nBased on the earlier discussion, you might guess that this dataset has a problem with multicolinearity.\nAnd you'd be right!\n\nLet's make a quick heatmap to show this:\n\n- `import plotly.express as px`", "_____no_output_____" ], [ "And now a \"one line\" heatmap:\n\n- `with px do imshow using` as list containing\n - `with dataframe do corr using`\n - A freestyle block **with a notch on the right** containing `x=`, connected to `from dataframe get columns`\n - A freestyle block **with a notch on the right** containing `y=`, connected to `from dataframe get columns`", "_____no_output_____" ], [ "Anything light orange to yellow could give us positive colinearity problems, and anything dark purple to indigo could give us negative colinearity problems.\n\nDepending on the size of your screen, `plotly` may only show every second or third variable name.\nYou can use the Zoom tool to explore this correlation matrix more closely.", "_____no_output_____" ], [ "## Prepare train/test sets\n\nWe need to split the dataframe into `X`, our predictors, and `Y`, our target variable (breast cancer positive).\n\nDo the imports for splitting:\n\n- `import sklearn.model_selection as model_selection`", "_____no_output_____" ], [ "Create `X` by dropping the label from the dataframe:\n\n- Create variable `X`\n- Set it to `with dataframe do drop using` a list containing\n - freestyle `columns=[\"Target\"]`\n- `X` (to display)", "_____no_output_____" ], [ "Create `Y` by pulling just `Target` from the dataframe:\n\n- Create variable `Y`\n- Set it to `dataframe [ ] ` containing the following in a list\n - `\"Target\"`\n- `Y` (to display)", "_____no_output_____" ], [ "Now do the splits:\n\n- Create variable `splits`\n- Set it to `with model_selection do train_test_split using` a list containing\n - `X`\n - `Y`\n - freestyle `random_state=2` (this will make your random split the same as mine)\n \n**Notice we did not specify a test size; `sklearn` will use .25 by default**.", "_____no_output_____" ], [ "### Model 1: Logistic regression\n\nLet's do something we already suspect is not a great idea: regular logistic regression.\n\nFirst, the imports for regression, evaluation, preprocessing, and pipelines:\n\n- `import sklearn.linear_model as linear_model`\n- `import sklearn.metrics as metrics`\n- `import numpy as np`\n- `import sklearn.preprocessing as pp`\n- `import sklearn.pipeline as pipe`", "_____no_output_____" ], [ "#### Training\n\nWe're going to make a pipeline so we can scale and train in one step:\n\n- Create variable `std_clf`\n- Set it to `with pipe do make_pipeline using` a list containing\n - `with pp create StandardScaler 
using`\n - `with linear_model create LogisticRegression using` a list containing\n - freestyle `penalty=\"none\"`\n\n**The \"none\" penalty here is important because `sklearn` uses a ridge penalty by default.**", "_____no_output_____" ], [ "We can treat the whole pipeline as a classifier and call `fit` on it:\n\n- `with std_clf do fit using` a list containing\n - `in list splits get # 1` (this is Xtrain)\n - `with np do ravel using` a list containing\n - `in list splits get # 3` (this is Ytrain)", "_____no_output_____" ], [ "Now we can get predictions from the model for our test data:\n\n- Create variable `predictions`\n- Set it to `with std_clf do predict using` a list containing\n - `in list splits get # 2` (this is Xtest)\n- `predictions` (to display)", "_____no_output_____" ], [ "#### Evaluation\n\nTo get the accuracy:\n\n- `print create text with`\n - \"Accuracy:\"\n - `with metrics do accuracy_score using` a list containing\n - `in list splits get # 4` (this is `Ytest`)\n - `predictions`", "_____no_output_____" ], [ "To get precision, recall, and F1:\n\n- `print with metrics do classification_report using` a list containing\n - `in list splits get # 4` (this is `Ytest`)\n - `predictions`", "_____no_output_____" ], [ "## Regression with a Ridge (L2) Penalty", "_____no_output_____" ], [ "In ridge regression, also known as L2 penalization, the cost function is altered by adding a penalty equivalent to the square of the magnitude of the coefficients. This is equivalent to saying: for some $c > 0$, $\\sum_{j=0}^p \\beta_j^2 < c$ for coefficients $\\beta_j, j=1,\\dots,p$. \n\nThe cost function for ridge regression is\n\n$$\\sum_{i=1}^N (y_i-\\hat{y_i})^2 + \\lambda \\sum_{j=0}^p \\beta_j^2 = \\sum_{i=1}^N (y_i - \\sum_{j=0}^p \\beta_j x_{ij})^2 + \\lambda \\sum_{j=0}^p \\beta_j^2$$", "_____no_output_____" ], [ "When $\\lambda = 0$, we have a linear regression model.\n\nThe $\\lambda$ regularizes the coefficients so the optimization function is penalized if the coefficients are large. This type of penalization leads to coefficients close to, but not exactly, zero. This feature of ridge regression shrinks the coefficients allowing for a reduction of model complexity and multicollinearity.", "_____no_output_____" ], [ "### Model 2: Logistic ridge regression (C=.75)\n\nAdding a ridge penalty is almost *exactly* like the model we did before.\nThere are two differences:\n\n- penalty=\"l2\"\n- C = .75\n\nThe ridge penalty is an l2 penalty (because it's squared).\nThe `C` value is the **amount** of the penalty.\nIn `sklearn` it is inverted, so smaller numbers mean more penalty.\n\n**Do yourself a favor and copy the blocks you've already done. You can save your notebook, right click it in the file browser, select \"Duplicate\", and then open that copy in 3 pane view by dragging the tab to the center right. 
Make sure you change variable names as directed.**\n\n**Here's how to duplicate:**\n\n![image.png](attachment:image.png)", "_____no_output_____" ], [ "**Here's 3 pane view:**\n\n![image.png](attachment:image.png)", "_____no_output_____" ], [ "#### Training\n\n- Create variable `std_clf_ridge75`\n- Set it to `with pipe do make_pipeline using` a list containing\n - `with pp create StandardScaler using`\n - `with linear_model create LogisticRegression using` a list containing\n - freestyle `penalty=\"l2\"`\n - freestyle `C=0.75`", "_____no_output_____" ], [ "We can treat the whole pipeline as a classifier and call `fit` on it:\n\n- `with std_clf_ridge75 do fit using` a list containing\n - `in list splits get # 1` (this is Xtrain)\n - `with np do ravel using` a list containing\n - `in list splits get # 3` (this is Ytrain)", "_____no_output_____" ], [ "Now we can get predictions from the model for our test data:\n\n- Create variable `predictions_ridge75`\n- Set it to `with std_clf_ridge75 do predict using` a list containing\n - `in list splits get # 2` (this is Xtest)", "_____no_output_____" ], [ "#### Evaluation\n\nTo get the accuracy:\n\n- `print create text with`\n - \"Accuracy:\"\n - `with metrics do accuracy_score using` a list containing\n - `in list splits get # 4` (this is `Ytest`)\n - `predictions_ridge75`", "_____no_output_____" ], [ "To get precision, recall, and F1:\n\n- `print with metrics do classification_report using` a list containing\n - `in list splits get # 4` (this is `Ytest`)\n - `predictions_ridge75`", "_____no_output_____" ], [ "We went from .923 accuracy and .92 weighted avg f1 to .965 accuracy and .97 weighted average f1, just by using the ridge penalty.", "_____no_output_____" ], [ "### Model 3: Logistic ridge regression (C=.25)\n\nThis model is the same as model 2 but with a different ridge penalty:\n\n- penalty=\"l2\"\n- C = .25", "_____no_output_____" ], [ "#### Training\n\n- Create variable `std_clf_ridge25`\n- Set it to `with pipe do make_pipeline using` a list containing\n - `with pp create StandardScaler using`\n - `with linear_model create LogisticRegression using` a list containing\n - freestyle `penalty=\"l2\"`\n - freestyle `C=0.25`", "_____no_output_____" ], [ "We can treat the whole pipeline as a classifier and call `fit` on it:\n\n- `with std_clf_ridge25 do fit using` a list containing\n - `in list splits get # 1` (this is Xtrain)\n - `with np do ravel using` a list containing\n - `in list splits get # 3` (this is Ytrain)", "_____no_output_____" ], [ "Now we can get predictions from the model for our test data:\n\n- Create variable `predictions_ridge25`\n- Set it to `with std_clf_ridge25 do predict using` a list containing\n - `in list splits get # 2` (this is Xtest)", "_____no_output_____" ], [ "#### Evaluation\n\nTo get the accuracy:\n\n- `print create text with`\n - \"Accuracy:\"\n - `with metrics do accuracy_score using` a list containing\n - `in list splits get # 4` (this is `Ytest`)\n - `predictions_ridge25`", "_____no_output_____" ], [ "To get precision, recall, and F1:\n\n- `print with metrics do classification_report using` a list containing\n - `in list splits get # 4` (this is `Ytest`)\n - `predictions_ridge25`", "_____no_output_____" ], [ "We went from .965 accuracy and .97 weighted average f1 to .972 accuracy and .97 weighted average f1, again just by using the ridge penalty but with a greater penalty.", "_____no_output_____" ], [ "### Comparing Ridge models\n\nLet's plot the coefficients of models 1 to 3 to show the effect of the ridge 
penalty on the coefficients.\nRemember, the penalty *shrinks* coefficients.\n\nDo the imports for plotting with layers:\n\n- `import plotly.graph_objects as go`\n\nand we need to create a dummy x-axis for our coefficients:\n\n- Create variable `dummyX`\n- Set it to `with np do linspace using` a list containing:\n - `1`\n - `length of` `from dataframe get columns` - `1`\n - `length of` `from dataframe get columns` - `1`\n \n**That last part is supposed to appear twice.\nSince there is one target column and the rest are predictors, we subtract 1.**", "_____no_output_____" ], [ "Create an empty figure:\n \n- Create `fig`\n- Set it to `with go create Figure using`", "_____no_output_____" ], [ "Add three scatterplots to `fig`:\n\n- `with fig do add_scatter using`\n - freestyle `x=dummyX`\n - freestyle `y=np.ravel(std_clf[1].coef_)`\n - freestyle `name='Logistic Regression'`\n - freestyle `mode='markers'`\n - freestyle `marker=dict(color='blue', opacity=0.25, size=30)`\n \n**For the next two, copy the first and make small changes.**\n\n- `with fig do add_scatter using`\n - freestyle `x=dummyX`\n - freestyle `y=np.ravel(std_clf_ridge75[1].coef_)`\n - freestyle `name='Logistic Ridge Regression C=.75'`\n - freestyle `mode='markers'`\n - freestyle `marker=dict(color='green', opacity=0.50, size=15)`\n \n \n- `with fig do add_scatter using`\n - freestyle `x=dummyX`\n - freestyle `y=np.ravel(std_clf_ridge25[1].coef_)`\n - freestyle `name='Logistic Ridge Regression C=.25'`\n - freestyle `mode='markers'`\n - freestyle `marker=dict(color='red', opacity=0.75, size=8)`", "_____no_output_____" ], [ "To get a sense of the shrinkage, use the magnifying glass tool in `plotly` to zoom in until the y axis is about -2 to 2.\nThen you can see that the C=.25 penalty shrank the coefficients even more tightly than C=.75.", "_____no_output_____" ], [ "## Regression with a Lasso (L1) Penalty", "_____no_output_____" ], [ "In lasso regression, also known as L1 penalization, the cost function is altered by adding a penalty equivalent to the absolute value of the magnitude of the coefficients. This is equivalent to saying: for some $c > 0$, $\\sum_{j=0}^p |\\beta_j| < c$ for coefficients $\\beta_j, j=1,\\dots,p$. \n\nThe cost function for lasso regression is\n\n$$\\sum_{i=1}^N (y_i-\\hat{y_i})^2 + \\lambda \\sum_{j=0}^p |\\beta_j| = \\sum_{i=1}^N (y_i - \\sum_{j=0}^p \\beta_j x_{ij})^2 + \\lambda \\sum_{j=0}^p |\\beta_j|$$", "_____no_output_____" ], [ "When $\\lambda = 0$, we have a linear regression model.\n\nThe $\\lambda$ regularizes the coefficients so the optimization function is penalized if the coefficients are large. This type of penalization leads to some coefficients that are exactly zero. This feature of lasso regression shrinks the coefficients allowing for a reduction of model complexity and multicollinearity and allows us to perform feature selection.", "_____no_output_____" ], [ "### Model 4: Logistic lasso regression (C=.75)\n\nAdding a lasso penalty is almost *exactly* like the model we did before.\nThere are three differences:\n\n- penalty=\"l1\"\n- C = .75\n- solver=\"liblinear\"\n\nThe lasso penalty is an l1 penalty (because it's absolute value). 
\nThe \"solver\" is the algorithm that implements the l1 penalty.", "_____no_output_____" ], [ "#### Training\n\n- Create variable `std_clf_lasso75`\n- Set it to `with pipe do make_pipeline using` a list containing\n - `with pp create StandardScaler using`\n - `with linear_model create LogisticRegression using` a list containing\n - freestyle `penalty=\"l1\"`\n - freestyle `C=0.75`\n - freestyle `solver=\"liblinear\"`", "_____no_output_____" ], [ "We can treat the whole pipeline as a classifier and call `fit` on it:\n\n- `with std_clf_lasso75 do fit using` a list containing\n - `in list splits get # 1` (this is Xtrain)\n - `with np do ravel using` a list containing\n - `in list splits get # 3` (this is Ytrain)", "_____no_output_____" ], [ "Now we can get predictions from the model for our test data:\n\n- Create variable `predictions_lasso75`\n- Set it to `with std_clf_lasso75 do predict using` a list containing\n - `in list splits get # 2` (this is Xtest)", "_____no_output_____" ], [ "#### Evaluation\n\nTo get the accuracy:\n\n- `print create text with`\n - \"Accuracy:\"\n - `with metrics do accuracy_score using` a list containing\n - `in list splits get # 4` (this is `Ytest`)\n - `predictions_lasso75`", "_____no_output_____" ], [ "To get precision, recall, and F1:\n\n- `print with metrics do classification_report using` a list containing\n - `in list splits get # 4` (this is `Ytest`)\n - `predictions_lasso75`", "_____no_output_____" ], [ "Interestingly, model 4 is the same as model 2. Both are .965 accuracy and .97 weighted average f1.", "_____no_output_____" ], [ "### Model 5: Logistic lasso regression (C=.25)\n\nThis model is the same as model 4 but with a different ridge penalty:\n\n- penalty=\"l1\"\n- C = .25\n- solver=\"liblinear\"", "_____no_output_____" ], [ "#### Training\n\n- Create variable `std_clf_lasso25`\n- Set it to `with pipe do make_pipeline using` a list containing\n - `with pp create StandardScaler using`\n - `with linear_model create LogisticRegression using` a list containing\n - freestyle `penalty=\"l1\"`\n - freestyle `C=0.25`\n - freestyle `solver=\"liblinear\"`", "_____no_output_____" ], [ "We can treat the whole pipeline as a classifier and call `fit` on it:\n\n- `with std_clf_lasso25 do fit using` a list containing\n - `in list splits get # 1` (this is Xtrain)\n - `with np do ravel using` a list containing\n - `in list splits get # 3` (this is Ytrain)", "_____no_output_____" ], [ "Now we can get predictions from the model for our test data:\n\n- Create variable `predictions_lasso25`\n- Set it to `with std_clf_lasso25 do predict using` a list containing\n - `in list splits get # 2` (this is Xtest)", "_____no_output_____" ], [ "#### Evaluation\n\nTo get the accuracy:\n\n- `print create text with`\n - \"Accuracy:\"\n - `with metrics do accuracy_score using` a list containing\n - `in list splits get # 4` (this is `Ytest`)\n - `predictions_lasso25`", "_____no_output_____" ], [ "To get precision, recall, and F1:\n\n- `print with metrics do classification_report using` a list containing\n - `in list splits get # 4` (this is `Ytest`)\n - `predictions_lasso25`", "_____no_output_____" ], [ "Model 5 (accuracy .979 and weighted avg f1 .98) is slightly better than model 3 (.972 accuracy and .97 weighted average f1).\nAdditionally, lasso has done something that ridge didn't do: it has shrunk many coefficients to zero.\nSo it's actually pretty amazing that lasso is slightly better than ridge after so many variables have been removed.\nTo see which ones, run the cell below:", 
"_____no_output_____" ] ], [ [ "m5coefficients = pd.DataFrame( {\"variable\":X.columns, \"coefficient\":np.ravel(std_clf_lasso25[1].coef_) })\nprint(m5coefficients.to_string())\nprint( 'Variables removed (zero coefficient):' , len( m5coefficients[m5coefficients['coefficient']==0.0] ) )", " variable coefficient\n0 mean radius 0.000000\n1 mean texture 0.000000\n2 mean perimeter 0.000000\n3 mean area 0.000000\n4 mean smoothness 0.000000\n5 mean compactness 0.000000\n6 mean concavity -0.040312\n7 mean concave points -0.003495\n8 mean symmetry 0.000000\n9 mean fractal dimension 0.000000\n10 radius error -0.001453\n11 texture error 0.000000\n12 perimeter error 0.000000\n13 area error 0.000000\n14 smoothness error 0.000000\n15 compactness error 0.000000\n16 concavity error 0.000000\n17 concave points error 0.000000\n18 symmetry error 0.000000\n19 fractal dimension error 0.000000\n20 worst radius 0.000000\n21 worst texture -0.415234\n22 worst perimeter 0.000000\n23 worst area -0.691993\n24 worst smoothness -0.158844\n25 worst compactness 0.000000\n26 worst concavity -0.027119\n27 worst concave points -0.169056\n28 worst symmetry -0.002233\n29 worst fractal dimension 0.000000\n30 mean radius 0.000000\n31 mean texture 0.000000\n32 mean perimeter 0.000000\n33 mean area 0.000000\n34 mean smoothness 0.000000\n35 mean compactness 0.000000\n36 mean concavity -0.060110\n37 mean concave points -0.007072\n38 mean symmetry 0.000000\n39 mean fractal dimension 0.000000\n40 radius error -0.067708\n41 texture error 0.000000\n42 perimeter error 0.000000\n43 area error 0.000000\n44 smoothness error 0.000000\n45 compactness error 0.000000\n46 concavity error 0.000000\n47 concave points error 0.000000\n48 symmetry error 0.000000\n49 fractal dimension error 0.000000\n50 worst radius -0.033552\n51 worst texture -0.157240\n52 worst perimeter 0.000000\n53 worst area -0.817252\n54 worst smoothness -0.053531\n55 worst compactness 0.000000\n56 worst concavity 0.000000\n57 worst concave points -0.080698\n58 worst symmetry -0.017573\n59 worst fractal dimension 0.000000\n60 mean radius 0.000000\n61 mean texture 0.000000\n62 mean perimeter 0.000000\n63 mean area 0.000000\n64 mean smoothness 0.000000\n65 mean compactness 0.000000\n66 mean concavity -0.043440\n67 mean concave points -0.114855\n68 mean symmetry 0.000000\n69 mean fractal dimension 0.000000\n70 radius error -0.131541\n71 texture error 0.000000\n72 perimeter error 0.000000\n73 area error 0.000000\n74 smoothness error 0.000000\n75 compactness error 0.000000\n76 concavity error 0.000000\n77 concave points error 0.000000\n78 symmetry error 0.000000\n79 fractal dimension error 0.000000\n80 worst radius -0.098488\n81 worst texture -0.118175\n82 worst perimeter 0.000000\n83 worst area -0.605095\n84 worst smoothness -0.006193\n85 worst compactness 0.000000\n86 worst concavity 0.000000\n87 worst concave points -0.229167\n88 worst symmetry -0.193285\n89 worst fractal dimension 0.000000\n90 mean radius 0.000000\n91 mean texture 0.000000\n92 mean perimeter 0.000000\n93 mean area 0.000000\n94 mean smoothness 0.000000\n95 mean compactness 0.000000\n96 mean concavity -0.027851\n97 mean concave points -0.662400\n98 mean symmetry 0.000000\n99 mean fractal dimension 0.000000\n100 radius error -0.211586\n101 texture error 0.000000\n102 perimeter error 0.000000\n103 area error 0.000000\n104 smoothness error 0.000000\n105 compactness error 0.000000\n106 concavity error 0.000000\n107 concave points error 0.000000\n108 symmetry error 0.000000\n109 fractal dimension error 0.000000\n110 
worst radius -1.032479\n111 worst texture -0.154827\n112 worst perimeter 0.000000\n113 worst area -0.580124\n114 worst smoothness -0.306039\n115 worst compactness 0.000000\n116 worst concavity 0.000000\n117 worst concave points -0.201570\n118 worst symmetry -0.002189\n119 worst fractal dimension 0.000000\nVariables removed (zero coefficient): 84\n" ] ], [ [ "We started with 120 coefficients and zeroed out 84. \nObviously many of the variables remaining are duplicates; handling these is a more complex topic!\n\n### Comparing Lasso models\n\nAs before, let's plot the coefficients of models 1, 4, and 5 to show the effect of the lasso penalty on the coefficients.", "_____no_output_____" ], [ "Create an empty figure:\n\n- Set `fig` to `with go create Figure using`", "_____no_output_____" ], [ "Add three scatterplots to `fig`:\n\n- `with fig do add_scatter using`\n - freestyle `x=dummyX`\n - freestyle `y=np.ravel(std_clf[1].coef_)`\n - freestyle `name='Logistic Regression'`\n - freestyle `mode='markers'`\n - freestyle `marker=dict(color='blue', opacity=0.25, size=30)`\n \n**For the next two, copy the first and make small changes.**\n\n- `with fig do add_scatter using`\n - freestyle `x=dummyX`\n - freestyle `y=np.ravel(std_clf_lasso75[1].coef_)`\n - freestyle `name='Logistic Lasso Regression C=.75'`\n - freestyle `mode='markers'`\n - freestyle `marker=dict(color='green', opacity=0.50, size=15)`\n \n \n- `with fig do add_scatter using`\n - freestyle `x=dummyX`\n - freestyle `y=np.ravel(std_clf_lasso25[1].coef_)`\n - freestyle `name='Logistic Lasso Regression C=.25'`\n - freestyle `mode='markers'`\n - freestyle `marker=dict(color='red', opacity=0.75, size=8)`", "_____no_output_____" ], [ "Again, use the magnifying glass tool in `plotly` to zoom in until the y axis is about -1 to 1, and notice how many coefficients are zero for C=.75 and C=.25.\n\nWe can count how many are zero in each case as well:\n\n- `print create text with`\n - `\"C=.75:\"`\n - `with np do sum using` freestyle `std_clf_lasso75[1].coef_` = `0`\n- `print create text with`\n - `\"C=.25:\"`\n - `with np do sum using` freestyle `std_clf_lasso25[1].coef_` = `0`", "_____no_output_____" ], [ "Even a small amount of penalization, in this case, removed a lot of variables.\nLasso is a great way to simplify your model!", "_____no_output_____" ], [ "## Choosing $\\lambda$ (AKA C)", "_____no_output_____" ], [ "As we've seen, different values of the penalty parameter $\\lambda$ have different effects on our accuracy and other performance metrics.\nSo how do we choose $\\lambda$?\nThere are a number of different methods of finding a **metaparameter** like $\\lambda$, and these methods are fairly general and apply to other problems we've seen before, like choosing the optimal number of clusters or the optimal number of nearest neighbors.\nWe will revisit this idea in the future. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
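The record above closes by asking how to choose the penalty parameter $\lambda$ (C). A common answer is cross-validation over a grid of C values; a minimal sketch follows, assuming scikit-learn is available and that `Xtrain`/`Ytrain` stand in for the notebook's `splits` entries — the grid of C values is illustrative, not taken from the source.

```python
# Hypothetical sketch: choose C for a lasso (L1) logistic regression by cross-validation.
# Assumes Xtrain, Ytrain are the training split (kept in `splits` in the record above).
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

pipe = make_pipeline(
    StandardScaler(),
    LogisticRegression(penalty="l1", solver="liblinear"),
)

# Candidate penalty strengths; smaller C means a stronger penalty.
param_grid = {"logisticregression__C": [0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 1.0]}

search = GridSearchCV(pipe, param_grid, cv=5, scoring="f1_weighted")
search.fit(Xtrain, np.ravel(Ytrain))

print("Best C:", search.best_params_["logisticregression__C"])
print("Best cross-validated weighted F1:", search.best_score_)
```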
4af7377ecf31131074bd3c921e799ef3daf63c0f
63,250
ipynb
Jupyter Notebook
ipynb/20_natural_language_processing.ipynb
MarkVoitov/data-science-from-scratch
11ede3b4b388f2a26e7d73f13665535d340a3bd0
[ "MIT" ]
null
null
null
ipynb/20_natural_language_processing.ipynb
MarkVoitov/data-science-from-scratch
11ede3b4b388f2a26e7d73f13665535d340a3bd0
[ "MIT" ]
null
null
null
ipynb/20_natural_language_processing.ipynb
MarkVoitov/data-science-from-scratch
11ede3b4b388f2a26e7d73f13665535d340a3bd0
[ "MIT" ]
null
null
null
91.401734
43,636
0.814577
[ [ [ "import matplotlib.pyplot as plt\nfrom bs4 import BeautifulSoup\nfrom collections import defaultdict, Counter\nimport requests\nimport random", "_____no_output_____" ] ], [ [ "# Word cloud", "_____no_output_____" ] ], [ [ "data = [ (\"big data\", 100, 15), (\"Hadoop\", 95, 25), (\"Python\", 75, 50),\n (\"R\", 50, 40), (\"machine learning\", 80, 20), (\"statistics\", 20, 60),\n (\"data science\", 60, 70), (\"analytics\", 90, 3),\n (\"team player\", 85, 85), (\"dynamic\", 2, 90), (\"synergies\", 70, 0),\n (\"actionable insights\", 40, 30), (\"think out of the box\", 45, 10),\n (\"self-starter\", 30, 50), (\"customer focus\", 65, 15),\n (\"thought leadership\", 35, 35)]", "_____no_output_____" ], [ "def text_size(total):\n \"\"\"equals 8 if total is 0, 28 if total is 200\"\"\"\n return 8 + total / 200 * 20", "_____no_output_____" ], [ "for word, job_popularity, resume_popularity in data:\n plt.text(job_popularity, resume_popularity, word,\n ha='center', va='center',\n size=text_size(job_popularity + resume_popularity))\n\nplt.xlabel(\"Popularity on Job Postings\")\nplt.ylabel(\"Popularity on Resumes\")\nplt.axis([0, 100, 0, 100])\nplt.show()", "_____no_output_____" ] ], [ [ "# N-gram language models", "_____no_output_____" ] ], [ [ "def fix_unicode(text):\n return text.replace(u\"\\u2019\", \"'\")", "_____no_output_____" ], [ "def get_document():\n\n url = \"http://radar.oreilly.com/2010/06/what-is-data-science.html\"\n html = requests.get(url).text\n soup = BeautifulSoup(html, 'html5lib')\n\n content = soup.find(\"div\", \"entry-content\") # find entry-content div\n regex = r\"[\\w']+|[\\.]\" # matches a word or a period\n\n document = []\n\n\n for paragraph in content(\"p\"):\n words = re.findall(regex, fix_unicode(paragraph.text))\n document.extend(words)\n\n return document", "_____no_output_____" ], [ "def generate_using_bigrams(transitions):\n current = \".\" # this means the next word will start a sentence\n result = []\n while True:\n next_word_candidates = transitions[current] # bigrams (current, _)\n current = random.choice(next_word_candidates) # choose one at random\n result.append(current) # append it to results\n if current == \".\": return \" \".join(result) # if \".\" we're done", "_____no_output_____" ], [ "trigrams = list(zip(document, document[1:], document[2:]))\ntrigram_transitions = defaultdict(list)\nstarts = []\n\nfor prev, current, next in trigrams:\n if prev == \".\": # if the previous \"word\" was a period\n starts.append(current) # then this is a start word\n trigram_transitions[(prev, current)].append(next)", "_____no_output_____" ], [ "def generate_using_trigrams(starts, trigram_transitions):\n current = random.choice(starts) # choose a random starting word\n prev = \".\" # and precede it with a '.'\n result = [current]\n while True:\n next_word_candidates = trigram_transitions[(prev, current)]\n next = random.choice(next_word_candidates)\n\n prev, current = current, next\n result.append(current)\n\n if current == \".\":\n return \" \".join(result)", "_____no_output_____" ] ], [ [ "# Grammars", "_____no_output_____" ] ], [ [ "grammar = {\n \"_S\" : [\"_NP _VP\"],\n \"_NP\" : [\"_N\",\n \"_A _NP _P _A _N\"],\n \"_VP\" : [\"_V\",\n \"_V _NP\"],\n \"_N\" : [\"data science\", \"Python\", \"regression\"],\n \"_A\" : [\"big\", \"linear\", \"logistic\"],\n \"_P\" : [\"about\", \"near\"],\n \"_V\" : [\"learns\", \"trains\", \"tests\", \"is\"]\n }", "_____no_output_____" ], [ "def is_terminal(token):\n return token[0] != \"_\"", "_____no_output_____" ], [ "def 
expand(grammar, tokens):\n for i, token in enumerate(tokens):\n\n # ignore terminals\n if is_terminal(token): continue\n\n # choose a replacement at random\n replacement = random.choice(grammar[token])\n\n if is_terminal(replacement):\n tokens[i] = replacement\n else:\n tokens = tokens[:i] + replacement.split() + tokens[(i+1):]\n return expand(grammar, tokens)\n\n # if we get here we had all terminals and are done\n return tokens", "_____no_output_____" ], [ "def generate_sentence(grammar):\n return expand(grammar, [\"_S\"])", "_____no_output_____" ] ], [ [ "# Remark: Gibbs sampling method", "_____no_output_____" ] ], [ [ "def roll_a_die():\n return random.choice([1,2,3,4,5,6])", "_____no_output_____" ], [ "def direct_sample():\n d1 = roll_a_die()\n d2 = roll_a_die()\n return d1, d1 + d2", "_____no_output_____" ], [ "def random_y_given_x(x):\n \"\"\"equally likely to be x + 1, x + 2, ... , x + 6\"\"\"\n return x + roll_a_die()", "_____no_output_____" ], [ "def random_x_given_y(y):\n if y <= 7:\n # if the total is 7 or less, the first die is equally likely to be\n # 1, 2, ..., (total - 1)\n return random.randrange(1, y)\n else:\n # if the total is 7 or more, the first die is equally likely to be\n # (total - 6), (total - 5), ..., 6\n return random.randrange(y - 6, 7)", "_____no_output_____" ], [ "def gibbs_sample(num_iters=100):\n x, y = 1, 2 # doesn't really matter\n for _ in range(num_iters):\n x = random_x_given_y(y)\n y = random_y_given_x(x)\n return x, y", "_____no_output_____" ], [ "def compare_distributions(num_samples=1000):\n counts = defaultdict(lambda: [0, 0])\n for _ in range(num_samples):\n counts[gibbs_sample()][0] += 1\n counts[direct_sample()][1] += 1\n return counts", "_____no_output_____" ] ], [ [ "# Topic modeling", "_____no_output_____" ] ], [ [ "def sample_from(weights):\n total = sum(weights)\n rnd = total * random.random() # uniform between 0 and total\n for i, w in enumerate(weights):\n rnd -= w # return the smallest i such that\n if rnd <= 0: return i # sum(weights[:(i+1)]) >= rnd", "_____no_output_____" ], [ "documents = [\n [\"Hadoop\", \"Big Data\", \"HBase\", \"Java\", \"Spark\", \"Storm\", \"Cassandra\"],\n [\"NoSQL\", \"MongoDB\", \"Cassandra\", \"HBase\", \"Postgres\"],\n [\"Python\", \"scikit-learn\", \"scipy\", \"numpy\", \"statsmodels\", \"pandas\"],\n [\"R\", \"Python\", \"statistics\", \"regression\", \"probability\"],\n [\"machine learning\", \"regression\", \"decision trees\", \"libsvm\"],\n [\"Python\", \"R\", \"Java\", \"C++\", \"Haskell\", \"programming languages\"],\n [\"statistics\", \"probability\", \"mathematics\", \"theory\"],\n [\"machine learning\", \"scikit-learn\", \"Mahout\", \"neural networks\"],\n [\"neural networks\", \"deep learning\", \"Big Data\", \"artificial intelligence\"],\n [\"Hadoop\", \"Java\", \"MapReduce\", \"Big Data\"],\n [\"statistics\", \"R\", \"statsmodels\"],\n [\"C++\", \"deep learning\", \"artificial intelligence\", \"probability\"],\n [\"pandas\", \"R\", \"Python\"],\n [\"databases\", \"HBase\", \"Postgres\", \"MySQL\", \"MongoDB\"],\n [\"libsvm\", \"regression\", \"support vector machines\"]\n]", "_____no_output_____" ], [ "K = 4\n\ndocument_topic_counts = [Counter()\n for _ in documents]\n\ntopic_word_counts = [Counter() for _ in range(K)]\n\ntopic_counts = [0 for _ in range(K)]\n\ndocument_lengths = [len(d) for d in documents]\n\ndistinct_words = set(word for document in documents for word in document)\nW = len(distinct_words)\n\nD = len(documents)", "_____no_output_____" ], [ "document_topic_counts[3][1]", 
"_____no_output_____" ], [ "def p_topic_given_document(topic, d, alpha=0.1):\n \"\"\"the fraction of words in document _d_\n that are assigned to _topic_ (plus some smoothing)\"\"\"\n\n return ((document_topic_counts[d][topic] + alpha) /\n (document_lengths[d] + K * alpha))", "_____no_output_____" ], [ "def p_word_given_topic(word, topic, beta=0.1):\n \"\"\"the fraction of words assigned to _topic_\n that equal _word_ (plus some smoothing)\"\"\"\n\n return ((topic_word_counts[topic][word] + beta) /\n (topic_counts[topic] + W * beta))", "_____no_output_____" ], [ "def topic_weight(d, word, k):\n \"\"\"given a document and a word in that document,\n return the weight for the k-th topic\"\"\"\n\n return p_word_given_topic(word, k) * p_topic_given_document(k, d)", "_____no_output_____" ], [ "def choose_new_topic(d, word):\n return sample_from([topic_weight(d, word, k)\n for k in range(K)])", "_____no_output_____" ], [ "random.seed(0)\ndocument_topics = [[random.randrange(K) for word in document]\n for document in documents]\n\nfor d in range(D):\n for word, topic in zip(documents[d], document_topics[d]):\n document_topic_counts[d][topic] += 1\n topic_word_counts[topic][word] += 1\n topic_counts[topic] += 1", "_____no_output_____" ], [ "for iter in range(1000):\n for d in range(D):\n for i, (word, topic) in enumerate(zip(documents[d],\n document_topics[d])):\n\n # remove this word / topic from the counts\n # so that it doesn't influence the weights\n document_topic_counts[d][topic] -= 1\n topic_word_counts[topic][word] -= 1\n topic_counts[topic] -= 1\n document_lengths[d] -= 1\n\n # choose a new topic based on the weights\n new_topic = choose_new_topic(d, word)\n document_topics[d][i] = new_topic\n\n # and now add it back to the counts\n document_topic_counts[d][new_topic] += 1\n topic_word_counts[new_topic][word] += 1\n topic_counts[new_topic] += 1\n document_lengths[d] += 1", "_____no_output_____" ], [ "for k, word_counts in enumerate(topic_word_counts):\n for word, count in word_counts.most_common():\n if count > 0: print(k, word, count)", "0 Java 3\n0 Big Data 3\n0 Hadoop 2\n0 HBase 1\n0 C++ 1\n0 Spark 1\n0 Storm 1\n0 programming languages 1\n0 MapReduce 1\n0 Cassandra 1\n0 deep learning 1\n1 HBase 2\n1 neural networks 2\n1 Postgres 2\n1 MongoDB 2\n1 machine learning 2\n1 Cassandra 1\n1 numpy 1\n1 decision trees 1\n1 deep learning 1\n1 databases 1\n1 MySQL 1\n1 NoSQL 1\n1 artificial intelligence 1\n1 scipy 1\n2 regression 3\n2 Python 2\n2 R 2\n2 libsvm 2\n2 scikit-learn 2\n2 mathematics 1\n2 support vector machines 1\n2 Haskell 1\n2 Mahout 1\n3 statistics 3\n3 probability 3\n3 Python 2\n3 R 2\n3 pandas 2\n3 statsmodels 2\n3 C++ 1\n3 artificial intelligence 1\n3 theory 1\n" ], [ "topic_names = [\"Big Data and programming languages\", \"databases\", \"machine learning\", \"statistics\"]", "_____no_output_____" ], [ "for document, topic_counts in zip(documents, document_topic_counts):\n print(document)\n for topic, count in topic_counts.most_common():\n if count > 0:\n print(topic_names[topic], count)\n print()", "['Hadoop', 'Big Data', 'HBase', 'Java', 'Spark', 'Storm', 'Cassandra']\nBig Data and programming languages 7\n\n['NoSQL', 'MongoDB', 'Cassandra', 'HBase', 'Postgres']\ndatabases 5\n\n['Python', 'scikit-learn', 'scipy', 'numpy', 'statsmodels', 'pandas']\ndatabases 2\nmachine learning 2\nstatistics 2\n\n['R', 'Python', 'statistics', 'regression', 'probability']\nstatistics 3\nmachine learning 2\n\n['machine learning', 'regression', 'decision trees', 'libsvm']\nmachine learning 
2\ndatabases 2\n\n['Python', 'R', 'Java', 'C++', 'Haskell', 'programming languages']\nmachine learning 3\nBig Data and programming languages 3\n\n['statistics', 'probability', 'mathematics', 'theory']\nstatistics 3\nmachine learning 1\n\n['machine learning', 'scikit-learn', 'Mahout', 'neural networks']\nmachine learning 2\ndatabases 2\n\n['neural networks', 'deep learning', 'Big Data', 'artificial intelligence']\ndatabases 3\nBig Data and programming languages 1\n\n['Hadoop', 'Java', 'MapReduce', 'Big Data']\nBig Data and programming languages 4\n\n['statistics', 'R', 'statsmodels']\nstatistics 3\n\n['C++', 'deep learning', 'artificial intelligence', 'probability']\nstatistics 3\nBig Data and programming languages 1\n\n['pandas', 'R', 'Python']\nstatistics 3\n\n['databases', 'HBase', 'Postgres', 'MySQL', 'MongoDB']\ndatabases 5\n\n['libsvm', 'regression', 'support vector machines']\nmachine learning 3\n\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
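The n-gram record above defines `generate_using_bigrams(transitions)` but the dumped cells never build the bigram transition table or assign `document`, and the import cell omits `re`, which `get_document()` uses. A minimal sketch of the missing glue follows, under the assumption that `get_document()` works as written (it needs network access).

```python
# Hypothetical glue for the n-gram record: build the bigram transition table
# that generate_using_bigrams expects.  Assumes get_document(), fix_unicode()
# and generate_using_bigrams() are defined as in the record above.
import re                      # used inside get_document() but missing from the record's imports
from collections import defaultdict

document = get_document()      # list of words (and periods) scraped from the article

transitions = defaultdict(list)
for prev, current in zip(document, document[1:]):
    transitions[prev].append(current)   # bigram: prev -> current

print(generate_using_bigrams(transitions))
```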
4af75a94bc4f6129a67c91b1d15f0c89939943e0
44,403
ipynb
Jupyter Notebook
Project1_NBA_Salaries.ipynb
cjcheng22/Project1
e2ef0f04b5afca3b5e9d4c191ba532d2c63d3f0a
[ "MIT" ]
1
2022-03-08T13:18:39.000Z
2022-03-08T13:18:39.000Z
Project1_NBA_Salaries.ipynb
cjcheng22/Project1
e2ef0f04b5afca3b5e9d4c191ba532d2c63d3f0a
[ "MIT" ]
null
null
null
Project1_NBA_Salaries.ipynb
cjcheng22/Project1
e2ef0f04b5afca3b5e9d4c191ba532d2c63d3f0a
[ "MIT" ]
null
null
null
29.425447
111
0.349391
[ [ [ "# Dependencies\nimport pandas as pd\nimport os", "_____no_output_____" ], [ "# Save path to data set in a variable\n\ndata_file = os.path.join('.', 'Resources', 'nba_contracts_history.csv')\n", "_____no_output_____" ], [ "# Use Pandas to read data\ndata_file_df = pd.read_csv(data_file)\ndata_file_df", "_____no_output_____" ], [ "# Display a statistical overview of the DataFrame\ndata_file_df.describe()", "_____no_output_____" ], [ "#Finding NAME that has duplicate data by NAME and CONTRACT_START. (keeping the first record in dupes)\ndata_file_df[data_file_df.duplicated(subset=[\"NAME\"],keep=False)]\ndata_file_df_multionly=data_file_df[data_file_df.duplicated(subset=[\"NAME\"],keep=False)]\ndata_file_df_multionly.sort_values (by=['NAME','CONTRACT_START'], ascending=True)", "_____no_output_____" ], [ "data_file_df_multionly.to_csv (\".\\Resources\\data_file_df_multionly.csv\", index=False, header=True)", "_____no_output_____" ], [ "# Finding Salary increase\n#data_file_df_multionly [\"AVG_SALARY\"].head()", "_____no_output_____" ], [ "# Reference multiple columns within a DataFrame\n#data_file_df[[\"AVG_SALARY\", \"AGE\"]].head()", "_____no_output_____" ], [ "\n", "_____no_output_____" ], [ "\n", "_____no_output_____" ], [ "\n", "_____no_output_____" ] ], [ [ "\n", "_____no_output_____" ], [ "\n", "_____no_output_____" ] ], [ [ "\n", "_____no_output_____" ], [ "\n", "_____no_output_____" ] ], [ [ "\n", "_____no_output_____" ], [ "\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
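The NBA-salaries record above leaves the "Finding Salary increase" step as a commented-out stub. One way to finish it is sketched below, assuming the columns `NAME`, `CONTRACT_START` and `AVG_SALARY` exist as the record's snippets suggest; the column names are taken from those snippets, not verified against the full CSV.

```python
# Hypothetical sketch: salary change between consecutive contracts for each player.
# Assumes data_file_df_multionly was built as in the record above.
salary_changes = (
    data_file_df_multionly
    .sort_values(["NAME", "CONTRACT_START"])
    .assign(SALARY_CHANGE=lambda df: df.groupby("NAME")["AVG_SALARY"].diff())
)

# Players with the largest raise between contracts
print(
    salary_changes
    .nlargest(10, "SALARY_CHANGE")[["NAME", "CONTRACT_START", "AVG_SALARY", "SALARY_CHANGE"]]
)
```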
4af7634fa5f0096d10eedd7e36d0ea0cf35cba54
38,114
ipynb
Jupyter Notebook
Jupyter Simulations/2. WB Cancer Classification/Synthetic Data Generation using GAN.ipynb
harpreetvirkk/DAFML
de4bef75e2e3bc93ea617effaf8635b43a61f4cd
[ "MIT" ]
2
2020-11-19T18:02:07.000Z
2020-12-10T15:30:42.000Z
Jupyter Simulations/2. WB Cancer Classification/Synthetic Data Generation using GAN.ipynb
harpreetvirkk/DAFML
de4bef75e2e3bc93ea617effaf8635b43a61f4cd
[ "MIT" ]
null
null
null
Jupyter Simulations/2. WB Cancer Classification/Synthetic Data Generation using GAN.ipynb
harpreetvirkk/DAFML
de4bef75e2e3bc93ea617effaf8635b43a61f4cd
[ "MIT" ]
3
2020-11-12T14:36:12.000Z
2020-12-22T08:39:23.000Z
45.212337
1,394
0.420344
[ [ [ "!pip install ctgan", "Defaulting to user installation because normal site-packages is not writeable\nCollecting ctgan\n Downloading ctgan-0.2.1-py2.py3-none-any.whl (15 kB)\nRequirement already satisfied: numpy<2,>=1.17.4 in /Library/Python/3.8/site-packages (from ctgan) (1.18.5)\nCollecting pandas<0.26,>=0.24\n Downloading pandas-0.25.3-cp38-cp38-macosx_10_9_x86_64.whl (10.3 MB)\n\u001b[K |████████████████████████████████| 10.3 MB 12.0 MB/s eta 0:00:01\n\u001b[?25hCollecting torch<2,>=1.0\n Downloading torch-1.6.0-cp38-none-macosx_10_9_x86_64.whl (97.5 MB)\n\u001b[K |████████████████████████████████| 97.5 MB 8.1 MB/s eta 0:00:012 |▏ | 368 kB 2.9 MB/s eta 0:00:34 |████▌ | 13.8 MB 699 kB/s eta 0:02:00 |█████▏ | 15.6 MB 699 kB/s eta 0:01:58 |███████████▍ | 34.7 MB 14.4 MB/s eta 0:00:05 |█████████████▌ | 41.0 MB 14.4 MB/s eta 0:00:04 |██████████████ | 42.8 MB 14.4 MB/s eta 0:00:04 |█████████████████▌ | 53.2 MB 27.2 MB/s eta 0:00:02 |███████████████████ | 57.6 MB 11.3 MB/s eta 0:00:04 |████████████████████ | 60.9 MB 11.3 MB/s eta 0:00:04 |█████████████████████▎ | 64.9 MB 14.9 MB/s eta 0:00:03 |████████████████████████████ | 85.5 MB 9.8 MB/s eta 0:00:02\n\u001b[?25hCollecting scikit-learn<0.23,>=0.21\n Downloading scikit_learn-0.22.2.post1-cp38-cp38-macosx_10_9_x86_64.whl (7.2 MB)\n\u001b[K |████████████████████████████████| 7.2 MB 8.0 MB/s eta 0:00:01\n\u001b[?25hCollecting torchvision<1,>=0.4.2\n Downloading torchvision-0.7.0-cp38-cp38-macosx_10_9_x86_64.whl (387 kB)\n\u001b[K |████████████████████████████████| 387 kB 11.2 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: python-dateutil>=2.6.1 in /Library/Python/3.8/site-packages (from pandas<0.26,>=0.24->ctgan) (2.8.1)\nRequirement already satisfied: pytz>=2017.2 in /Library/Python/3.8/site-packages (from pandas<0.26,>=0.24->ctgan) (2020.1)\nCollecting future\n Downloading future-0.18.2.tar.gz (829 kB)\n\u001b[K |████████████████████████████████| 829 kB 13.6 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: joblib>=0.11 in /Library/Python/3.8/site-packages (from scikit-learn<0.23,>=0.21->ctgan) (0.16.0)\nRequirement already satisfied: scipy>=0.17.0 in /Library/Python/3.8/site-packages (from scikit-learn<0.23,>=0.21->ctgan) (1.4.1)\nRequirement already satisfied: pillow>=4.1.1 in /Users/anonymousanon/Library/Python/3.8/lib/python/site-packages (from torchvision<1,>=0.4.2->ctgan) (7.2.0)\nRequirement already satisfied: six>=1.5 in /Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/site-packages (from python-dateutil>=2.6.1->pandas<0.26,>=0.24->ctgan) (1.15.0)\nBuilding wheels for collected packages: future\n Building wheel for future (setup.py) ... 
\u001b[?25ldone\n\u001b[?25h Created wheel for future: filename=future-0.18.2-py3-none-any.whl size=491056 sha256=48c4c96353738f28f1084e2344835df19b4a0a01493031a82757867bcbea81ff\n Stored in directory: /Users/anonymousanon/Library/Caches/pip/wheels/8e/70/28/3d6ccd6e315f65f245da085482a2e1c7d14b90b30f239e2cf4\nSuccessfully built future\nInstalling collected packages: pandas, future, torch, scikit-learn, torchvision, ctgan\n\u001b[33m WARNING: The scripts futurize and pasteurize are installed in '/Users/anonymousanon/Library/Python/3.8/bin' which is not on PATH.\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n\u001b[33m WARNING: The scripts convert-caffe2-to-onnx and convert-onnx-to-caffe2 are installed in '/Users/anonymousanon/Library/Python/3.8/bin' which is not on PATH.\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n\u001b[33m WARNING: The script ctgan is installed in '/Users/anonymousanon/Library/Python/3.8/bin' which is not on PATH.\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\nSuccessfully installed ctgan-0.2.1 future-0.18.2 pandas-0.25.3 scikit-learn-0.22.2.post1 torch-1.6.0 torchvision-0.7.0\n" ], [ "import pandas as pd\npd.set_option('display.max_columns', 500)", "_____no_output_____" ], [ "data = pd.read_csv('./data/data.csv')\ndel data['Unnamed: 32']", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "discrete_columns = list(data.columns)\ndiscrete_columns", "_____no_output_____" ], [ "data.isnull().values", "_____no_output_____" ], [ "from ctgan import CTGANSynthesizer\n\nctgan = CTGANSynthesizer()\nctgan.fit(data, discrete_columns, epochs=50)", "_____no_output_____" ], [ "samples = ctgan.sample(len(data))", "_____no_output_____" ], [ "samples", "_____no_output_____" ], [ "samples.to_csv('./data/synthetic.csv', index=False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
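In the CTGAN record above, `discrete_columns = list(data.columns)` marks every column — including the continuous measurements — as discrete. A sketch of a more selective setup, with a quick distribution sanity check, follows; the column names `diagnosis` and `id` are assumptions based on the usual Wisconsin breast-cancer CSV and are not confirmed by the dump.

```python
# Hypothetical sketch: treat only the truly categorical column as discrete for CTGAN
# and compare summary statistics between real and synthetic data.
from ctgan import CTGANSynthesizer

discrete_columns = ["diagnosis"]   # assumed categorical target column; the rest are continuous

ctgan = CTGANSynthesizer()
ctgan.fit(data, discrete_columns, epochs=50)

samples = ctgan.sample(len(data))

# Quick sanity check: means of the continuous columns should be in the same ballpark.
# 'id' is assumed to be an identifier column and is excluded.
numeric_cols = [c for c in data.columns if c not in discrete_columns and c != "id"]
print(data[numeric_cols].mean().head())
print(samples[numeric_cols].mean().head())
```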
4af7655e3ae8445ddd7b7f36e2c4dd8d8c22c13b
282,101
ipynb
Jupyter Notebook
notebooks/FMS_model.ipynb
steppi/adeft_indra
e9958582798b62440f4beb97f681a81d1c7dd38c
[ "BSD-2-Clause" ]
null
null
null
notebooks/FMS_model.ipynb
steppi/adeft_indra
e9958582798b62440f4beb97f681a81d1c7dd38c
[ "BSD-2-Clause" ]
null
null
null
notebooks/FMS_model.ipynb
steppi/adeft_indra
e9958582798b62440f4beb97f681a81d1c7dd38c
[ "BSD-2-Clause" ]
null
null
null
51.761651
15,725
0.591724
[ [ [ "import json\nimport pickle\n\nfrom indra.literature.adeft_tools import universal_extract_text\nfrom indra.databases.hgnc_client import get_hgnc_name, get_hgnc_id\n\nfrom indra_db.util.content_scripts import get_text_content_from_pmids\nfrom indra_db.util.content_scripts import get_stmts_with_agent_text_like\nfrom indra_db.util.content_scripts import get_text_content_from_stmt_ids\n\nfrom adeft.discover import AdeftMiner\nfrom adeft.gui import ground_with_gui\nfrom adeft.modeling.label import AdeftLabeler\nfrom adeft.modeling.classify import AdeftClassifier\nfrom adeft.disambiguate import AdeftDisambiguator\n\nfrom adeft_indra.s3 import model_to_s3\nfrom adeft_indra.ground import gilda_ground", "_____no_output_____" ], [ "shortforms = ['FMS']\ngenes = ['CSF1R']\nfamilies = {}\ngroundings = [f'HGNC:{get_hgnc_id(gene)}' for gene in genes]\nfor family, members in families.items():\n genes.extend(members)\n groundings.append(f'FPLX:{family}')\nwith open('../data/entrez_all_pmids.json', 'r') as f:\n all_pmids = json.load(f)", "_____no_output_____" ], [ "entrez_texts = []\nentrez_refs = set()\nfor gene, grounding in zip(genes, groundings):\n try:\n pmids = all_pmids[gene]\n except KeyError:\n continue\n _, content = get_text_content_from_pmids(pmids)\n entrez_texts.extend([(universal_extract_text(text), grounding)\n for text in content.values() if text])\n entrez_refs.update(content.keys())", "_____no_output_____" ], [ "miners = dict()\nall_texts = set()\nfor shortform in shortforms:\n stmts = get_stmts_with_agent_text_like(shortform)[shortform]\n _, content = get_text_content_from_stmt_ids(stmts)\n shortform_texts = [universal_extract_text(text, contains=shortforms)\n for ref, text in content.items() if text and ref not in entrez_refs]\n miners[shortform] = AdeftMiner(shortform)\n miners[shortform].process_texts(shortform_texts)\n all_texts |= set(shortform_texts)", "_____no_output_____" ] ], [ [ "It's then necessary to check if Acromine produced the correct results. 
We must fix errors manually", "_____no_output_____" ] ], [ [ "top = miners['FMS'].top()", "_____no_output_____" ], [ "top", "_____no_output_____" ], [ "longforms0 = miners['FMS'].get_longforms()", "_____no_output_____" ], [ "list(enumerate(longforms0))", "_____no_output_____" ], [ "longforms0 = [(longform, score) for i, (longform, score) in enumerate(longforms0)\n if i not in [3, 7]]", "_____no_output_____" ], [ "list(enumerate(top))", "_____no_output_____" ], [ "longforms0.extend((longform, score) for i, (longform, score) in enumerate(top)\n if i in [14, 15])", "_____no_output_____" ], [ "longforms = longforms0", "_____no_output_____" ], [ "longforms.sort(key=lambda x: -x[1])", "_____no_output_____" ], [ "longforms, scores = zip(*longforms)", "_____no_output_____" ], [ "longforms", "_____no_output_____" ], [ "grounding_map = {}\nnames = {}\nfor longform in longforms:\n grounding = gilda_ground(longform)\n if grounding[0]:\n grounding_map[longform] = f'{grounding[0]}:{grounding[1]}'\n names[grounding_map[longform]] = grounding[2]", "_____no_output_____" ], [ "grounding_map", "_____no_output_____" ], [ "names", "_____no_output_____" ], [ "grounding_map, names, pos_labels = ground_with_gui(longforms, scores, grounding_map=grounding_map, names=names)", "_____no_output_____" ], [ "result = (grounding_map, names, pos_labels)", "_____no_output_____" ], [ "result", "_____no_output_____" ], [ "grounding_map, names, pos_labels = ({'fibromyalgia': 'MESH:D005356',\n 'fibromyalgia syndrome': 'MESH:D005356',\n 'fimasartan': 'CHEBI:CHEBI:136044',\n 'fluorous mixture synthesis': 'ungrounded',\n 'functional magnetic stimulation': 'MESH:D055909',\n 'functional mesoporous silica': 'ungrounded',\n 'fundamental motor skills': 'ungrounded',\n 'fundamental movement skills': 'ungrounded'},\n {'MESH:D005356': 'Fibromyalgia',\n 'CHEBI:CHEBI:136044': 'fimasartan',\n 'MESH:D055909': 'Magnetic Field Therapy'},\n ['CHEBI:CHEBI:136044', 'MESH:D005356', 'MESH:D055909'])", "_____no_output_____" ], [ "names['HGNC:2433'] = 'CSF1R'", "_____no_output_____" ], [ "grounding_dict = {'FMS': grounding_map}", "_____no_output_____" ], [ "classifier = AdeftClassifier('FMS', pos_labels=pos_labels)", "_____no_output_____" ], [ "param_grid = {'C': [100.0], 'max_features': [10000]}", "_____no_output_____" ], [ "labeler = AdeftLabeler(grounding_dict)", "_____no_output_____" ], [ "corpus = labeler.build_from_texts(shortform_texts)", "_____no_output_____" ], [ "corpus.extend(entrez_texts)", "_____no_output_____" ], [ "texts, labels = zip(*corpus)", "_____no_output_____" ], [ "classifier.cv(texts, labels, param_grid, cv=5, n_jobs=8)", "INFO: [2020-01-27 16:25:03] /Users/albertsteppi/adeft/adeft/modeling/classify.py - Beginning grid search in parameter space:\n{'C': [100.0], 'max_features': [10000]}\n/Users/albertsteppi/.virtualenvs/py37/lib/python3.7/site-packages/sklearn/model_selection/_split.py:657: Warning: The least populated class in y has only 2 members, which is too few. 
The minimum number of members in any class cannot be less than n_splits=5.\n % (min_groups, self.n_splits)), Warning)\nINFO: [2020-01-27 16:25:20] /Users/albertsteppi/adeft/adeft/modeling/classify.py - Best f1 score of 0.875077399380805 found for parameter values:\n{'logit__C': 100.0, 'tfidf__max_features': 10000}\n" ], [ "classifier.stats", "_____no_output_____" ], [ "disamb = AdeftDisambiguator(classifier, grounding_dict, names)", "_____no_output_____" ], [ "d = disamb.disambiguate(shortform_texts)", "_____no_output_____" ], [ "a = [text for pred, text in zip(d, shortform_texts)if pred[0] == 'HGNC:2433']", "_____no_output_____" ], [ "a[2]", "_____no_output_____" ], [ "disamb.dump('FMS', '../results')", "_____no_output_____" ], [ "from adeft.disambiguate import load_disambiguator, load_disambiguator_directly", "_____no_output_____" ], [ "disamb.classifier.training_set_digest", "_____no_output_____" ], [ "model_to_s3(disamb)", "_____no_output_____" ], [ "d.disambiguate(texts[0])", "_____no_output_____" ], [ "print(d.info())", "Disambiguation model for ARG\n\nProduces the disambiguations:\n\t(-)-Arctigenin*\tCHEBI:CHEBI:79\n\tABL2*\tHGNC:77\n\tAREG*\tHGNC:651\n\tRERE*\tHGNC:9965\n\targinine*\tCHEBI:CHEBI:29016\n\nTraining data had class balance:\n\tAREG*\t157\n\tABL2*\t88\n\tRERE*\t39\n\targinine*\t15\n\t(-)-Arctigenin*\t2\n\nClassification Metrics:\n\tF1 score:\t0.81188\n\tPrecision:\t0.82394\n\tRecall:\t\t0.8205\n\n* Positive labels\nSee Docstring for explanation\n\n" ], [ "a = load_disambiguator('AR')", "_____no_output_____" ], [ "a.disambiguate('Androgen')", "_____no_output_____" ], [ "logit = d.classifier.estimator.named_steps['logit']", "_____no_output_____" ], [ "logit.classes_", "_____no_output_____" ], [ "model_to_s3(disamb)", "_____no_output_____" ], [ "classifier.feature_importances()['FPLX:RAC']", "_____no_output_____" ], [ "d = load_disambiguator('ALK', '../results')", "WARNING: [2020-01-24 11:18:53] /Users/albertsteppi/adeft/adeft/download/download.py - Shortform TEC has multiple adeft modelsThis may lead to unexpected behavior\nWARNING: [2020-01-24 11:18:53] /Users/albertsteppi/adeft/adeft/download/download.py - Shortform EAG has multiple adeft modelsThis may lead to unexpected behavior\n" ], [ "d.info()", "_____no_output_____" ], [ "print(d.info())", "Disambiguation model for ALK\n\nProduces the disambiguations:\n\tALK*\tHGNC:427\n\tAlkaline Phosphatase*\tMESH:D000469\n\tRSTK1*\tFPLX:RSTK1\n\nTraining data had class balance:\n\tALK*\t1181\n\tRSTK1*\t155\n\tAlkaline Phosphatase*\t6\n\nClassification Metrics:\n\tF1 score:\t0.97493\n\tPrecision:\t0.97367\n\tRecall:\t\t0.97767\n\n* Positive labels\nSee Docstring for explanation\n\n" ], [ "model_to_s3(d)", "_____no_output_____" ], [ "d = load_disambiguator('TAK', '../results')", "WARNING: [2020-01-24 11:34:16] /Users/albertsteppi/adeft/adeft/download/download.py - Shortform TEC has multiple adeft modelsThis may lead to unexpected behavior\nWARNING: [2020-01-24 11:34:16] /Users/albertsteppi/adeft/adeft/download/download.py - Shortform EAG has multiple adeft modelsThis may lead to unexpected behavior\n" ], [ "print(d.info())", "Disambiguation model for TAK\n\nProduces the disambiguations:\n\tMAP3K7*\tHGNC:6859\n\tTakayasu Arteritis*\tMESH:D013625\n\nTraining data had class balance:\n\tMAP3K7*\t327\n\tUngrounded*\t241\n\tUngrounded\t36\n\tTakayasu Arteritis*\t13\n\nClassification Metrics:\n\tF1 score:\t0.94043\n\tPrecision:\t0.93685\n\tRecall:\t\t0.94658\n\n* Positive labels\nSee Docstring for explanation\n\n" ], [ "model_to_s3(d)", 
"_____no_output_____" ], [ "from adeft import available_shortforms", "_____no_output_____" ], [ "print(d.info())", "Disambiguation model for TAK\n\nProduces the disambiguations:\n\tMAP3K7*\tHGNC:6859\n\tTakayasu Arteritis*\tMESH:D013625\n\nTraining data had class balance:\n\tMAP3K7*\t327\n\tUngrounded*\t241\n\tUngrounded\t36\n\tTakayasu Arteritis*\t13\n\nClassification Metrics:\n\tF1 score:\t0.94043\n\tPrecision:\t0.93685\n\tRecall:\t\t0.94658\n\n* Positive labels\nSee Docstring for explanation\n\n" ], [ "d.classifier.feature_importances()", "_____no_output_____" ], [ "from adeft import __version__", "_____no_output_____" ], [ "__version__", "_____no_output_____" ], [ "from adeft.disambiguate import load_disambiguator_directly", "_____no_output_____" ], [ "d = load_disambiguator_directly('../results/TEK/')", "_____no_output_____" ], [ "print(d.info())", "Disambiguation model for TEK\n\nProduces the disambiguations:\n\tTEK*\tHGNC:11724\n\nTraining data had class balance:\n\tTEK*\t217\n\tUngrounded\t9\n\nClassification Metrics:\n\tF1 score:\t0.99316\n\tPrecision:\t0.98646\n\tRecall:\t\t1.0\n\n* Positive labels\nSee Docstring for explanation\n\n" ], [ "model_to_s3(d)", "_____no_output_____" ], [ "d.grounding_dict", "_____no_output_____" ], [ "!python -m adeft.download --update", "100% [......................................................] 1181008 / 1181008" ], [ "from adeft import available_shortforms", "_____no_output_____" ], [ "len(available_shortforms)", "_____no_output_____" ], [ "available_shortforms", "_____no_output_____" ], [ "'TEC' in available_shortforms", "_____no_output_____" ], [ "'TECs' in available_shortforms", "_____no_output_____" ], [ "!python -m adeft.download --update", "Shortform TEC has multiple adeft modelsThis may lead to unexpected behavior\nShortform TEC has multiple adeft modelsThis may lead to unexpected behavior\n100% [........................................................] 
194559 / 194559Traceback (most recent call last):\n File \"/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/albertsteppi/adeft/adeft/download/__main__.py\", line 18, in <module>\n download_models(update=args.update)\n File \"/Users/albertsteppi/adeft/adeft/download/download.py\", line 65, in download_models\n out=resource_path)\n File \"/Users/albertsteppi/.virtualenvs/py37/lib/python3.7/site-packages/wget.py\", line 526, in download\n (tmpfile, headers) = ulib.urlretrieve(binurl, tmpfile, callback)\n File \"/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py\", line 247, in urlretrieve\n with contextlib.closing(urlopen(url, data)) as fp:\n File \"/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py\", line 222, in urlopen\n return opener.open(url, data, timeout)\n File \"/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py\", line 531, in open\n response = meth(req, response)\n File \"/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py\", line 641, in http_response\n 'http', request, response, code, msg, hdrs)\n File \"/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py\", line 569, in error\n return self._call_chain(*args)\n File \"/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py\", line 503, in _call_chain\n result = func(*args)\n File \"/usr/local/Cellar/python/3.7.5/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py\", line 649, in http_error_default\n raise HTTPError(req.full_url, code, msg, hdrs, fp)\nurllib.error.HTTPError: HTTP Error 404: Not Found\n" ], [ "!python -m adeft.download --update", "100% [......................................................] 1181008 / 1181008" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
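The FMS record above disambiguates the shortform texts and then picks out the CSF1R-grounded ones by hand. A small tally of all predicted groundings, following the same `pred[0]` convention the record itself uses, is sketched below.

```python
# Hypothetical sketch: count how often each grounding is predicted for "FMS".
# Assumes `disamb`, `shortform_texts` and the `names` dict are defined as in the
# record above, and that each prediction's first element is the grounding
# (as the record's own `pred[0] == 'HGNC:2433'` check implies).
from collections import Counter

predictions = disamb.disambiguate(shortform_texts)
grounding_counts = Counter(pred[0] for pred in predictions)

for grounding, count in grounding_counts.most_common():
    print(grounding, names.get(grounding, grounding), count)
```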
4af778d19ff90efc0f621e90ffd2125428024d15
6,712
ipynb
Jupyter Notebook
morphology.ipynb
codydunne/arterial-vis
5e9295a4d536053ec62b900897198a591127716b
[ "MIT" ]
null
null
null
morphology.ipynb
codydunne/arterial-vis
5e9295a4d536053ec62b900897198a591127716b
[ "MIT" ]
null
null
null
morphology.ipynb
codydunne/arterial-vis
5e9295a4d536053ec62b900897198a591127716b
[ "MIT" ]
null
null
null
21.863192
168
0.54857
[ [ [ "# ArterialVis Morphology Embedding and Animation", "_____no_output_____" ], [ "## Import the ArterialVis morphology module", "_____no_output_____" ] ], [ [ "from arterialvis.download import make_output_dir\nfrom arterialvis.morphology import *", "_____no_output_____" ] ], [ [ "## Get a list of all morphology files", "_____no_output_____" ] ], [ [ "print(get_files.__doc__)\nfiles = get_files()", "_____no_output_____" ] ], [ [ "## Create a directory to cache analytics and store outputs", "_____no_output_____" ] ], [ [ "print(make_output_dir.__doc__)\noutput = make_output_dir(files[0])", "_____no_output_____" ] ], [ [ "## Render a simplified morphology, colorcoded by group if available", "_____no_output_____" ] ], [ [ "print(build_grouped_graph.__doc__)\nbuild_grouped_graph(files[0],output=output)", "_____no_output_____" ], [ "print(build_compound_graph.__doc__)\nbuild_compound_graph(files[0],output=os.path.join(output))", "_____no_output_____" ] ], [ [ "## Dashboard for comparison\n\nThe following function is not included as a code cell in-notebook because **you must pause execution of this cell in order to continue on with the notebook**.\n\n`build_comparison_dashboard()`", "_____no_output_____" ] ], [ [ "build_comparison_dashboard()", "_____no_output_____" ] ], [ [ "## 3D Rendering of morphology (with colorcoding if available)", "_____no_output_____" ] ], [ [ "print(get_edgelist.__doc__)\nedgelist = get_edgelist(files[0],output=output)\nprint(generate_graph.__doc__)\nG = generate_graph(edgelist,output=output)\nprint(draw_graph.__doc__)\ndraw_graph(get_3d_traces(G, edgelist),output=output)", "_____no_output_____" ] ], [ [ "## Simplifying a graph\n\nIt is possible to remove all interstitial nodes and only retain bifucations and leaves by removing all nodes with a degree of 2 using the `simplifyGraph` command:", "_____no_output_____" ] ], [ [ "print(simplifyGraph.__doc__)\nsparse = simplifyGraph(G,output=output)\ndraw_graph(\n get_3d_traces(\n G = sparse,\n edgelist = edgelist,\n nodeSize=5\n ),\n output=os.path.join(output, 'simplified'))", "_____no_output_____" ] ], [ [ "## Converting 3D morphology to a 2D graph", "_____no_output_____" ], [ "### For a simplified morphology", "_____no_output_____" ] ], [ [ "print(draw_graph.__doc__)\ndraw_graph(\n get_2d_traces(\n G=sparse,\n edgelist=edgelist,\n nodesize=5\n ),\n output=os.path.join(output, 'simplified')\n)", "_____no_output_____" ], [ "print(build_animation.__doc__)\nbuild_animation(\n G=sparse,\n edgelist=edgelist,\n output=os.path.join(output,'sparse_animation'))", "_____no_output_____" ] ], [ [ "### For the complete morphology (Note: SLOW)", "_____no_output_____" ] ], [ [ "print(extract_real_abstract.__doc__)\nreal_edgelist, abstract_edgelist, extended_edgelist = extract_real_abstract(\n G=G,\n edgelist=edgelist,\n output=os.path.join(output, 'complete'))", "_____no_output_____" ], [ "draw_graph(\n get_3d_traces(\n G=G,\n edgelist=real_edgelist\n ),\n output=os.path.join(output,'complete'))", "_____no_output_____" ] ], [ [ "## Generating an animation between complex 3D and 2D representation", "_____no_output_____" ] ], [ [ "build_animation(G=G, edgelist=edgelist, output=os.path.join(output,'complex_animation'))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
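The morphology walkthrough above processes a single file (`files[0]`). A sketch of batching the same steps over every available morphology file follows, using only calls demonstrated in that record and assuming `get_files` and `build_grouped_graph` are importable by name, as the record's star-import suggests.

```python
# Hypothetical sketch: run the grouped-graph rendering for every morphology file,
# reusing only functions already demonstrated in the record above.
from arterialvis.download import make_output_dir
from arterialvis.morphology import get_files, build_grouped_graph

for f in get_files():
    out = make_output_dir(f)              # per-file cache / output directory
    build_grouped_graph(f, output=out)    # simplified, colour-coded rendering
```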
4af781aa80aa5524758c0ae68370f82a0efb090c
81,991
ipynb
Jupyter Notebook
melanoma-classification-using-ensemble-learning.ipynb
bhatiaharshit07/SIIM-ISIC-Melanoma-Classification
41edbd1b8104980d64e20f27d23fc8d66f3d61a6
[ "Apache-2.0" ]
1
2021-04-03T20:43:00.000Z
2021-04-03T20:43:00.000Z
melanoma-classification-using-ensemble-learning.ipynb
bhatiaharshit07/SIIM-ISIC-Melanoma-Classification
41edbd1b8104980d64e20f27d23fc8d66f3d61a6
[ "Apache-2.0" ]
null
null
null
melanoma-classification-using-ensemble-learning.ipynb
bhatiaharshit07/SIIM-ISIC-Melanoma-Classification
41edbd1b8104980d64e20f27d23fc8d66f3d61a6
[ "Apache-2.0" ]
null
null
null
81,991
81,991
0.757181
[ [ [ "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport gc\nimport json\nimport math\nimport cv2\nimport PIL\nfrom PIL import Image\nimport seaborn as sns\nsns.set(style='darkgrid')\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import to_categorical\nfrom keras import layers\nfrom keras.applications import ResNet50,MobileNet, DenseNet201, InceptionV3, NASNetLarge, InceptionResNetV2, NASNetMobile\nfrom keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import cohen_kappa_score, accuracy_score\nimport scipy\nfrom tqdm import tqdm\nimport tensorflow as tf\nfrom keras import backend as K\nimport gc\nfrom functools import partial\nfrom sklearn import metrics\nfrom collections import Counter\nimport json\nimport itertools\n\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import OneHotEncoder\nfrom tqdm import tqdm\nfrom sklearn.decomposition import PCA\n\n%matplotlib inline", "Using TensorFlow backend.\n" ], [ "sub = pd.read_csv('/kaggle/input/siim-isic-melanoma-classification/sample_submission.csv')", "_____no_output_____" ], [ "import os\nprint(os.listdir(\"../input/siim-isic-melanoma-classification\"))", "['tfrecords', 'train.csv', 'train', 'test', 'test.csv', 'sample_submission.csv', 'jpeg']\n" ], [ "#Loading Train and Test Data\ntrain = pd.read_csv(\"../input/siim-isic-melanoma-classification/train.csv\")\ntest = pd.read_csv(\"../input/siim-isic-melanoma-classification/test.csv\")\nprint(\"{} images in train set.\".format(train.shape[0]))\nprint(\"{} images in test set.\".format(test.shape[0]))", "33126 images in train set.\n10982 images in test set.\n" ], [ "train.head()", "_____no_output_____" ], [ "test.head()", "_____no_output_____" ] ], [ [ "Let's look at the distribution of teh target:", "_____no_output_____" ] ], [ [ "np.mean(train.target)", "_____no_output_____" ] ], [ [ "So this is a binary classification problem with highly imbalanced data.", "_____no_output_____" ], [ "Let's take a look at a few images.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,5))\nsns.countplot(x='target', data=train,\n order=list(train['target'].value_counts().sort_index().index) ,\n color='cyan')", "_____no_output_____" ], [ "train['target'].value_counts()", "_____no_output_____" ], [ "train.columns", "_____no_output_____" ], [ "z=train.groupby(['target','sex'])['benign_malignant'].count().to_frame().reset_index()\nz.style.background_gradient(cmap='Reds') ", "_____no_output_____" ], [ "sns.catplot(x='target',y='benign_malignant', hue='sex',data=z,kind='bar')", "_____no_output_____" ], [ "from keras.models import Sequential\nfrom keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Convolution2D,Conv2D\nfrom keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D\nfrom keras.optimizers import SGD\nfrom keras.callbacks import TensorBoard\nfrom keras import applications", "_____no_output_____" ] ], [ [ "**TRAINING**", "_____no_output_____" ] ], [ [ "import time ", "_____no_output_____" ], [ "start=time.time()\ntrain_images = 
np.load('../input/rgb-3500-96/train_images_rgb_3500_96.npy')\nend=time.time()\nprint(f\"\\nTime to load train images: {round(end-start,5)} seconds.\")\nprint('Train_images shape: ',train_images.shape)", "\nTime to load train images: 0.36773 seconds.\nTrain_images shape: (3874, 96, 96, 3)\n" ], [ "start=time.time()\ntest_images = np.load('../input/test-images-rgb-10000-96/test_images_rbg_10000_96.npy')\nend=time.time()\nprint(f\"\\nTime to load test images: {round(end-start,5)} seconds.\")\nprint('Test_images shape: ',test_images.shape)", "\nTime to load test images: 1.03321 seconds.\nTest_images shape: (10982, 96, 96, 3)\n" ], [ "#target data\ntrain_labels =np.load('../input/rgb-3500-96/train_labels_rgb_3500_96.npy')\nprint('Train_labels shape: ',train_labels.shape)", "Train_labels shape: (3874,)\n" ], [ "#spliting train data\nfrom sklearn.model_selection import train_test_split\nx_train,x_val,y_train,y_val=train_test_split(train_images,train_labels,test_size=0.3)", "_____no_output_____" ], [ "print('x_train shape: ',x_train.shape)\nprint('x_val shape: ',x_val.shape)", "x_train shape: (2711, 96, 96, 3)\nx_val shape: (1163, 96, 96, 3)\n" ] ], [ [ "**DATA AUGMENTATION**", "_____no_output_____" ] ], [ [ "augs = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\naugs.fit(x_train)", "_____no_output_____" ] ], [ [ "**MODELLING**", "_____no_output_____" ] ], [ [ "#VGG-16 MODEL NO. 1\nfrom keras.applications.vgg16 import VGG16\n\n \nmodel = Sequential()\nmodel.add(ZeroPadding2D((1,1),input_shape=(32,32,3)))\nmodel.add(Convolution2D(64, 3, 3, activation='relu'))\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(64, 3, 3, activation='relu'))\nmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu'))\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu'))\nmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu'))\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu'))\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu'))\nmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu'))\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu'))\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu'))\nmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu'))\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu'))\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu'))\nmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(4096, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(4096, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.summary()\n\nmodel.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])", "/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:7: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(64, (3, 3), activation=\"relu\")`\n import 
sys\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:9: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(64, (3, 3), activation=\"relu\")`\n if __name__ == '__main__':\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:13: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(128, (3, 3), activation=\"relu\")`\n del sys.path[0]\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:15: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(128, (3, 3), activation=\"relu\")`\n from ipykernel import kernelapp as app\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:19: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(256, (3, 3), activation=\"relu\")`\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:21: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(256, (3, 3), activation=\"relu\")`\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:23: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(256, (3, 3), activation=\"relu\")`\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:27: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(512, (3, 3), activation=\"relu\")`\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:29: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(512, (3, 3), activation=\"relu\")`\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:31: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(512, (3, 3), activation=\"relu\")`\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:35: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(512, (3, 3), activation=\"relu\")`\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:37: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(512, (3, 3), activation=\"relu\")`\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:39: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(512, (3, 3), activation=\"relu\")`\n" ], [ "#XCEPTION MODEL NO. 
2\nfrom keras.layers import Dropout, DepthwiseConv2D, MaxPooling2D, concatenate\nfrom keras.models import Model\n\ninp = Input(shape = (96,96, 3))\nx = inp\nx = Conv2D(32, (3, 3), strides = 2, padding = \"same\", activation = \"relu\")(x)\nx = BatchNormalization(axis = 3)(x)\nx = Dropout(0.4)(x)\nx = Conv2D(64, (3, 3), strides = 1, padding = \"same\", activation = \"relu\")(x)\nx = BatchNormalization(axis = 3)(x)\nx = Dropout(0.4)(x)\n\nx1 = DepthwiseConv2D((3, 3), (1, 1), padding = \"same\", activation = \"relu\")(x)\nx = BatchNormalization(axis = 3)(x)\nx = Dropout(0.4)(x)\nx1 = DepthwiseConv2D((3, 3), (1, 1), padding = \"same\", activation = \"relu\")(x1)\nx = BatchNormalization(axis = 3)(x)\nx = Dropout(0.4)(x)\nx1 = MaxPooling2D((2, 2), strides = 1)(x1)\n\nx = concatenate([x1, Conv2D(64, (2, 2), strides = 1)(x)])\n\nx1 = Activation(\"relu\")(x)\nx1 = Conv2D(256, (3, 3), strides = 1, padding = \"same\", activation = \"relu\")(x1)\nx = BatchNormalization(axis = 3)(x)\nx = Dropout(0.4)(x)\nx1 = DepthwiseConv2D((3, 3), strides = 1, padding = \"same\", activation = \"relu\")(x1)\nx = BatchNormalization(axis = 3)(x)\nx = Dropout(0.4)(x)\nx1 = DepthwiseConv2D((3, 3), strides = 1, padding = \"same\")(x1)\nx = BatchNormalization(axis = 3)(x)\nx = Dropout(0.4)(x)\nx1 = MaxPooling2D((2, 2), strides = 1)(x1)\n\nx = concatenate([x1, Conv2D(256, (2, 2), strides = 1)(x)])\n\n\nx = Activation(\"relu\")(x)\nx = Conv2D(256, (3, 3), strides = 1, padding = \"same\", activation = \"relu\")(x)\nx = BatchNormalization(axis = 3)(x)\nx = Dropout(0.4)(x)\nx = Conv2D(128, (3, 3), strides = 1, padding = \"same\", activation = \"relu\")(x)\nx = BatchNormalization(axis = 3)(x)\nx = Dropout(0.4)(x)\nx = Flatten()(x)\n\nx = Dense(1, activation = \"sigmoid\")(x)\n\n\nmodel2 = Model(inp, x)\nmodel2.compile(optimizer = \"adam\", loss = \"binary_crossentropy\", metrics = [\"accuracy\"])\nmodel2.summary()", "Model: \"model_1\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_2 (InputLayer) (None, 96, 96, 3) 0 \n__________________________________________________________________________________________________\nconv2d_14 (Conv2D) (None, 48, 48, 32) 896 input_2[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_1 (BatchNor (None, 48, 48, 32) 128 conv2d_14[0][0] \n__________________________________________________________________________________________________\ndropout_3 (Dropout) (None, 48, 48, 32) 0 batch_normalization_1[0][0] \n__________________________________________________________________________________________________\nconv2d_15 (Conv2D) (None, 48, 48, 64) 18496 dropout_3[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_2 (BatchNor (None, 48, 48, 64) 256 conv2d_15[0][0] \n__________________________________________________________________________________________________\ndropout_4 (Dropout) (None, 48, 48, 64) 0 batch_normalization_2[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_3 (BatchNor (None, 48, 48, 64) 256 dropout_4[0][0] \n__________________________________________________________________________________________________\ndropout_5 (Dropout) (None, 48, 48, 64) 0 
batch_normalization_3[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_1 (DepthwiseCo (None, 48, 48, 64) 640 dropout_4[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_4 (BatchNor (None, 48, 48, 64) 256 dropout_5[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_2 (DepthwiseCo (None, 48, 48, 64) 640 depthwise_conv2d_1[0][0] \n__________________________________________________________________________________________________\ndropout_6 (Dropout) (None, 48, 48, 64) 0 batch_normalization_4[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_6 (MaxPooling2D) (None, 47, 47, 64) 0 depthwise_conv2d_2[0][0] \n__________________________________________________________________________________________________\nconv2d_16 (Conv2D) (None, 47, 47, 64) 16448 dropout_6[0][0] \n__________________________________________________________________________________________________\nconcatenate_1 (Concatenate) (None, 47, 47, 128) 0 max_pooling2d_6[0][0] \n conv2d_16[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_5 (BatchNor (None, 47, 47, 128) 512 concatenate_1[0][0] \n__________________________________________________________________________________________________\ndropout_7 (Dropout) (None, 47, 47, 128) 0 batch_normalization_5[0][0] \n__________________________________________________________________________________________________\nactivation_1 (Activation) (None, 47, 47, 128) 0 concatenate_1[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_6 (BatchNor (None, 47, 47, 128) 512 dropout_7[0][0] \n__________________________________________________________________________________________________\nconv2d_17 (Conv2D) (None, 47, 47, 256) 295168 activation_1[0][0] \n__________________________________________________________________________________________________\ndropout_8 (Dropout) (None, 47, 47, 128) 0 batch_normalization_6[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_3 (DepthwiseCo (None, 47, 47, 256) 2560 conv2d_17[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_7 (BatchNor (None, 47, 47, 128) 512 dropout_8[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_4 (DepthwiseCo (None, 47, 47, 256) 2560 depthwise_conv2d_3[0][0] \n__________________________________________________________________________________________________\ndropout_9 (Dropout) (None, 47, 47, 128) 0 batch_normalization_7[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_7 (MaxPooling2D) (None, 46, 46, 256) 0 depthwise_conv2d_4[0][0] \n__________________________________________________________________________________________________\nconv2d_18 (Conv2D) (None, 46, 46, 256) 131328 dropout_9[0][0] \n__________________________________________________________________________________________________\nconcatenate_2 (Concatenate) (None, 46, 46, 512) 0 max_pooling2d_7[0][0] \n conv2d_18[0][0] 
\n__________________________________________________________________________________________________\nactivation_2 (Activation) (None, 46, 46, 512) 0 concatenate_2[0][0] \n__________________________________________________________________________________________________\nconv2d_19 (Conv2D) (None, 46, 46, 256) 1179904 activation_2[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_8 (BatchNor (None, 46, 46, 256) 1024 conv2d_19[0][0] \n__________________________________________________________________________________________________\ndropout_10 (Dropout) (None, 46, 46, 256) 0 batch_normalization_8[0][0] \n__________________________________________________________________________________________________\nconv2d_20 (Conv2D) (None, 46, 46, 128) 295040 dropout_10[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_9 (BatchNor (None, 46, 46, 128) 512 conv2d_20[0][0] \n__________________________________________________________________________________________________\ndropout_11 (Dropout) (None, 46, 46, 128) 0 batch_normalization_9[0][0] \n__________________________________________________________________________________________________\nflatten_2 (Flatten) (None, 270848) 0 dropout_11[0][0] \n__________________________________________________________________________________________________\ndense_4 (Dense) (None, 1) 270849 flatten_2[0][0] \n==================================================================================================\nTotal params: 2,218,497\nTrainable params: 2,216,513\nNon-trainable params: 1,984\n__________________________________________________________________________________________________\n" ], [ "#DENSENET MODEL NO. 
3\nfrom tensorflow.keras.applications import DenseNet201\nimport tensorflow.keras.layers as L\ndnet201 = DenseNet201(\n input_shape=(96,96, 3),\n include_top=False\n)\ndnet201.trainable = True\n\nmodel3 = tf.keras.Sequential([\n dnet201,\n L.GlobalAveragePooling2D(),\n L.Dense(1, activation='sigmoid')\n])\nmodel3.compile(\n optimizer='adam',\n loss = 'binary_crossentropy',\n metrics=['accuracy']\n)\n\nmodel3.summary()", "Downloading data from https://github.com/keras-team/keras-applications/releases/download/densenet/densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5\n" ], [ "batch_size=128\nepochs=30\n\nhistory = model.fit(x_train,\n y_train,\n batch_size=batch_size,\n nb_epoch=epochs,\n verbose=1,\n validation_data=(x_val,y_val))", "_____no_output_____" ], [ "batch_size=128\nepochs=15\n\nhistory2 = model2.fit(x_train,\n y_train,\n batch_size=batch_size,\n nb_epoch=epochs,\n verbose=1,\n validation_data=(x_val,y_val))", "_____no_output_____" ], [ "batch_size=128\nepochs=30\n\nhistory3 = model3.fit(x_train,\n y_train, \n batch_size=batch_size,\n nb_epoch=epochs,\n verbose=1,\n validation_data=(x_val,y_val))", "_____no_output_____" ], [ "model.save(\"vgg16.h5\")", "_____no_output_____" ], [ "model2.save(\"xception.h5\")", "_____no_output_____" ], [ "model3.save(\"densenet.h5\") ", "_____no_output_____" ] ], [ [ "**EVALUATION**", "_____no_output_____" ] ], [ [ "scores = model.evaluate(x_val, y_val, verbose=0)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])", "_____no_output_____" ], [ "scores = model2.evaluate(x_val, y_val, verbose=0)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])", "_____no_output_____" ], [ "scores = model3.evaluate(x_val, y_val, verbose=0)\nprint('Test loss_3:', scores[0])\nprint('Test accuracy_3:', scores[1])", "_____no_output_____" ] ], [ [ "**PREDICTION**", "_____no_output_____" ] ], [ [ "y_test_prob = model.predict(test_images)\npred_df = pd.DataFrame({'image_name': test['image_name'], 'target': np.concatenate(y_test_prob)})\npred_df.to_csv('submission_vgg.csv',header=True, index=False)\npred_df.head(10)", "_____no_output_____" ], [ "y_test_prob2 = model2.predict(test_images)\npred_df2 = pd.DataFrame({'image_name': test['image_name'], 'target': np.concatenate(y_test_prob2)})\npred_df2.to_csv('submission_xception.csv',header=True, index=False)\npred_df2.head(10)", "_____no_output_____" ], [ "y_test_prob3 = model3.predict(test_images)\npred_df3 = pd.DataFrame({'image_name': test['image_name'], 'target': np.concatenate(y_test_prob3)})\npred_df3.to_csv('submission_dense.csv',header=True, index=False)\npred_df3.head(10)", "_____no_output_____" ] ], [ [ "**ENSEMBLE**", "_____no_output_____" ] ], [ [ "en = pd.DataFrame({'image_name':test['image_name'], 'target':(0.3*pred_df['target'] + 0.3*pred_df2['target'] + 0.3*pred_df3['target'])})\nen.to_csv('ensemble1.csv',header=True, index=False)\nen.head(10)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4af78c00f3471c625d6d8443cb28e63b3b6d55cd
529
ipynb
Jupyter Notebook
pset_challenging_ext/exercises/nb/p82.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
5
2019-04-08T20:05:37.000Z
2019-12-04T20:48:45.000Z
pset_challenging_ext/exercises/nb/p82.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
8
2019-04-15T15:16:05.000Z
2022-02-12T10:33:32.000Z
pset_challenging_ext/exercises/nb/p82.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
2
2019-04-10T00:14:42.000Z
2020-02-26T20:35:21.000Z
19.592593
123
0.502836
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4af792ca8f289db763df32d285ad0cf2748dea8b
785,440
ipynb
Jupyter Notebook
research/develop/2017-05-01-luipillmann-intro-to-reimbursements.ipynb
richardljs/serenata-de-amor
58432efbed9ab4e4b43f9e2306ecba85f199f0a3
[ "MIT" ]
59
2018-10-03T18:46:31.000Z
2022-01-05T22:39:17.000Z
research/develop/2017-05-01-luipillmann-intro-to-reimbursements.ipynb
richardljs/serenata-de-amor
58432efbed9ab4e4b43f9e2306ecba85f199f0a3
[ "MIT" ]
16
2018-10-03T21:36:50.000Z
2021-04-12T22:10:16.000Z
research/develop/2017-05-01-luipillmann-intro-to-reimbursements.ipynb
richardljs/serenata-de-amor
58432efbed9ab4e4b43f9e2306ecba85f199f0a3
[ "MIT" ]
20
2018-10-03T19:14:57.000Z
2021-04-12T20:50:44.000Z
255.427642
144,938
0.889075
[ [ [ "# Intro to reimbursements: overview with visualization\n\nThis notebook provides an overview of the `2017-03-15-reimbursements.xz` dataset, which contains broad data regarding CEAP usage in all terms since 2009. \n\nIt aims to provide an example of basic analyses and visualization by exploring topics such as:\n\n- Average monthly spending per congressperson along the years\n- Seasonality in reimbursements\n- Reimbursements by type of spending\n- Which party has the most spending congressmen?\n- Which state has the most spending congressmen?\n- Who were the most hired suppliers by amount paid?\n- Which were the most expensive individual reimbursements?\n\nQuestions are not explicitly answered. Charts and tables are provided for free interpretation, some of them with brief commentaries from the author.\n\n**Obs**.: original analysis was made considering data from 2009 to 2017 (mainly until 2016). One might want to filter by terms (e.g. 2010-2014) to make more realistic comparisons (spenditures by state, party, congressperson, etc.). Code cell #4 provides an example of how it could be done.\n\n---", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\n\n%matplotlib inline\n\n# Charts styling\nplt.style.use('ggplot')\nrcParams['figure.figsize'] = 15, 8\nmatplotlib.rcParams.update({'font.size': 14})\n#rcParams['font.family'] = 'Georgia'\n\n# Type setting for specific columns\n#DTYPE = dict(cnpj=np.str, cnpj_cpf=np.str, ano=np.int16, term=np.str)\n\n# Experimenting with 'category' type to reduce df size\nDTYPE =dict(cnpj_cpf=np.str,\\\n year=np.int16,\\\n month=np.int16,\\\n installment='category',\\\n term_id='category',\\\n term='category',\\\n document_type='category',\\\n subquota_group_id='category',\\\n subquota_group_description='category',\\\n #subquota_description='category',\\\n subquota_number='category',\\\n state='category',\\\n party='category')", "_____no_output_____" ], [ "reimbursements = pd.read_csv('../data/2017-03-15-reimbursements.xz', \\\n dtype=DTYPE, low_memory=False, parse_dates=['issue_date'])", "_____no_output_____" ], [ "# Creates a DataFrame copy with fewer columns\nr = reimbursements[['year', 'month', 'total_net_value', 'party', 'state', 'term', 'issue_date',\\\n 'congressperson_name', 'subquota_description','supplier', 'cnpj_cpf']]\nr.head()", "_____no_output_____" ] ], [ [ "## Filters depending on the scope of analysis\nHere, filters by state, party, years, etc. can be applied.\n\nObs.: chart commentaries provided might not remain valid depending on filters chosen. 
", "_____no_output_____" ] ], [ [ "# Filters only most recent years (from 2015)\n#r = r[(r.year == 2015) | (r.year == 2016) | (r.year == 2017)]\n\n#r.head()", "_____no_output_____" ] ], [ [ "## Questions & answers", "_____no_output_____" ], [ "### Evolution of average monthly spending along the years\nAre congressmen spending more today in relation to past years?", "_____no_output_____" ], [ "#### How many congressmen in each year?", "_____no_output_____" ] ], [ [ "years = r.year.unique()\n\n# Computes unique names in each year and saves into a pd.Series\nd = dict()\nfor y in years:\n d[y] = r[r.year == y].congressperson_name.nunique()\n\ns = pd.Series(d)\ns", "_____no_output_____" ], [ "s.plot(kind='bar')\nplt.title('Qtdy of congressmen listed per year')", "_____no_output_____" ] ], [ [ "##### Commentary\nGreater number of congressmen in 2011 and 2015 is due to term transitions which occur during the year.\n\n---", "_____no_output_____" ], [ "#### How much did they spend, in average, per month in each year?\nThis analysis takes into consideration the following elements:\n\n- Main data: \n - Monthly average spending per congressman during each year\n- Relevant aspects for trend comparison:\n - CEAP limit for each year (i.e. the maximum allowed quota increased during the years)\n - Inflation indexes (i.e. prices of goods raised during the years)", "_____no_output_____" ], [ "##### Evolution of inflation (IPCA)", "_____no_output_____" ] ], [ [ "# Source: http://www.ibge.gov.br/home/estatistica/indicadores/precos/inpc_ipca/defaultseriesHist.shtm\nipca_years = [2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016] \nipca_indexes = [0.0431, 0.0590, 0.0650, 0.0583, 0.0591, 0.0641, 0.1067, 0.0629]\n\nipca = pd.DataFrame({\n 'year': ipca_years,\n 'ipca': ipca_indexes\n})\n\n\n# Filters only by years in dataset\nipca = ipca[ipca['year'].isin(r.year.unique())].set_index('year')\nipca.head()", "_____no_output_____" ] ], [ [ "##### Maximum quota allowed (CEAP limits)\nThere is information available for maximum CEAP for 2009 and 2017. Therefore, a simple compound growth rate (CAGR) is calculated from 2009 to 2017. 
Values for years in between are assumed to be a linear composition of the growth rate.", "_____no_output_____" ] ], [ [ "states = ['AC', 'AL', 'AM', 'AP', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MG', 'MS', 'MT', 'PA', 'PB', 'PE', 'PI', 'PR', 'RJ', 'RN', 'RO', 'RR', 'RS', 'SC', 'SE', 'SP', 'TO']\n\n# Source: http://www2.camara.leg.br/a-camara/estruturaadm/diretorias/dirgeral/estrutura-1/deapa/portal-da-posse/ceap-1\nceap_2009 = [40711.32, 37318.73, 39734.17, 39554.50, 35540.51, 38705.50, 27977.66, 34080.83, 32317.69, 38429.49, 32856.38, 36949.65, 35924.24, 38499.17, 38319.91, 37992.68, 37344.18, 35412.67, 32550.32, 38963.25, 39828.33, 41612.80, 37256.00, 36337.92, 36578.43, 33730.95, 35993.76]\n\n# Source: http://www2.camara.leg.br/comunicacao/assessoria-de-imprensa/cota-parlamentar\nceap_2017 = [44632.46, 40944.10, 43570.12, 43374.78, 39010.85, 42451.77, 30788.66, 37423.91, 35507.06, 42151.69, 36092.71, 40542.84, 39428.03, 42227.45, 42032.56, 41676.80, 40971.77, 38871.86, 35759.97, 42731.99, 43672.49, 45612.53, 40875.90, 39877.78, 40139.26, 37043.53, 39503.61]\n\nceap_limit_states = pd.DataFrame({\n 'ceap_2009': ceap_2009,\n 'ceap_2017': ceap_2017\n}, index=states)\n\nceap_limit_states.head()", "_____no_output_____" ], [ "all_years = ipca_years\n\n# Calculates CAGR according to data available (CEAP@2009 and CEAP@2017), using the CEAP average among states\ncagr = ((ceap_limit_states.ceap_2017.mean() / ceap_limit_states.ceap_2009.mean())**(1./(2017-2009)) - 1)\n\n# Computes estimated CEAP values for years in between 2009 and 2017 using CAGR\nceap_values = []\nfor i in range(2017-2009):\n if i == 0:\n ceap_values.append(ceap_limit_states.ceap_2009.mean())\n elif i == (r.year.nunique() - 1):\n ceap_values.append(ceap_limit_states.ceap_2017.mean())\n else:\n ceap_values.append(ceap_values[i-1] * (1 + cagr))\n \n# Creates df with all years\nceap_limit_years = pd.DataFrame({\n 'year': all_years,\n 'max_avg_ceap': ceap_values\n})\n\n# Filters only by years in dataset\nceap_limit_years = ceap_limit_years[ceap_limit_years['year'].isin(r.year.unique())].set_index('year')\nceap_limit_years.head()", "_____no_output_____" ], [ "# Groups by name summing up spendings\na = r.groupby(['year']).sum().drop('month', 1)\na['congressmen_qty'] = s\na['avg_monthly_value_per_congressmen'] = a['total_net_value'] / a['congressmen_qty'] / 12\na = a.drop(2017, 0) # Neglets 2017\n\n# Adds columns for CEAP limits and IPCA indexes\na['max_avg_ceap'] = ceap_limit_years['max_avg_ceap']\na['pct_of_quota_used'] = (a['avg_monthly_value_per_congressmen'] / a['max_avg_ceap']) * 100\na['ipca'] = ipca['ipca']\na['acc_ipca'] = (a['ipca'] + 1).cumprod() - 1\na", "_____no_output_____" ], [ "# Procedure to handle secondary Y axis\nfig0, ax0 = plt.subplots()\nax1 = ax0.twinx()\n\ny0 = a[['avg_monthly_value_per_congressmen', 'max_avg_ceap']].plot(kind='line', ax=ax0)#, label='Itens vendidos')\ny1 = (a['acc_ipca']*100).plot(kind='line', secondary_y=False, style='g--', ax=ax1)#, label='Preço unitário')\ny0.legend(loc=2) # bar legend to the left\ny1.legend(loc=1) # line legend to the right\n\ny0.set_ylim((0,50000))\n#y1.set_ylim((0,50000))\ny0.set_ylabel('CEAP usage and limit (R$)')\ny1.set_ylabel('Accumulated IPCA index (%)')\n\nplt.title('Avg. monthly congressmen spending vs. 
maximum quota and inflation idx.')\nplt.show()\nplt.close()", "_____no_output_____" ] ], [ [ "##### Commentary\nAlthough average spending has increased along the years, it can be due to both aspects considered: raises in prices and expanded limit for reimbursements.\n\nThe next chart shows how spending has increased with respect to quota limits.", "_____no_output_____" ] ], [ [ "a.pct_of_quota_used.plot()\nplt.ylim((0,100))\nplt.title('Fluctuation of monthly CEAP spending per congressperson (% of max. quota)')", "_____no_output_____" ] ], [ [ "##### Commentary\nThe chart shows that average spending has increased more than quota limits were raised (from ca. 40% to 60% of quota usage). This might be due to the steep rise in inflation levels, as observed in the previous chart.\n\n---", "_____no_output_____" ], [ "### Average monthly spending per congressperson along the years\nThis table shows the data above detailed per congressperson.", "_____no_output_____" ] ], [ [ "# Groups by name summing up spendings\na = r.groupby(['congressperson_name', 'year'])\\\n .sum()\\\n .drop('month', 1)\n\n# Computes average spending per month and unstacks\na['monthly_total_net_value'] = a['total_net_value'] / 12\na = a.drop('total_net_value', 1).unstack()\n\n# Creates subtotal column to the right\na['mean'] = a.mean(axis=1)\n\na.head()", "_____no_output_____" ] ], [ [ "### Seasonality in reimbursements\nOut of curiosity,in which period of the year more reimbursements were issued?", "_____no_output_____" ] ], [ [ "r.groupby('month')\\\n .sum()\\\n .total_net_value\\\n .sort_index()\\\n .plot(kind='bar', rot=0)\n \nplt.title('Fluctuation of reimbursements issued by months (R$)')", "_____no_output_____" ] ], [ [ "### Reimbursements by type of spending\nFor what are congressmen most using their quota?", "_____no_output_____" ] ], [ [ "r.groupby('subquota_description')\\\n .sum()\\\n .total_net_value\\\n .sort_values(ascending=True)\\\n .plot(kind='barh')\n \nplt.title('Total spent by type of service (R$)')", "_____no_output_____" ] ], [ [ "##### Commentary\nThis chart makes it clear what is prioritized by congressmen: publicity of their activity. Voters might judge whether this choice is reasonable or not.\n\n---", "_____no_output_____" ], [ "### Which party has the most spending congressmen?", "_____no_output_____" ], [ "##### How many congressmen in each party?", "_____no_output_____" ] ], [ [ "parties = r.party.unique()\nparties", "_____no_output_____" ], [ "# Computes unique names in each state and saves into a pd.Series\nd = dict()\nfor p in parties:\n d[p] = r[r.party == p].congressperson_name.nunique()\n\ns = pd.Series(d)\ns", "_____no_output_____" ] ], [ [ "#### How much did congressmen from each party spend in the year, in average? ", "_____no_output_____" ] ], [ [ "t = r.groupby('party').sum()\nt = t.drop(['year', 'month'], 1) # Removes useless columns\n\nt['congressmen_per_party'] = s\nyears = r.year.nunique()", "_____no_output_____" ], [ "t['monthly_value_per_congressperson'] = t['total_net_value'] / t['congressmen_per_party'] / (12*years)\nt.sort_values(by='monthly_value_per_congressperson', ascending=False).head()", "_____no_output_____" ], [ "t.monthly_value_per_congressperson\\\n .sort_values(ascending=False)\\\n .plot(kind='bar')\n\nplt.title('Average monthly reimbursements per congressperson by party (R$)')", "_____no_output_____" ] ], [ [ "##### Commentary\nIt is important to note that many congressmen change parties frequently. 
Therefore, anyone interested in drawing conclusions regarding parties might want to analyse the data in further detail than it is presented here.\n\n---", "_____no_output_____" ], [ "### Which state has the most spending congressmen?", "_____no_output_____" ], [ "##### How many congressmen in each state?", "_____no_output_____" ] ], [ [ "states = r.state.unique()\nstates", "_____no_output_____" ], [ "# Computes unique names in each party and saves into a pd.Series\nd = dict()\nfor s in states:\n d[s] = r[r.state == s].congressperson_name.nunique()\n\ns = pd.Series(d)\ns", "_____no_output_____" ] ], [ [ "#### How much did congressmen from each party spend in the year, in average? ", "_____no_output_____" ], [ "##### (!) Important: CEAP maximum value differs among states\nAs already commented previously, CEAP max. quota varies among state, according to: http://www2.camara.leg.br/comunicacao/assessoria-de-imprensa/cota-parlamentar, ", "_____no_output_____" ] ], [ [ "# CEAP maximum values from 2017\nceap_states = ceap_limit_states.drop('ceap_2009',1)\nceap_states.columns = ['monthly_max_ceap'] # Renames column to be compatible to code below\nceap_states.head()", "_____no_output_____" ], [ "t = r.groupby('state').sum()\nt = t.drop(['year', 'month'], 1) # Removes useless columns\n\nt['congressmen_per_state'] = s\nt['monthly_max_ceap'] = ceap_states\nyears = r.year.nunique()", "_____no_output_____" ], [ "t['monthly_value_per_congressperson'] = t['total_net_value'] / t['congressmen_per_state'] / (12*years)\nt['ceap_usage'] = (t['monthly_value_per_congressperson'] / t['monthly_max_ceap']) * 100\n\nt.sort_values(by='ceap_usage', ascending=False).head()", "_____no_output_____" ], [ "t.ceap_usage\\\n .sort_values(ascending=False)\\\n .plot(kind='bar', rot=0)\n\nplt.title('Average monthly CEAP usage per congressperson by state (% of max. quota)')", "_____no_output_____" ] ], [ [ "#### Comparison between given state and the country's average", "_____no_output_____" ] ], [ [ "t.head()", "_____no_output_____" ], [ "country_average = t.ceap_usage.mean()\ncountry_average", "_____no_output_____" ], [ "# Parametrizes single state analysis\nstate = 'SP'\nstate_average = t.loc[state].ceap_usage\nstate_average", "_____no_output_____" ], [ "s = pd.Series()\ns['average_all_states'] = country_average\ns[state] = state_average\ns", "_____no_output_____" ], [ "s.plot(kind='bar', rot=0)\nplt.title('Average monthly CEAP usage per congressperson: ' + state + ' vs. rest of the country (% of max. quota)')", "_____no_output_____" ] ], [ [ "### Who were the top spenders of all time in absolute terms?", "_____no_output_____" ] ], [ [ "r.groupby('congressperson_name')\\\n .sum()\\\n .total_net_value\\\n .sort_values(ascending=False)\\\n .head(10)", "_____no_output_____" ], [ "r.groupby('congressperson_name')\\\n .sum()\\\n .total_net_value\\\n .sort_values(ascending=False)\\\n .head(30)\\\n .plot(kind='bar')\n\nplt.title('Total reimbursements issued per congressperson (all years)')", "_____no_output_____" ] ], [ [ "##### Commentary\nBecause the dataset comprises 2009-2017, it might not be reasonable to draw any hard conclusions by looking to this chart alone. Some congressmen might have been elected for longer periods and that would reflect on higher reimbursement total values.\n\nFor a more detailed - hence coherent - analysis, one might want to make this comparison for each term (e.g. 2010-2014). 
That would better identify \"top spenders\" by comparing congressmen spendings on the same time period.\n\nAnother interesting analysis can be made by expanding the chart to all congressmen, not only the top 30. This enables a richer look at how discrepant top spenders are from the rest. To do that, just change `.head(30)\\` argument in the previous cell.\n\n---", "_____no_output_____" ], [ "### Who were the most hired suppliers by amount paid?\nThis analysis identifies suppliers by their unique CNPJ. It is worth noting that, commonly, some telecom carriers use different CNPJ for its subsidiaries in different states (e.g. TIM SP, TIM Sul, etc).", "_____no_output_____" ] ], [ [ "sp = r.groupby(['cnpj_cpf', 'supplier', 'subquota_description'])\\\n .sum()\\\n .drop(['year', 'month'], 1)\\\n .sort_values(by='total_net_value', ascending=False)\n\nsp.reset_index(inplace=True) \nsp = sp.set_index('cnpj_cpf')\n\nsp.head()", "_____no_output_____" ], [ "cnpj = r.groupby('cnpj_cpf')\\\n .sum()\\\n .drop(['year', 'month'], 1)\\\n .sort_values(by='total_net_value', ascending=False)\n\ncnpj.head()", "_____no_output_____" ], [ "# Adds supplier name besides total_net_value in cnpj df\n\ncnpj['supplier'] = '' # Creates empty column\ncnpj = cnpj.head(1000) # Gets only first 1000 for this analysis", "_____no_output_____" ], [ "# Looks up for supplier names in sp df and fills cnpj df (it might take a while to compute...)\n\nfor i in range(len(cnpj)):\n try:\n cnpj.set_value(cnpj.index[i], 'supplier', sp.loc[cnpj.index[i]].supplier.iloc[0])\n except:\n cnpj.set_value(cnpj.index[i], 'supplier', sp.loc[cnpj.index[i]].supplier)\n\ncnpj.head(10)", "_____no_output_____" ], [ "# Fixes better indexing to plot in a copy\nsp2 = cnpj.set_index('supplier')\n\nsp2.head(30)\\\n .plot(kind='bar')\n\nplt.title('Most hired suppliers (unique CNPJ) by total amount paid (R$)')", "_____no_output_____" ] ], [ [ "##### Commentary\nIn general, telecom carries were the suppliers with higher concentration of reimbursements. It is worth noting, however, that Telecommunication subquota accounts for only 8% of the reimbursents. This might suggest a 'long tail' pattern for other subquota types such as publicity, which accounts for 28% of all reimbursements.\n\nAnother aspect worth noting is the fact that some individual suppliers (\"pessoas físicas\") appear as top 15 suppliers (e.g. Mr. Douglas da Silva and Mrs. Joceli do Nascimento). One might wonder if such concentration of reimbursements for single-person suppliers is reasonable.", "_____no_output_____" ] ], [ [ "pct_telecom = r[r['subquota_description'] == 'Telecommunication'].total_net_value.sum() / r.total_net_value.sum()\npct_telecom", "_____no_output_____" ], [ "pct_publicity = r[r['subquota_description'] == 'Publicity of parliamentary activity'].total_net_value.sum() / r.total_net_value.sum()\npct_publicity", "_____no_output_____" ] ], [ [ "#### Congressmen that hired the top supplier and how much they paid", "_____no_output_____" ] ], [ [ "r.groupby(['cnpj_cpf', 'congressperson_name'])\\\n .sum()\\\n .sort_values(by='total_net_value', ascending=False)\\\n .loc['02558157000162']\\\n .total_net_value\\\n .head(20)", "_____no_output_____" ] ], [ [ "### Which are the most expensive individual reimbursements?", "_____no_output_____" ] ], [ [ "r = r.sort_values(by='total_net_value', ascending=False)\nr.head(20)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4af7a47b1d178fd12dc11104b7b40c1088e8389e
11,243
ipynb
Jupyter Notebook
examples/colab/senteval_for_universal_sentence_encoder_cmlm.ipynb
Bobbyorr007/hub
efadb65f27b89058a23ac68475996b18dc4dc1b7
[ "Apache-2.0" ]
1
2022-03-12T11:58:50.000Z
2022-03-12T11:58:50.000Z
examples/colab/senteval_for_universal_sentence_encoder_cmlm.ipynb
Bobbyorr007/hub
efadb65f27b89058a23ac68475996b18dc4dc1b7
[ "Apache-2.0" ]
null
null
null
examples/colab/senteval_for_universal_sentence_encoder_cmlm.ipynb
Bobbyorr007/hub
efadb65f27b89058a23ac68475996b18dc4dc1b7
[ "Apache-2.0" ]
null
null
null
44.615079
480
0.567286
[ [ [ "**Copyright 2021 The TensorFlow Hub Authors.**\n\nLicensed under the Apache License, Version 2.0 (the \"License\");", "_____no_output_____" ] ], [ [ "# Copyright 2021 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================", "_____no_output_____" ] ], [ [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/senteval_for_universal_sentence_encoder_cmlm.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/hub/blob/master/examples/colab/senteval_for_universal_sentence_encoder_cmlm.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/senteval_for_universal_sentence_encoder_cmlm.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n <td>\n <a href=\"https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1\"><img src=\"https://www.tensorflow.org/images/hub_logo_32px.png\" />See TF Hub model</a>\n </td>\n</table>", "_____no_output_____" ], [ "#Universal Sentence Encoder SentEval demo\nThis colab demostrates the [Universal Sentence Encoder CMLM model](https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1) using the [SentEval](https://github.com/facebookresearch/SentEval) toolkit, which is a library for measuring the quality of sentence embeddings. The SentEval toolkit includes a diverse set of downstream tasks that are able to evaluate the generalization power of an embedding model and to evaluate the linguistic properties encoded.\n\nRun the first two code blocks to setup the environment, in the third code block you can pick a SentEval task to evaluate the model. A GPU runtime is recommended to run this Colab.\n\nTo learn more about the Universal Sentence Encoder CMLM model, see https://openreview.net/forum?id=WDVD4lUCTzU.", "_____no_output_____" ] ], [ [ "#@title Install dependencies\n!pip install --quiet \"tensorflow-text==2.8.*\"\n!pip install --quiet torch==1.8.1", "_____no_output_____" ] ], [ [ "## Download SentEval and task data\nThis step download SentEval from github and execute the data script to download the task data. 
It may take up to 5 minutes to complete.", "_____no_output_____" ] ], [ [ "#@title Install SentEval and download task data\n!rm -rf ./SentEval\n!git clone https://github.com/facebookresearch/SentEval.git\n!cd $PWD/SentEval/data/downstream && bash get_transfer_data.bash > /dev/null 2>&1", "_____no_output_____" ] ], [ [ "#Execute a SentEval evaulation task\nThe following code block executes a SentEval task and output the results, choose one of the following tasks to evaluate the USE CMLM model:\n\n```\nMR\tCR\tSUBJ\tMPQA\tSST\tTREC\tMRPC\tSICK-E\n```\n\nSelect a model, params and task to run. The rapid prototyping params can be used for reducing computation time for faster result.\n\nIt typically takes 5-15 mins to complete a task with the **'rapid prototyping'** params and up to an hour with the **'slower, best performance'** params.\n\n```\nparams = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}\nparams['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,\n 'tenacity': 3, 'epoch_size': 2}\n```\n\nFor better result, use the slower **'slower, best performance'** params, computation may take up to 1 hour:\n\n```\nparams = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10}\nparams['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 16,\n 'tenacity': 5, 'epoch_size': 6}\n```\n\n", "_____no_output_____" ] ], [ [ "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport sys\nsys.path.append(f'{os.getcwd()}/SentEval')\n\nimport tensorflow as tf\n\n# Prevent TF from claiming all GPU memory so there is some left for pytorch.\ngpus = tf.config.list_physical_devices('GPU')\nif gpus:\n # Memory growth needs to be the same across GPUs.\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\nimport tensorflow_hub as hub\nimport tensorflow_text\nimport senteval\nimport time\n\nPATH_TO_DATA = f'{os.getcwd()}/SentEval/data'\nMODEL = 'https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1' #@param ['https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1', 'https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-large/1']\nPARAMS = 'rapid prototyping' #@param ['slower, best performance', 'rapid prototyping']\nTASK = 'CR' #@param ['CR','MR', 'MPQA', 'MRPC', 'SICKEntailment', 'SNLI', 'SST2', 'SUBJ', 'TREC']\n\nparams_prototyping = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}\nparams_prototyping['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,\n 'tenacity': 3, 'epoch_size': 2}\n\nparams_best = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10}\nparams_best['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 16,\n 'tenacity': 5, 'epoch_size': 6}\n\nparams = params_best if PARAMS == 'slower, best performance' else params_prototyping\n\npreprocessor = hub.KerasLayer(\n \"https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3\")\nencoder = hub.KerasLayer(\n \"https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1\")\n\ninputs = tf.keras.Input(shape=tf.shape(''), dtype=tf.string)\noutputs = encoder(preprocessor(inputs))\n\nmodel = tf.keras.Model(inputs=inputs, outputs=outputs)\n\ndef prepare(params, samples):\n return\n\ndef batcher(_, batch):\n batch = [' '.join(sent) if sent else '.' 
for sent in batch]\n return model.predict(tf.constant(batch))[\"default\"]\n\n\nse = senteval.engine.SE(params, batcher, prepare)\nprint(\"Evaluating task %s with %s parameters\" % (TASK, PARAMS))\nstart = time.time()\nresults = se.eval(TASK)\nend = time.time()\nprint('Time took on task %s : %.1f. seconds' % (TASK, end - start))\nprint(results)\n", "_____no_output_____" ] ], [ [ "#Learn More\n\n* Find more text embedding models on [TensorFlow Hub](https://tfhub.dev)\n* See also the [Multilingual Universal Sentence Encoder CMLM model](https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-base-br/1)\n* Check out other [Universal Sentence Encoder models](https://tfhub.dev/google/collections/universal-sentence-encoder/1)\n\n## Reference\n\n* Ziyi Yang, Yinfei Yang, Daniel Cer, Jax Law, Eric Darve. [Universal Sentence Representations Learning with Conditional Masked Language Model. November 2020](https://openreview.net/forum?id=WDVD4lUCTzU)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4af7aaf6e92ab11b4ec1d4a79f111624fa5dd5fb
245,212
ipynb
Jupyter Notebook
Untitled.ipynb
alexander-kirillov/PointRend
ce1a399f14369629abe2cc09d43529a247177559
[ "MIT" ]
208
2019-12-23T02:01:03.000Z
2022-03-01T02:18:57.000Z
Untitled.ipynb
Babaee/PointRend
bba6613a1f2de2e3b38c2d57ff616640091fc2b2
[ "MIT" ]
6
2019-12-24T09:55:07.000Z
2021-11-30T02:38:23.000Z
Untitled.ipynb
Babaee/PointRend
bba6613a1f2de2e3b38c2d57ff616640091fc2b2
[ "MIT" ]
44
2019-12-23T01:47:05.000Z
2022-03-31T13:55:23.000Z
499.413442
87,724
0.803823
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image", "_____no_output_____" ], [ "def _if_near(point, mask, nearest_neighbor):\n nn = nearest_neighbor\n w,h = mask.shape\n x,y = point\n mask = np.pad(mask,nn,'edge')\n x += nn\n y += nn\n if(w+nn>x and h+nn>y):\n x_i,y_i = int(x+0.5),int(y+0.5)\n #return True\n near = mask[x_i-nn:x_i+nn,y_i-nn:y_i+nn]\n if near.max()-near.min() != 0:\n if(x<w and y<h):\n return True\n return False\n\n\n# ***\n# *n* It's an example of 1-neighbor\n# ***\n#\n# *****\n# *****\n# **n** It's an example of 2-neighbor\n# *****\n# *****\n#\n# Did you get any of that?\n\ndef _get_edge_k_neighbor(img,k):\n '''\n I will say the idea is identical to the\n the original _is_near, but this implement save the\n temporal result and thus speed up the whole\n process by a massive margin when a big amount of\n points requires calculation.\n\n This will return a array sized (w,h), \n store the max-min value in its neighbor.\n '''\n w,h = img.shape\n padded = np.pad(img, k, 'edge')\n # this is the result image array\n res = np.zeros(img.shape)\n \n # This is the main process\n for i in range(w):\n for j in range(h):\n neighbor = padded[i:i+2*k,j:j+2*k]\n _max = neighbor.max()\n _min = neighbor.min()\n res[i-k,j-k] = (_max-_min)\n \n return res\n\ndef _new_if_near(point, edge_k_neighbor):\n x, y = point\n x, y = int(x), int(y)\n return edge_k_neighbor[x][y]>0\n \n\ndef getpoint(mask_img, k, beta, training = True, nearest_neighbor=3, new_if_near = True):\n w,h = mask_img.shape\n N = int(beta*k*w*h)\n xy_min = [0, 0]\n xy_max = [w-1, h-1]\n points = np.random.uniform(low=xy_min, high=xy_max, size=(N,2))\n #print(points)\n if(beta>1 or beta<0): \n print(\"beta should be in range [0,1]\")\n return NULL\n \n # for the training, the mask is a hard mask\n if training == True:\n if beta ==0: return points\n res = []\n if new_if_near:\n edge_k_neighbor = _get_edge_k_neighbor(mask_img,nearest_neighbor)\n for p in points:\n if _new_if_near(p,edge_k_neighbor):\n res.append(p)\n else:\n for p in points:\n if _if_near(p,mask_img,nearest_neighbor):\n res.append(p)\n\n others = int((1-beta)*k*w*h)\n not_edge_points = np.random.uniform(low=xy_min, high=xy_max, size=(others,2))\n for p in not_edge_points:\n res.append(p)\n return res\n \n # for the inference, the mask is a soft mask\n if training == False:\n res = []\n for i in range(w):\n for j in range(h):\n if mask_img[i,j] > 0:\n res.append((i,j))\n return res\n ", "_____no_output_____" ], [ "def _generate_mask(size, func = lambda x:x*x):\n w,h = size\n res = np.zeros((w,h))\n for x in range(w):\n for y in range(h):\n if y> func(x): res[x,y] = 255\n return res", "_____no_output_____" ], [ "my_mask = _generate_mask((14,14), )", "_____no_output_____" ], [ "plt.imshow(my_mask)", "_____no_output_____" ], [ "%%timeit\n#plt.imshow(my_mask,cmap=\"Purples\")\npoints = getpoint(mask_img=my_mask,k=1000,beta=0.8,nearest_neighbor=2,new_if_near=True)\n# points = list(zip(*points))\n# plt.scatter(points[1],points[0],c='black',s=4)\n", "267 ms ± 12.7 ms per loop (mean ± std. dev. 
of 7 runs, 1 loop each)\n" ], [ "plt.imshow(my_mask,cmap=\"Purples\")\npoints = getpoint(my_mask,1,1,nearest_neighbor=2)\npoints = list(zip(*points))\nplt.scatter(points[1],points[0],c='black',s=4)\n", "_____no_output_____" ], [ "plt.imshow(my_mask,cmap=\"Purples\")\npoints = getpoint(my_mask,10,1,nearest_neighbor=2)\npoints = list(zip(*points))\nplt.scatter(points[1],points[0],c='black',s=4)", "_____no_output_____" ], [ "my_mask = np.asarray(Image.open(\"tree_mask.jpg\").resize((32,32)))\nmy_mask = my_mask[:,:,0]", "_____no_output_____" ], [ "my_mask.shape", "_____no_output_____" ], [ "plt.imshow(my_mask,cmap=\"Purples\")\npoints = getpoint(my_mask,1,1,nearest_neighbor=1)\npoints = list(zip(*points))\nplt.scatter(points[1],points[0],c='black',s=4)", "_____no_output_____" ], [ "from pointGenerate import getpoint\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np", "_____no_output_____" ], [ "resolution = 128\nsz = (resolution,resolution)\nmy_mask = np.asarray(Image.open(\"tree_mask.jpg\").resize(sz))\nmy_img = np.asarray(Image.open(\"tree.jpg\").resize(sz))\nmy_mask = my_mask[:,:,0]\n\npoints = getpoint(my_mask,0.25,0.95,nearest_neighbor=1)\npoints = list(zip(*points))\n\nplt.subplot(121)\nplt.imshow(my_mask,cmap=\"Purples\")\nplt.scatter(points[1],points[0],c='black',s=4)\nplt.title('k=0.25, beta=0.95')\n\nplt.subplot(122)\nplt.imshow(my_img,cmap=\"Purples\")\nplt.scatter(points[1],points[0],c='black',s=4)\nplt.title('k=0.25, beta=0.95')\n\nplt.savefig('resolution=128.jpg',dpi=400)\n", "_____no_output_____" ], [ "points = getpoint(my_mask,1,-0.95,nearest_neighbor=1)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt", "_____no_output_____" ], [ "n = [196,1960,19600,196000]\nt1 = [6.11,61.8,609,6280]\nv1 = [0.122,2.92,14.3,99.9]\nt2 = [1.3,3.93,28.2,267]\nv2 = [0.0084,0.383,0.643,12.7]", "_____no_output_____" ], [ "fig, ax2 = plt.subplots(1, 1)\n\n\nax2.set_xscale(\"log\")\nax2.set_yscale(\"log\")\nax2.set_adjustable(\"datalim\")\nax2.plot(n, t1, \"o-\", label = 'original algorithm')\nax2.plot(n, t2, \"go-\", label = 'improved algorithm')\n#ax2.set_xlim(1e-1, 1e2)\n#ax2.set_ylim(1e-1, 1e3)\nplt.ylabel('Time (ms)')\nplt.xlabel('Number of points')\nax2.set_aspect(1)\nax2.set_title(\"Performance improvement\")\nplt.legend()\nplt.savefig('performance improvement.png',dpi=300)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af7bbade9e5701452db50f58036e0db1fe8afdd
126,391
ipynb
Jupyter Notebook
.ipynb_checkpoints/DC Python Tutorial 2-checkpoint.ipynb
CEE-4540/python-tutorial-juandirection
e9a75b3ac3ab745244ff7798b5f86c6f9769bb6f
[ "MIT" ]
null
null
null
.ipynb_checkpoints/DC Python Tutorial 2-checkpoint.ipynb
CEE-4540/python-tutorial-juandirection
e9a75b3ac3ab745244ff7798b5f86c6f9769bb6f
[ "MIT" ]
null
null
null
.ipynb_checkpoints/DC Python Tutorial 2-checkpoint.ipynb
CEE-4540/python-tutorial-juandirection
e9a75b3ac3ab745244ff7798b5f86c6f9769bb6f
[ "MIT" ]
null
null
null
66.000522
23,032
0.752285
[ [ [ "# DC Python Tutorial 2: 10-19\n \nHint: If you are typing a function name and want to know what the options are for completing what you are typing, just hit the tab key for a menu of options.\n\nHint: If you want to see the source code associated with a function, you can do the following\nimport inspect\ninspect.getsource(foo) \n\nWhere \"foo\" is the function that you'd like to learn about. \n\nEach cell in Jupyter is either code or markdown (select in the drop down menu above). You can learn about markdown language from the help menu. Markdown allows you to create very nicely formatted text including Latex equations.\n$$c = \\sqrt{a^2 + b^2}$$\n\nEach cell is either in edit mode (select this cell and press the enter key) or in display mode (press shift enter). Shift Enter also executes the code in the cell.\n\nWhen you open a Jupyter notebook it is convenient to go to the cell menu and select Run All so that all results are calculated and displayed.\n\nThe Python Kernel remembers all definitions (functions and variables) as they are defined based on execution of the cells in the Jupyter notebook. Thus if you fail to execute a cell, the parameters defined in that cell won't be available. Similarly, if you define a parameter and then delete that line of code, that parameter remains defined until you go to the Kernel menu and select restart. It is good practice to select Restart & Run All from the Kernel menu after completing an assignment to make sure that everything in your notebook works correctly and that you haven't deleted an essential line of code!", "_____no_output_____" ] ], [ [ "#Here we import packages that we will need for this notebook. You can find out about these packages in the Help menu.\n\n\n# although math is \"built in\" it needs to be imported so it's functions can be used.\nimport math\n\nfrom scipy import constants, interpolate\n\n#see numpy cheat sheet https://www.dataquest.io/blog/images/cheat-sheets/numpy-cheat-sheet.pdf\n#The numpy import is needed because it is renamed here as np.\nimport numpy as np\n\n#Pandas is used to import data from spreadsheets\nimport pandas as pd\n \nimport matplotlib.pyplot as plt\n\n# sys and os give us access to operating system directory paths and to sys paths.\nimport sys, os\n\n# If you place your GitHub directory in your documents folder and \n# clone both the design challenge notebook and the AguaClara_design repo, then this code should all work.\n# If you have your GitHub directory at a different location on your computer, \n# then you will need to adjust the directory path below.\n# add the path to your GitHub directory so that python can find files in other contained folders.\npath1 = '~'\npath2 = 'Documents'\npath3 = 'GitHub'\npath4 = os.path.join(path1, path2, path3)\nmyGitHubdir = os.path.expanduser(path4)\nif myGitHubdir not in sys.path:\n sys.path.append(myGitHubdir)\n\n# add imports for AguaClara code that will be needed\n# physchem has functions related to hydraulics, fractal flocs, flocculation, sedimentation, etc.\nfrom aide_design import physchem as pc\n\n# pipedatabase has functions related to pipe diameters\nfrom aide_design import pipedatabase as pipe\n\n# units allows us to include units in all of our calculations\nfrom aide_design.units import unit_registry as u\n\nfrom aide_design import utility as ut\n", "_____no_output_____" ] ], [ [ "---\n\n## Resources in getting started with Python \nHere are some basic [Python functions](http://docs.python.org/3/library/functions.html) that might be helpful to look 
through. ", "_____no_output_____" ], [ "## Transitioning From Matlab To Python\n\n**Indentation** - When writing functions or using statements, Python recognizes code blocks from the way they are indented. A code block is a group of statements that, together, perform a task. A block begins with a header that is followed by one or more statements that are indented with respect to the header. The indentation indicates to the Python interpreter, and to programmers that are reading the code, that the indented statements and the preceding header form a code block.\n\n**Suppressing Statements** - Unlike Matlab, you do not need a semi-colon to suppress a statement in Python;\n\n**Indexing** - Matlab starts at index 1 whereas Python starts at index 0. \n\n**Functions** - In Matlab, functions are written by invoking the keyword \"function\", the return parameter(s), the equal to sign, the function name and the input parameters. A function is terminated with \"end\". \n\n`function y = average(x)\nif ~isvector(x)\n error('Input must be a vector')\nend\ny = sum(x)/length(x); \nend`\n\nIn Python, functions can be written by using the keyword \"def\", followed by the function name and then the input parameters in paranthesis followed by a colon. A function is terminated with \"return\". \n\n`def average(x):\n if ~isvector(x)\n raise VocationError(\"Input must be a vector\")\n return sum(x)/length(x); `\n \n**Statements** - for loops and if statements do not require the keyword \"end\" in Python. The loop header in Matlab varies from that of Python. Check examples below:\n\nMatlab code\n\n`s = 10; \nH = zeros(s); \nfor c = 1:s \n for r = 1:s \n H(r,c) = 1/(r+c-1); \n end\nend`\n\nPython code\n\n`s = 10 \nH = []\nfor (r in range(s)):\n for (c in range(s)):\n H[r][c].append(1/(r+c-1)`\n \n \n**Printing** - Use \"print()\" in Python instead of \"disp\" in Matlab.\n\n**Helpful Documents**\n\n[Numpy for Matlab Users](https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html)\n\n[Stepping from Matlab to Python](http://stsievert.com/blog/2015/09/01/matlab-to-python/)\n\n[Python for Matlab Users, UC Boulder](http://researchcomputing.github.io/meetup_fall_2014/pdfs/fall2014_meetup13_python_matlab.pdf)", "_____no_output_____" ], [ "---\n\n## Arrays and Lists\n\nPython has no native array type. Instead, it has lists, which are defined using [ ]:", "_____no_output_____" ] ], [ [ "a = [0,1,2,3]", "_____no_output_____" ] ], [ [ "Python has a number of helpful commands to modify lists, and you can read more about them [here](https://docs.python.org/2/tutorial/datastructures.html).", "_____no_output_____" ], [ "In order to use lists as arrays, numpy (numpy provides tools for working with **num**bers in **py**thon) provides an array data type that is defined using ( ). ", "_____no_output_____" ] ], [ [ "a_array = np.array(a)", "_____no_output_____" ], [ "a_array", "_____no_output_____" ] ], [ [ "Pint, which adds unit capabilities to Python, (see section on units below) is compatible with NumPy, so it is possible to add units to arrays and perform certain calculations with these arrays. We recommend using NumPy arrays rather than lists because NumPy arrays can handle units. Additionally, use functions from NumPy if possible instead of function from the math package when possible because the math package does not yet handle units. Units are added by multiplying the number by the unit raised to the appropriate power. 
The pint unit registry was imported above as \"u\" and thus the units for milliliters are defined as u.mL.", "_____no_output_____" ] ], [ [ "a_array_units = a_array * u.m", "_____no_output_____" ], [ "a_array_units", "_____no_output_____" ] ], [ [ "In order to make a 2D array, you can use the same [NumPy array command](https://docs.scipy.org/doc/numpy/reference/generated/numpy.array.html).", "_____no_output_____" ] ], [ [ "b = np.array([[0,1,2],[3,4,5],[6,7,8]])*u.mL\nb", "_____no_output_____" ] ], [ [ "Indexing is done by row and then by column. To call all of the elements in a row or column, use a colon. As you can see in the following example, indexing in python begins at zero. So `b[:,1]` is calling all rows in the second column", "_____no_output_____" ] ], [ [ "b[:,1]", "_____no_output_____" ] ], [ [ "If you want a specific range of values in an array, you can also use a colon to slice the array, with the number before the colon being the index of the first element, and the number after the colon being **one greater** than the index of the last element.", "_____no_output_____" ] ], [ [ "b[1:3,0]", "_____no_output_____" ] ], [ [ "For lists and 1D arrays, the `len()` command can be used to determine the length. Note that the length is NOT equal to the index of the last element because the indexes are zero based. The len function can be used with lists and arrays. For multiple dimension arrays the `len()` command returns the length of the first dimension.", "_____no_output_____" ] ], [ [ "len(a)", "_____no_output_____" ], [ "len(b)", "_____no_output_____" ] ], [ [ "For any higher dimension of array, `numpy.size()` can be used to find the total number of elements and `numpy.shape()` can be used to learn the dimensions of the array.", "_____no_output_____" ] ], [ [ "np.size(b)", "_____no_output_____" ], [ "np.shape(b)", "_____no_output_____" ] ], [ [ "For a listing of the commands you can use to manipulate numpy arrays, refer to the [scipy documentation](https://docs.scipy.org/doc/numpy/reference/routines.array-manipulation.html).", "_____no_output_____" ], [ "Sometimes, it is helpful to have an array of elements that range from zero to a specified number. This can be useful, for example, in creating a graph. To create an array of this type, use [numpy.arange](https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html).", "_____no_output_____" ] ], [ [ "crange = np.arange(10)", "_____no_output_____" ], [ "crange", "_____no_output_____" ], [ "cdetailedrange = np.arange(5,10,0.1)", "_____no_output_____" ], [ "cdetailedrange", "_____no_output_____" ] ], [ [ "---\n\n## Units\n\nUnits are essential to engineering calculations. Units provide a quick check on all of our calculations to help reduce the number of errors in our analysis. Getting the right dimensions back from a calculation doesn't prove that the answer is correct, but getting the wrong dimensions back does prove that the answer is wrong! Unit errors from incorrect conversions are common when using apps that don't calculate with units. Engineering design work should always include units in the calculations. \n\nWe use the [pint package](https://pint.readthedocs.io/) to add unit capabilities to our calculations in Python. We have imported the `pint.UnitRegistry` as 'u' and thus all of pint's units can be used by placing a 'u.' in front of the unit name. Meters are `u.m`, seconds are `u.s`, etc. Most units are simple values that can be used just like other terms in algebraic equations. 
The exception to this are units that have an offset. For example, in the equation PV=nRT, temperature must be given with units that have value of zero at absolute zero. We would like to be able to enter 20 degC into that equation and have it handle the units correctly. But you can't convert from degC to Kelvin by simply multiplying by a conversion factor. Thus for temperature the units have to be handled in a special way.\n\nTemperatures require use of the u.Quantity function to enter the value and the units of temperature separated by a ',' rather than by a multiplication symbol. This is because it doesn't make sense to multiply by a temperature unit because temperatures (that aren't absolute temperatures) have both a slope and a nonzero intercept.\n\nYou can find [constants that are defined in pint](https://github.com/hgrecco/pint/blob/master/pint/constants_en.txt) at the github page for pint.\n\nBelow is a simple calculation illustrating the use of units to calculate the flow through a vertical pipe given a velocity and an inner diameter. We will illustrate how to calculate pipe diameters further ahead in the tutorial.", "_____no_output_____" ] ], [ [ "V_up = 1*u.mm/u.s\nD_reactor = 1*u.inch\nA_reactor = pc.area_circle(D_reactor)\nQ_reactor = V_up*A_reactor\nQ_reactor", "_____no_output_____" ] ], [ [ "The result isn't formatted very nicely. We can select the units we'd like to display by using the `.to` method.", "_____no_output_____" ] ], [ [ "Q_reactor.to(u.mL/u.s)", "_____no_output_____" ] ], [ [ "We can also force the display to be in the metric base units", "_____no_output_____" ] ], [ [ "Q_reactor.to_base_units()", "_____no_output_____" ] ], [ [ "If you need to strip units from a quantity (for example, for calculations using funtions that don't support units) you can use the `.magnitude` method. It is important that you force the quantity to be in the correct units before stripping the units.", "_____no_output_____" ] ], [ [ "Q_reactor.to(u.mL/u.s).magnitude", "_____no_output_____" ] ], [ [ "### Significant digits\nPython will happily display results with 17 digits of precision. We'd like to display a reasonable number of significant digits so that we don't get distracted with 14 digits of useless information. We created a [sig function in the AguaClara_design repository](https://github.com/AguaClara/AguaClara_design/blob/master/utility.py) that allows you to specify the number of significant digits to display. You can couple this with the print function to create a well formatted solution to a calculation. The sig function also displays the accompanying units. \n\nThe sig function call is `ut.sig(value, sigfig)`. 
", "_____no_output_____" ], [ "### Example problem and solution.\nCalculate the number of moles of methane in a 20 L container at 15 psi above atmospheric pressure with a temperature of 30 C.", "_____no_output_____" ] ], [ [ "# First assign the values given in the problem to variables.\nP = 15 * u.psi + 1 * u.atm\nT = u.Quantity(30,u.degC)\nV = 20 * u.L\n# Use the equation PV=nRT and solve for n, the number of moles.\n# The universal gas constant is available in pint.\nnmolesmethane = (P*V/(u.R*T.to(u.kelvin))).to_base_units()\nprint('There are '+ut.sig(nmolesmethane,3)+' of methane in the container.')\nnmolesmethane", "There are 1.62 mol of methane in the container.\n" ] ], [ [ "---\n\n## Functions \n\nWhen it becomes necessary to do the same calculation multiple times, it is useful to create a function to facilitate the calculation in the future.\n\n- Function blocks begin with the keyword def followed by the function name and parentheses ( ).\n- Any input parameters or arguments should be placed within these parentheses. \n- The code block within every function starts with a colon (:) and is indented.\n- The statement return [expression] exits a function and returns an expression to the user. A return statement with no arguments is the same as return None.\n- (Optional) The first statement of a function can the documentation string of the function or docstring, writeen with apostrophes ' '.\n\nBelow is an example of a function that takes three inputs, pressure, volume, and temperature, and returns the number of moles. ", "_____no_output_____" ] ], [ [ "# Creating a function is easy in Python\ndef nmoles(P,V,T):\n return (P*V/(u.R*T.to(u.kelvin))).to_base_units()", "_____no_output_____" ] ], [ [ "Try using the new function to solve the same problem as above. You can reuse the variables. You can use the new function call inside the print statement.", "_____no_output_____" ] ], [ [ "print('There are '+ut.sig(nmoles(P,V,T),3)+' of methane in the container.')", "There are 1.62 mol of methane in the container.\n" ] ], [ [ "---\n\n## Density Function\nWe will create and graph functions describing density and viscosity of water as a function of temperature. We will use the [scipy 1D interpolate function](https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html#d-interpolation-interp1d) to create smooth interpolation between the known data points to generate a smooth function.", "_____no_output_____" ], [ "`density_water`, defined in [`physchem`](https://github.com/AguaClara/AguaClara_design/blob/master/physchem.py), is a function that returns a fluid's density at a given temperature. 
It has one input parameter, temperature (in Celsius).", "_____no_output_____" ] ], [ [ "# Here is an example of how you could define the function yourself if you chose.\n\n# Below are corresponding arrays of temperature and water density with appropriate units attached.\n\n# The 1d interpolation function will use a cubic spline.\nTarray = u.Quantity([0,5,10,20,30,40,50,60,70,80,90,100],u.degC)\nrhoarray = [999.9,1000,999.7,998.2,995.7,992.2,988.1,983.2,977.8,971.8,965.3,958.4]*u.kg/u.m**3\ndef DensityWater(T):\n \n rhointerpolated=interpolate.interp1d(Tarray, rhoarray, kind='cubic')\n rho=rhointerpolated(T.to(u.degC))\n return rho*u.kg/u.m**3\n\n# You can get the density of water for any temperature using this function call.\nprint('The density of water at '+ut.sig(u.Quantity(20,u.degC),3) +' is '+ut.sig(DensityWater(u.Quantity(20,u.degC)),4)+'.')", "The density of water at 20.0 celsius is 998.2 kg/m³.\n" ] ], [ [ "---\n\n## Pipe Database\n\nThe [`pipedatabase`](https://github.com/AguaClara/AguaClara_design/blob/master/pipedatabase.py) file in the `AguaClara_design` has many useful functions concerning pipe sizing. It provides functions that calculate actual pipe inner and outer diameters given the nominal diameter of the pipe. Note that nominal diameter just means the diameter that it is called (hence the discriptor \"nominal\") and thus a 1 inch nominal diameter pipe might not have any dimensions that are actually 1 inch!", "_____no_output_____" ] ], [ [ "# The OD function in pipedatabase returns the outer diameter of a pipe given the nominal diameter, ND. \npipe.OD(6*u.inch)", "_____no_output_____" ] ], [ [ "The ND_SDR_available function returns the nominal diameter of a pipe that has an inner diameter equal to or greater than the requested inner diameter [SDR, standard diameter ratio](http://www.engineeringtoolbox.com/sdr-standard-dimension-ratio-d_318.html). Below we find the smallest available pipe that has an inner diameter of at least 7 cm", "_____no_output_____" ] ], [ [ "IDmin = 7 * u.cm\nSDR = 26\nND_my_pipe = pipe.ND_SDR_available(IDmin,SDR)\nND_my_pipe ", "_____no_output_____" ] ], [ [ "The actual inner diameter of this pipe is", "_____no_output_____" ] ], [ [ "ID_my_pipe = pipe.ID_SDR(ND_my_pipe,SDR)\nprint(ut.sig(ID_my_pipe.to(u.cm),2))", "8.2 cm\n" ] ], [ [ "We can display the available nominal pipe sizes that are in our database.", "_____no_output_____" ] ], [ [ "pipe.ND_all_available()", "_____no_output_____" ] ], [ [ "---\n\n## Physchem\nThe 'AguaClara_design' [physchem](https://github.com/AguaClara/AguaClara_design/blob/master/physchem.py) has many useful fluids functions including Reynolds number, head loss equation, orifice equations, viscosity etc. ", "_____no_output_____" ], [ "---\n\n## Viscosity Functions", "_____no_output_____" ] ], [ [ "#Define the temperature of the fluid so that we can calculate the kinematic viscosity\ntemperature = u.Quantity(20,u.degC)\n#Calculate the kinematic viscosity using the function in physchem which we access using \"pc\"\nnu=pc.viscosity_kinematic(temperature)\nprint('The kinematic viscosity of water at '+ut.sig(temperature,2)+' is '+ut.sig(nu,3))", "The kinematic viscosity of water at 20 celsius is 1.00e-6 m²/s\n" ] ], [ [ "---\n\n\n## Our First Graph!\n\nWe will use [matplotlib](https://matplotlib.org/) to create a graph of water density as a function of temperature. [Here](https://matplotlib.org/users/pyplot_tutorial.html) is a quick tutorial on graphing. 
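If you also want to keep a copy of a figure outside of the notebook, matplotlib can write it to a file before the plot is displayed. A minimal sketch is below (the file name is only a placeholder, and this assumes matplotlib has been imported as `plt` as it is elsewhere in this tutorial):

import matplotlib.pyplot as plt

plt.plot([0, 1, 2], [0, 1, 4], '-')
plt.xlabel('x')
plt.ylabel('y')
# write the figure to disk; call this before plt.show()
plt.savefig('example_plot.png')
plt.show()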
", "_____no_output_____" ] ], [ [ "# Create a list of 100 numbers between 0 and 100 and then assign the units of degC to the array. \n# This array will be the x values of the graph.\n\nGraphTarray = u.Quantity(np.arange(100),u.degC)\n\n#Note the use of the .to method below to display the results in a particular set of units.\nplt.plot(GraphTarray, pc.viscosity_kinematic(GraphTarray).to(u.mm**2/u.s), '-')\nplt.xlabel('Temperature (degrees Celcius)')\nplt.ylabel('Viscosity (mm^2/s)')\nplt.show()", "_____no_output_____" ] ], [ [ "### Reynolds number\nWe will use the physchem functions to calculate the Reynolds number for flow through a pipe.", "_____no_output_____" ] ], [ [ "Q = 5*u.L/u.s\nD = pipe.ID_SDR(4*u.inch,26)\n\nReynolds_pipe = pc.re_pipe(Q,D,nu)\nReynolds_pipe", "_____no_output_____" ] ], [ [ "Now use the sig function to display calulated values to a user specified number of significant figures. ", "_____no_output_____" ] ], [ [ "print('The Reynolds number is '+ut.sig(pc.re_pipe(Q,D,nu),3))", "The Reynolds number is 6.01e+4\n" ] ], [ [ "Here is a table of a few of the equations describing pipe flow and their physchem function counterparts. ", "_____no_output_____" ], [ "## Assorted Fluids Functions\n\n| Equation Name | Equation | Physchem function |\n|---------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------:|\n| Reynolds Number | $Re= \\frac{{4Q}}{{\\pi D\\nu }}$ | `re_pipe(FlowRate, Diam, Nu)` |\n| Swamee-Jain Turbulent Friction factor | ${\\rm{f}} = \\frac{{0.25}}{{{{\\left[ {\\log \\left( {\\frac{\\varepsilon }{{3.7D}} + \\frac{{5.74}}{{{{{\\mathop{\\rm Re}\\nolimits} }^{0.9}}}}} \\right)} \\right]}^2}}}$ | `fric(FlowRate, Diam, Nu, PipeRough)` |\n| Laminar Friction factor | ${\\rm{f}} = \\frac{64}{Re}$ | |\n| Hagen Pousille laminar flow head loss | ${h_{\\rm{f}}} = \\frac{{32\\mu LV}}{{\\rho g{D^2}}} = \\frac{{128\\mu LQ}}{{\\rho g\\pi {D^4}}}$ | |\n| Darcy Weisbach head loss | ${h_{\\rm{f}}} = {\\rm{f}}\\frac{8}{{g{\\pi ^2}}}\\frac{{L{Q^2}}}{{{D^5}}}$ | `headloss_fric(FlowRate, Diam, Length, Nu, PipeRough)` |\n| Swamee-Jain equation for diameter | $0.66\\left ( \\varepsilon ^{1.25}\\left ( \\frac{LQ^{2}}{gh_{f}} \\right )^{4.75}+\\nu Q^{9.4}\\left ( \\frac{L}{gh_{f}} \\right )^{5.2} \\right )^{0.04}$| `diam_swamee(FlowRate, HeadLossFric, Length, Nu, PipeRough)` |", "_____no_output_____" ] ], [ [ "# create a plot that shows both the original data values (plotted as points) \n# and the smooth curve that shows the density function.\n# Note that Tarray and rhoarray were defined much earlier in this tutorial.\n\n#We will plot the data points using circles 'o' and the smooth function using a line '-'.\n\nplt.plot(Tarray, rhoarray, 'o', GraphTarray, (DensityWater(GraphTarray)), '-')\n# For an x axis log scale use plt.semilogx(Tarray, rhoarray, 'o', xnew, f2(xnew), '-')\n# For a y axis log scale use plt.semilogy(Tarray, rhoarray, 'o', xnew, f2(xnew), '-')\n# For both axis log scale use plt.loglog(Tarray, rhoarray, 'o', xnew, f2(xnew), '-')\n\n\n#Below we create the legend and axis labels\nplt.legend(['data', 'cubic'], loc='best')\nplt.xlabel('Temperature (degrees Celcius)', fontsize=20)\nplt.ylabel('Density (kg/m^3)', fontsize=20)\n\n\n#Now we show the graph and we are done!\nplt.show() ", "_____no_output_____" ] ], [ [ "# Design Challenge 1, learning 
Python, Jupyter, and some AguaClara Design Functions", "_____no_output_____" ], [ "### 1) \nCalculate the minimum inner diameter of a PVC pipe that can carry a flow of at least 10 L/s for the town of Ojojona. The population is 4000 people. The water source is a dam with a surface elevation of 1500 m. The pipeline connects the reservoir to the discharge into a distribution tank at an elevation of 1440 m. The pipeline length is 2.5 km. The pipeline is made with PVC pipe with an SDR (standard diameter ratio) of 26.\n\nThe pipeline inlet at the dam is a square edge with a minor loss coefficient (${K_e}$) of 0.5. The discharge at the top of the distribution tank results in a loss of all of the kinetic energy and thus the exit minor loss coefficient is 1. See the minor loss equation below.\n\n${h_e} = {K_e}\\frac{{{V^2}}}{{2g}}$\n\nThe water temperature ranges from 10 to 30 Celsius. The roughness of a PVC pipe is approximately 0.1 mm. Use the fluids functions to calculate the minimum inner pipe diameter to carry this flow from the dam to the distribution tank.\n\nReport the following \n* critical design temperature\n* kinematic viscosity (maximum viscosity will occur at the lowest temperature)\n* the minimum inner pipe diameter (in mm). \nUse complete sentences to report the results and use 2 significant digits (use the sig function).", "_____no_output_____" ] ], [ [ "SDR = 26\nQ = 10 * u.L/u.s\ndelta_elevation = 1500 * u.m - 1440 * u.m\nL_pipe = 2.5 * u.km\n# am using 0 minor losses because pipe diameter function fails if not zero.\nK_minor = 1.5\n# The maximum viscosity will occur at the lowest temperature.\nT_crit = u.Quantity(10,u.degC)\nnu = pc.viscosity_kinematic(T_crit)\ne = 0.1 * u.mm\npipeline_ID_min = pc.diam_pipe(Q,delta_elevation,L_pipe,nu,e,K_minor)\nprint('The critical water temperature for this design is '+ str(T_crit)+'.')\nprint('The kinematic viscosity of water is '+ut.sig(nu,2)+'.')\nprint('The minimum pipe inner diameter is '+ ut.sig(pipeline_ID_min.to(u.mm),2)+'.')", "The critical water temperature for this design is 10 degC.\nThe kinematic viscosity of water is 1.3e-6 m²/s.\nThe minimum pipe inner diameter is 97 mm.\n" ] ], [ [ "### 2)\nFind the nominal diameter of a PVC pipe that is SDR 26. SDR means standard diameter ratio. The thickness of the pipe wall is 1/SDR of the outside diameter. The pipedatabase file has a useful function that returns nominal diameter given SDR and inner diameter. ", "_____no_output_____" ] ], [ [ "pipeline_ND = pipe.ND_SDR_available(pipeline_ID_min,SDR)\nprint('The nominal diameter of the pipeline is '+ut.sig(pipeline_ND,2)+' ('+ut.sig(pipeline_ND.to(u.mm),2)+').')", "The nominal diameter of the pipeline is 4.0 in (1.0e+2 mm).\n" ] ], [ [ "### 3) \nWhat is the actual inner diameter of this pipe in mm? 
Compare this with the [reported inner diameter for SDR-26 pipe](http://www.cresline.com/pdf/cresline-northwest/pvcpressupipeline_Re/CNWPVC-26.pdf) to see if our pipe database is reporting the correct value.", "_____no_output_____" ] ], [ [ "pipeline_ID = pipe.ID_SDR(pipeline_ND,SDR)\ncresline_ID = 4.154*u.inch\nprint('The inner diameter of the pipe is '+ut.sig(pipeline_ID.to(u.mm),3)+'.')\nprint('Cresline reports the inner diameter is '+ut.sig(cresline_ID.to(u.mm),3)+'.')", "The inner diameter of the pipe is 106 mm.\nCresline reports the inner diameter is 106 mm.\n" ] ], [ [ "### 4) \nWhat is the maximum flow rate that can be carried by this pipe at the coldest design temperature?\nDisplay the flow rate in L/s using the .to method.", "_____no_output_____" ] ], [ [ "pipeline_Q_max = pc.flow_pipe(pipeline_ID,delta_elevation,L_pipe,nu,e,K_minor)\nprint('The maximum flow rate at '+ut.sig(T_crit,2)+' is '+ut.sig(pipeline_Q_max.to(u.L/u.s),4)+'.')", "The maximum flow rate at 10 celsius is 13.24 l/s.\n" ] ], [ [ "### 5) \nWhat is the Reynolds number and friction factor for this maximum flow? Assign these values to variable names so you can plot them later on the Moody diagram.", "_____no_output_____" ] ], [ [ "pipeline_Re = pc.re_pipe(pipeline_Q_max,pipeline_ID,nu)\nfPipe = pc.fric(pipeline_Q_max,pipeline_ID,nu,e)\nprint('The Reynolds number and friction factor for the pipeline flow are '+ut.sig(pipeline_Re,2)+' and '+ut.sig(fPipe,2)+' respectively.')", "The Reynolds number and friction factor for the pipeline flow are 1.2e+5 and 0.022 respectively.\n" ] ], [ [ "### 6) \nCheck to see if the fluids functions are internally consistent by calculating the head loss given the flow rate that you calculated and comparing that head loss with the elevation difference. Display enough significant digits to see the difference in the two values. Note that the Moody diagram has an accuracy of about ±5% for smooth pipes and ±10% for rough pipes [Moody, 1944](http://user.engineering.uiowa.edu/~me_160/lecture_notes/MoodyLFpaper1944.pdf).", "_____no_output_____" ] ], [ [ "HLCheck = pc.headloss(pipeline_Q_max,pipeline_ID,L_pipe,nu,e,K_minor)\nprint('The head loss is '+ut.sig(HLCheck,3)+' and that is close to the elevation difference of '+ut.sig(delta_elevation,3)+'.')", "The head loss is 60.5 m and that is close to the elevation difference of 60.0 m.\n" ] ], [ [ "### 7) \nHow much more water (both volumetric and mass rate) will flow through the pipe at the maximum water temperature of 30 C? Take into account both the change in viscosity (changes the flow rate) and the change in density (changes the mass rate). 
Report the flow rates in L/s.", "_____no_output_____" ] ], [ [ "Tmax = u.Quantity(30,u.degC)\nnuhot = pc.viscosity_kinematic(Tmax)\npipeline_Q_maxhot = pc.flow_pipe(pipeline_ID,delta_elevation,L_pipe,nuhot,e,K_minor)\nQDelta = pipeline_Q_maxhot-pipeline_Q_max\nMassFlowDelta = (pipeline_Q_maxhot*DensityWater(Tmax)-pipeline_Q_max*DensityWater(T_crit)).to_base_units()\nprint('The increase in flow rate at '+ut.sig(Tmax,2)+' is '+ut.sig(QDelta.to(u.L/u.s),2)+'.')\nprint('The increase in mass rate at '+ut.sig(Tmax,2)+' is '+ut.sig(MassFlowDelta,2)+'.')", "The increase in flow rate at 30 celsius is 0.24 l/s.\nThe increase in mass rate at 30 celsius is 0.19 kg/s.\n" ] ], [ [ "### 8)\nWhy is the flow increase due to this temperature change so small given that viscosity actually changed significantly (see the calculation below)?", "_____no_output_____" ] ], [ [ "print('The viscosity ratio for the two temperatures was '+ut.sig(pc.viscosity_kinematic(Tmax)/pc.viscosity_kinematic(T_crit),2)+'.')", "The viscosity ratio for the two temperatures was 0.62.\n" ] ], [ [ "The flow is turbulent and thus viscosity has little influence on the flow rate.", "_____no_output_____" ], [ "### 9)\nSuppose an AguaClara plant is designed to be built up the hill from the distribution tank. The transmission line will need to be lengthened by 30 m and the elevation of the inlet to the entrance tank will be 1450 m. The rerouting will also require the addition of 3 elbows with a minor loss coefficient of 0.3 each. What is the new maximum flow from the water source?", "_____no_output_____" ] ], [ [ "delta_elevationnew = 1500*u.m - 1450*u.m\nL_pipenew = 2.5*u.km + 30*u.m\nKnew = 1.5+3*0.3\npipeline_Q_maxnew = pc.flow_pipe(pipeline_ID,delta_elevationnew,L_pipenew,nu,e,Knew)\nprint('The new maximum flow rate at '+ut.sig(T_crit,2)+' is '+ut.sig(pipeline_Q_maxnew.to(u.L/u.s),4)+'.')", "The new maximum flow rate at 10 celsius is 11.95 l/s.\n" ] ], [ [ "### 10)\nHow much less water will flow through the transmission line after the line is rerouted?", "_____no_output_____" ] ], [ [ "print('The reduction in flow is '+ut.sig((pipeline_Q_max-pipeline_Q_maxnew).to(u.L/u.s),2)+'.')", "The reduction in flow is 1.3 l/s.\n" ] ], [ [ "<div class=\"alert alert-block alert-danger\">\n\nWe noticed that many of you are having some difficulty with naming convention and syntax.\n\n\nPlease refer to the following for Github [Standards Page](https://github.com/AguaClara/aide_design/wiki/Standards) for naming standards. \n\nAdditionally, here is a Github [Variable Naming Guide](https://github.com/AguaClara/aide_design/wiki/Variable-Naming) that will be useful for creating variable names. ", "_____no_output_____" ], [ "### 11)\nThere exists a function within the physchem file called `pc.fric(FlowRate, Diam, Nu, PipeRough)` that returns the friction factor for both laminar and turbulent flow. In this problem, you will be creating a new function which you shall call `fofRe()` that takes the Reynolds number and the dimensionless pipe roughness (ε/D) as inputs.\n\nRecall that the format for defining a function is \n\n`def fofRe(input1, input2):\n f = buncha stuff\n return f`\n\nSince the equation for calculating the friction factor is different for laminar and turbulent flow (with the transition Reynolds number being defined within the physchem file), you will need to use an `if, else` statement for the two conditions. The two friction factor equations are given in the **Assorted Fluids Functions** table. 
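One possible sketch of such a function is shown below, purely to illustrate the `if`/`else` structure. It assumes that numpy has been imported as `np` and physchem as `pc` (as elsewhere in this tutorial), and that the transition Reynolds number is exposed as `pc.RE_TRANSITION_PIPE`; check the physchem file for the exact name before using it, and verify the turbulent expression against the Swamee-Jain entry in the table above.

def fofRe(Re, eD):
    # laminar flow: the friction factor depends only on the Reynolds number
    if Re < pc.RE_TRANSITION_PIPE:
        f = 64 / Re
    # turbulent flow: Swamee-Jain approximation, with eD the dimensionless roughness (epsilon/D)
    else:
        f = 0.25 / (np.log10(eD / 3.7 + 5.74 / Re**0.9))**2
    return f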
", "_____no_output_____" ], [ "### 12) \n\nCreate a beautiful Moody diagram. Include axes labels and show a legend that clearly describes each plot. The result should look like the picture of the graph below.![](Moody.png)", "_____no_output_____" ], [ "### 12a)\nYou will be creating a Moody diagram showing Reynolds number vs friction factor for multiple dimensionless pipe roughnesses. The first step to do this is to define the number of dimensionless pipe roughnesses you want to plot. We will plot 8 curves for the following values: 0, 0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1. We will plot an additional curve, which will be a straight line, for laminar flow, since it is not dependent on the pipe roughness value (see the Moody diagram above).\n\n* Create an array for the dimensionless pipe roughness values, using `np.array([])`.\n* Specify the amount of data points you want to plot for each curve. We will be using 50 points.\n\nBecause the Moody diagram is a log-log plot, we need to ensure that all 50 points on the diagram we are creating are equally spaced in log-space. Use the `np.logspace(input1, input2, input3)` function to create an array for turbulent Reynolds numbers and an array for laminar Reynolds numbers.\n* `input1` is the exponent for the lower bound of the range. For example, if you want your lower bound to be 1000, your input should be `math.log10(1000)` which is equal to 3.\n* `input2` is the exponent for the upper bound of the range. Format this input as you have formatted `input1`.\n* `input3` is the number of data points you are using for each curve.\n\n**12a) Deliverables**\n* Array of dimentionless pipe roughnesses. Call this array `eGraph`.\n* Variable defining the amount of points on each pipe roughness curve\n* Two arrays created using `np.logspace` which for turbulent and laminar Reynolds numbers, which will be the x-axis values for the Moody diagram\n\nNote: The bounds for the laminar Reynolds numbers array should span between 670 and the predefined transition number used in Problem 11. The bounds for the turbulent Reynolds numbers array should span between 3,500 and 100,000,000. These ranges are chosen to make the curves fit well within the graph and to intentionally omit data in the transition range between laminar and turbulent flows.", "_____no_output_____" ], [ "### 12b)\n\nNow you will create the y-axis values for turbulent flow (based on dimensionless pipe roughness) and laminar flow (not based on dimensionless pipe roughness). To do this, you will use the `fofRe()` function you wrote in Problem 11 to find the friction factors. \n\nBegin by creating an empty 2-dimensional array that will be populated by the turbulent-flow friction factors for each dimensionless pipe roughness. Use `np.zeros(number of rows, number of columns)`. The number of rows should be the number of dimensionless pipe roughness values (`len(eGraph)`), while the number of columns should be the number of data points per curve as defined above.\n\nPopulating this array with friction factor values will require two `for` loops, one to iterate through rows and one to iterate through columns. Recall that `for` loop syntax is as follows:\n\n`example = np.zeros((40, 30))\nfor i in range(0, 40):\n for j in range(0, 30):\n example[i,j] = function(buncha[i],stuff[j])`\n \nwhere `buncha` and `stuff` are arrays.\n\nYou will repeat this process to find the friction factors for laminar flow. 
The only difference between the turbulent and laminar friction flow arrays will be that the laminar array will only have one dimension since it does not affected by the dimensionless pipe roughness. Start by creating an empty 1-dimensional array and then use a single `for` loop.\n \n**12b) Deliverables**\n* One 2-D array containing friction factor values for each dimensionless pipe roughness for turbulent flow.\n* One 1-D array containing friction factor values for laminar flow.", "_____no_output_____" ], [ "### 12c)\n\nNow, we are ready to start making the Moody diagram!!!!!1!!! The plot formatting is included for you in the cell below. You will add to this cell the code that will actually plot the arrays you brought into existence in 12a) and 12b) with a legend. For the sake of your own sanity, please only add code where specified.\n\n* First, plot your arrays. See the plots in the tutorial above for the syntax. Recall that each dimensionless pipe roughness is a separate row within the 2-D array you created. To plot these roughnesses as separate curves, use a `for` loop to iterate through the rows of your array. To plot all columns in a particular row, use the `[1,:]` call on an array, where 1 is the row you are calling.\n\n\n* Plotting the laminar flow curve does not require a `for` loop because it is a 1-D array.\n * Use a linewidth of 4 for all curves.\n\n\n\n* Now plot the data point you calculated in DC Python Tutorial 1, conveniently located a few problems above this one. Use the Reynolds number and friction factor obtained in Problem 5. Because this is a single point, it should be plotted as a circle instead of a line. Because a line composed of a single point does not exist.\n\n\n* You will need to make a legend for the graph using `leg = plt.legend(stringarray, loc = 'best')`\n * The first input, `stringarray`, must be an array composed of strings instead of numbers. The array you created which contains the dimensionless pipe roughness values (`eGraph`) can be converted into a string array for your legend (`eGraph.astype('str'))`. You will need to add 'Laminar' and 'Pipeline' as strings to the new ` eGraph ` string array. Perhaps you will find `np.append(basestring, [('string1','string2')])` to be useful ;)\n \n", "_____no_output_____" ] ], [ [ "#Set the size of the figure to make it big!\nplt.figure('ax',(10,8))\n\n\n#--------------------------------------------------------------------------------------\n#---------------------WRITE CODE BELOW-------------------------------------------------\n#--------------------------------------------------------------------------------------\n\n\n\n\n\n\n\n\n\n#--------------------------------------------------------------------------------------\n#---------------------WRITE CODE ABOVE-------------------------------------------------\n#--------------------------------------------------------------------------------------\n\n#LOOK AT ALL THIS COOL CODE!\nplt.yscale('log')\nplt.xscale('log')\nplt.grid(b=True, which='major', color='k', linestyle='-', linewidth=0.5)\n\n#Set the grayscale of the minor gridlines. Note that 1 is white and 0 is black.\nplt.grid(b=True, which='minor', color='0.5', linestyle='-', linewidth=0.5)\n\n#The next 2 lines of code are used to set the transparency of the legend to 1. 
\n#The default legend setting was transparent and was cluttered.\n\n\nplt.xlabel('Reynolds number', fontsize=30)\nplt.ylabel('Friction factor', fontsize=30)\n\nplt.show() ", "_____no_output_____" ] ], [ [ "### 13) \nResearchers in the AguaClara laboratory collected the following head loss data through a 1/8\" diameter tube that was 2 m long using water at 22°C. The data is in a comma separated data (.csv) file named ['Head_loss_vs_Flow_dosing_tube_data.csv'](https://github.com/AguaClara/CEE4540_DC/blob/master/Head_loss_vs_Flow_dosing_tube_data.csv). Use the pandas read csv function (`pd.read_csv('filename.csv')`) to read the data file. Display the data so you can see how it is formatted.", "_____no_output_____" ], [ "### 14)\nUsing the data table from Problem 13, assign the head loss **and flow rate** data to separate 1-D arrays. Attach the correct units. `np.array` can extract the data by simply inputting the text string of the column header. Here is example code to create the first array:\n\n`HL_data=np.array(head_loss_data['Head loss (m)'])*u.m`\n\nIn the example, `head_loss_data` is the variable name to which the csv file was assigned.", "_____no_output_____" ], [ "### 15)\nCalculate and report the maximum and minimum Reynolds number for this data set. Use the tube and temperature parameters specified in Problem 13. Use the `min` and `max` functions which take arrays as their inputs.", "_____no_output_____" ], [ "### 16)\nYou will now create a graph of headloss vs flow for the tube mentioned in the previous problems. This graph will have two sets of data: the real data contained within the csv file and some theoretical data. The theoretical data is what we would expect the headloss through the tube to be in an ideal world for any given flow. When calculating the theoretical headloss, assume that minor losses are negligible. Plot the data from the csv file as individual data points and the theoretical headloss as a continuous curve. Make the y-axis have units of cm and the x-axis have units of mL/s. \n\nA few hints.\n* To find the theoretical headloss, you will first need to create an array of different flow values. While you could use the values in the csv file that you extracted in Problem 14, we would instead like you to create an array of 50 equally-spaced flow values. These values shall be between the minimum and maximum flows in the csv file.\n* You can use the `np.linspace(input1, input2, input3)` function to create this set of equally-spaced flows. Inputs for `np.linspace` are the same as they were for `np.logspace`, which was used in Problem 12a). Linspace does not work with units; you will need to remove the units (using `.magnitude`) from the inputs to `np.logspace` and then reattach the correct units of flow after creating the array.\n* The `pc.headloss_fric` function can handle arrays as inputs, so that makes it easy to produce the theoretical headloss array once you have finished your equally-spaced flow array.\n* When using `plt.plot`, make sure to convert the flow and headloss data to the desired units.\n\n", "_____no_output_____" ], [ "The theoretical model doesn't fit the data very well. We assumed that major losses dominated. But that assumption was wrong. So let's try a more sophisticated approach where we fit minor losses to the data. Below we demonstrate the use of the [scipy curve_fit method](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html#scipy.optimize.curve_fit) to fit the minor loss coefficient given this data set. 
In this example, `Q_data` is the flow rate array for the csv file from problem 13. You should re-name this variable below to whatever you titled this variable.", "_____no_output_____" ] ], [ [ "from scipy.optimize import curve_fit\n\n# Define a new function that calculates head loss given the flow rate \n# and the parameter that we want to use curve fitting to estimate\n# Define the other known values inside the function because we won't be passing those parameters to the function.\n\ndef HL_curvefit(FlowRate, KMinor):\n # The tubing is smooth AND pipe roughness isn't significant for laminar flow.\n PipeRough = 0*u.mm\n L_tube = 2*u.m\n T_data = u.Quantity(22,u.degC)\n nu_data = pc.viscosity_kinematic(T_data)\n D_tube = 1/8*u.inch \n # pass all of the parameters to the head loss function and then strip the units so \n # the curve fitting function can handle the data.\n return (pc.headloss(FlowRate, D_tube, L_tube, nu_data, PipeRough, KMinor)).magnitude\n\n# The curve fit function will need bounds on the unknown parameters to find a real solution. \n# The bounds for K minor are 0 and 20. \n\n# The curve fit function returns a list that includes the optimal parameters and the covariance.\n\npopt, pcov = curve_fit(HL_curvefit, Q_data, HL_data, bounds=[[0.],[20]])\n\nK_minor_fit = popt[0]\n\n# Plot the raw data\nplt.plot(Q_data.to(u.mL/u.s), HL_data.to(u.cm), 'o', label='data')\n\n# Plot the curve fit equation. \nplt.plot(Q_data.to(u.mL/u.s), ((HL_curvefit(Q_data, *popt))*u.m).to(u.cm), 'r-', label='fit')\nplt.xlabel('Flow rate (mL/s)')\nplt.ylabel('Head loss (cm)')\nplt.legend()\nplt.show()\n\n#Calculate the root mean square error to estimate the goodness of fit of the model to the data\nRMSE_Kminor = (np.sqrt(np.var(np.subtract((HL_curvefit(Q_data, *popt)),HL_data.magnitude)))*u.m).to(u.cm)\nprint('The root mean square error for the model fit when adjusting the minor loss coefficient was '+ut.sig(RMSE_Kminor,2))", "_____no_output_____" ] ], [ [ "### 17)\nRepeat the analysis from the previous cell, but this time assume that the minor loss coefficient is zero and that diameter is the unknown parameter. The bounds specified in the line beginning with `popt, pcov` should be changed from the previous question (which had bounds from 0 to 20) to the new bounds of 0.001 to 0.01. \n\nHint: Don't think too much about this, you only need to change the name of the defined function (perhaps \"`HL_curvefit2`\"?) and adjust its inputs/values. Please make use of the fantasticly useful copy-paste functionality.", "_____no_output_____" ], [ "### 18\nChanges to which of the two parameters, minor loss coefficient or tube diameter, results in a better fit to the data?", "_____no_output_____" ], [ "### 19\nWhat did you find most difficult about learning to use Python? Create a brief example as an extension to this tutorial to help students learn the topic that you found most difficult.", "_____no_output_____" ], [ "## Final Pointer\nIt is good practice to select Restart & Run All from the Kernel menu after completing an assignment to make sure that everything in your notebook works correctly and that you haven't deleted an essential line of code! \n", "_____no_output_____" ] ] ]
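For readers who want a starting point for Problem 17, one possible adaptation of the curve-fitting cell is sketched below. It is only a sketch: it assumes the same `Q_data`, `HL_data`, `pc`, `u`, and `ut` objects defined earlier in the notebook, uses the 0.001 to 0.01 bounds from the problem statement (interpreted as a diameter in meters), and the helper name `HL_curvefit2` is just a suggestion.

from scipy.optimize import curve_fit

def HL_curvefit2(FlowRate, D_tube):
    # Same head loss model as before, but the tube diameter (in meters) is now the
    # fitted parameter and the minor loss coefficient is fixed at zero.
    PipeRough = 0*u.mm
    L_tube = 2*u.m
    T_data = u.Quantity(22,u.degC)
    nu_data = pc.viscosity_kinematic(T_data)
    KMinor = 0
    return (pc.headloss(FlowRate, D_tube*u.m, L_tube, nu_data, PipeRough, KMinor)).magnitude

popt, pcov = curve_fit(HL_curvefit2, Q_data, HL_data, bounds=[[0.001],[0.01]])
D_tube_fit = popt[0]*u.m
print('The fitted tube diameter is '+ut.sig(D_tube_fit.to(u.mm),3)+'.')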
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
4af7bda6b9cf91a113c6082683abc85d255e5495
20,813
ipynb
Jupyter Notebook
Exercises/CNN/.ipynb_checkpoints/CNN-checkpoint.ipynb
camilleAmaury/DeepLearningExercise
5c328f871fa9db8fbeec951ea8e4df433b8b1c04
[ "CNRI-Python", "Info-ZIP" ]
null
null
null
Exercises/CNN/.ipynb_checkpoints/CNN-checkpoint.ipynb
camilleAmaury/DeepLearningExercise
5c328f871fa9db8fbeec951ea8e4df433b8b1c04
[ "CNRI-Python", "Info-ZIP" ]
null
null
null
Exercises/CNN/.ipynb_checkpoints/CNN-checkpoint.ipynb
camilleAmaury/DeepLearningExercise
5c328f871fa9db8fbeec951ea8e4df433b8b1c04
[ "CNRI-Python", "Info-ZIP" ]
null
null
null
38.975655
216
0.528996
[ [ [ "# Convolutional Neural Network\n\nThis notebook was created by Camille-Amaury JUGE, in order to better understand CNN principles and how they work.\n\n(it follows the exercices proposed by Hadelin de Ponteves on Udemy : https://www.udemy.com/course/le-deep-learning-de-a-a-z/)\n\n## Imports", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# scikit\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n# keras\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.layers import Convolution2D, MaxPooling2D, Flatten\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing import image", "_____no_output_____" ] ], [ [ "## Model\n\n### Basic Convolutional Network\n\nSince our images are in RGB mode, we need to have an array of dimension 3 (one dimension for each color mode).\n\nWe will also begin as a small but efficient size of feature map = 32 (should multiplicate by 2 if stacking multiple layer).\n\nThen, the feature dectetor will be 3*3 pixels.\n\nFinally, we always use the relu function in order to break linearity of features map. This improve the quality of the features.\n\nThus, we apply max pooling in order to keep the important information while reducing the size of inputs (even if we lose some informations). We will use a 2*2 matrix which will reduce by 4 the features size.\n\nTo conclude, the flattening layer will help the features to be flatten in order to be used by other kind of neural networks.", "_____no_output_____" ] ], [ [ "def create_convolutional_layer(clf):\n # convolution layer\n clf.add(Convolution2D(filters=32, kernel_size=(3,3), strides=(1,1),\n input_shape=(64,64,3), activation=\"relu\"))\n # Pooling (here max)\n clf.add(MaxPooling2D(pool_size=(2,2)))\n # Flattening \n clf.add(Flatten())\n return clf", "_____no_output_____" ], [ "def hidden_layer(clf):\n clf.add(Dense(units=128, activation=\"relu\"))\n clf.add(Dense(units=1, activation=\"sigmoid\"))\n return clf", "_____no_output_____" ], [ "clf = Sequential()\nclf = create_convolutional_layer(clf)\nclf = hidden_layer(clf)\nclf.compile(optimizer=\"adam\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])", "_____no_output_____" ] ], [ [ "### Image Creation\n\nWe are going to increase the number of images by using a keras method which apply a lot of filters, rotation on existing images in order to avoid overfitting and prepare for lot of different factors.\n\nFolders need to be well organized before using those functions.", "_____no_output_____" ] ], [ [ "_batch_size = 32\n_training_size = 8000\n_test_size = 200\n_image_size = (64,64)\n\n# change/create the train dataset images\ntrain_datagen = ImageDataGenerator(\n # rescale the values of each pixel between 0 and 1\n rescale=1./255,\n # transvection (rotating in 3D but still seing in 2D)\n shear_range=0.2,\n # zoom\n zoom_range=0.2,\n # return the image on a horizontal plan\n horizontal_flip=True)\n\n# same for the test set\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\n# generate the new images for train dataset\ntrain_generator = train_datagen.flow_from_directory(\n 'training_set',\n target_size=_image_size,\n batch_size=_batch_size,\n class_mode='binary')\n# same for the test dataset\ntest_generator = test_datagen.flow_from_directory(\n 'test_set',\n target_size=_image_size,\n batch_size=_batch_size,\n 
class_mode='binary')\n\n# do the job\nclf.fit(train_generator,\n steps_per_epoch=int(_training_size/_batch_size),\n epochs=25,\n validation_data=test_generator,\n validation_steps=int(_test_size/_batch_size))", "Found 8000 images belonging to 2 classes.\nFound 2000 images belonging to 2 classes.\nEpoch 1/25\n250/250 [==============================] - 40s 161ms/step - loss: 0.6819 - accuracy: 0.6281 - val_loss: 0.6625 - val_accuracy: 0.6875\nEpoch 2/25\n250/250 [==============================] - 40s 159ms/step - loss: 0.5893 - accuracy: 0.6871 - val_loss: 0.5655 - val_accuracy: 0.7812\nEpoch 3/25\n250/250 [==============================] - 39s 156ms/step - loss: 0.5572 - accuracy: 0.7097 - val_loss: 0.5740 - val_accuracy: 0.7552\nEpoch 4/25\n250/250 [==============================] - 40s 160ms/step - loss: 0.5383 - accuracy: 0.7236 - val_loss: 0.6166 - val_accuracy: 0.6667\nEpoch 5/25\n250/250 [==============================] - 39s 158ms/step - loss: 0.5261 - accuracy: 0.7330 - val_loss: 0.5532 - val_accuracy: 0.7240\nEpoch 6/25\n250/250 [==============================] - 40s 158ms/step - loss: 0.5067 - accuracy: 0.7467 - val_loss: 0.6338 - val_accuracy: 0.7396\nEpoch 7/25\n250/250 [==============================] - 40s 159ms/step - loss: 0.4935 - accuracy: 0.7602 - val_loss: 0.8266 - val_accuracy: 0.6979\nEpoch 8/25\n250/250 [==============================] - 40s 159ms/step - loss: 0.4880 - accuracy: 0.7607 - val_loss: 0.6511 - val_accuracy: 0.7292\nEpoch 9/25\n250/250 [==============================] - 39s 157ms/step - loss: 0.4789 - accuracy: 0.7681 - val_loss: 0.3234 - val_accuracy: 0.8125\nEpoch 10/25\n250/250 [==============================] - 40s 159ms/step - loss: 0.4652 - accuracy: 0.7766 - val_loss: 0.6970 - val_accuracy: 0.7083\nEpoch 11/25\n250/250 [==============================] - 40s 159ms/step - loss: 0.4516 - accuracy: 0.7814 - val_loss: 0.6109 - val_accuracy: 0.7045\nEpoch 12/25\n250/250 [==============================] - 39s 157ms/step - loss: 0.4400 - accuracy: 0.7894 - val_loss: 0.6008 - val_accuracy: 0.7240\nEpoch 13/25\n250/250 [==============================] - 40s 160ms/step - loss: 0.4368 - accuracy: 0.7919 - val_loss: 0.8036 - val_accuracy: 0.7500\nEpoch 14/25\n250/250 [==============================] - 40s 158ms/step - loss: 0.4217 - accuracy: 0.7981 - val_loss: 0.4839 - val_accuracy: 0.7344\nEpoch 15/25\n250/250 [==============================] - 39s 157ms/step - loss: 0.4135 - accuracy: 0.8087 - val_loss: 0.4381 - val_accuracy: 0.7708\nEpoch 16/25\n250/250 [==============================] - 40s 161ms/step - loss: 0.4079 - accuracy: 0.8136 - val_loss: 0.3330 - val_accuracy: 0.8021\nEpoch 17/25\n250/250 [==============================] - 41s 163ms/step - loss: 0.3901 - accuracy: 0.8248 - val_loss: 0.5646 - val_accuracy: 0.7604\nEpoch 18/25\n250/250 [==============================] - 41s 163ms/step - loss: 0.3878 - accuracy: 0.8285 - val_loss: 0.5941 - val_accuracy: 0.7448\nEpoch 19/25\n250/250 [==============================] - 41s 163ms/step - loss: 0.3692 - accuracy: 0.8320 - val_loss: 0.6090 - val_accuracy: 0.7708\nEpoch 20/25\n250/250 [==============================] - 41s 164ms/step - loss: 0.3511 - accuracy: 0.8479 - val_loss: 0.2442 - val_accuracy: 0.7865\nEpoch 21/25\n250/250 [==============================] - 40s 161ms/step - loss: 0.3490 - accuracy: 0.8426 - val_loss: 0.4292 - val_accuracy: 0.7614\nEpoch 22/25\n250/250 [==============================] - 41s 165ms/step - loss: 0.3388 - accuracy: 0.8512 - val_loss: 0.4262 - val_accuracy: 0.8490\nEpoch 
23/25\n250/250 [==============================] - 41s 164ms/step - loss: 0.3188 - accuracy: 0.8609 - val_loss: 0.3888 - val_accuracy: 0.7396\nEpoch 24/25\n250/250 [==============================] - 41s 163ms/step - loss: 0.3081 - accuracy: 0.8662 - val_loss: 0.7291 - val_accuracy: 0.7448\nEpoch 25/25\n250/250 [==============================] - 41s 163ms/step - loss: 0.3064 - accuracy: 0.8671 - val_loss: 0.3930 - val_accuracy: 0.7812\n" ] ], [ [ "As we can see, the network performs medium-well on the image recognition : 0.86 on training set and 0.78 on test set.\n\nNethertheless, it seems that there is overfitting since we have a 0.08 difference which is quite enormous. Our neural network is not able to well generalize.\n\n### Improving the model\n\nWe will add some convolution layers.", "_____no_output_____" ] ], [ [ "def create_convolutional_layer(clf):\n # convolution layer\n clf.add(Convolution2D(filters=64, kernel_size=(3,3), strides=(1,1),\n input_shape=(128,128,3), activation=\"relu\"))\n # Pooling (here max)\n clf.add(MaxPooling2D(pool_size=(2,2)))\n clf.add(Dropout(0.1))\n clf.add(Convolution2D(filters=32, kernel_size=(3,3), strides=(1,1),\n activation=\"relu\"))\n clf.add(MaxPooling2D(pool_size=(2,2)))\n clf.add(Dropout(0.1))\n # Flattening \n clf.add(Flatten())\n return clf", "_____no_output_____" ], [ "def hidden_layer(clf):\n clf.add(Dense(units=128, activation=\"relu\"))\n clf.add(Dense(units=64, activation=\"relu\"))\n clf.add(Dense(units=1, activation=\"sigmoid\"))\n return clf", "_____no_output_____" ], [ "clf = Sequential()\nclf = create_convolutional_layer(clf)\nclf = hidden_layer(clf)\nclf.compile(optimizer=\"adam\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])", "_____no_output_____" ], [ "_batch_size = 32\n_training_size = 8000\n_test_size = 200\n_image_size = (128,128)\n\n# change/create the train dataset images\ntrain_datagen = ImageDataGenerator(\n # rescale the values of each pixel between 0 and 1\n rescale=1./255,\n # transvection (rotating in 3D but still seing in 2D)\n shear_range=0.2,\n # zoom\n zoom_range=0.2,\n # return the image on a horizontal plan\n horizontal_flip=True)\n\n# same for the test set\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\n# generate the new images for train dataset\ntrain_generator = train_datagen.flow_from_directory(\n 'training_set',\n target_size=_image_size,\n batch_size=_batch_size,\n class_mode='binary')\n# same for the test dataset\ntest_generator = test_datagen.flow_from_directory(\n 'test_set',\n target_size=_image_size,\n batch_size=_batch_size,\n class_mode='binary')\n\n# do the job\nclf.fit(train_generator,\n steps_per_epoch=int(_training_size/_batch_size),\n epochs=25,\n validation_data=test_generator,\n validation_steps=int(_test_size/_batch_size))", "Found 8000 images belonging to 2 classes.\nFound 2000 images belonging to 2 classes.\nEpoch 1/25\n250/250 [==============================] - 115s 459ms/step - loss: 0.6960 - accuracy: 0.5299 - val_loss: 0.6686 - val_accuracy: 0.5990\nEpoch 2/25\n250/250 [==============================] - 112s 449ms/step - loss: 0.6622 - accuracy: 0.6058 - val_loss: 0.5992 - val_accuracy: 0.7031\nEpoch 3/25\n250/250 [==============================] - 114s 457ms/step - loss: 0.6298 - accuracy: 0.6489 - val_loss: 0.6285 - val_accuracy: 0.6198\nEpoch 4/25\n250/250 [==============================] - 114s 454ms/step - loss: 0.6064 - accuracy: 0.6731 - val_loss: 0.6038 - val_accuracy: 0.6510\nEpoch 5/25\n250/250 [==============================] - 113s 451ms/step - loss: 0.5799 - 
accuracy: 0.7011 - val_loss: 0.4057 - val_accuracy: 0.7760\nEpoch 6/25\n250/250 [==============================] - 112s 448ms/step - loss: 0.5649 - accuracy: 0.7076 - val_loss: 0.6833 - val_accuracy: 0.7031\nEpoch 7/25\n250/250 [==============================] - 112s 448ms/step - loss: 0.5321 - accuracy: 0.7384 - val_loss: 0.4534 - val_accuracy: 0.7188\nEpoch 8/25\n250/250 [==============================] - 113s 452ms/step - loss: 0.5005 - accuracy: 0.7548 - val_loss: 0.6207 - val_accuracy: 0.7500\nEpoch 9/25\n250/250 [==============================] - 112s 448ms/step - loss: 0.4799 - accuracy: 0.7648 - val_loss: 0.8075 - val_accuracy: 0.7188\nEpoch 10/25\n250/250 [==============================] - 112s 449ms/step - loss: 0.4644 - accuracy: 0.7793 - val_loss: 0.4120 - val_accuracy: 0.7969\nEpoch 11/25\n250/250 [==============================] - 112s 449ms/step - loss: 0.4338 - accuracy: 0.7926 - val_loss: 0.3669 - val_accuracy: 0.7670\nEpoch 12/25\n250/250 [==============================] - 112s 448ms/step - loss: 0.4162 - accuracy: 0.8070 - val_loss: 0.5565 - val_accuracy: 0.7604\nEpoch 13/25\n250/250 [==============================] - 112s 449ms/step - loss: 0.4029 - accuracy: 0.8138 - val_loss: 0.4363 - val_accuracy: 0.7344\nEpoch 14/25\n250/250 [==============================] - 113s 452ms/step - loss: 0.3785 - accuracy: 0.8325 - val_loss: 0.4355 - val_accuracy: 0.7344\nEpoch 15/25\n250/250 [==============================] - 112s 447ms/step - loss: 0.3650 - accuracy: 0.8334 - val_loss: 0.4168 - val_accuracy: 0.7396\nEpoch 16/25\n250/250 [==============================] - 112s 448ms/step - loss: 0.3407 - accuracy: 0.8490 - val_loss: 0.5171 - val_accuracy: 0.7656\nEpoch 17/25\n250/250 [==============================] - 112s 448ms/step - loss: 0.3229 - accuracy: 0.8560 - val_loss: 0.3271 - val_accuracy: 0.8021\nEpoch 18/25\n250/250 [==============================] - 115s 462ms/step - loss: 0.3161 - accuracy: 0.8694 - val_loss: 0.6226 - val_accuracy: 0.7969\nEpoch 19/25\n250/250 [==============================] - 114s 457ms/step - loss: 0.2921 - accuracy: 0.8754 - val_loss: 0.2555 - val_accuracy: 0.8073\nEpoch 20/25\n250/250 [==============================] - 113s 450ms/step - loss: 0.2793 - accuracy: 0.8855 - val_loss: 0.4655 - val_accuracy: 0.7500\nEpoch 21/25\n250/250 [==============================] - 113s 451ms/step - loss: 0.2540 - accuracy: 0.8878 - val_loss: 0.6282 - val_accuracy: 0.7443\nEpoch 22/25\n250/250 [==============================] - 113s 451ms/step - loss: 0.2524 - accuracy: 0.8979 - val_loss: 0.4574 - val_accuracy: 0.8177\nEpoch 23/25\n250/250 [==============================] - 113s 451ms/step - loss: 0.2335 - accuracy: 0.9044 - val_loss: 0.7167 - val_accuracy: 0.8229\nEpoch 24/25\n250/250 [==============================] - 114s 455ms/step - loss: 0.2225 - accuracy: 0.9087 - val_loss: 0.4846 - val_accuracy: 0.8281\nEpoch 25/25\n250/250 [==============================] - 113s 451ms/step - loss: 0.2113 - accuracy: 0.9150 - val_loss: 0.5986 - val_accuracy: 0.7656\n" ] ], [ [ "## Predict One image", "_____no_output_____" ] ], [ [ "test_image1_128 = image.load_img(\"single_prediction\\\\cat_or_dog_1.jpg\",\n target_size=(128,128))\ntest_image2_128 = image.load_img(\"single_prediction\\\\cat_or_dog_2.jpg\",\n target_size=(128,128))", "_____no_output_____" ], [ "test_image1_128 = image.img_to_array(test_image1_128)\ntest_image2_128 = image.img_to_array(test_image2_128)", "_____no_output_____" ], [ "test_image1_128 = np.expand_dims(test_image1_128, axis=0)\ntest_image2_128 = 
np.expand_dims(test_image2_128, axis=0)", "_____no_output_____" ], [ "test_image1_128.shape", "_____no_output_____" ], [ "classes = {value : key for (key, value) in train_generator.class_indices.items()}\nclasses", "_____no_output_____" ], [ "y_pred = classes[int(clf.predict(test_image1_128)[0][0])]\ny = classes[1]\nprint(\"Predicted {} and is {}\".format(y_pred, y))\ny_pred = classes[int(clf.predict(test_image2_128)[0][0])]\ny = classes[0]\nprint(\"Predicted {} and is {}\".format(y_pred, y))", "Predicted dogs and is dogs\nPredicted cats and is cats\n" ] ] ]
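One detail worth flagging about the single-image predictions above: the training and test generators rescale pixel values by 1/255, while the arrays passed to `clf.predict` here are raw 0-255 images, and `int()` truncates the sigmoid output rather than thresholding it. A sketch of a more consistent helper is given below (`predict_single` is a hypothetical name; it assumes the `image`, `np`, `clf`, and `classes` objects defined above and the 128x128 input size of the improved model):

def predict_single(path, model, class_lookup, target_size=(128, 128)):
    # Preprocess one image the same way the generators do (rescale by 1/255).
    img = image.load_img(path, target_size=target_size)
    arr = image.img_to_array(img) / 255.0
    arr = np.expand_dims(arr, axis=0)
    # Threshold the sigmoid output at 0.5 instead of truncating it with int().
    prob = model.predict(arr)[0][0]
    return class_lookup[int(prob >= 0.5)]

print(predict_single("single_prediction\\cat_or_dog_1.jpg", clf, classes))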
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4af7c332ccf6055568df416df90549b5b63b7776
18,597
ipynb
Jupyter Notebook
research/object_detection/object_detection_tutorial.ipynb
seventeenxsq/tensorflow_Object_Detection-
dd46fb0cb65886e6ced05dc5fcd7876ed06af847
[ "Apache-2.0" ]
null
null
null
research/object_detection/object_detection_tutorial.ipynb
seventeenxsq/tensorflow_Object_Detection-
dd46fb0cb65886e6ced05dc5fcd7876ed06af847
[ "Apache-2.0" ]
null
null
null
research/object_detection/object_detection_tutorial.ipynb
seventeenxsq/tensorflow_Object_Detection-
dd46fb0cb65886e6ced05dc5fcd7876ed06af847
[ "Apache-2.0" ]
null
null
null
25.686464
416
0.585041
[ [ [ "# Object Detection API Demo\n\n<table align=\"left\"><td>\n <a target=\"_blank\" href=\"https://colab.sandbox.google.com/github/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab\n </a>\n</td><td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\">\n <img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n</td></table>", "_____no_output_____" ], [ "Welcome to the [Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection). This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image.", "_____no_output_____" ], [ "> **Important**: This tutorial is to help you through the first step towards using [Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection) to build models. If you just just need an off the shelf model that does the job, see the [TFHub object detection example](https://colab.sandbox.google.com/github/tensorflow/hub/blob/master/examples/colab/object_detection.ipynb).", "_____no_output_____" ], [ "# Setup", "_____no_output_____" ], [ "Important: If you're running on a local machine, be sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). This notebook includes only what's necessary to run in Colab.", "_____no_output_____" ], [ "### Install", "_____no_output_____" ] ], [ [ "!pip install -U --pre tensorflow==\"2.*\"", "_____no_output_____" ] ], [ [ "Make sure you have `pycocotools` installed", "_____no_output_____" ] ], [ [ "!pip install pycocotools", "_____no_output_____" ] ], [ [ "Get `tensorflow/models` or `cd` to parent directory of the repository.", "_____no_output_____" ] ], [ [ "import os\nimport pathlib\n\n\nif \"models\" in pathlib.Path.cwd().parts:\n while \"models\" in pathlib.Path.cwd().parts:\n os.chdir('..')\nelif not pathlib.Path('models').exists():\n !git clone --depth 1 https://github.com/tensorflow/models", "_____no_output_____" ] ], [ [ "Compile protobufs and install the object_detection package", "_____no_output_____" ] ], [ [ "%%bash\ncd models/research/\nprotoc object_detection/protos/*.proto --python_out=.", "_____no_output_____" ], [ "%%bash \ncd models/research\npip install .", "_____no_output_____" ] ], [ [ "### Imports", "_____no_output_____" ] ], [ [ "import numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\n\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nfrom IPython.display import display", "_____no_output_____" ] ], [ [ "Import the object detection module.", "_____no_output_____" ] ], [ [ "from object_detection.utils import ops as utils_ops\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util", "_____no_output_____" ] ], [ [ "Patches:", "_____no_output_____" ] ], [ [ "# patch tf1 into `utils.ops`\nutils_ops.tf = tf.compat.v1\n\n# Patch the location of gfile\ntf.gfile = tf.io.gfile", "_____no_output_____" ] ], [ [ "# Model preparation ", "_____no_output_____" ], [ "## Variables\n\nAny model exported using the `export_inference_graph.py` 
tool can be loaded here simply by changing the path.\n\nBy default we use an \"SSD with Mobilenet\" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.", "_____no_output_____" ], [ "## Loader", "_____no_output_____" ] ], [ [ "def load_model(model_name):\n base_url = 'http://download.tensorflow.org/models/object_detection/'\n model_file = model_name + '.tar.gz'\n model_dir = tf.keras.utils.get_file(\n fname=model_name, \n origin=base_url + model_file,\n untar=True)\n\n model_dir = pathlib.Path(model_dir)/\"saved_model\"\n\n model = tf.saved_model.load(str(model_dir))\n model = model.signatures['serving_default']\n\n return model", "_____no_output_____" ] ], [ [ "## Loading label map\nLabel maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine", "_____no_output_____" ] ], [ [ "# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = 'models/research/object_detection/data/mscoco_label_map.pbtxt'\ncategory_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)", "_____no_output_____" ] ], [ [ "For the sake of simplicity we will test on 2 images:", "_____no_output_____" ] ], [ [ "# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.\nPATH_TO_TEST_IMAGES_DIR = pathlib.Path('models/research/object_detection/test_images')\nTEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob(\"*.jpg\")))\nTEST_IMAGE_PATHS", "_____no_output_____" ] ], [ [ "# Detection", "_____no_output_____" ], [ "Load an object detection model:", "_____no_output_____" ] ], [ [ "model_name = 'ssd_mobilenet_v1_coco_2017_11_17'\ndetection_model = load_model(model_name)", "_____no_output_____" ] ], [ [ "Check the model's input signature, it expects a batch of 3-color images of type uint8: ", "_____no_output_____" ] ], [ [ "print(detection_model.inputs)", "_____no_output_____" ] ], [ [ "And returns several outputs:", "_____no_output_____" ] ], [ [ "detection_model.output_dtypes", "_____no_output_____" ], [ "detection_model.output_shapes", "_____no_output_____" ] ], [ [ "Add a wrapper function to call the model, and cleanup the outputs:", "_____no_output_____" ] ], [ [ "def run_inference_for_single_image(model, image):\n image = np.asarray(image)\n # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.\n input_tensor = tf.convert_to_tensor(image)\n # The model expects a batch of images, so add an axis with `tf.newaxis`.\n input_tensor = input_tensor[tf.newaxis,...]\n\n # Run inference\n output_dict = model(input_tensor)\n\n # All outputs are batches tensors.\n # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n # We're only interested in the first num_detections.\n num_detections = int(output_dict.pop('num_detections'))\n output_dict = {key:value[0, :num_detections].numpy() \n for key,value in output_dict.items()}\n output_dict['num_detections'] = num_detections\n\n # detection_classes should be ints.\n output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n \n # Handle models with masks:\n if 'detection_masks' in output_dict:\n # 
Reframe the the bbox mask to the image size.\n detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n output_dict['detection_masks'], output_dict['detection_boxes'],\n image.shape[0], image.shape[1]) \n detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,\n tf.uint8)\n output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()\n \n return output_dict", "_____no_output_____" ] ], [ [ "Run it on each test image and show the results:", "_____no_output_____" ] ], [ [ "def show_inference(model, image_path):\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n image_np = np.array(Image.open(image_path))\n # Actual detection.\n output_dict = run_inference_for_single_image(model, image_np)\n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n output_dict['detection_boxes'],\n output_dict['detection_classes'],\n output_dict['detection_scores'],\n category_index,\n instance_masks=output_dict.get('detection_masks_reframed', None),\n use_normalized_coordinates=True,\n line_thickness=8)\n\n display(Image.fromarray(image_np))", "_____no_output_____" ], [ "for image_path in TEST_IMAGE_PATHS:\n show_inference(detection_model, image_path)\n", "_____no_output_____" ] ], [ [ "## Instance Segmentation", "_____no_output_____" ] ], [ [ "model_name = \"mask_rcnn_inception_resnet_v2_atrous_coco_2018_01_28\"\nmasking_model = load_model(model_name)", "_____no_output_____" ] ], [ [ "The instance segmentation model includes a `detection_masks` output:", "_____no_output_____" ] ], [ [ "masking_model.output_shapes", "_____no_output_____" ], [ "for image_path in TEST_IMAGE_PATHS:\n show_inference(masking_model, image_path)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4af7d34d945a5501402aa894af6ef733a9e4136d
2,290
ipynb
Jupyter Notebook
examples/Trajectory Clustering.ipynb
ClaudioZeni/Raffy
50817af872f84c946a956c86aaa8bf352bd74fce
[ "Apache-2.0" ]
null
null
null
examples/Trajectory Clustering.ipynb
ClaudioZeni/Raffy
50817af872f84c946a956c86aaa8bf352bd74fce
[ "Apache-2.0" ]
null
null
null
examples/Trajectory Clustering.ipynb
ClaudioZeni/Raffy
50817af872f84c946a956c86aaa8bf352bd74fce
[ "Apache-2.0" ]
null
null
null
27.261905
311
0.588646
[ [ [ "## This notebook serves as a simple example on how to run the clustering algorithm on a MD trajectory file", "_____no_output_____" ] ], [ [ "from raffy import trajectory_cluster as tc", "/home/claudio/postdoc/venv/raffy/lib/python3.8/site-packages/ray/autoscaler/_private/cli_logger.py:57: FutureWarning: Not all Ray CLI dependencies were found. In Ray 1.4+, the Ray CLI, autoscaler, and dashboard will only be usable via `pip install 'ray[default]'`. Please update your install command.\n warnings.warn(\n" ], [ "# Choose parameters\n\nframes = ':' # Indicate which frames to analyse\nk = 4 # Number of clusters to be found\nncores = 1 # For multiprocessing, requires the ray package\ncut = 4.42 # Cutoff in Angstrom of the descriptor. If not specified it is automatically set\nfilename = \"data/Au/example.xyz\" # The trajectory file can be in .xyz or .dump format\n\n# Run the clustering. This will generate another .xyz file containing the label of each atom in a \"tags\" column.\ntc.trajectory_cluster(filename, index = frames, k = k, ncores = ncores, cut=cut)", "Finished. The Labeled trajectory file can be found at data/Au/example_clustered_k=4_cut=5.17.xyz\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
4af7ec925fd133d64929113d2cd15822ce6ebc0e
10,118
ipynb
Jupyter Notebook
components/gcp/dataflow/launch_template/sample.ipynb
cliveseldon/pipelines
c7300c343cca9b22c9c5bdb64823e312dac1e495
[ "Apache-2.0" ]
null
null
null
components/gcp/dataflow/launch_template/sample.ipynb
cliveseldon/pipelines
c7300c343cca9b22c9c5bdb64823e312dac1e495
[ "Apache-2.0" ]
2
2019-10-29T05:39:48.000Z
2019-11-18T08:06:42.000Z
components/gcp/dataflow/launch_template/sample.ipynb
ckadner/repo-that-should-be-a-fork
d4aabd15b15022999da7660d2bc808347b2d9f06
[ "Apache-2.0" ]
2
2019-10-15T03:06:15.000Z
2019-10-15T03:10:39.000Z
36.007117
464
0.611188
[ [ [ "# Name\nData preparation by using a template to submit a job to Cloud Dataflow\n\n# Labels\nGCP, Cloud Dataflow, Kubeflow, Pipeline\n\n# Summary\nA Kubeflow Pipeline component to prepare data by using a template to submit a job to Cloud Dataflow.\n\n# Details\n\n## Intended use\nUse this component when you have a pre-built Cloud Dataflow template and want to launch it as a step in a Kubeflow Pipeline.\n\n## Runtime arguments\nArgument | Description | Optional | Data type | Accepted values | Default |\n:--- | :---------- | :----------| :----------| :---------- | :----------|\nproject_id | The ID of the Google Cloud Platform (GCP) project to which the job belongs. | No | GCPProjectID | | |\ngcs_path | The path to a Cloud Storage bucket containing the job creation template. It must be a valid Cloud Storage URL beginning with 'gs://'. | No | GCSPath | | |\nlaunch_parameters | The parameters that are required to launch the template. The schema is defined in [LaunchTemplateParameters](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters). The parameter `jobName` is replaced by a generated name. | Yes | Dict | A JSON object which has the same structure as [LaunchTemplateParameters](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters) | None |\nlocation | The regional endpoint to which the job request is directed.| Yes | GCPRegion | | None |\nstaging_dir | The path to the Cloud Storage directory where the staging files are stored. A random subdirectory will be created under the staging directory to keep the job information. This is done so that you can resume the job in case of failure.| Yes | GCSPath | | None |\nvalidate_only | If True, the request is validated but not executed. | Yes | Boolean | | False |\nwait_interval | The number of seconds to wait between calls to get the status of the job. | Yes | Integer | | 30 |\n\n## Input data schema\n\nThe input `gcs_path` must contain a valid Cloud Dataflow template. The template can be created by following the instructions in [Creating Templates](https://cloud.google.com/dataflow/docs/guides/templates/creating-templates). You can also use [Google-provided templates](https://cloud.google.com/dataflow/docs/guides/templates/provided-templates).\n\n## Output\nName | Description\n:--- | :----------\njob_id | The id of the Cloud Dataflow job that is created.\n\n## Caution & requirements\n\nTo use the component, the following requirements must be met:\n- Cloud Dataflow API is enabled.\n- The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n- The Kubeflow user service account is a member of:\n - `roles/dataflow.developer` role of the project.\n - `roles/storage.objectViewer` role of the Cloud Storage Object `gcs_path.`\n - `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir.` \n\n## Detailed description\nYou can execute the template locally by following the instructions in [Executing Templates](https://cloud.google.com/dataflow/docs/guides/templates/executing-templates). See the sample code below to learn how to execute the template.\nFollow these steps to use the component in a pipeline:\n1. Install the Kubeflow Pipeline SDK:\n", "_____no_output_____" ] ], [ [ "%%capture --no-stderr\n\nKFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'\n!pip3 install $KFP_PACKAGE --upgrade", "_____no_output_____" ] ], [ [ "2. 
Load the component using KFP SDK", "_____no_output_____" ] ], [ [ "import kfp.components as comp\n\ndataflow_template_op = comp.load_component_from_url(\n 'https://raw.githubusercontent.com/kubeflow/pipelines/1.0.0/components/gcp/dataflow/launch_template/component.yaml')\nhelp(dataflow_template_op)", "_____no_output_____" ] ], [ [ "### Sample\n\nNote: The following sample code works in an IPython notebook or directly in Python code.\nIn this sample, we run a Google-provided word count template from `gs://dataflow-templates/latest/Word_Count`. The template takes a text file as input and outputs word counts to a Cloud Storage bucket. Here is the sample input:", "_____no_output_____" ] ], [ [ "!gsutil cat gs://dataflow-samples/shakespeare/kinglear.txt", "_____no_output_____" ] ], [ [ "#### Set sample parameters", "_____no_output_____" ] ], [ [ "# Required Parameters\nPROJECT_ID = '<Please put your project ID here>'\nGCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash", "_____no_output_____" ], [ "# Optional Parameters\nEXPERIMENT_NAME = 'Dataflow - Launch Template'\nOUTPUT_PATH = '{}/out/wc'.format(GCS_WORKING_DIR)", "_____no_output_____" ] ], [ [ "#### Example pipeline that uses the component", "_____no_output_____" ] ], [ [ "import kfp.dsl as dsl\nimport json\[email protected](\n name='Dataflow launch template pipeline',\n description='Dataflow launch template pipeline'\n)\ndef pipeline(\n project_id = PROJECT_ID, \n gcs_path = 'gs://dataflow-templates/latest/Word_Count', \n launch_parameters = json.dumps({\n 'parameters': {\n 'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',\n 'output': OUTPUT_PATH\n }\n }), \n location = '',\n validate_only = 'False', \n staging_dir = GCS_WORKING_DIR,\n wait_interval = 30):\n dataflow_template_op(\n project_id = project_id, \n gcs_path = gcs_path, \n launch_parameters = launch_parameters, \n location = location, \n validate_only = validate_only,\n staging_dir = staging_dir,\n wait_interval = wait_interval)", "_____no_output_____" ] ], [ [ "#### Compile the pipeline", "_____no_output_____" ] ], [ [ "pipeline_func = pipeline\npipeline_filename = pipeline_func.__name__ + '.zip'\nimport kfp.compiler as compiler\ncompiler.Compiler().compile(pipeline_func, pipeline_filename)", "_____no_output_____" ] ], [ [ "#### Submit the pipeline for execution", "_____no_output_____" ] ], [ [ "#Specify pipeline argument values\narguments = {}\n\n#Get or create an experiment and submit a pipeline run\nimport kfp\nclient = kfp.Client()\nexperiment = client.create_experiment(EXPERIMENT_NAME)\n\n#Submit a pipeline run\nrun_name = pipeline_func.__name__ + ' run'\nrun_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)", "_____no_output_____" ] ], [ [ "#### Inspect the output", "_____no_output_____" ] ], [ [ "!gsutil cat $OUTPUT_PATH*", "_____no_output_____" ] ], [ [ "## References\n\n* [Component python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataflow/_launch_template.py)\n* [Component docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)\n* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataflow/launch_template/sample.ipynb)\n* [Cloud Dataflow Templates overview](https://cloud.google.com/dataflow/docs/guides/templates/overview)\n\n## License\nBy deploying or using this software you agree to comply with the [AI Hub Terms of 
Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4af804ad7bd1bc23340b53cc8f2370896273dbe6
3,131
ipynb
Jupyter Notebook
notebooks/ensemble_ex_01.ipynb
lesteve/scikit-learn-mooc
b822586b98e71dbbf003bde86be57412cb170291
[ "CC-BY-4.0" ]
1
2022-01-25T19:20:21.000Z
2022-01-25T19:20:21.000Z
notebooks/ensemble_ex_01.ipynb
lesteve/scikit-learn-mooc
b822586b98e71dbbf003bde86be57412cb170291
[ "CC-BY-4.0" ]
null
null
null
notebooks/ensemble_ex_01.ipynb
lesteve/scikit-learn-mooc
b822586b98e71dbbf003bde86be57412cb170291
[ "CC-BY-4.0" ]
null
null
null
29.819048
131
0.6145
[ [ [ "# 📝 Exercise M6.01\n\nThe aim of this notebook is to investigate if we can tune the hyperparameters\nof a bagging regressor and evaluate the gain obtained.\n\nWe will load the California housing dataset and split it into a training and\na testing set.", "_____no_output_____" ] ], [ [ "from sklearn.datasets import fetch_california_housing\nfrom sklearn.model_selection import train_test_split\n\ndata, target = fetch_california_housing(as_frame=True, return_X_y=True)\ntarget *= 100 # rescale the target in k$\ndata_train, data_test, target_train, target_test = train_test_split(\n data, target, random_state=0, test_size=0.5)", "_____no_output_____" ] ], [ [ "<div class=\"admonition note alert alert-info\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Note</p>\n<p class=\"last\">If you want a deeper overview regarding this dataset, you can refer to the\nAppendix - Datasets description section at the end of this MOOC.</p>\n</div>", "_____no_output_____" ], [ "Create a `BaggingRegressor` and provide a `DecisionTreeRegressor`\nto its parameter `base_estimator`. Train the regressor and evaluate its\ngeneralization performance on the testing set using the mean absolute error.", "_____no_output_____" ] ], [ [ "# Write your code here.", "_____no_output_____" ] ], [ [ "Now, create a `RandomizedSearchCV` instance using the previous model and\ntune the important parameters of the bagging regressor. Find the best\nparameters and check if you are able to find a set of parameters that\nimprove the default regressor still using the mean absolute error as a\nmetric.\n\n<div class=\"admonition tip alert alert-warning\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Tip</p>\n<p class=\"last\">You can list the bagging regressor's parameters using the <tt class=\"docutils literal\">get_params</tt>\nmethod.</p>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code here.", "_____no_output_____" ] ], [ [ "We see that the predictor provided by the bagging regressor does not need\nmuch hyperparameter tuning compared to a single decision tree.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4af81080faa87797dace58386d6acba8ec0b3eec
119,399
ipynb
Jupyter Notebook
Hyperparameter Tuning Result Analysis.ipynb
kevinyang372/HYU_PH
5f4b3c9d4f8caed1c228077f5b630e813ba93a7b
[ "MIT" ]
null
null
null
Hyperparameter Tuning Result Analysis.ipynb
kevinyang372/HYU_PH
5f4b3c9d4f8caed1c228077f5b630e813ba93a7b
[ "MIT" ]
null
null
null
Hyperparameter Tuning Result Analysis.ipynb
kevinyang372/HYU_PH
5f4b3c9d4f8caed1c228077f5b630e813ba93a7b
[ "MIT" ]
null
null
null
50.592797
11,960
0.463639
[ [ [ "import talos as ta\nimport pandas as pd", "_____no_output_____" ], [ "d = pd.read_csv('higgs_nn_1.csv')\nr = ta.Reporting('higgs_nn_1.csv')\nr.table()", "_____no_output_____" ], [ "d_2 = pd.read_csv('higgs_nn_2.csv')\nr_2 = ta.Reporting('higgs_nn_2.csv')\nr_2.table()", "_____no_output_____" ], [ "r.plot_hist()", "_____no_output_____" ], [ "r.high()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
4af812ff7311df2fef4a1f7fa328d27b54f7eaec
539,597
ipynb
Jupyter Notebook
dsc450-databases-for-analytics/DSC450_TakeHomeFinal.ipynb
AlexTeboul/msds
e668b67ef5f47cc440fa02e28165e9f1775a4c74
[ "MIT" ]
1
2022-03-15T18:41:58.000Z
2022-03-15T18:41:58.000Z
dsc450-databases-for-analytics/DSC450_TakeHomeFinal.ipynb
AlexTeboul/msds
e668b67ef5f47cc440fa02e28165e9f1775a4c74
[ "MIT" ]
null
null
null
dsc450-databases-for-analytics/DSC450_TakeHomeFinal.ipynb
AlexTeboul/msds
e668b67ef5f47cc440fa02e28165e9f1775a4c74
[ "MIT" ]
null
null
null
37.941007
397
0.473301
[ [ [ "### Problem 1\n\n__We will use a full day worth of tweets as an input (there are total of 4.4M tweets in this file, but you only need to read 1M):__ http://rasinsrv07.cstcis.cti.depaul.edu/CSC455/OneDayOfTweets.txt", "_____no_output_____" ], [ "__a. Create a 3rd table incorporating the Geo table (in addition to tweet and user tables that you already have from HW4 and HW5) and extend your schema accordingly. You do not need to use ALTER TABLE, it is sufficient to just re-make your schema.__\n\n__You will need to generate an ID for the Geo table primary key (you may use any value or reasonable combination of values as long as it is unique) for that table and link it to the Tweet table (foreign key should be in the Tweet). In addition to the primary key column, the geo table should have at least the “type”, “longitude” and “latitude” columns.__\n", "_____no_output_____" ] ], [ [ "#imports\nimport urllib.request, time, json, sqlite3\n\n#setup\nconn = sqlite3.connect('Tweets_Database_THF1.db') #db connection\nc = conn.cursor()\nwFD = urllib.request.urlopen('http://rasinsrv07.cstcis.cti.depaul.edu/CSC455/OneDayOfTweets.txt') #get the file", "_____no_output_____" ], [ "#c.execute('DROP TABLE IF EXISTS User');\n#c.execute('DROP TABLE IF EXISTS Tweets');\n#c.execute('DROP TABLE IF EXISTS Geo');\nwFD.close()\nfdErr.close()\nc.close()\nconn.commit()\nconn.close()", "_____no_output_____" ], [ "#create User Table\ncreate_UserTable = '''CREATE TABLE User (\n ID INTEGER,\n NAME TEXT,\n SCREEN_NAME TEXT,\n DESCRIPTION TEXT,\n FRIENDS_COUNT INTEGER,\n CONSTRAINT User_pk PRIMARY KEY(ID)\n );'''\nc.execute('DROP TABLE IF EXISTS User')\nc.execute(create_UserTable)", "_____no_output_____" ], [ "#create Tweets Table\ncreate_TweetsTable = '''CREATE TABLE Tweets (\n ID INTEGER,\n Created_At DATE,\n Text TEXT,\n Source TEXT,\n In_Reply_to_User_ID INTEGER,\n In_Reply_to_Screen_Name TEXT,\n In_Reply_to_Status_ID INTEGER,\n Retweet_Count INTEGER,\n Contributors TEXT,\n User_ID INTEGER,\n Geo_ID Text,\n \n CONSTRAINT Tweet_pk PRIMARY KEY(ID),\n CONSTRAINT tweet_fk1 FOREIGN KEY (User_ID) REFERENCES User(ID),\n CONSTRAINT tweet_fk2 FOREIGN KEY (Geo_ID) REFERENCES Geo(ID)\n \n );'''\nc.execute('DROP TABLE IF EXISTS Tweets')\nc.execute(create_TweetsTable)", "_____no_output_____" ], [ "#create Geo Table\ncreate_GeoTable = '''CREATE TABLE Geo (\n ID Text,\n Type Text,\n Latitude INTEGER,\n Longitude INTEGER,\n CONSTRAINT Geo_pk PRIMARY KEY(ID)\n );'''\nc.execute('DROP TABLE IF EXISTS Geo')\nc.execute(create_GeoTable)", "_____no_output_____" ] ], [ [ "__b. Use python to download from the web and save to a local text file (not into database yet, just to text file) at least 1,000,000 lines worth of tweets. Test your code with fewer rows first and only time it when you know it works. Report how long did it take.__\n\n__NOTE: Do not call read() or readlines() without any parameters at any point. 
That command will attempt to read the entire file which is too much data.__", "_____no_output_____" ] ], [ [ "#open files\nstart = time.time()\ndb_file = open('THF_db.txt', 'w') #\ndb_err_file = open('THF_db_errors.txt', 'w')\n\nfor i in range(1000000): #for lines 1 through 1,000,0000\n line = wFD.readline()\n try:\n db_file.write(line.decode()) #write to the database txt file\n except ValueError:\n db_err_file.write(line.decode() + '\\n') #catch errors if they come up\n\n#close files\ndb_file.close()\ndb_err_file.close()\nend = time.time()\n\nprint(\"Part b file writing took \", (end-start), ' seconds.')", "Part b file writing took 1662.4021990299225 seconds.\n" ] ], [ [ "__c. Repeat what you did in part-b, but instead of saving tweets to the file, populate the 3-table schema that you created in SQLite. Be sure to execute commit and verify that the data has been successfully loaded (report loaded row counts for each of the 3 tables).__", "_____no_output_____" ] ], [ [ "start = time.time()\nfdErr = open('THF_error.txt', 'w', errors = 'replace')\ntweetBatch = []\nuserBatch = []\ngeoBatch = []\nloadCounter = 0\n\n# There is a total of 1,000,000 tweets, but we will do a for-loop here\nfor i in range(1000000):\n \n line = wFD.readline()\n \n try: \n tweetDict = json.loads(line) # This is the dictionary for tweet info\n loadCounter = loadCounter + 1\n\n #------------------------------------\n #Tweet Table\n newRowTweet = [] # hold individual values of to-be-inserted row\n tweetKeys = ['id_str','created_at','text','source','in_reply_to_user_id', \n 'in_reply_to_screen_name', 'in_reply_to_status_id', 'retweet_count', 'contributors']\n \n for key in tweetKeys: # For each dictionary key we want\n if tweetDict[key] == 'null' or tweetDict[key] == '':\n newRowTweet.append(None) #null\n else:\n newRowTweet.append(tweetDict[key]) # use value as-is\n \n #Adds in user_id\n userDict = tweetDict['user'] # This the the dictionary for user information\n newRowTweet.append(userDict['id']) # User id/ foreign key\n \n #Adds in geo_id\n geoDict = tweetDict['geo']\n if tweetDict['geo']:\n newRowTweet.append(str(tweetDict['geo']['coordinates'])) #geo_id is the latitude/longitude as a string\n else:\n newRowTweet.append(None) # Geo info is missing\n \n #batching\n if loadCounter < 50: # Batching 1 at a time\n tweetBatch.append(newRowTweet)\n else:\n c.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch)\n tweetBatch = [] # Reset the list of batched tweets\n \n #------------------------------------\n #User Table\n newRowUser = [] # hold individual values of to-be-inserted row for user table\n userKeys = ['id', 'name', 'screen_name', 'description', 'friends_count']\n\n for key in userKeys: # For each dictionary key we want\n if userDict[key] == 'null' or userDict[key] == '':\n newRowUser.append(None) # proper NULL\n else:\n newRowUser.append(userDict[key]) # use value as-is\n \n #batching\n if loadCounter < 50: # Batching 1 at a time\n userBatch.append(newRowUser)\n else:\n c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch)\n loadCounter = 0\n userBatch = [] # Reset the list of batched users\n\n #------------------------------------\n #Geo Table\n newRowGeo = [] # hold individual values of to-be-inserted row for geo table\n geoKeys = ['id','type','latitude', 'longitude']\n \n if tweetDict['geo'] == 'null' or tweetDict['geo'] == '' or tweetDict['geo'] is None:\n #do nothing\n continue\n else:\n #id\n newRowGeo.append(str(tweetDict['geo']['coordinates']))\n 
#type\n newRowGeo.append(tweetDict['geo']['type'])\n #latitude\n newRowGeo.append(tweetDict['geo']['coordinates'][0])\n #longitude\n newRowGeo.append(tweetDict['geo']['coordinates'][1])\n \n\n if loadCounter < 50: # Batching 1 at a time\n geoBatch.append(newRowGeo)\n else:\n c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?)', geoBatch)\n loadCounter = 0\n geoBatch = [] # Reset the list of batched geos\n\n except ValueError: # Handle the error of JSON parsing\n fdErr.write(line.decode() + '\\n') \n\n# Final batch (the remaining less-than-50 rows to be loaded)\nc.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch)\nc.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch)\nc.executemany ('INSERT OR IGNORE INTO Geo VALUES(?,?,?,?)', geoBatch)\n\nprint (\"Loaded \", c.execute('SELECT COUNT(*) FROM Tweets').fetchall()[0], \" Tweet rows\")\nprint (\"Loaded \", c.execute('SELECT COUNT(*) FROM User').fetchall()[0], \" User rows\")\nprint (\"Loaded \", c.execute('SELECT COUNT(*) FROM Geo').fetchall()[0], \" Geo rows\")\n\nwFD.close()\nfdErr.close()\nc.close()\nconn.commit()\nconn.close()\n\nend = time.time()\nprint(\"Part c file writing took \", (end-start), ' seconds.')", "Loaded (732324,) Tweet rows\nLoaded (641680,) User rows\nLoaded (18084,) Geo rows\nPart c file writing took 479.5136480331421 seconds.\n" ], [ "c.execute('SELECT * FROM Geo LIMIT 2').fetchall()", "_____no_output_____" ], [ "c.execute('SELECT * FROM Tweets LIMIT 2').fetchall()", "_____no_output_____" ], [ "c.execute('SELECT * FROM User LIMIT 2').fetchall()", "_____no_output_____" ] ], [ [ "__How long did this step take?__", "_____no_output_____" ], [ "It took:", "_____no_output_____" ], [ "__d. Use your locally saved tweet file (created in part-b) to repeat the database population step from part-c. 
That is, load 1,000,000 tweets into the 3-table database using your saved file with tweets (do not use the URL to read twitter data).__", "_____no_output_____" ] ], [ [ "start = time.time()\n#open the database text file\nf = open(\"THF_db.txt\", 'r', encoding='utf-8')\n\nfdErr = open('THF_error.txt', 'w', errors = 'replace')\ntweetBatch = []\nuserBatch = []\ngeoBatch = []\nloadCounter = 0\n\n# There is a total of 1,000,000 tweets, but we will do a for-loop here\nfor i in range(1000000):\n \n line = f.readline()\n \n try: \n tweetDict = json.loads(line) # This is the dictionary for tweet info\n loadCounter = loadCounter + 1\n\n #------------------------------------\n #Tweet Table\n newRowTweet = [] # hold individual values of to-be-inserted row\n tweetKeys = ['id_str','created_at','text','source','in_reply_to_user_id', \n 'in_reply_to_screen_name', 'in_reply_to_status_id', 'retweet_count', 'contributors']\n \n for key in tweetKeys: # For each dictionary key we want\n if tweetDict[key] == 'null' or tweetDict[key] == '':\n newRowTweet.append(None) #null\n else:\n newRowTweet.append(tweetDict[key]) # use value as-is\n \n #Adds in user_id\n userDict = tweetDict['user'] # This the the dictionary for user information\n newRowTweet.append(userDict['id']) # User id/ foreign key\n \n #Adds in geo_id\n geoDict = tweetDict['geo']\n if tweetDict['geo']:\n newRowTweet.append(str(tweetDict['geo']['coordinates'])) #geo_id is the latitude/longitude as a string\n else:\n newRowTweet.append(None) # Geo info is missing\n \n #batching\n if loadCounter < 50: # Batching 1 at a time\n tweetBatch.append(newRowTweet)\n else:\n c.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch)\n tweetBatch = [] # Reset the list of batched tweets\n \n #------------------------------------\n #User Table\n newRowUser = [] # hold individual values of to-be-inserted row for user table\n userKeys = ['id', 'name', 'screen_name', 'description', 'friends_count']\n\n for key in userKeys: # For each dictionary key we want\n if userDict[key] == 'null' or userDict[key] == '':\n newRowUser.append(None) # proper NULL\n else:\n newRowUser.append(userDict[key]) # use value as-is\n \n #batching\n if loadCounter < 50: # Batching 1 at a time\n userBatch.append(newRowUser)\n else:\n c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch)\n loadCounter = 0\n userBatch = [] # Reset the list of batched users\n\n #------------------------------------\n #Geo Table\n newRowGeo = [] # hold individual values of to-be-inserted row for geo table\n geoKeys = ['id','type','latitude', 'longitude']\n \n if tweetDict['geo'] == 'null' or tweetDict['geo'] == '' or tweetDict['geo'] is None:\n #do nothing\n continue\n else:\n #id\n newRowGeo.append(str(tweetDict['geo']['coordinates']))\n #type\n newRowGeo.append(tweetDict['geo']['type'])\n #latitude\n newRowGeo.append(tweetDict['geo']['coordinates'][0])\n #longitude\n newRowGeo.append(tweetDict['geo']['coordinates'][1])\n \n\n if loadCounter < 50: # Batching 1 at a time\n geoBatch.append(newRowGeo)\n else:\n c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?)', geoBatch)\n loadCounter = 0\n geoBatch = [] # Reset the list of batched geos\n\n except ValueError: # Handle the error of JSON parsing\n fdErr.write(line.decode() + '\\n') \n\n# Final batch (the remaining less-than-50 rows to be loaded)\nc.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch)\nc.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', 
userBatch)\nc.executemany ('INSERT OR IGNORE INTO Geo VALUES(?,?,?,?)', geoBatch)\n\nprint (\"Loaded \", c.execute('SELECT COUNT(*) FROM Tweets').fetchall()[0], \" Tweet rows\")\nprint (\"Loaded \", c.execute('SELECT COUNT(*) FROM User').fetchall()[0], \" User rows\")\nprint (\"Loaded \", c.execute('SELECT COUNT(*) FROM Geo').fetchall()[0], \" Geo rows\")\n\nf.close()\n\nwFD.close()\nfdErr.close()\nc.close()\nconn.commit()\nconn.close()\nend = time.time()\nprint(\"Part d file writing took \", (end-start), ' seconds.')", "Loaded (979569,) Tweet rows\nLoaded (840278,) User rows\nLoaded (23993,) Geo rows\nPart d file writing took 585.7131180763245 seconds.\n" ] ], [ [ "__How does the runtime compare with part-c?__", "_____no_output_____" ], [ "Compared to part-c it took:", "_____no_output_____" ], [ "__e. Re-run the previous step with a batching size of 1000 (i.e. by inserting 1000 rows at a time with executemany).__", "_____no_output_____" ] ], [ [ "start = time.time()\n#open the database text file\nf = open(\"THF_db.txt\", 'r', encoding='utf-8')\n\nfdErr = open('THF_error.txt', 'w', errors = 'replace')\ntweetBatch = []\nuserBatch = []\ngeoBatch = []\nloadCounter = 0\n\n# There is a total of 1,000,000 tweets, but we will do a for-loop here\nfor i in range(1000000):\n \n line = f.readline()\n \n try: \n tweetDict = json.loads(line) # This is the dictionary for tweet info\n loadCounter = loadCounter + 1\n\n #------------------------------------\n #Tweet Table\n newRowTweet = [] # hold individual values of to-be-inserted row\n tweetKeys = ['id_str','created_at','text','source','in_reply_to_user_id', \n 'in_reply_to_screen_name', 'in_reply_to_status_id', 'retweet_count', 'contributors']\n \n for key in tweetKeys: # For each dictionary key we want\n if tweetDict[key] == 'null' or tweetDict[key] == '':\n newRowTweet.append(None) #null\n else:\n newRowTweet.append(tweetDict[key]) # use value as-is\n \n #Adds in user_id\n userDict = tweetDict['user'] # This the the dictionary for user information\n newRowTweet.append(userDict['id']) # User id/ foreign key\n \n #Adds in geo_id\n geoDict = tweetDict['geo']\n if tweetDict['geo']:\n newRowTweet.append(str(tweetDict['geo']['coordinates'])) #geo_id is the latitude/longitude as a string\n else:\n newRowTweet.append(None) # Geo info is missing\n \n #batching\n if loadCounter < 1000: # Batching 1 at a time\n tweetBatch.append(newRowTweet)\n else:\n c.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch)\n tweetBatch = [] # Reset the list of batched tweets\n \n #------------------------------------\n #User Table\n newRowUser = [] # hold individual values of to-be-inserted row for user table\n userKeys = ['id', 'name', 'screen_name', 'description', 'friends_count']\n\n for key in userKeys: # For each dictionary key we want\n if userDict[key] == 'null' or userDict[key] == '':\n newRowUser.append(None) # proper NULL\n else:\n newRowUser.append(userDict[key]) # use value as-is\n \n #batching\n if loadCounter < 1000: # Batching 1 at a time\n userBatch.append(newRowUser)\n else:\n c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch)\n loadCounter = 0\n userBatch = [] # Reset the list of batched users\n\n #------------------------------------\n #Geo Table\n newRowGeo = [] # hold individual values of to-be-inserted row for geo table\n geoKeys = ['id','type','latitude', 'longitude']\n \n if tweetDict['geo'] == 'null' or tweetDict['geo'] == '' or tweetDict['geo'] is None:\n #do nothing\n continue\n else:\n #id\n 
newRowGeo.append(str(tweetDict['geo']['coordinates']))\n #type\n newRowGeo.append(tweetDict['geo']['type'])\n #latitude\n newRowGeo.append(tweetDict['geo']['coordinates'][0])\n #longitude\n newRowGeo.append(tweetDict['geo']['coordinates'][1])\n \n\n if loadCounter < 1000: # Batching 1 at a time\n geoBatch.append(newRowGeo)\n else:\n c.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?)', geoBatch)\n loadCounter = 0\n geoBatch = [] # Reset the list of batched geos\n\n except ValueError: # Handle the error of JSON parsing\n fdErr.write(line.decode() + '\\n') \n\n# Final batch (the remaining less-than-50 rows to be loaded)\nc.executemany ('INSERT OR IGNORE INTO Tweets VALUES(?,?,?,?,?,?,?,?,?,?,?)', tweetBatch)\nc.executemany ('INSERT OR IGNORE INTO User VALUES(?,?,?,?,?)', userBatch)\nc.executemany ('INSERT OR IGNORE INTO Geo VALUES(?,?,?,?)', geoBatch)\n\nprint (\"Loaded \", c.execute('SELECT COUNT(*) FROM Tweets').fetchall()[0], \" Tweet rows\")\nprint (\"Loaded \", c.execute('SELECT COUNT(*) FROM User').fetchall()[0], \" User rows\")\nprint (\"Loaded \", c.execute('SELECT COUNT(*) FROM Geo').fetchall()[0], \" Geo rows\")\n\nf.close()\n\nwFD.close()\nfdErr.close()\nc.close()\nconn.commit()\nconn.close()\nend = time.time()\nprint(\"Part e file writing took \", (end-start), ' seconds.')", "Loaded (998554,) Tweet rows\nLoaded (854707,) User rows\nLoaded (23993,) Geo rows\nPart d file writing took 348.1966998577118 seconds.\n" ] ], [ [ "__How does the runtime compare when batching is used?__", "_____no_output_____" ], [ "The runtime with batching:", "_____no_output_____" ], [ "### Problem 2\n__a. Write and execute SQL queries to do the following. Don’t forget to report the running times in each part and the code you used.__", "_____no_output_____" ], [ "__i. Find tweets where tweet id_str contains “55” or “88” anywhere in the column__", "_____no_output_____" ] ], [ [ "start = time.time()\nc.execute('SELECT * FROM Tweets WHERE id LIKE \"%55%\" or id LIKE \"%88%\" ').fetchall()\nend = time.time()\nprint(\"Part i query took \", (end-start), ' seconds.')", "Part e file writing took 5.004155158996582 seconds.\n" ], [ "c.execute('SELECT * FROM Tweets WHERE id LIKE \"%55%\" or id LIKE \"%88%\" ').fetchall()", "_____no_output_____" ] ], [ [ "__ii. Find how many unique values are there in the “in_reply_to_user_id” column__", "_____no_output_____" ] ], [ [ "start = time.time()\nc.execute('SELECT COUNT(DISTINCT in_reply_to_user_id) AS num_replies FROM Tweets').fetchall()\nend = time.time()\nprint(\"Part ii query took \", (end-start), ' seconds.')", "Part ii file writing took 1.919631004333496 seconds.\n" ], [ "c.execute('SELECT COUNT(DISTINCT in_reply_to_user_id) AS num_replies FROM Tweets').fetchall()", "_____no_output_____" ] ], [ [ "__iii. Find the tweet(s) with the shortest, longest and average length text message.__", "_____no_output_____" ] ], [ [ "start = time.time()\nc.execute('SELECT MIN(LENGTH(Text)) AS shortest, MAX(LENGTH(Text)) AS longest, AVG(LENGTH(Text)) \\\n AS average FROM Tweets').fetchall()\nend = time.time()\nprint(\"Part iii query took \", (end-start), ' seconds.')", "Part iii file writing took 0.9016211032867432 seconds.\n" ], [ "start = time.time()\nc.execute('SELECT * FROM TWEETS WHERE LENGTH(Text) IN (1,434,68.83193998521863)').fetchall()\nend = time.time()\nprint(\"Part iii query took \", (end-start), ' seconds.')", "Part iii file writing took 0.5746147632598877 seconds.\n" ] ], [ [ "__iv. 
Find the average longitude and latitude value for each user name.__", "_____no_output_____" ] ], [ [ "start = time.time()\nc.execute('SELECT screen_name, AVG(latitude), AVG(longitude) FROM User \\\n JOIN Tweets ON User.ID=Tweets.user_id \\\n JOIN Geo ON Tweets.geo_id=Geo.ID \\\n GROUP BY screen_name').fetchall()\nend = time.time()\nprint(\"Part iv query took \", (end-start), ' seconds.')", "Part iv query took 6.078100919723511 seconds.\n" ], [ "c.execute('SELECT screen_name, AVG(latitude), AVG(longitude) FROM User \\\n JOIN Tweets ON User.ID=Tweets.user_id \\\n JOIN Geo ON Tweets.geo_id=Geo.ID \\\n GROUP BY screen_name').fetchall()", "_____no_output_____" ] ], [ [ "__v. Find how many known/unknown locations there were in total (e.g., 50,000 known, 950,000 unknown, 5% locations are available)__ ", "_____no_output_____" ] ], [ [ "c.execute('SELECT (COUNT(*)-COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END)) \\\n ,COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END), \\\n ROUND((COUNT(*)-COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END)) * 100.0 / \\\n COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END), 1)FROM Tweets').fetchall()", "_____no_output_____" ], [ "start = time.time()\nc.execute('SELECT (COUNT(*)-COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END)) \\\n ,COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END), \\\n ROUND((COUNT(*)-COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END)) * 100.0 / \\\n COUNT(CASE WHEN `geo_id` IS NULL THEN 1 END), 1)FROM Tweets').fetchall()\nend = time.time()\nprint(\"Part v query took \", (end-start), ' seconds.')", "Part v query took 0.8259048461914062 seconds.\n" ] ], [ [ "__vi. Re-execute the query in part iv) 10 times and 100 times and measure the total runtime (just re-run the same exact query multiple times using a for-loop). Does the runtime scale linearly? (i.e., does it take 10X and 100X as much time?)__", "_____no_output_____" ] ], [ [ "start = time.time()\n\nfor i in range(10):\n c.execute('SELECT screen_name, AVG(latitude), AVG(longitude) FROM User \\\n JOIN Tweets ON User.ID=Tweets.user_id \\\n JOIN Geo ON Tweets.geo_id=Geo.ID \\\n GROUP BY screen_name').fetchall()\nend = time.time()\nprint(\"Part iv 10x query took \", (end-start), ' seconds.')", "Part iv 10x query took 62.96931505203247 seconds.\n" ], [ "start = time.time()\n\nfor i in range(100):\n c.execute('SELECT screen_name, AVG(latitude), AVG(longitude) FROM User \\\n JOIN Tweets ON User.ID=Tweets.user_id \\\n JOIN Geo ON Tweets.geo_id=Geo.ID \\\n GROUP BY screen_name').fetchall()\nend = time.time()\nprint(\"Part iv 100x query took \", (end-start), ' seconds.')", "Part iv 100x query took 605.360847234726 seconds.\n" ] ], [ [ "__b. Write python code that is going to read the locally saved tweet data file from 1-b and perform the equivalent computation for parts 2-i and 2-ii only. 
How does the runtime compare to the SQL queries?__", "_____no_output_____" ] ], [ [ "#i - c.execute('SELECT * FROM Tweets WHERE id LIKE \"%55%\" or id LIKE \"%88%\" ').fetchall()\nimport pandas as pd\n\nstart = time.time()\nf = open(\"THF_db.txt\", 'r', encoding='utf-8')\n\ndata = []\nlabels = ['id_str','in_reply_to_user_id']\nerror_tally =0\n# Loop through the 1,000,000 tweets in the text file\nfor i in range(1000000):\n \n line = f.readline()\n \n try: \n tweetDict = json.loads(line) # This is the dictionary for tweet info\n data.append((tweetDict[\"id_str\"], tweetDict[\"in_reply_to_user_id\"]))\n \n except: #catch any error\n error_tally+=1\n\ndf=pd.DataFrame.from_records(data,columns=labels)\n\nf.close() \ndf_end = df[df['id_str'].astype(str).str.contains('55|88')]\n\nend = time.time()\nprint(\"Part 2b-i loop took \", (end-start), ' seconds.')", "Part 2b-i loop took 103.7078309059143 seconds.\n" ], [ "df_end.head(10)", "_____no_output_____" ], [ "#ii - c.execute('SELECT COUNT(DISTINCT in_reply_to_user_id) AS num_replies FROM Tweets').fetchall()\nimport pandas as pd\n\nstart = time.time()\nf = open(\"THF_db.txt\", 'r', encoding='utf-8')\n\ndata = []\nlabels = ['id_str','in_reply_to_user_id']\nerror_tally =0\n# Loop through the 1,000,000 tweets in the text file\nfor i in range(1000000):\n \n line = f.readline()\n \n try: \n tweetDict = json.loads(line) # This is the dictionary for tweet info\n data.append((tweetDict[\"id_str\"], tweetDict[\"in_reply_to_user_id\"]))\n \n except: #catch any error\n error_tally+=1\n\ndf=pd.DataFrame.from_records(data,columns=labels)\nf.close() \n\ndf_end = df['in_reply_to_user_id'].value_counts(ascending=False)\n\nend = time.time()\nprint(\"Part 2b-ii loop took \", (end-start), ' seconds.')", "Part 2b-ii loop took 100.09605598449707 seconds.\n" ], [ "df_end.head(10)", "_____no_output_____" ] ], [ [ "### Problem 3\n\n__a. Export the contents of the User table from a SQLite table into a sequence of INSERT statements within a file. This is very similar to what you already did in Assignment 4. However, you have to add a unique ID column which has to be a string (you cannot use numbers). Hint: you can replace digits with letters, e.g., chr(ord('a')+1) gives you a 'b' and chr(ord('a')+2) returns a 'c'__\n ", "_____no_output_____" ] ], [ [ "#import sqlite3\n\ndef generateInsertStatements(tblName):\n conn = sqlite3.connect('Tweets_Database_THF1.db') # Using HW3 SQLite DB (preloaded)\n c = conn.cursor()\n\n # Open file for export\n fd = open(tblName+'.txt', 'w')\n\n tblRows = c.execute('SELECT * FROM %s' % tblName)\n\n for row in tblRows:\n fd.write(\"INSERT INTO %s VALUES %s;\\n\" % (tblName, str(row)))\n \n fd.close()\n c.close()\n conn.close()\n\nstart = time.time()\ngenerateInsertStatements('User')\nend = time.time()\nprint(\"Part 3a loop took \", (end-start), ' seconds.')", "Part 2b-ii loop took 314.5617868900299 seconds.\n" ] ], [ [ "Part 2b-ii loop took 314.5617868900299 seconds.", "_____no_output_____" ], [ "__b. 
Create the same collection of INSERT for the User table by reading data from the local tweet file that you have saved earlier.__", "_____no_output_____" ] ], [ [ "def generateInsertStatements_b(tblName):\n #open the database text file\n f = open(\"THF_db.txt\", 'r', encoding='utf-8')\n #open the file to write to\n fd = open(tblName+'.txt', 'w')\n err=0\n for i in range(1000000):\n line = f.readline()\n try: \n tweetDict = json.loads(line) # This is the dictionary for tweet info\n userDict = tweetDict['user']\n \n #User Table\n newRowUser = [] # hold individual values of to-be-inserted row for user table\n userKeys = ['id', 'name', 'screen_name', 'description', 'friends_count']\n\n for key in userKeys: # For each dictionary key we want\n if userDict[key] == 'null' or userDict[key] == '':\n newRowUser.append(None) # proper NULL\n else:\n newRowUser.append(userDict[key]) # use value as-is\n fd.write(\"INSERT INTO %s VALUES %s;\\n\" % (tblName, str((newRowUser))))\n except:\n err+=1\n f.close()\n fd.close()\nstart = time.time()\ngenerateInsertStatements_b('User')\nend = time.time()\nprint(\"Part 3b loop took \", (end-start), ' seconds.')", "Part 3b loop took 134.9316029548645 seconds.\n" ] ], [ [ "__How do these compare in runtime? Which method was faster?__", "_____no_output_____" ], [ "Comparing the runtime:", "_____no_output_____" ], [ "### Problem 4\n\n__4. Export all three tables (Tweet, User and Geo tables) from the database into a |-separated text file (each value in a row should be separated by |). You do not generate SQL INSERT statements, just raw |-separated text data.__", "_____no_output_____" ] ], [ [ "#import sqlite3 \n#import pandas as pd\nconn = sqlite3.connect('Tweets_Database_THF1.db')\nc = conn.cursor()\n\n\ndf_tweets_read = pd.read_sql_query(\"SELECT * FROM Tweets;\", conn) #tweets\ndf_user_read = pd.read_sql_query(\"SELECT * FROM User;\", conn) #user\ndf_geo_read = pd.read_sql_query(\"SELECT * FROM Geo;\", conn) #geo\n\ndf_tweets_write = df_tweets_read.to_csv(\"tweets_table.txt\", sep ='|') #tweets\ndf_user_write = df_user_read.to_csv(\"user_table.txt\", sep ='|') #user\ndf_geo_write = df_geo_read.to_csv(\"geo_table.txt\", sep ='|') #geo\n\n\n\nc.close()\nconn.commit()\nconn.close()", "_____no_output_____" ] ], [ [ "__a. For the Geo table, add a new column with relative distance from a fixed point which is the location of CDM (41.878668, -87.625555). You can simply treat it as a point-to-point Euclidean distance (although bonus points for finding a real distance in miles) and round the longitude and latitude columns to a maximum of 4 digits after the decimal.__\n ", "_____no_output_____" ] ], [ [ "import sqlite3 \nimport pandas as pd\nimport numpy as np\nconn = sqlite3.connect('Tweets_Database_THF1.db')\nc = conn.cursor()\n\ndf_geo_read = pd.read_sql_query(\"SELECT * FROM Geo;\", conn) #geo\n\ndf_geo_read['Latitude'] = df_geo_read.Latitude.round(4)\ndf_geo_read['Longitude'] = df_geo_read.Longitude.round(4)\ndf_geo_read['distance'] = (df_geo_read.Latitude.sub(41.878668).pow(2).add(df_geo_read.Longitude.sub(-87.625555).pow(2))).pow(.5).round(4)\n\ndf_geo_write = df_geo_read.to_csv(\"geo_table.txt\", sep ='|') #geo\n\nc.close()\nconn.commit()\nconn.close()\ndf_geo_read.head(10)", "_____no_output_____" ] ], [ [ "__b. 
For the Tweet table, add two new columns from the User table (“name” and “screen_name”) in addition to existing columns.__\n ", "_____no_output_____" ] ], [ [ "import sqlite3 \nimport pandas as pd\nconn = sqlite3.connect('Tweets_Database_THF1.db')\nc = conn.cursor()\n\ndf_tweets_read = pd.read_sql_query(\"SELECT * FROM Tweets;\", conn) #Tweets\ndf_user_read = pd.read_sql_query(\"SELECT * FROM User;\", conn) #User\n\nnew_df = pd.merge(df_tweets_read, df_user_read, how='left', left_on='User_ID',right_on='ID')\nnew_df = new_df.drop(['DESCRIPTION','FRIENDS_COUNT','ID_y'], axis=1)\n\ndf_tweets_write = new_df.to_csv(\"tweet_table.txt\", sep ='|') #Tweets written\n\nc.close()\nconn.commit()\nconn.close()", "_____no_output_____" ], [ "new_df.head(10)", "_____no_output_____" ] ], [ [ "__c. For the User table file add a column that specifies how many tweets by that user are currently in the database. That is, your output file should contain all of the columns from the User table, plus the new column with tweet count. You do not need to modify the User table, just create the output text file. What is the name of the user with most tweets?__", "_____no_output_____" ] ], [ [ "#x = new_df['User_ID'].value_counts()\n#x.columns = ['User_ID','tweets_count']\n#x.head()\nz = new_df['User_ID']", "_____no_output_____" ], [ "import sqlite3 \nimport pandas as pd\nconn = sqlite3.connect('Tweets_Database_THF1.db')\nc = conn.cursor()\n\n#df_user_read = pd.read_sql_query(\"SELECT * FROM User;\", conn) #User\n#df_tweets_read = pd.read_sql_query(\"SELECT * FROM Tweets;\", conn) #Tweets\n\nnew_df['tweets_count'] = new_df.groupby('User_ID')['User_ID'].transform('count') #from last part\njoin_df = new_df[['User_ID','tweets_count']] #dataframe with only id and tweetcount\n\nnewer_df = pd.merge(df_user_read, join_df, left_on='ID', right_on='User_ID')\n\nclean_df = newer_df.sort_values(by=['tweets_count'], ascending=False).drop_duplicates()\n\ndf_user_write = clean_df.to_csv(\"user_table.txt\", sep ='|') #user\n\nc.close()\nconn.commit()\nconn.close()", "_____no_output_____" ], [ "clean_df.head(10)", "_____no_output_____" ], [ "newer_df.head(10)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4af8132fd224dbe84b2164fba649a6d54525e019
213,187
ipynb
Jupyter Notebook
notebooks/Understand_Tables.ipynb
BryanCutler/text-extensions-for-pandas
15b0c81aa957189bfcab7147924aff51c35661a0
[ "Apache-2.0" ]
null
null
null
notebooks/Understand_Tables.ipynb
BryanCutler/text-extensions-for-pandas
15b0c81aa957189bfcab7147924aff51c35661a0
[ "Apache-2.0" ]
null
null
null
notebooks/Understand_Tables.ipynb
BryanCutler/text-extensions-for-pandas
15b0c81aa957189bfcab7147924aff51c35661a0
[ "Apache-2.0" ]
null
null
null
47.353843
53,400
0.520778
[ [ [ "<font size=6><b>Understand_Tables.ipynb:</b></font>\n<p>\n<font size=4>Extract Structured Information from Tables in PDF Documents\n using IBM Watson Discovery and Text Extensions for Pandas\n</font>", "_____no_output_____" ], [ "# Introduction\n\nMany organizations have valuable information hidden in tables inside human-readable documents like PDF files and web pages. Table identification and extraction technology can turn this human-readable information into a format that data science tools can import and use. Text Extensions for Pandas and Watson Discovery make this process much easier.\n\nIn this notebook, we'll follow the journey of Allison, an analyst at an investment bank. Allison's employer has assigned her to cover several different companies, one of which is IBM. As part of her analysis, Allison wants to track IBM's revenue over time, broken down by geographical region. That detailed revenue information is all there in IBM's filings with the U.S. Securities and Exchange Commission (SEC). For example, here's IBM's 2019 annual report:\n\n![IBM Annual Report for 2019 (146 pages)](images/IBM_Annual_Report_2019.png)\n\nDid you see the table of revenue by geography? It's here, on page 39:\n\n![Page 39 of IBM Annual Report for 2019](images/IBM_Annual_Report_2019_page_39.png)\n\nHere's what that table looks like close up:\n\n![Table: Geographic Revenue (from IBM 2019 annual report)](images/screenshot_table_2019.png)\n\nBut this particular table only gives two years' revenue figures. Allison needs to have enough data to draw a meaningful chart of revenue over time. 10 years of annual revenue figures would be a good starting point. \n\nAllison has a collection of IBM annual reports going back to 2009. In total, these documents contain about 1500 pages of financial information. Hidden inside those 1500 pages are the detailed revenue figures that Allison wants. She needs to find those figures, extract them from the documents, and import them into her data science tools.\n\nFortunately, Allison has [Watson Discovery](https://www.ibm.com/cloud/watson-discovery), IBM's suite of tools for managing and extracting value from collections of human-readable documents.\n\nThe cells that follow will show how Allison uses Text Extensions for Pandas and Watson Discovery to import the detailed revenue information from her PDF documents into a Pandas DataFrame...\n\n![Screenshot of a DataFrame from later in this notebook.](images/revenue_table.png)\n\n...that she then uses to generate a chart of revenue over time:\n\n![Chart of revenue over time, from later in this notebook.](images/revenue_over_time.png)\n\nBut first, let's set your environment up so that you can run Allison's code yourself.\n\n(If you're just reading through the precomputed outputs of this notebook, you can skip ahead to the section labeled [\"Extract Tables with Watson Discovery\"](#watson_discovery)).", "_____no_output_____" ], [ "# Environment Setup\n\nThis notebook requires a Python 3.7 or later environment with the following packages:\n* The dependencies listed in the [\"requirements.txt\" file for Text Extensions for Pandas](https://github.com/CODAIT/text-extensions-for-pandas/blob/master/requirements.txt)\n* `matplotlib`\n* `text_extensions_for_pandas`\n\nYou can satisfy the dependency on `text_extensions_for_pandas` in either of two ways:\n\n* Run `pip install text_extensions_for_pandas` before running this notebook. 
This command adds the library to your Python environment.\n* Run this notebook out of your local copy of the Text Extensions for Pandas project's [source tree](https://github.com/CODAIT/text-extensions-for-pandas). In this case, the notebook will use the version of Text Extensions for Pandas in your local source tree **if the package is not installed in your Python environment**.\n", "_____no_output_____" ] ], [ [ "# Core Python libraries\nimport json\nimport os\nimport sys\nfrom typing import *\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n# And of course we need the text_extensions_for_pandas library itself.\ntry:\n import text_extensions_for_pandas as tp\nexcept ModuleNotFoundError as e:\n # If we're running from within the project source tree and the parent Python\n # environment doesn't have the text_extensions_for_pandas package, use the\n # version in the local source tree.\n if not os.getcwd().endswith(\"notebooks\"):\n raise e\n if \"..\" not in sys.path:\n sys.path.insert(0, \"..\")\n import text_extensions_for_pandas as tp", "_____no_output_____" ] ], [ [ "<div id=\"watson_discovery\"/>\n\n# Extract Tables with Watson Discovery\n\nAllison connects to the [Watson Discovery](https://cloud.ibm.com/docs/discovery-data?topic=discovery-data-install) component of her firm's [IBM Cloud Pak for Data](\nhttps://www.ibm.com/products/cloud-pak-for-data) installation on their [OpenShift](https://www.openshift.com/) cluster.\n\nShe creates a new Watson Discovery project and uploads her stack of IBM annual reports to her project. Then she uses the Watson Discovery's [Table Understanding enrichment](https://cloud.ibm.com/docs/discovery-data?topic=discovery-data-understanding_tables) to identify tables in the PDF documents and to extract detailed information about the cells and headers that make up each table.\n\nTo keep this notebook short, we've captured the output of Table Understanding on Allison's documents and checked it into Github [here](https://github.com/CODAIT/text-extensions-for-pandas/tree/master/resources/tables/Financial_table_demo/IBM_10-K). We will use these JSON files as input for the rest of this scenario. If you'd like to learn more about importing and managing document collections in Watson Discovery, take a look at the [Getting Started Guide for Watson Discovery](https://cloud.ibm.com/docs/discovery-data?topic=discovery-data-getting-started).\n\nAllison reads the JSON output from Watson Discovery's table enrichment into a Python variable, then prints out what the 2019 \"Geographic Revenue\" table looks like in this raw output.", "_____no_output_____" ] ], [ [ "# Location of the output from Watson Discovery's Table Understanding enrichment\n# (relative to this notebook file)\nFILES_DIR = \"../resources/tables/financial_table_demo/IBM_10-K\"\n\nwith open(f\"{FILES_DIR}/2019.json\", \"r\") as f:\n ibm_2019_json = json.load(f)\n\n# Find the table in the \"Geographic Revenue\" section.\ntable_index = [i for i in range(len(ibm_2019_json[\"tables\"])) \n if ibm_2019_json[\"tables\"][i][\"section_title\"][\"text\"] == \"Geographic Revenue\"][0]\nprint(json.dumps(ibm_2019_json[\"tables\"][table_index], indent=2))", "{\n \"location\": {\n \"begin\": 664612,\n \"end\": 673296\n },\n \"text\": \"($ in millions)\\n For the year ended December 31: 2019 2018\\nYr.-to-Yr. Percent Change\\nYr.-to-Yr. Percent Change Adjusted for Currency\\nYr.-to-Yr. 
Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\\nTotal revenue $77,147 $79,591 (3.1 )% (1.0)% 0.2%\\nAmericas $36,274 $36,994 (1.9)% (1.1)% 0.8%\\nEurope/Middle East/Africa 24,443 25,491 (4.1) 0.4 1.3\\nAsia Pacific 16,430 17,106 (4.0) (3.0) (2.5)\\n\",\n \"section_title\": {\n \"location\": {\n \"begin\": 663834,\n \"end\": 663852\n },\n \"text\": \"Geographic Revenue\"\n },\n \"title\": {},\n \"table_headers\": [\n {\n \"cell_id\": \"tableHeader-664612-664628\",\n \"location\": {\n \"begin\": 664612,\n \"end\": 664628\n },\n \"text\": \"($ in millions)\",\n \"row_index_begin\": 0,\n \"row_index_end\": 0,\n \"column_index_begin\": 0,\n \"column_index_end\": 0\n }\n ],\n \"row_headers\": [\n {\n \"cell_id\": \"rowHeader-667212-667226\",\n \"location\": {\n \"begin\": 667212,\n \"end\": 667226\n },\n \"text\": \"Total revenue\",\n \"text_normalized\": \"Total revenue\",\n \"row_index_begin\": 2,\n \"row_index_end\": 2,\n \"column_index_begin\": 0,\n \"column_index_end\": 0\n },\n {\n \"cell_id\": \"rowHeader-668801-668810\",\n \"location\": {\n \"begin\": 668801,\n \"end\": 668810\n },\n \"text\": \"Americas\",\n \"text_normalized\": \"Americas\",\n \"row_index_begin\": 3,\n \"row_index_end\": 3,\n \"column_index_begin\": 0,\n \"column_index_end\": 0\n },\n {\n \"cell_id\": \"rowHeader-670386-670412\",\n \"location\": {\n \"begin\": 670386,\n \"end\": 670412\n },\n \"text\": \"Europe/Middle East/Africa\",\n \"text_normalized\": \"Europe/Middle East/Africa\",\n \"row_index_begin\": 4,\n \"row_index_end\": 4,\n \"column_index_begin\": 0,\n \"column_index_end\": 0\n },\n {\n \"cell_id\": \"rowHeader-671979-671992\",\n \"location\": {\n \"begin\": 671979,\n \"end\": 671992\n },\n \"text\": \"Asia Pacific\",\n \"text_normalized\": \"Asia Pacific\",\n \"row_index_begin\": 5,\n \"row_index_end\": 5,\n \"column_index_begin\": 0,\n \"column_index_end\": 0\n }\n ],\n \"column_headers\": [\n {\n \"cell_id\": \"colHeader-664705-664706\",\n \"location\": {\n \"begin\": 664705,\n \"end\": 664706\n },\n \"text\": \"\",\n \"text_normalized\": \"\",\n \"row_index_begin\": 0,\n \"row_index_end\": 0,\n \"column_index_begin\": 1,\n \"column_index_end\": 1\n },\n {\n \"cell_id\": \"colHeader-664770-664771\",\n \"location\": {\n \"begin\": 664770,\n \"end\": 664771\n },\n \"text\": \"\",\n \"text_normalized\": \"\",\n \"row_index_begin\": 0,\n \"row_index_end\": 0,\n \"column_index_begin\": 2,\n \"column_index_end\": 2\n },\n {\n \"cell_id\": \"colHeader-664835-664836\",\n \"location\": {\n \"begin\": 664835,\n \"end\": 664836\n },\n \"text\": \"\",\n \"text_normalized\": \"\",\n \"row_index_begin\": 0,\n \"row_index_end\": 0,\n \"column_index_begin\": 3,\n \"column_index_end\": 3\n },\n {\n \"cell_id\": \"colHeader-664900-664901\",\n \"location\": {\n \"begin\": 664900,\n \"end\": 664901\n },\n \"text\": \"\",\n \"text_normalized\": \"\",\n \"row_index_begin\": 0,\n \"row_index_end\": 0,\n \"column_index_begin\": 4,\n \"column_index_end\": 4\n },\n {\n \"cell_id\": \"colHeader-664965-664966\",\n \"location\": {\n \"begin\": 664965,\n \"end\": 664966\n },\n \"text\": \"\",\n \"text_normalized\": \"\",\n \"row_index_begin\": 0,\n \"row_index_end\": 0,\n \"column_index_begin\": 5,\n \"column_index_end\": 5\n },\n {\n \"cell_id\": \"colHeader-665217-665249\",\n \"location\": {\n \"begin\": 665217,\n \"end\": 665249\n },\n \"text\": \"For the year ended December 31:\",\n \"text_normalized\": \"For the year ended December 31:\",\n \"row_index_begin\": 1,\n \"row_index_end\": 1,\n 
\"column_index_begin\": 0,\n \"column_index_end\": 0\n },\n {\n \"cell_id\": \"colHeader-665513-665518\",\n \"location\": {\n \"begin\": 665513,\n \"end\": 665518\n },\n \"text\": \"2019\",\n \"text_normalized\": \"2019\",\n \"row_index_begin\": 1,\n \"row_index_end\": 1,\n \"column_index_begin\": 1,\n \"column_index_end\": 1\n },\n {\n \"cell_id\": \"colHeader-665788-665793\",\n \"location\": {\n \"begin\": 665788,\n \"end\": 665793\n },\n \"text\": \"2018\",\n \"text_normalized\": \"2018\",\n \"row_index_begin\": 1,\n \"row_index_end\": 1,\n \"column_index_begin\": 2,\n \"column_index_end\": 2\n },\n {\n \"cell_id\": \"colHeader-666061-666087\",\n \"location\": {\n \"begin\": 666061,\n \"end\": 666087\n },\n \"text\": \"Yr.-to-Yr. Percent Change\",\n \"text_normalized\": \"Yr.-to-Yr. Percent Change\",\n \"row_index_begin\": 1,\n \"row_index_end\": 1,\n \"column_index_begin\": 3,\n \"column_index_end\": 3\n },\n {\n \"cell_id\": \"colHeader-666356-666404\",\n \"location\": {\n \"begin\": 666356,\n \"end\": 666404\n },\n \"text\": \"Yr.-to-Yr. Percent Change Adjusted for Currency\",\n \"text_normalized\": \"Yr.-to-Yr. Percent Change Adjusted for Currency\",\n \"row_index_begin\": 1,\n \"row_index_end\": 1,\n \"column_index_begin\": 4,\n \"column_index_end\": 4\n },\n {\n \"cell_id\": \"colHeader-666675-666948\",\n \"location\": {\n \"begin\": 666675,\n \"end\": 666948\n },\n \"text\": \"Yr.-to-Yr. Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\",\n \"text_normalized\": \"Yr.-to-Yr. Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\",\n \"row_index_begin\": 1,\n \"row_index_end\": 1,\n \"column_index_begin\": 5,\n \"column_index_end\": 5\n }\n ],\n \"body_cells\": [\n {\n \"cell_id\": \"bodyCell-667480-667488\",\n \"location\": {\n \"begin\": 667480,\n \"end\": 667488\n },\n \"text\": \"$77,147\",\n \"row_index_begin\": 2,\n \"row_index_end\": 2,\n \"column_index_begin\": 1,\n \"column_index_end\": 1,\n \"row_header_ids\": [\n \"rowHeader-667212-667226\"\n ],\n \"row_header_texts\": [\n \"Total revenue\"\n ],\n \"row_header_texts_normalized\": [\n \"Total revenue\"\n ],\n \"column_header_ids\": [\n \"colHeader-664705-664706\",\n \"colHeader-665513-665518\"\n ],\n \"column_header_texts\": [\n \"\",\n \"2019\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"2019\"\n ],\n \"attributes\": [\n {\n \"type\": \"Currency\",\n \"text\": \"$77,147\",\n \"location\": {\n \"begin\": 667480,\n \"end\": 667487\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-667744-667752\",\n \"location\": {\n \"begin\": 667744,\n \"end\": 667752\n },\n \"text\": \"$79,591\",\n \"row_index_begin\": 2,\n \"row_index_end\": 2,\n \"column_index_begin\": 2,\n \"column_index_end\": 2,\n \"row_header_ids\": [\n \"rowHeader-667212-667226\"\n ],\n \"row_header_texts\": [\n \"Total revenue\"\n ],\n \"row_header_texts_normalized\": [\n \"Total revenue\"\n ],\n \"column_header_ids\": [\n \"colHeader-664770-664771\",\n \"colHeader-665788-665793\"\n ],\n \"column_header_texts\": [\n \"\",\n \"2018\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"2018\"\n ],\n \"attributes\": [\n {\n \"type\": \"Currency\",\n \"text\": \"$79,591\",\n \"location\": {\n \"begin\": 667744,\n \"end\": 667751\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-668006-668014\",\n \"location\": {\n \"begin\": 668006,\n \"end\": 668014\n },\n \"text\": \"(3.1 )%\",\n \"row_index_begin\": 2,\n \"row_index_end\": 2,\n \"column_index_begin\": 3,\n \"column_index_end\": 3,\n \"row_header_ids\": [\n 
\"rowHeader-667212-667226\"\n ],\n \"row_header_texts\": [\n \"Total revenue\"\n ],\n \"row_header_texts_normalized\": [\n \"Total revenue\"\n ],\n \"column_header_ids\": [\n \"colHeader-664835-664836\",\n \"colHeader-666061-666087\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"3.1\",\n \"location\": {\n \"begin\": 668007,\n \"end\": 668010\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-668266-668273\",\n \"location\": {\n \"begin\": 668266,\n \"end\": 668273\n },\n \"text\": \"(1.0)%\",\n \"row_index_begin\": 2,\n \"row_index_end\": 2,\n \"column_index_begin\": 4,\n \"column_index_end\": 4,\n \"row_header_ids\": [\n \"rowHeader-667212-667226\"\n ],\n \"row_header_texts\": [\n \"Total revenue\"\n ],\n \"row_header_texts_normalized\": [\n \"Total revenue\"\n ],\n \"column_header_ids\": [\n \"colHeader-664900-664901\",\n \"colHeader-666356-666404\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change Adjusted for Currency\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. Percent Change Adjusted for Currency\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"1.0\",\n \"location\": {\n \"begin\": 668267,\n \"end\": 668270\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-668530-668535\",\n \"location\": {\n \"begin\": 668530,\n \"end\": 668535\n },\n \"text\": \"0.2%\",\n \"row_index_begin\": 2,\n \"row_index_end\": 2,\n \"column_index_begin\": 5,\n \"column_index_end\": 5,\n \"row_header_ids\": [\n \"rowHeader-667212-667226\"\n ],\n \"row_header_texts\": [\n \"Total revenue\"\n ],\n \"row_header_texts_normalized\": [\n \"Total revenue\"\n ],\n \"column_header_ids\": [\n \"colHeader-664965-664966\",\n \"colHeader-666675-666948\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. 
Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\"\n ],\n \"attributes\": [\n {\n \"type\": \"Percentage\",\n \"text\": \"0.2%\",\n \"location\": {\n \"begin\": 668530,\n \"end\": 668534\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-669065-669073\",\n \"location\": {\n \"begin\": 669065,\n \"end\": 669073\n },\n \"text\": \"$36,274\",\n \"row_index_begin\": 3,\n \"row_index_end\": 3,\n \"column_index_begin\": 1,\n \"column_index_end\": 1,\n \"row_header_ids\": [\n \"rowHeader-668801-668810\"\n ],\n \"row_header_texts\": [\n \"Americas\"\n ],\n \"row_header_texts_normalized\": [\n \"Americas\"\n ],\n \"column_header_ids\": [\n \"colHeader-664705-664706\",\n \"colHeader-665513-665518\"\n ],\n \"column_header_texts\": [\n \"\",\n \"2019\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"2019\"\n ],\n \"attributes\": [\n {\n \"type\": \"Currency\",\n \"text\": \"$36,274\",\n \"location\": {\n \"begin\": 669065,\n \"end\": 669072\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-669330-669338\",\n \"location\": {\n \"begin\": 669330,\n \"end\": 669338\n },\n \"text\": \"$36,994\",\n \"row_index_begin\": 3,\n \"row_index_end\": 3,\n \"column_index_begin\": 2,\n \"column_index_end\": 2,\n \"row_header_ids\": [\n \"rowHeader-668801-668810\"\n ],\n \"row_header_texts\": [\n \"Americas\"\n ],\n \"row_header_texts_normalized\": [\n \"Americas\"\n ],\n \"column_header_ids\": [\n \"colHeader-664770-664771\",\n \"colHeader-665788-665793\"\n ],\n \"column_header_texts\": [\n \"\",\n \"2018\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"2018\"\n ],\n \"attributes\": [\n {\n \"type\": \"Currency\",\n \"text\": \"$36,994\",\n \"location\": {\n \"begin\": 669330,\n \"end\": 669337\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-669591-669598\",\n \"location\": {\n \"begin\": 669591,\n \"end\": 669598\n },\n \"text\": \"(1.9)%\",\n \"row_index_begin\": 3,\n \"row_index_end\": 3,\n \"column_index_begin\": 3,\n \"column_index_end\": 3,\n \"row_header_ids\": [\n \"rowHeader-668801-668810\"\n ],\n \"row_header_texts\": [\n \"Americas\"\n ],\n \"row_header_texts_normalized\": [\n \"Americas\"\n ],\n \"column_header_ids\": [\n \"colHeader-664835-664836\",\n \"colHeader-666061-666087\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"1.9\",\n \"location\": {\n \"begin\": 669592,\n \"end\": 669595\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-669853-669860\",\n \"location\": {\n \"begin\": 669853,\n \"end\": 669860\n },\n \"text\": \"(1.1)%\",\n \"row_index_begin\": 3,\n \"row_index_end\": 3,\n \"column_index_begin\": 4,\n \"column_index_end\": 4,\n \"row_header_ids\": [\n \"rowHeader-668801-668810\"\n ],\n \"row_header_texts\": [\n \"Americas\"\n ],\n \"row_header_texts_normalized\": [\n \"Americas\"\n ],\n \"column_header_ids\": [\n \"colHeader-664900-664901\",\n \"colHeader-666356-666404\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change Adjusted for Currency\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. 
Percent Change Adjusted for Currency\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"1.1\",\n \"location\": {\n \"begin\": 669854,\n \"end\": 669857\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-670116-670121\",\n \"location\": {\n \"begin\": 670116,\n \"end\": 670121\n },\n \"text\": \"0.8%\",\n \"row_index_begin\": 3,\n \"row_index_end\": 3,\n \"column_index_begin\": 5,\n \"column_index_end\": 5,\n \"row_header_ids\": [\n \"rowHeader-668801-668810\"\n ],\n \"row_header_texts\": [\n \"Americas\"\n ],\n \"row_header_texts_normalized\": [\n \"Americas\"\n ],\n \"column_header_ids\": [\n \"colHeader-664965-664966\",\n \"colHeader-666675-666948\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\"\n ],\n \"attributes\": [\n {\n \"type\": \"Percentage\",\n \"text\": \"0.8%\",\n \"location\": {\n \"begin\": 670116,\n \"end\": 670120\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-670663-670670\",\n \"location\": {\n \"begin\": 670663,\n \"end\": 670670\n },\n \"text\": \"24,443\",\n \"row_index_begin\": 4,\n \"row_index_end\": 4,\n \"column_index_begin\": 1,\n \"column_index_end\": 1,\n \"row_header_ids\": [\n \"rowHeader-670386-670412\"\n ],\n \"row_header_texts\": [\n \"Europe/Middle East/Africa\"\n ],\n \"row_header_texts_normalized\": [\n \"Europe/Middle East/Africa\"\n ],\n \"column_header_ids\": [\n \"colHeader-664705-664706\",\n \"colHeader-665513-665518\"\n ],\n \"column_header_texts\": [\n \"\",\n \"2019\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"2019\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"24,443\",\n \"location\": {\n \"begin\": 670663,\n \"end\": 670669\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-670924-670931\",\n \"location\": {\n \"begin\": 670924,\n \"end\": 670931\n },\n \"text\": \"25,491\",\n \"row_index_begin\": 4,\n \"row_index_end\": 4,\n \"column_index_begin\": 2,\n \"column_index_end\": 2,\n \"row_header_ids\": [\n \"rowHeader-670386-670412\"\n ],\n \"row_header_texts\": [\n \"Europe/Middle East/Africa\"\n ],\n \"row_header_texts_normalized\": [\n \"Europe/Middle East/Africa\"\n ],\n \"column_header_ids\": [\n \"colHeader-664770-664771\",\n \"colHeader-665788-665793\"\n ],\n \"column_header_texts\": [\n \"\",\n \"2018\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"2018\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"25,491\",\n \"location\": {\n \"begin\": 670924,\n \"end\": 670930\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-671187-671193\",\n \"location\": {\n \"begin\": 671187,\n \"end\": 671193\n },\n \"text\": \"(4.1)\",\n \"row_index_begin\": 4,\n \"row_index_end\": 4,\n \"column_index_begin\": 3,\n \"column_index_end\": 3,\n \"row_header_ids\": [\n \"rowHeader-670386-670412\"\n ],\n \"row_header_texts\": [\n \"Europe/Middle East/Africa\"\n ],\n \"row_header_texts_normalized\": [\n \"Europe/Middle East/Africa\"\n ],\n \"column_header_ids\": [\n \"colHeader-664835-664836\",\n \"colHeader-666061-666087\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. 
Percent Change\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"4.1\",\n \"location\": {\n \"begin\": 671188,\n \"end\": 671191\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-671449-671453\",\n \"location\": {\n \"begin\": 671449,\n \"end\": 671453\n },\n \"text\": \"0.4\",\n \"row_index_begin\": 4,\n \"row_index_end\": 4,\n \"column_index_begin\": 4,\n \"column_index_end\": 4,\n \"row_header_ids\": [\n \"rowHeader-670386-670412\"\n ],\n \"row_header_texts\": [\n \"Europe/Middle East/Africa\"\n ],\n \"row_header_texts_normalized\": [\n \"Europe/Middle East/Africa\"\n ],\n \"column_header_ids\": [\n \"colHeader-664900-664901\",\n \"colHeader-666356-666404\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change Adjusted for Currency\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. Percent Change Adjusted for Currency\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"0.4\",\n \"location\": {\n \"begin\": 671449,\n \"end\": 671452\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-671711-671715\",\n \"location\": {\n \"begin\": 671711,\n \"end\": 671715\n },\n \"text\": \"1.3\",\n \"row_index_begin\": 4,\n \"row_index_end\": 4,\n \"column_index_begin\": 5,\n \"column_index_end\": 5,\n \"row_header_ids\": [\n \"rowHeader-670386-670412\"\n ],\n \"row_header_texts\": [\n \"Europe/Middle East/Africa\"\n ],\n \"row_header_texts_normalized\": [\n \"Europe/Middle East/Africa\"\n ],\n \"column_header_ids\": [\n \"colHeader-664965-664966\",\n \"colHeader-666675-666948\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"1.3\",\n \"location\": {\n \"begin\": 671711,\n \"end\": 671714\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-672244-672251\",\n \"location\": {\n \"begin\": 672244,\n \"end\": 672251\n },\n \"text\": \"16,430\",\n \"row_index_begin\": 5,\n \"row_index_end\": 5,\n \"column_index_begin\": 1,\n \"column_index_end\": 1,\n \"row_header_ids\": [\n \"rowHeader-671979-671992\"\n ],\n \"row_header_texts\": [\n \"Asia Pacific\"\n ],\n \"row_header_texts_normalized\": [\n \"Asia Pacific\"\n ],\n \"column_header_ids\": [\n \"colHeader-664705-664706\",\n \"colHeader-665513-665518\"\n ],\n \"column_header_texts\": [\n \"\",\n \"2019\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"2019\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"16,430\",\n \"location\": {\n \"begin\": 672244,\n \"end\": 672250\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-672505-672512\",\n \"location\": {\n \"begin\": 672505,\n \"end\": 672512\n },\n \"text\": \"17,106\",\n \"row_index_begin\": 5,\n \"row_index_end\": 5,\n \"column_index_begin\": 2,\n \"column_index_end\": 2,\n \"row_header_ids\": [\n \"rowHeader-671979-671992\"\n ],\n \"row_header_texts\": [\n \"Asia Pacific\"\n ],\n \"row_header_texts_normalized\": [\n \"Asia Pacific\"\n ],\n \"column_header_ids\": [\n \"colHeader-664770-664771\",\n \"colHeader-665788-665793\"\n ],\n \"column_header_texts\": [\n \"\",\n \"2018\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"2018\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"17,106\",\n \"location\": {\n \"begin\": 672505,\n \"end\": 672511\n }\n }\n ]\n },\n {\n \"cell_id\": 
\"bodyCell-672767-672773\",\n \"location\": {\n \"begin\": 672767,\n \"end\": 672773\n },\n \"text\": \"(4.0)\",\n \"row_index_begin\": 5,\n \"row_index_end\": 5,\n \"column_index_begin\": 3,\n \"column_index_end\": 3,\n \"row_header_ids\": [\n \"rowHeader-671979-671992\"\n ],\n \"row_header_texts\": [\n \"Asia Pacific\"\n ],\n \"row_header_texts_normalized\": [\n \"Asia Pacific\"\n ],\n \"column_header_ids\": [\n \"colHeader-664835-664836\",\n \"colHeader-666061-666087\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"4.0\",\n \"location\": {\n \"begin\": 672768,\n \"end\": 672771\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-673028-673034\",\n \"location\": {\n \"begin\": 673028,\n \"end\": 673034\n },\n \"text\": \"(3.0)\",\n \"row_index_begin\": 5,\n \"row_index_end\": 5,\n \"column_index_begin\": 4,\n \"column_index_end\": 4,\n \"row_header_ids\": [\n \"rowHeader-671979-671992\"\n ],\n \"row_header_texts\": [\n \"Asia Pacific\"\n ],\n \"row_header_texts_normalized\": [\n \"Asia Pacific\"\n ],\n \"column_header_ids\": [\n \"colHeader-664900-664901\",\n \"colHeader-666356-666404\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change Adjusted for Currency\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. Percent Change Adjusted for Currency\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"3.0\",\n \"location\": {\n \"begin\": 673029,\n \"end\": 673032\n }\n }\n ]\n },\n {\n \"cell_id\": \"bodyCell-673290-673296\",\n \"location\": {\n \"begin\": 673290,\n \"end\": 673296\n },\n \"text\": \"(2.5)\",\n \"row_index_begin\": 5,\n \"row_index_end\": 5,\n \"column_index_begin\": 5,\n \"column_index_end\": 5,\n \"row_header_ids\": [\n \"rowHeader-671979-671992\"\n ],\n \"row_header_texts\": [\n \"Asia Pacific\"\n ],\n \"row_header_texts_normalized\": [\n \"Asia Pacific\"\n ],\n \"column_header_ids\": [\n \"colHeader-664965-664966\",\n \"colHeader-666675-666948\"\n ],\n \"column_header_texts\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\"\n ],\n \"column_header_texts_normalized\": [\n \"\",\n \"Yr.-to-Yr. Percent Change\\n Excluding Divested Businesses And Adjusted for Currency\"\n ],\n \"attributes\": [\n {\n \"type\": \"Number\",\n \"text\": \"2.5\",\n \"location\": {\n \"begin\": 673291,\n \"end\": 673294\n }\n }\n ]\n }\n ],\n \"contexts\": [\n {\n \"location\": {\n \"begin\": 664050,\n \"end\": 664171\n },\n \"text\": \"In addition to the revenue presentation by reportable segment, we also measure revenue performance on a geographic basis.\"\n },\n {\n \"location\": {\n \"begin\": 673587,\n \"end\": 674274\n },\n \"text\": \"Total revenue of $77,147 million in 2019 decreased 3.1 percent year to year as reported (1 percent adjusted for currency), but increased 0.2 percent excluding divested businesses and adjusted for currency.\"\n },\n {\n \"location\": {\n \"begin\": 674465,\n \"end\": 674948\n },\n \"text\": \"Americas revenue decreased 1.9 percent as reported (1 percent adjusted for currency), but grew 1 percent excluding divested businesses and adjusted for currency.\"\n }\n ],\n \"key_value_pairs\": []\n}\n" ] ], [ [ "That raw output contains everything Allison needs to extract the revenue figures from this document, but it's in a format that's cumbersome to deal with. 
So Allison uses Text Extensions for Pandas to convert this JSON into a collection of Pandas DataFrames. These DataFrames encode information about the row headers, column headers, and cells that make up the table.", "_____no_output_____" ] ], [ [ "table_data = tp.io.watson.tables.parse_response(ibm_2019_json,\n                                                select_table=\"Geographic Revenue\")\ntable_data.keys()", "_____no_output_____" ], [ "table_data[\"body_cells\"].head(5)", "_____no_output_____" ] ], [ [ "Text Extensions for Pandas can convert these DataFrames into a single Pandas DataFrame that matches the layout of the original table in the document. Allison calls the `make_table()` function to perform that conversion and inspects the output.", "_____no_output_____" ] ], [ [ "revenue_2019_df = tp.io.watson.tables.make_table(table_data)\nrevenue_2019_df", "_____no_output_____" ] ], [ [ "&nbsp; \n\nThe reconstructed dataframe looks good! Here's what the original table in the PDF document looked like:\n![Table: Geographic Revenue (from IBM 2019 annual report)](images/screenshot_table_2019.png)", "_____no_output_____" ], [ "If Allison just wanted to create a DataFrame of 2018/2019 revenue figures, her task would be done. But Allison wants to reconstruct ten years of revenue by geographic region. To do that, she will need to combine information from multiple documents. For tables like this one that have multiple levels of header information, this kind of integration is easier to perform over the \"exploded\" version of the table, where each cell in the table is represented as a single row containing all the corresponding header values.\n\nAllison passes the same table data from the 2019 report through the Text Extensions for Pandas function `make_exploded_df()` to produce the exploded representation of the table:", "_____no_output_____" ] ], [ [ "exploded_df, row_header_names, col_header_names = (\n    tp.io.watson.tables.make_exploded_df(table_data, col_explode_by=\"concat\"))\nexploded_df", "_____no_output_____" ] ], [ [ "This exploded version of the table is the exact same data, just represented in a different way. If she wants, Allison can convert it back to the format from the original document by calling `pandas.DataFrame.pivot()`:", "_____no_output_____" ] ], [ [ "exploded_df.pivot(index=\"row_header_texts_0\", columns=\"column_header_texts\", values=\"text\")", "_____no_output_____" ] ], [ [ "But because she is about to merge this DataFrame with similar data from other documents, Allison keeps the data in exploded format for now. \n\nAllison's next task is to write some Pandas transformations that will clean and reformat the DataFrame for each source table prior to merging them all together. She uses the 2019 report's data as a test case for creating this code. The first step is to convert the cell values in the Watson Discovery output from text to numeric values. Text Extensions for Pandas includes a more robust version of [`pandas.to_numeric()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_numeric.html) that can handle common idioms for representing currencies and percentages. Allison uses this function, called `convert_cols_to_numeric()`, to convert all the cell values to numbers. 
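Plain [`pandas.to_numeric()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_numeric.html) cannot parse strings such as \"$77,147\" or \"(3.1 )%\". A quick, hypothetical illustration of the problem:\n\n```\nimport pandas as pd\npd.to_numeric(pd.Series([\"$77,147\", \"(3.1 )%\"]), errors=\"coerce\")  # both values become NaN\n```\n\nThe Text Extensions for Pandas helper takes care of these currency and percentage idioms for her.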
She adds a new column \"value\" to her DataFrame to hold these numbers.", "_____no_output_____" ] ], [ [ "exploded_df[\"value\"] = \\\n tp.io.watson.tables.convert_cols_to_numeric(exploded_df[[\"text\"]])\nexploded_df", "_____no_output_____" ] ], [ [ "Now all the cell values have been converted to floating-point numbers, but only some of these numbers represent revenue. Looking at the 2019 data, Allison can see that the revenue numbers have 4-digit years in their column headers. So she filters the DataFrame down to just those rows with 4-digit numbers in the \"column_header_texts\" column.", "_____no_output_____" ] ], [ [ "rows_to_retain = exploded_df[exploded_df[\"column_header_texts\"].str.fullmatch(\"\\d{4}\")].copy()\nrows_to_retain", "_____no_output_____" ] ], [ [ "That's looking good! Now Allison drops the unnecessary columns and gives some more friendly names to the columns that remain.", "_____no_output_____" ] ], [ [ "rows_to_retain.rename(\n columns={\n \"row_header_texts_0\": \"Region\",\n \"column_header_texts\": \"Year\",\n \"value\": \"Revenue\"\n })[[\"Year\", \"Region\", \"Revenue\"]]", "_____no_output_____" ] ], [ [ "The code from the last few cells worked to clean up the 2019 data, so Allison copies and pastes that code into a Python function:", "_____no_output_____" ] ], [ [ "def dataframe_for_file(filename: str):\n with open(f\"{FILES_DIR}/{filename}\", \"r\") as f:\n json_output = json.load(f)\n table_data = tp.io.watson.tables.parse_response(json_output,\n select_table=\"Geographic Revenue\")\n exploded_df, _, _ = tp.io.watson.tables.make_exploded_df(\n table_data, col_explode_by=\"concat\")\n rows_to_retain = exploded_df[exploded_df[\"column_header_texts\"].str.fullmatch(\"\\d{4}\")\n & (exploded_df[\"text\"].str.len() > 0)].copy()\n rows_to_retain[\"value\"] = tp.io.watson.tables.convert_cols_to_numeric(\n rows_to_retain[[\"text\"]])\n rows_to_retain[\"file\"] = filename\n return (\n rows_to_retain.rename(columns={\n \"row_header_texts_0\": \"Region\", \"column_header_texts\": \"Year\", \"value\": \"Revenue\"})\n [[\"Year\", \"Region\", \"Revenue\"]]\n )", "_____no_output_____" ] ], [ [ "Then she calls that function on the Watson Discovery output from the 2019 annual report to verify that it produces the same answer. ", "_____no_output_____" ] ], [ [ "dataframe_for_file(\"2019.json\")", "_____no_output_____" ] ], [ [ "Looks good! Time to run the same function over an entire stack of reports. Allison puts the names of all her Watson Discovery output files into a single Python list.", "_____no_output_____" ] ], [ [ "all_files = sorted([f for f in os.listdir(FILES_DIR) if f.endswith(\".json\")])\nall_files", "_____no_output_____" ] ], [ [ "Note that the annual reports for 2011 and 2014 aren't in the collection of files that Allison has. But that's ok; each report contains the previous year's figures, so Allison can reconstruct the missing data from adjacent years.\n\nAllison calls her `dataframe_for_file()` function on each of the files, then concatenates all of the resulting Pandas DataFrames into a single large DataFrame.", "_____no_output_____" ] ], [ [ "revenue_df = pd.concat([dataframe_for_file(f) for f in all_files])\nrevenue_df", "_____no_output_____" ] ], [ [ "Allison can see that the first four lines of this DataFrame contain total worldwide revenue; and that this total occurred\nunder different names in different documents. 
Allison is interested in the fine-grained revenue figures, not\nthe totals, so she needs to filter out all these rows with worldwide revenue.\n\nWhat are all the names of geographic regions that IBM annual reports have used over the last ten years?", "_____no_output_____" ] ], [ [ "revenue_df[[\"Region\"]].drop_duplicates()", "_____no_output_____" ] ], [ [ "It looks like all the worldwide revenue figures are under some variation of \"Geographies\" or \"Total revenue\". \nAllison uses Pandas' string matching facilities to filter out the rows whose \"Region\" column contains the \nwords \"geographies\" or \"total\".", "_____no_output_____" ] ], [ [ "geo_revenue_df = (\n revenue_df[~( # \"~\" operator inverts a Pandas selection condition\n (revenue_df[\"Region\"].str.contains(\"geographies\", case=False))\n | (revenue_df[\"Region\"].str.contains(\"total\", case=False))\n )]).copy()\ngeo_revenue_df", "_____no_output_____" ] ], [ [ "Now every row contains a regional revenue figure. What are the regions represented? ", "_____no_output_____" ] ], [ [ "geo_revenue_df[[\"Region\"]].drop_duplicates()", "_____no_output_____" ] ], [ [ "That's strange &mdash; one of the regions is \"Asia Pacifi c\", with a space before the last \"c\". It looks like the PDF conversion on the 2016 annual report added an extra space. Allison uses the function `pandas.Series.replace()` to correct that issue.", "_____no_output_____" ] ], [ [ "geo_revenue_df[\"Region\"] = geo_revenue_df[\"Region\"].replace(\"Asia Pacifi c\", \"Asia Pacific\")\ngeo_revenue_df", "_____no_output_____" ] ], [ [ "Allison inspects the time series of revenue for the \"Americas\" region:", "_____no_output_____" ] ], [ [ "geo_revenue_df[geo_revenue_df[\"Region\"] == \"Americas\"].sort_values(\"Year\")", "_____no_output_____" ] ], [ [ "Every year from 2008 to 2019 is present, but many of the years appear twice. That's to be expected, \nsince each of the annual reports contains two years of geographical revenue figures.\nAllison drops the duplicate values using `pandas.DataFrame.drop_duplicates()`.", "_____no_output_____" ] ], [ [ "geo_revenue_df.drop_duplicates([\"Region\", \"Year\"], inplace=True)\ngeo_revenue_df", "_____no_output_____" ] ], [ [ "Now Allison has a clean and complete set of revenue figures by geographical region for the years 2008-2019.\nShe uses Pandas' `pandas.DataFrame.pivot()` method to convert this data into a compact table.", "_____no_output_____" ] ], [ [ "revenue_table = geo_revenue_df.pivot(index=\"Region\", columns=\"Year\", values=\"Revenue\")\nrevenue_table", "_____no_output_____" ] ], [ [ "Then she uses that table to produce a plot of revenue by region over that 11-year period.", "_____no_output_____" ] ], [ [ "plt.rcParams.update({'font.size': 16})\nrevenue_table.transpose().plot(title=\"Revenue by Geographic Region\",\n ylabel=\"Revenue (Millions of US$)\",\n figsize=(12, 7), ylim=(0, 50000))", "/Users/freiss/opt/miniconda3/envs/pd/lib/python3.7/site-packages/pandas/plotting/_matplotlib/core.py:1235: UserWarning: FixedFormatter should only be used together with FixedLocator\n ax.set_xticklabels(xticklabels)\n" ] ], [ [ "Now Allison has a clear picture of the detailed revenue data that was hidden inside those 1500 pages of PDF\nfiles. As she works on her analyst report, Allison can use the same process to extract DataFrames for\nother financial metrics too!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4af81573e0921f44589e89c7f3a848412192b3d6
148,590
ipynb
Jupyter Notebook
Copy_of_basics.ipynb
ManojKesani/100-Days-Of-ML-Code
1d431f89ce03a908e26c0d617ba7e1d3fc832fe0
[ "MIT" ]
null
null
null
Copy_of_basics.ipynb
ManojKesani/100-Days-Of-ML-Code
1d431f89ce03a908e26c0d617ba7e1d3fc832fe0
[ "MIT" ]
null
null
null
Copy_of_basics.ipynb
ManojKesani/100-Days-Of-ML-Code
1d431f89ce03a908e26c0d617ba7e1d3fc832fe0
[ "MIT" ]
null
null
null
32.837569
1,051
0.501851
[ [ [ "<a href=\"https://colab.research.google.com/github/ManojKesani/100-Days-Of-ML-Code/blob/master/Copy_of_basics.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "<!--NAVIGATION-->\n\n\n<a href=\"https://colab.research.google.com/github/saskeli/x/blob/master/basics.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\"></a>\n\n| - | - | - |\n|-------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|\n| [Exercise 1 (hello world)](<#Exercise-1-(hello-world&#41;>) | [Exercise 2 (compliment)](<#Exercise-2-(compliment&#41;>) | [Exercise 3 (multiplication)](<#Exercise-3-(multiplication&#41;>) |\n| [Exercise 4 (multiplication table)](<#Exercise-4-(multiplication-table&#41;>) | [Exercise 5 (two dice)](<#Exercise-5-(two-dice&#41;>) | [Exercise 6 (triple square)](<#Exercise-6-(triple-square&#41;>) |\n| [Exercise 7 (areas of shapes)](<#Exercise-7-(areas-of-shapes&#41;>) | [Exercise 8 (solve quadratic)](<#Exercise-8-(solve-quadratic&#41;>) | [Exercise 9 (merge)](<#Exercise-9-(merge&#41;>) |\n| [Exercise 10 (detect ranges)](<#Exercise-10-(detect-ranges&#41;>) | [Exercise 11 (interleave)](<#Exercise-11-(interleave&#41;>) | [Exercise 12 (distinct characters)](<#Exercise-12-(distinct-characters&#41;>) |\n| [Exercise 13 (reverse dictionary)](<#Exercise-13-(reverse-dictionary&#41;>) | [Exercise 14 (find matching)](<#Exercise-14-(find-matching&#41;>) | [Exercise 15 (two dice comprehension)](<#Exercise-15-(two-dice-comprehension&#41;>) |\n| [Exercise 16 (transform)](<#Exercise-16-(transform&#41;>) | [Exercise 17 (positive list)](<#Exercise-17-(positive-list&#41;>) | [Exercise 18 (acronyms)](<#Exercise-18-(acronyms&#41;>) |\n| [Exercise 19 (sum equation)](<#Exercise-19-(sum-equation&#41;>) | [Exercise 20 (usemodule)](<#Exercise-20-(usemodule&#41;>) | |\n\n", "_____no_output_____" ], [ "# Python", "_____no_output_____" ], [ "## Basic concepts", "_____no_output_____" ], [ "### Basic input and output", "_____no_output_____" ], [ "The traditional \"Hello, world\" program is very simple in Python. You can run the program by selecting the cell by mouse and pressing control-enter on keyboard. Try editing the string in the quotes and rerunning the program.", "_____no_output_____" ] ], [ [ "print(\"Hello world2!\")", "Hello world2!\n" ] ], [ [ "Multiple strings can be printed. By default, they are concatenated with a space:", "_____no_output_____" ] ], [ [ "print(\"Hello,\", \"John!\", \"How are you?\")", "Hello, John! How are you?\n" ] ], [ [ "In the print function, numerical expression are first evaluated and then automatically converted to strings. Subsequently the strings are concatenated with spaces:", "_____no_output_____" ] ], [ [ "print(1, \"plus\", 2, \"equals\", 1+2)", "1 plus 2 equals 3\n" ] ], [ [ "Reading textual input from the user can be achieved with the input function. The input function is given a string parameter, which is printed and prompts the user to give input. In the example below, the string entered by the user is stored the variable `name`. 
Try executing the program in the interactive notebook by pressing control-enter!", "_____no_output_____" ] ], [ [ "name=input(\"Give me your name: \")\nprint(\"Hello,\", name)", "Give me your name: Jarkko\nHello, Jarkko\n" ] ], [ [ "### Indentation", "_____no_output_____" ], [ "Repetition is possible with the for loop. Note that the body of for loop is indented with a tabulator or four spaces.\nUnlike in some other languages, braces are not needed to denote the body of the loop. When the indentation stops, the body of the loop ends.", "_____no_output_____" ] ], [ [ "for i in range(3):\n print(\"Hello\")\nprint(\"Bye!\")", "Hello\nHello\nHello\nBye!\n" ] ], [ [ "Indentation applies to other compound statements as well, such as bodies of functions, different branches of an if statement, and while loops. We shall see examples of these later.", "_____no_output_____" ], [ "The `range(3)` expression above actually results with the sequence of integers 0, 1, and 2. So, the range is a half-open interval with the end point excluded from the range. In general, expression range(n) gives integers 0, 1, 2, ..., n-1. Modify the above program to make it also print the value of variable i at each iteration. Rerun the code with control-enter.", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 1 (hello world)</div>\nFill in the missing piece in the solution stub file `hello_world.py` in folder `src` to make it print the following:\n\n`Hello, world!`\n\nMake sure you use correct indenting. You can run it with command `python3 src/hello_world.py`.\nIf the output looks good, then you can test it with command `tmc test`. If the tests pass,\nsubmit your solution to the server with command `tmc submit`.\n<hr/>", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 2 (compliment)</div>\nFill in the stub solution to make the program work as follows. The program should ask the user for an input, and the print an answer as the examples below show.\n\n```\nWhat country are you from? Sweden\nI have heard that Sweden is a beautiful country.\n\nWhat country are you from? Chile \nI have heard that Chile is a beautiful country.\n```\n<hr/>", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 3 (multiplication)</div> \nMake a program that gives the following output. You should use a for loop in your solution.\n\n```\n4 multiplied by 0 is 0\n4 multiplied by 1 is 4\n4 multiplied by 2 is 8\n4 multiplied by 3 is 12\n4 multiplied by 4 is 16\n4 multiplied by 5 is 20\n4 multiplied by 6 is 24\n4 multiplied by 7 is 28\n4 multiplied by 8 is 32\n4 multiplied by 9 is 36\n4 multiplied by 10 is 40\n```\n<hr/>", "_____no_output_____" ], [ "### Variables and data types\n\nWe saw already earlier that assigning a value to variable is very simple:", "_____no_output_____" ] ], [ [ "a=1\nprint(a)", "1\n" ] ], [ [ "Note that we did not need to introduce the variable `a` in any way. No type was given for the variable. Python automatically detected that the type of `a` must be `int` (an integer). We can query the type of a variable with the builtin function `type`:", "_____no_output_____" ] ], [ [ "type(a)", "_____no_output_____" ] ], [ [ "Note also that the type of a variable is not fixed:", "_____no_output_____" ] ], [ [ "a=\"some text\"\ntype(a)", "_____no_output_____" ] ], [ [ "In Python the type of a variable is not attached to the name of the variable, like in C for instance, but instead with the actual value. 
This is called dynamic typing.", "_____no_output_____" ], [ "![typing.svg](https://github.com/csmastersUH/data_analysis_with_python_2020/blob/master/typing.svg?raw=1)", "_____no_output_____" ], [ "We say that a variable is a name that *refers* to a value or an object, and the assignment operator *binds* a variable name to a value.", "_____no_output_____" ], [ "The basic data types in Python are: `int`, `float`, `complex`, `str` (a string), `bool` (a boolean with values `True` and `False`), and `bytes`. Below are few examples of their use.", "_____no_output_____" ] ], [ [ "i=5\nf=1.5\nb = i==4\nprint(\"Result of the comparison:\", b)\nc=0+2j # Note that j denotes the imaginary unit of complex numbers.\nprint(\"Complex multiplication:\", c*c)\ns=\"conca\" + \"tenation\"\nprint(s)", "Result of the comparison: False\nComplex multiplication: (-4+0j)\nconcatenation\n" ] ], [ [ "The names of the types act as conversion operators between types:", "_____no_output_____" ] ], [ [ "print(int(-2.8))\nprint(float(2))\nprint(int(\"123\"))\nprint(bool(-2), bool(0)) # Zero is interpreted as False\nprint(str(234))", "-2\n2.0\n123\nTrue False\n234\n" ] ], [ [ "A *byte* is a unit of information that can represent numbers between 0 and 255. A byte consists of 8 *bits*, which can in turn represent either 0 or 1. All the data that is stored on disks or transmitted across the internet are sequences of bytes. Normally we don't have to care about bytes, since our strings and other variables are automatically converted to a sequence of bytes when needed to. An example of the correspondence between the usual data types and bytes is the characters in a string. A single character is encoded as a sequence of one or more bytes. For example, in the common [UTF-8](https://en.wikipedia.org/wiki/UTF-8) encoding the character `c` corresponds to the byte with integer value 99 and the character `ä` corresponds to sequence of bytes [195, 164]. An example conversion between characters and bytes:", "_____no_output_____" ] ], [ [ "b=\"ä\".encode(\"utf-8\") # Convert character(s) to a sequence of bytes\nprint(b) # Prints bytes in hexadecimal notation\nprint(list(b)) # Prints bytes in decimal notation", "b'\\xc3\\xa4'\n[195, 164]\n" ], [ "bytes.decode(b, \"utf-8\") # convert sequence of bytes to character(s)", "_____no_output_____" ] ], [ [ "During this course we don't have to care much about bytes, but in some cases, when loading data sets, we might have to specify the encoding if it deviates from the default one.", "_____no_output_____" ], [ "#### Creating strings\nA string is a sequence of characters commonly used to store input or output data in a program. The characters of a string are specified either between single (`'`) or double (`\"`) quotes. This optionality is useful if, for example, a string needs to contain a quotation mark:\n\"I don't want to go!\". You can also achieve this by *escaping* the quotation mark with the backslash: 'I don\\\\'t want to go'.\n\nThe string can also contain other escape sequences like `\\n` for newline and `\\t` for a tabulator. 
See [literals](https://docs.python.org/3/reference/lexical_analysis.html#literals) for a list of all escape sequences.", "_____no_output_____" ] ], [ [ "print(\"One\\tTwo\\nThree\\tFour\")", "One\tTwo\nThree\tFour\n" ] ], [ [ "A string containing newlines can be easily given within triple double or triple single quotes:", "_____no_output_____" ] ], [ [ "s=\"\"\"A string\nspanning over\nseveral lines\"\"\"", "_____no_output_____" ] ], [ [ "Although we can concatenate strings using the `+` operator, for efficiency reasons, one should use the `join` method to concatenate a larger number of strings:", "_____no_output_____" ] ], [ [ "a=\"first\"\nb=\"second\"\nprint(a+b)\nprint(\" \".join([a, b, b, a])) # More about the join method later\n", "firstsecond\nfirst second second first\n" ] ], [ [ "Sometimes printing by concatenation from pieces can be clumsy:", "_____no_output_____" ] ], [ [ "print(str(1) + \" plus \" + str(3) + \" is equal to \" + str(4))\n# slightly better\nprint(1, \"plus\", 3, \"is equal to\", 4)", "1 plus 3 is equal to 4\n1 plus 3 is equal to 4\n" ] ], [ [ "The multiple concatenation and quotation characters break the flow of thought. *String interpolation* offers somewhat easier syntax.\n\nThere are multiple ways to do string interpolation:\n\n* Python format strings\n* the `format` method\n* f-strings\n\nExamples of these can be seen below:", "_____no_output_____" ] ], [ [ "print(\"%i plus %i is equal to %i\" % (1, 3, 4)) # Format syntax\n\nprint(\"{} plus {} is equal to {}\".format(1, 3, 4)) # Format method\n\nprint(f\"{1} plus {3} is equal to {4}\") # f-string", "1 plus 3 is equal to 4\n1 plus 3 is equal to 4\n1 plus 3 is equal to 4\n" ] ], [ [ "The `i` format specifier in the format syntax corresponds to integers and the specifier `f` corresponds to floats. When using f-strings or the `format` method, integers use `d` instead. In format strings specifiers can usually be omitted and are generally used only when specific formatting is required. For example in f-strings `f\"{4:3d}\"` would specify the number 4 left padded with spaces to 3 digits.\n\nIt is often useful to specify the number of decimals when printing floats:", "_____no_output_____" ] ], [ [ "print(\"%.1f %.2f %.3f\" % (1.6, 1.7, 1.8)) # Old style\nprint(\"{:.1f} {:.2f} {:.3f}\".format(1.6, 1.7, 1.8)) # newer style\nprint(f\"{1.6:.1f} {1.7:.2f} {1.8:.3f}\") # f-string", "1.6 1.70 1.800\n1.6 1.70 1.800\n1.6 1.70 1.800\n" ] ], [ [ "The specifier `s` is used for strings. An example:", "_____no_output_____" ] ], [ [ "print(\"%s concatenated with %s produces %s\" % (\"water\", \"melon\", \"water\"+\"melon\"))\nprint(\"{0} concatenated with {1} produces {0}{1}\".format(\"water\", \"melon\"))\nprint(f\"{'water'} concatenated with {'melon'} produces {'water' + 'melon'}\")", "water concatenated with melon produces watermelon\nwater concatenated with melon produces watermelon\nwater concatenated with melon produces watermelon\n" ] ], [ [ "Look [here](https://pyformat.info/#number) for more details about format specifiers, and for comparison between the old and new style of string interpolation.\n\nDifferent ways of string interpolation have different strengths and weaknesses. Generally choosing which to use is a matter of personal preference. On this course examples and model solutions will predominantly use f-strings and the `format` method.", "_____no_output_____" ], [ "### Expressions\nAn *expression* is a piece of Python code that results in a value. It consists of values combined together with *operators*. 
Values can be literals, such as `1`, `1.2`, `\"text\"`, or variables. Operators include arithmetic operators, comparison operators, function call, indexing, attribute references, among others. Below there are a few examples of expressions:", "_____no_output_____" ], [ "```\n1+2\n7/(2+0.1)\na\ncos(0)\nmylist[1]\nc > 0 and c !=1\n(1,2,3)\na<5\nobj.attr\n(-1)**2 == 1\n```", "_____no_output_____" ], [ "<div class=\"alert alert-warning\">Note that in Python the operator `//` performs integer division and operator `/` performs float division. The `**` operator denotes exponentiation. These operators might therefore behave differently than in many other common languages.</div>", "_____no_output_____" ], [ "As another example the following expression computes the kinetic energy of a non-rotating object:\n`0.5 * mass * velocity**2`", "_____no_output_____" ], [ "### Statements\nStatements are commands that have some effect. For example, a function call (that is not part of another expression) is a statement. Also, the variable assignment is a statement:", "_____no_output_____" ] ], [ [ "i = 5\ni = i+1 # This is a common idiom to increment the value of i by one\ni += 1 # This is a short-hand for the above", "_____no_output_____" ] ], [ [ "Note that in Python there are no operators `++` or `--` unlike in some other languages.", "_____no_output_____" ], [ "It turns out that the operators `+ - * / // % & | ^ >> << **` have the corresponding *augmented assignment operators* `+= -= *= /= //= %= &= |= ^= >>= <<= **=`", "_____no_output_____" ], [ "Another large set of statements is the flow-control statements such as if-else, for and while loops. We will look into these in the next sections.", "_____no_output_____" ], [ "#### Loops for repetitive tasks\nIn Python we have two kinds of loops: `while` and `for`. We briefly saw the `for` loop earlier. Let's now look at the `while` loop. A `while` loop repeats a set of statements while a given condition holds. An example:", "_____no_output_____" ] ], [ [ "i=1\nwhile i*i < 1000:\n    print(\"Square of\", i, \"is\", i*i)\n    i = i + 1\nprint(\"Finished printing all the squares below 1000.\")", "Square of 1 is 1\nSquare of 2 is 4\nSquare of 3 is 9\nSquare of 4 is 16\nSquare of 5 is 25\nSquare of 6 is 36\nSquare of 7 is 49\nSquare of 8 is 64\nSquare of 9 is 81\nSquare of 10 is 100\nSquare of 11 is 121\nSquare of 12 is 144\nSquare of 13 is 169\nSquare of 14 is 196\nSquare of 15 is 225\nSquare of 16 is 256\nSquare of 17 is 289\nSquare of 18 is 324\nSquare of 19 is 361\nSquare of 20 is 400\nSquare of 21 is 441\nSquare of 22 is 484\nSquare of 23 is 529\nSquare of 24 is 576\nSquare of 25 is 625\nSquare of 26 is 676\nSquare of 27 is 729\nSquare of 28 is 784\nSquare of 29 is 841\nSquare of 30 is 900\nSquare of 31 is 961\nFinished printing all the squares below 1000.\n" ] ], [ [ "Note again that the body of the while statement was marked with the indentation.", "_____no_output_____" ], [ "Another way of repeating statements is with the `for` statement. An example", "_____no_output_____" ] ], [ [ "s=0\nfor i in [0,1,2,3,4,5,6,7,8,9]:\n    s = s + i\nprint(\"The sum is\", s)", "The sum is 45\n" ] ], [ [ "The `for` loop executes the statements in the block as many times as there are elements in the given list. At each iteration the variable `i` refers to another value from the list in order. Instead of giving the list explicitly as above, we could have used the *generator* `range(10)` which returns values from the sequence 0,1,...,9 as the for loop asks for a new value. 
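For example, the same sum written with `range` would be:\n\n```\ns=0\nfor i in range(10):\n    s = s + i\nprint(\"The sum is\", s)    # prints: The sum is 45\n```\n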
In the most general form the `for` loop goes through all the elements in an *iterable*.\nBesides lists and generators there are other iterables. We will talk about iterables and generators later this week.", "_____no_output_____" ], [ "When one wants to iterate through all the elements in an iterable, then the `for` loop is a natural choice. But sometimes `while` loops offer cleaner solution. For instance, if we want\nto go through all Fibonacci numbers up till a given limit, then it is easier to do with a `while` loop.", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 4 (multiplication table)</div>\n\nIn the `main` function print a multiplication table, which is shown below:\n```\n 1 2 3 4 5 6 7 8 9 10\n 2 4 6 8 10 12 14 16 18 20\n 3 6 9 12 15 18 21 24 27 30\n 4 8 12 16 20 24 28 32 36 40\n 5 10 15 20 25 30 35 40 45 50\n 6 12 18 24 30 36 42 48 54 60\n 7 14 21 28 35 42 49 56 63 70\n 8 16 24 32 40 48 56 64 72 80\n 9 18 27 36 45 54 63 72 81 90\n 10 20 30 40 50 60 70 80 90 100\n```\nFor example at row 4 and column 9 we have 4*9=36.\n\nUse two nested for loops to achive this. Note that you can use the following form to stop the `print` function from automatically starting a new line:", "_____no_output_____" ] ], [ [ "print(\"text\", end=\"\")\nprint(\"more text\")", "textmore text\n" ] ], [ [ "Print the numbers in a field with width four, so that the numbers are nicely aligned. For instructions on how adjust the field width refer to [pyformat.info](https://pyformat.info/#number_padding).\n<hr/>", "_____no_output_____" ], [ "#### Decision making with the if statement\nThe if-else statement works as can be expected.\nTry running the below cell by pressing control+enter.", "_____no_output_____" ] ], [ [ "x=input(\"Give an integer: \")\nx=int(x)\nif x >= 0:\n a=x\nelse:\n a=-x\nprint(\"The absolute value of %i is %i\" % (x, a))", "Give an integer: -1\nThe absolute value of -1 is 1\n" ] ], [ [ "The general from of an if-else statement is\n\n```\nif condition1:\n statement1_1\n statement1_2\n ...\nelif condition2:\n statement2_1\n statement2_2\n ...\n...\nelse:\n statementn_1\n statementn_2\n ...\n```", "_____no_output_____" ], [ "Another example:", "_____no_output_____" ] ], [ [ "c=float(input(\"Give a number: \"))\nif c > 0:\n print(\"c is positive\")\nelif c<0:\n print(\"c is negative\")\nelse:\n print(\"c is zero\")", "Give a number: 3\nc is positive\n" ] ], [ [ "#### Breaking and continuing loop\nBreaking the loop, when the wanted element is found, with the `break` statement:", "_____no_output_____" ] ], [ [ "l=[1,3,65,3,-1,56,-10]\nfor x in l:\n if x < 0:\n break\nprint(\"The first negative list element was\", x)", "The first negative list element was -1\n" ] ], [ [ "Stopping current iteration and continuing to the next one with the `continue` statement:", "_____no_output_____" ] ], [ [ "from math import sqrt, log\nl=[1,3,65,3,-1,56,-10]\nfor x in l:\n if x < 0:\n continue\n print(f\"Square root of {x} is {sqrt(x):.3f}\")\n print(f\"Natural logarithm of {x} is {log(x):.4f}\")", "Square root of 1 is 1.000\nNatural logarithm of 1 is 0.0000\nSquare root of 3 is 1.732\nNatural logarithm of 3 is 1.0986\nSquare root of 65 is 8.062\nNatural logarithm of 65 is 4.1744\nSquare root of 3 is 1.732\nNatural logarithm of 3 is 1.0986\nSquare root of 56 is 7.483\nNatural logarithm of 56 is 4.0254\n" ] ], [ [ "#### <div class=\"alert alert-info\">Exercise 5 (two dice)</div>\n\nLet us consider throwing two dice. (A dice can give a value between 1 and 6.) 
Use two nested `for`\nloops in the `main` function to iterate through all possible combinations the pair of dice can give. \nThere are 36 possible combinations. Print all those combinations as (ordered) pairs that sum to 5. \nFor example, your printout should include the pair `(2,3)`. Print one pair per line.\n<hr/>", "_____no_output_____" ], [ "### Functions\nA function is defined with the `def` statement. Let's do a doubling function.", "_____no_output_____" ] ], [ [ "def double(x):\n \"This function multiplies its argument by two.\"\n return x*2\nprint(double(4), double(1.2), double(\"abc\")) # It even happens to work for strings!", "8 2.4 abcabc\n" ] ], [ [ "The double function takes only one parameter. Notice the *docstring* on the second line. It documents the purpose and usage of the function. Let's try to access it.", "_____no_output_____" ] ], [ [ "print(\"The docstring is:\", double.__doc__)\nhelp(double) # Another way to access the docstring", "The docstring is: This function multiplies its argument by two.\nHelp on function double in module __main__:\n\ndouble(x)\n This function multiplies its argument by two.\n\n" ] ], [ [ "Most of Python's builtin functions, classes, and modules should contain a docstring.", "_____no_output_____" ] ], [ [ "help(print)", "Help on built-in function print in module builtins:\n\nprint(...)\n print(value, ..., sep=' ', end='\\n', file=sys.stdout, flush=False)\n \n Prints the values to a stream, or to sys.stdout by default.\n Optional keyword arguments:\n file: a file-like object (stream); defaults to the current sys.stdout.\n sep: string inserted between values, default a space.\n end: string appended after the last value, default a newline.\n flush: whether to forcibly flush the stream.\n\n" ] ], [ [ "Here's another example function:", "_____no_output_____" ] ], [ [ "def sum_of_squares(a, b):\n \"Computes the sum of arguments squared\"\n return a**2 + b**2\nprint(sum_of_squares(3, 4))", "25\n" ] ], [ [ "<div class=\"alert alert-warning\">Note the terminology: in the function definition the names a and b are called <strong>parameters</strong> of the function; in the function call, however, 3 and 4 are called <strong>arguments</strong> to the function.\n</div>", "_____no_output_____" ], [ "It would be nice that the number of arguments could be arbitrary, not just two. We could pass a list to the function as a parameter.", "_____no_output_____" ] ], [ [ "def sum_of_squares(lst):\n \"Computes the sum of squares of elements in the list given as parameter\"\n s=0\n for x in lst:\n s += x**2\n return s\nprint(sum_of_squares([-2]))\nprint(sum_of_squares([-2,4,5]))", "4\n45\n" ] ], [ [ "This works perfectly! There is however some extra typing with the brackets around the lists. Let's see if we can do better:", "_____no_output_____" ] ], [ [ "def sum_of_squares(*t):\n \"Computes the sum of squares of arbitrary number of arguments\"\n s=0\n for x in t:\n s += x**2\n return s\nprint(sum_of_squares(-2))\nprint(sum_of_squares(-2,4,5))", "4\n45\n" ] ], [ [ "The strange looking argument notation (the star) is called *argument packing*. It packs all the given positional arguments into a tuple `t`. We will encounter tuples again later, but it suffices now to say that tuples are *immutable* lists. With the `for` loop we can iterate through all the elements in the tuple.\n\nConversely, there is also syntax for *argument unpacking*. It has confusingly exactly same notation as argument packing (star), but they are separated by the location where used. 
Packing happens in the parameter list of the function's definition, and unpacking happens where the function is called:", "_____no_output_____" ] ], [ [ "lst=[1,5,8]\nprint(\"With list unpacked as arguments to the functions:\", sum_of_squares(*lst))\n# print(sum_of_squares(lst)) # Does not work correctly", "With list unpacked as arguments to the functions: 90\n" ] ], [ [ "The second call failed because the function tried to raise the list of numbers to the second power. Inside the function body we have `t=([1,5,8])`, where the parentheses denote a tuple with one element, a list.", "_____no_output_____" ], [ "In addition to positional arguments we have seen so far, a function call can also have *named arguments*. An example will explain this concept best:", "_____no_output_____" ] ], [ [ "def named(a, b, c):\n    print(\"First:\", a, \"Second:\", b, \"Third:\", c)\nnamed(5, c=7, b=8)", "First: 5 Second: 8 Third: 7\n" ] ], [ [ "Note that the named arguments didn't need to be in the same order as in the function definition.\nThe named arguments must come after the positional arguments. For example, the following function call is illegal `named(a=5, 7, 8)`.\n\nOne can also specify an optional parameter by giving the parameter a default value. The parameters that have default values must come after those parameters that don't. We saw that the parameters of the `print` function were of form `print(value, ..., sep=' ', end='\\n', file=sys.stdout, flush=False)`. There were four parameters with default values. If some default values don't suit us, we can give them in the function call using the name of the parameter:", "_____no_output_____" ] ], [ [ "print(1, 2, 3, end=' |', sep=' -*- ')\nprint(\"first\", \"second\", \"third\", end=' |', sep=' -*- ')", "1 -*- 2 -*- 3 |first -*- second -*- third |" ] ], [ [ "We did not need to specify all the parameters with default values, only those we wanted to change.\n\nLet's go through another example of using parameters with default values:", "_____no_output_____" ] ], [ [ "def length(*t, degree=2):\n    \"\"\"Computes the length of the vector given as parameter. By default, it computes\n    the Euclidean distance (degree==2)\"\"\"\n    s=0\n    for x in t:\n        s += abs(x)**degree\n    return s**(1/degree)\nprint(length(-4,3))\nprint(length(-4,3, degree=3))", "5.0\n4.497941445275415\n" ] ], [ [ "With the default parameter this is the Euclidean distance, and if $p\ne 2$ it is called [p-norm](https://en.wikipedia.org/wiki/P-norm).", "_____no_output_____" ], [ "We saw that it was possible to use packing and unpacking of arguments with the * notation, when one wants to specify an arbitrary number of *positional arguments*. This is also possible for an arbitrary number of named arguments with the `**` notation. We will talk about this more in the data structures section.", "_____no_output_____" ], [ "#### Visibility of variables\nFunction definition creates a new *namespace* (also called local scope). Variables created inside this scope are not available from outside the function definition. Also, the function parameters are only visible inside the function definition. Variables that are not defined inside any function are called `global variables`.\n\nGlobal variables are readable also in local scopes, but an assignment creates a new local variable without rebinding the global variable. 
If we are inside a function, a local variable hides a global variable by the same name:", "_____no_output_____" ] ], [ [ "i=2 # global variable\ndef f():\n i=3 # this creates a new variable, it does not rebind the global i\n print(i) # This will print 3 \nf()\nprint(i) # This will print 2", "3\n2\n" ] ], [ [ "If you really need to rebind a global variable from a function, use the `global` statement. Example:", "_____no_output_____" ] ], [ [ "i=2\ndef f():\n global i\n i=5 # rebind the global i variable\n print(i) # This will print 5\nf()\nprint(i) # This will print 5", "5\n5\n" ] ], [ [ "Unlike languages like C or C++, Python allows defining a function inside another function. This *nested* function will have nested scope:", "_____no_output_____" ] ], [ [ "def f(): # outer function\n b=2\n def g(): # inner function\n #nonlocal b # Without this nonlocal statement,\n b=3 # this will create a new local variable\n print(b)\n g()\n print(b)\nf()", "3\n2\n" ] ], [ [ "Try first running the above cell and see the result. Then uncomment the nonlocal stamement and run the cell again. The `global` and `nonlocal` statements are similar. The first will force a variable refer to a global variable, and the second will force a variable to refer to the variable in the nearest outer scope (but not the global scope).", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 6 (triple square)</div>\n\nWrite two functions: `triple` and `square`. Function `triple` multiplies its parameter by three. Function `square` raises its parameter to the power of two. For example, we have equalities `triple(5)==15`\nand `square(5)==25`.\n\nPart 1.\n\nIn the `main` function write a `for` loop that iterates through values 1 to 10, and for each value prints its triple and its square. The output should be as follows:\n```\ntriple(1)==3 square(1)==1\ntriple(2)==6 square(2)==4\n...\n```\n\nPart 2.\n\nNow modify this `for` loop so that it stops iteration when the square of a value is larger than the\ntriple of the value, without printing anything in the last iteration.\n\nNote that the test cases check that both functions `triple` and `square` are called exactly once per iteration.\n<hr/>", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 7 (areas of shapes)</div>\n\nCreate a program that can compute the areas of three shapes, triangles, rectangles and circles, when\ntheir dimensions are given.\n\nAn endless loop should ask for which shape you want the area be calculated. An empty string as input will exit the loop. \nIf the user gives a\nstring that is none of the given shapes, the message “unknown shape!” should be printed.\nThen it will ask for dimensions for that particular shape. When all the necessary dimensions are\ngiven, it prints the area, and starts the loop all over again. Use format specifier `f` for the area.\n\nWhat happens if you give incorrect dimensions, like giving string \"aa\" as radius? 
You don't have to check for errors in the input.\n\nExample interaction:\n```\nChoose a shape (triangle, rectangle, circle): triangle\nGive base of the triangle: 20\nGive height of the triangle: 5\nThe area is 50.000000\nChoose a shape (triangle, rectangle, circle): rectangel\nUnknown shape!\nChoose a shape (triangle, rectangle, circle): rectangle\nGive width of the rectangle: 20\nGive height of the rectangle: 4\nThe area is 80.000000\nChoose a shape (triangle, rectangle, circle): circle\nGive radius of the circle: 10\nThe area is 314.159265\nChoose a shape (triangle, rectangle, circle): \n```\n<hr/>", "_____no_output_____" ], [ "### Data structures\nThe main data structures in Python are strings, lists, tuples, dictionaries, and sets. We saw some examples of lists, when we discussed `for` loops. And we saw briefly tuples when we introduced argument packing and unpacking. Let's get into more details now.\n\n#### Sequences\nA *list* contains arbitrary number of elements (even zero) that are stored in sequential order. The elements are separated by commas and written between brackets. The elements don't need to be of the same type. An example of a list with four values:", "_____no_output_____" ] ], [ [ "[2, 100, \"hello\", 1.0]", "_____no_output_____" ] ], [ [ "A *tuple* is fixed length, immutable, and ordered container. Elements of tuple are separated by commas and written between parentheses. Examples of tuples:", "_____no_output_____" ] ], [ [ "(3,) # a singleton\n(1,3) # a pair\n(1, \"hello\", 1.0); # a triple", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-warning\">Note the difference between `(3)` and `(3,)`. Because the parentheses can also be used to group expressions, the first one defines an integer, but the second one defines a tuple with single element.</div>", "_____no_output_____" ], [ "As we can see, both lists and tuples can contain values of different type.\n\nList, tuples, and strings are called *sequences* in Python, and they have several commonalities:\n\n* their length can be queried with the `len` function\n* `min` and `max` function find the minimum and maximum element of a sequence, and `sum` adds all the elements of numbers together\n* Sequences can be concatenated with the `+` operator, and repeated with the `*` operator: `\"hi\"*3==\"hihihi\"`\n* Since sequences are ordered, we can refer to the elements of a sequences by integers using the *indexing* notation: `\"abcd\"[2] == \"c\"`\n* Note that the indexing begins from 0\n* Negative integers start indexing from the end: -1 refers to the last element, -2 refers to the second last, and so on", "_____no_output_____" ], [ "Above we saw that we can access a single element of a sequence using *indexing*. If we want a subsequence of a sequence, we can use the *slicing* syntax. A slice consists of elements of the original sequence, and it is itself a sequence as well. A simple slice is a range of elements:", "_____no_output_____" ] ], [ [ "s=\"abcdefg\"\ns[1:4]", "_____no_output_____" ] ], [ [ "Note that Python ranges exclude the last index. The generic form of a slice is\n`sequence[first:last:step]`. If any of the three parameters are left out, they are set to default values as follows: first=0, last=len(L), step=1. So, for instance \"abcde\"[1:]==\"bcde\". The step parameter selects elements that are step distance apart from each other. 
For example:", "_____no_output_____" ] ], [ [ "print([0,1,2,3,4,5,6,7,8,9][::3])", "[0, 3, 6, 9]\n" ] ], [ [ "#### <div class=\"alert alert-info\">Exercise 8 (solve quadratic)</div>\n\nIn mathematics, the quadratic equation $ax^2+bx+c=0$ can be solved with the formula \n$x=\\frac{-b\\pm \\sqrt{b^2 -4ac}}{2a}$. \n\nWrite a function `solve_quadratic`, that returns both solutions of a generic quadratic as a pair (2-tuple)\nwhen the coefficients are given as parameters. It should work like this:\n```python\nprint(solve_quadratic(1,-3,2))\n(2.0,1.0)\nprint(solve_quadratic(1,2,1))\n(-1.0,-1.0)\n```\n\nYou may want to use the `math.sqrt` function from the `math` module in your solution. Test that your function works in the main function!\n<hr/>", "_____no_output_____" ], [ "#### Modifying lists\nWe can assign values to elements of a list by indexing or by slicing. An example:", "_____no_output_____" ] ], [ [ "L=[11,13,22,32]\nL[2]=10 # Changes the third element\nprint(L)", "[11, 13, 10, 32]\n" ] ], [ [ "Or we can assign a list to a slice:", "_____no_output_____" ] ], [ [ "L[1:3]=[4]\nprint(L)", "[11, 4, 32]\n" ] ], [ [ "We can also modify a list by using *mutating methods* of the `list` class, namely the methods `append`, `extend`, `insert`, `remove`, `pop`, `reverse`, and `sort`. Try Python's help functionality to find more about these methods: e.g. `help(list.extend)` or `help(list)`.\n\n<div class=\"alert alert-warning\">Note that we cannot perform these modifications on tuples or strings since they are *immutable*</div>", "_____no_output_____" ], [ "#### Generating numerical sequences\nTrivial lists can be tedious to write: `[0,1,2,3,4,5,6]`. The function `range` creates numeric ranges automatically. The above sequence can be generated with the function call `range(7)`. Note again that then end value is not included in the sequence. An example of using the `range` function:", "_____no_output_____" ] ], [ [ "L=range(3)\nfor i in L:\n print(i)\n# Note that L is not a list!\nprint(L)", "0\n1\n2\nrange(0, 3)\n" ] ], [ [ "So `L` is not a list, but it is a sequence. We can for instace access its last element with `L[-1]`. If really needed, then it can be converted to a list with the `list` constructor:", "_____no_output_____" ] ], [ [ "L=range(10)\nprint(list(L))", "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n" ] ], [ [ "<div class=\"alert alert-warning\">Note that using a range consumes less memory than the corresponding list. This is because in a list all the elements are stored in the memory, whereas the range generates the requested elements only when needed. For example, when the for loop asks for the next element from the range at each iteration, only a single element from the range exists in memory at the same time. This makes a big difference when using large ranges, like range(1000000).</div>", "_____no_output_____" ], [ "The `range` function works in similar fashion as slices. So, for instance the step of the sequence can be given:", "_____no_output_____" ] ], [ [ "print(list(range(0, 7, 2)))", "[0, 2, 4, 6]\n" ] ], [ [ "#### Sorting sequences\n\nIn Python there are two ways to sort sequences. The `sort` *method* modifies the original list, whereas the `sorted` *function* returns a new sorted list and leaves the original intact. 
A couple of examples will demonstrate this:", "_____no_output_____" ] ], [ [ "L=[5,3,7,1]\nL.sort() # here we call the sort method of the object L\nprint(L)\nL2=[6,1,7,3,6]\nprint(sorted(L2))\nprint(L2)", "[1, 3, 5, 7]\n[1, 3, 6, 6, 7]\n[6, 1, 7, 3, 6]\n" ] ], [ [ "The parameter `reverse=True` can be given (both to `sort` and `sorted`) to get descending order of elements:", "_____no_output_____" ] ], [ [ "L=[5,3,7,1]\nprint(sorted(L, reverse=True))", "[7, 5, 3, 1]\n" ] ], [ [ "#### <div class=\"alert alert-info\">Exercise 9 (merge)</div>\n\nSuppose we have two lists `L1` and `L2` that contain integers which are sorted in ascending order.\nCreate a function `merge` that gets these lists as parameters and returns a new sorted list `L` that has\nall the elements of `L1` and `L2`. So, `len(L)` should equal to `len(L1)+len(L2)`. Do this using the\nfact that both lists are already sorted. You can’t use the `sorted` function or the `sort` method in implementing the `merge` method. You can however use these `sorted` in the main function for creating inputs to the `merge` function.\nTest with a couple of examples in the `main` function that your solution works correctly.\n\nNote: In Python argument lists are passed by reference to the function, they are not copied! Make sure you don't modify the original lists of the caller.\n<hr/>", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 10 (detect ranges)</div>\n\nCreate a function named `detect_ranges` that gets a list of integers as a parameter. The function\nshould then sort this list, and transform the list into another list where pairs are used for all the\ndetected intervals. So `3,4,5,6` is replaced by the pair `(3,7)`. Numbers that are not part of any\ninterval result just single numbers. The resulting list consists of these numbers and\npairs, separated by commas.\nAn example of how this function works:\n```python\nprint(detect_ranges([2,5,4,8,12,6,7,10,13]))\n[2,(4,9),10,(12,14)]\n```\n\nNote that the second element of the pair does not belong to the range. This is consistent with the way Python's `range` function works. You may assume that no element in the input list appears multiple times.\n<hr/>", "_____no_output_____" ], [ "#### Zipping sequences\n\nThe `zip` function combines two (or more) sequences into one sequence. If, for example, two sequences are zipped together, the resulting sequence contains pairs. In general, if `n` sequences are zipped together, the elements of the resulting sequence contains `n`-tuples. 
An example of this:", "_____no_output_____" ] ], [ [ "L1=[1,2,3]\nL2=[\"first\", \"second\", \"third\"]\nprint(zip(L1, L2)) # Note that zip does not return a list, like range\nprint(list(zip(L1, L2))) # Convert to a list", "<zip object at 0x7fb8141907c8>\n[(1, 'first'), (2, 'second'), (3, 'third')]\n" ] ], [ [ "Here's another example of using the `zip` function.", "_____no_output_____" ] ], [ [ "days=\"Monday Tuesday Wednesday Thursday Friday Saturday Sunday\".split()\nweathers=\"rainy rainy sunny cloudy rainy sunny sunny\".split()\ntemperatures=[10,12,12,9,9,11,11]\nfor day, weather, temperature in zip(days,weathers,temperatures):\n print(f\"On {day} it was {weather} and the temperature was {temperature} degrees celsius.\")\n\n# Or equivalently:\n#for t in zip(days,weathers,temperatures):\n# print(\"On {} it was {} and the temperature was {} degrees celsius.\".format(*t))", "On Monday it was rainy and the temperature was 10 degrees celsius.\nOn Tuesday it was rainy and the temperature was 12 degrees celsius.\nOn Wednesday it was sunny and the temperature was 12 degrees celsius.\nOn Thursday it was cloudy and the temperature was 9 degrees celsius.\nOn Friday it was rainy and the temperature was 9 degrees celsius.\nOn Saturday it was sunny and the temperature was 11 degrees celsius.\nOn Sunday it was sunny and the temperature was 11 degrees celsius.\n" ] ], [ [ "If the sequences are not of equal length, then the resulting sequence will be as long as the shortest input sequence is.", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 11 (interleave)</div>\n\nWrite function `interleave` that gets arbitrary number of lists as parameters. You may assume that all the lists have equal length. The function should return one list containing all the elements from the input lists interleaved.\nTest your function from the `main` function of the program.\n\nExample:\n`interleave([1,2,3], [20,30,40], ['a', 'b', 'c'])`\nshould return\n`[1, 20, 'a', 2, 30, 'b', 3, 40, 'c']`.\nUse the `zip` function to implement `interleave`. Remember the `extend` method of list objects.\n<hr/>", "_____no_output_____" ], [ "#### Enumerating sequences\n\nIn some other programming languages one iterates through the elements using their indices (0,1, ...) in the sequence. In Python we normally don't need to think about indices when iterating, because the `for` loop allows simpler iteration through the elements. But sometimes you really need to know the index of the current element in the sequence. In this case one uses Python's `enumerate` function. In the next example we would like find the second occurrence of integer 5 in a list.", "_____no_output_____" ] ], [ [ "L=[1,2,98,5,-1,2,0,5,10]\ncounter = 0\nfor i, x in enumerate(L):\n if x == 5:\n counter += 1\n if counter == 2:\n break\nprint(i)", "7\n" ] ], [ [ "The `enumerate(L)` function call can be thought to be equivalent to `zip(range(len(L)), L)`.", "_____no_output_____" ], [ "#### Dictionaries\nA *dictionary* is a dynamic, unordered container. Instead of using integers to access the elements of the container, the dictionary uses *keys* to access the stored *values*. The dictionary can be created by listing the comma separated key-value pairs in braces. Keys and values are separated by a colon. 
A tuple (key,value) is called an *item* of the dictionary.\n\nLet's demonstrate the dictionary creation and usage:", "_____no_output_____" ] ], [ [ "d={\"key1\":\"value1\", \"key2\":\"value2\"}\nprint(d[\"key1\"])\nprint(d[\"key2\"])", "value1\nvalue2\n" ] ], [ [ "Keys can have different types even in the same container. So the following code is legal:\n`d={1:\"a\", \"z\":1}`. The only restriction is that the keys must be *hashable*. That is, there has to be a mapping from keys to integers. Lists are *not* hashable, but tuples are!\n\nThere are alternative syntaxes for dictionary creation:", "_____no_output_____" ] ], [ [ "dict([(\"key1\", \"value1\"), (\"key2\", \"value2\"), (\"key3\", \"value3\")]) # list of items\ndict(key1=\"value1\", key2=\"value2\", key3=\"value3\");", "_____no_output_____" ] ], [ [ "If a key is not found in a dictionary, the indexing `d[key]` results in an error (*exception* `KeyError`). But an assignment with a non-existing key causes the key to be added in the dictionary associated with the corresponding value:", "_____no_output_____" ] ], [ [ "d={}\nd[2]=\"value\"\nprint(d)", "{2: 'value'}\n" ], [ "# d[1] # This would cause an error", "_____no_output_____" ] ], [ [ "Dictionary object contains several non-mutating methods:\n```\nd.copy()\nd.items()\nd.keys()\nd.values()\nd.get(k[,x])\n```\n\nSome methods mutate the dictionary:\n```\nd.clear()\nd.update(d1)\nd.setdefault(k[,x])\nd.pop(k[,x])\nd.popitem()\n```\n\nTry out some of these in the below cell. You can find more info with `help(dict)` or `help(dict.keys)`.", "_____no_output_____" ] ], [ [ "d=dict(a=1, b=2, c=3, d=4, e=5)\nd.values()", "_____no_output_____" ] ], [ [ "#### Sets\nSet is a dynamic, unordered container. It works a bit like dictionary, but only the keys are stored. And each key can be stored only once. The set requires that the keys to be stored are hashable. Below are a few ways of creating a set:", "_____no_output_____" ] ], [ [ "s={1,1,1}\nprint(s)\ns=set([1,2,2,'a'])\nprint(s)\ns=set() # empty set\nprint(s)\ns.add(7) # add one element\nprint(s)", "{1}\n{1, 2, 'a'}\nset()\n{7}\n" ] ], [ [ "A more useful example:", "_____no_output_____" ] ], [ [ "s=\"mississippi\"\nprint(f\"There are {len(set(s))} distinct characters in {s}\")", "There are 4 distinct characters in mississippi\n" ] ], [ [ "The `set` provides the following non-mutating methods:", "_____no_output_____" ] ], [ [ "s=set()\ns1=set()\ns.copy()\ns.issubset(s1)\ns.issuperset(s1)\ns.union(s1)\ns.intersection(s1)\ns.difference(s1)\ns.symmetric_difference(s1);", "_____no_output_____" ] ], [ [ "The last four operation can be tedious to write to create a more complicated expression. The alternative is to use the corresponding operator forms: `|`, `&`, `-`, and `^`. An example of these:", "_____no_output_____" ] ], [ [ "s=set([1,2,7])\nt=set([2,8,9])\nprint(\"Union:\", s|t)\nprint(\"Intersection:\", s&t)\nprint(\"Difference:\", s-t)\nprint(\"Symmetric difference\", s^t)", "Union: {1, 2, 7, 8, 9}\nIntersection: {2}\nDifference: {1, 7}\nSymmetric difference {1, 7, 8, 9}\n" ] ], [ [ "There are also the following mutating methods:\n```\ns.add(x)\ns.clear()\ns.discard()\ns.pop()\ns.remove(x)\n```\n\nAnd the set operators `|`, `&`, `-`, and `^` have the corresponding mutating, augmented assignment forms: `|=`, `&=`, `-=`, and `^=`.", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 12 (distinct characters)</div>\n\nWrite function `distinct_characters` that gets a list of strings as a parameter. 
It should return a dictionary whose keys are the strings of the input list and the corresponding values are the numbers of distinct characters in the key.\n\nUse the `set` container to temporarily store the distinct characters in a string.\nExample of usage:\n`distinct_characters([\"check\", \"look\", \"try\", \"pop\"])`\nshould return\n`{ \"check\" : 4, \"look\" : 3, \"try\" : 3, \"pop\" : 2}`.\n<hr/>", "_____no_output_____" ], [ "#### Miscellaneous stuff\n\nTo find out whether a container includes an element, the `in` operator can be used. The operator returns a truth value. Some examples of the usage:", "_____no_output_____" ] ], [ [ "print(1 in [1,2])\nd=dict(a=1, b=3)\nprint(\"b\" in d)\ns=set()\nprint(1 in s)\nprint(\"x\" in \"text\")", "True\nTrue\nFalse\nTrue\n" ] ], [ [ "As a special case, for strings the `in` operator can be used to check whether a string is part of another string:", "_____no_output_____" ] ], [ [ "print(\"issi\" in \"mississippi\")\nprint(\"issp\" in \"mississippi\")", "True\nFalse\n" ] ], [ [ "Elements of a container can be unpacked into variables:", "_____no_output_____" ] ], [ [ "first, second = [4,5]\na,b,c = \"bye\"\nprint(c)\nd=dict(a=1, b=3)\nkey1, key2 = d\nprint(key1, key2)", "e\na b\n" ] ], [ [ "In membership testing and unpacking only the keys of a dictionary are used, unless either values or items (like below) are explicitly asked.", "_____no_output_____" ] ], [ [ "for key, value in d.items():\n print(f\"For key '{key}' value {value} was stored\")", "For key 'a' value 1 was stored\nFor key 'b' value 3 was stored\n" ] ], [ [ "To remove the binding of a variable, use the `del` statement. For example:", "_____no_output_____" ] ], [ [ "s=\"hello\"\ndel s\n# print(s) # This would cause an error", "_____no_output_____" ] ], [ [ "To delete an item from a container, the `del` statement can again be applied:", "_____no_output_____" ] ], [ [ "L=[13,23,40,100]\ndel L[1]\nprint(L)", "[13, 40, 100]\n" ] ], [ [ "In similar fashion `del` can be used to delete a slice. Later we will see that `del` can delete attributes from an object.", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 13 (reverse dictionary)</div>\n\nLet `d` be a dictionary that has English words as keys and a list of Finnish words as values. So, the\ndictionary can be used to find out the Finnish equivalents of an English word in the following way:\n\n```\nd[\"move\"]\n[\"liikuttaa\"]\nd[\"hide\"]\n[\"piilottaa\", \"salata\"]\n```\n\nMake a function `reverse_dictionary` that creates a Finnish to English dictionary based on a English to Finnish dictionary given as a parameter. The values of the created dictionary should be lists of words. It should work like this:\n```\nd={'move': ['liikuttaa'], 'hide': ['piilottaa', 'salata'], 'six': ['kuusi'], 'fir': ['kuusi']}\nreverse_dictionary(d)\n{'liikuttaa': ['move'], 'piilottaa': ['hide'], 'salata': ['hide'], 'kuusi': ['six', 'fir']}\n```\n\nBe careful with synonyms and homonyms!\n<hr/>", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 14 (find matching)</div>\n\nWrite function `find_matching` that gets a list of strings and a search string as parameters. The function should return the indices to those elements in the input list that contain the search string. 
Use the function `enumerate`.\n\nAn example:\n`find_matching([\"sensitive\", \"engine\", \"rubbish\", \"comment\"], \"en\")`\nshould return the list\n`[0, 1, 3]`.\n<hr/>", "_____no_output_____" ], [ "### Compact way of creating data structures\nWe can now easily create complicated data structures using `for` loops:", "_____no_output_____" ] ], [ [ "L=[]\nfor i in range(10):\n L.append(i**2)\nprint(L)", "[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]\n" ] ], [ [ "Because this kind of pattern is often used, Python offers a short-hand for this. A *list comprehension* is an expression that allows creating complicated lists on one line. The notation is familiar from mathematics:", "_____no_output_____" ], [ "$\\{a^3 : a \\in \\{1,2, \\ldots, 10\\}\\}$", "_____no_output_____" ], [ "The same written in Python as a list comprehension:", "_____no_output_____" ] ], [ [ "L=[ a**3 for a in range(1,11)]\nprint(L)", "[1, 8, 27, 64, 125, 216, 343, 512, 729, 1000]\n" ] ], [ [ "The generic form of a list comprehension is:\n`[ expression for element in iterable lc-clauses ]`.\nLet's break this syntax into pieces. The iterable can be any sequence (or something more general). The lc-clauses consists of zero or more of the following clauses:\n\n* for elem in iterable\n* if expression\n\nA more complicated example. How would you describe these numbers?", "_____no_output_____" ] ], [ [ "L=[ 100*a + 10*b +c for a in range(0,10)\n for b in range(0,10)\n for c in range(0,10) \n if a <= b <= c]\nprint(L)", "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 33, 34, 35, 36, 37, 38, 39, 44, 45, 46, 47, 48, 49, 55, 56, 57, 58, 59, 66, 67, 68, 69, 77, 78, 79, 88, 89, 99, 111, 112, 113, 114, 115, 116, 117, 118, 119, 122, 123, 124, 125, 126, 127, 128, 129, 133, 134, 135, 136, 137, 138, 139, 144, 145, 146, 147, 148, 149, 155, 156, 157, 158, 159, 166, 167, 168, 169, 177, 178, 179, 188, 189, 199, 222, 223, 224, 225, 226, 227, 228, 229, 233, 234, 235, 236, 237, 238, 239, 244, 245, 246, 247, 248, 249, 255, 256, 257, 258, 259, 266, 267, 268, 269, 277, 278, 279, 288, 289, 299, 333, 334, 335, 336, 337, 338, 339, 344, 345, 346, 347, 348, 349, 355, 356, 357, 358, 359, 366, 367, 368, 369, 377, 378, 379, 388, 389, 399, 444, 445, 446, 447, 448, 449, 455, 456, 457, 458, 459, 466, 467, 468, 469, 477, 478, 479, 488, 489, 499, 555, 556, 557, 558, 559, 566, 567, 568, 569, 577, 578, 579, 588, 589, 599, 666, 667, 668, 669, 677, 678, 679, 688, 689, 699, 777, 778, 779, 788, 789, 799, 888, 889, 899, 999]\n" ] ], [ [ "If one needs only to iterate through the list once, it is more memory efficient to use a *generator expression* instead. 
The only thing that changes syntactically is that the surrounding brackets are replaced by parentheses:", "_____no_output_____" ] ], [ [ "G = ( 100*a + 10*b + c for a in range(0,10)\n for b in range(0,10)\n for c in range(0,10) \n if a <= b <= c )\nprint(sum(G)) # This iterates through all the elements from the generator\nprint(sum(G)) # It doesn't restart from the beginning, so all elements are already consumed", "60885\n0\n" ] ], [ [ "<div class=\"alert alert-warning\">Note above that one can only iterate through the generator once.</div>", "_____no_output_____" ], [ "Similary a *dictionary comprehension* creates a dictionary:", "_____no_output_____" ] ], [ [ "d={ k : k**2 for k in range(10)}\nprint(d)", "{0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81}\n" ] ], [ [ "And a *set comprehension* creates a set:", "_____no_output_____" ] ], [ [ "s={ i*j for i in range(10) for j in range(10)}\nprint(s)", "{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16, 18, 20, 21, 24, 25, 27, 28, 30, 32, 35, 36, 40, 42, 45, 48, 49, 54, 56, 63, 64, 72, 81}\n" ] ], [ [ "#### <div class=\"alert alert-info\">Exercise 15 (two dice comprehension)</div>\n\nRedo the earlier exercise which printed all the pairs of two dice results that sum to 5. But this time use a list comprehension. Print one pair per line.\n<hr/>", "_____no_output_____" ], [ "### Processing sequences\nIn this section we will go through some useful tools, that are maybe familiar to you from some functional programming language like *lisp* or *haskell*. These functions rely on functions being first-class objects in Python, that is, you can\n\n* pass a function as a parameter to another function\n* return a function as a return value from some function\n* store a function in a data structure or a variable\n\nWe will talk about `map`, `filter`, and `reduce` functions. We will also cover how to create functions with no name using the *lambda* expressions.", "_____no_output_____" ], [ "#### Map and lambda functions\nThe `map` function gets a list and a function as parameters, and it returns a new list whose\nelements are elements of the original list transformed by the parameter function. For this to work the parameter function must take exactly one value in and return a value out. An example will clarify this concept:", "_____no_output_____" ] ], [ [ "def double(x):\n return 2*x\nL=[12,4,-1]\nprint(map(double, L))", "<map object at 0x7fb81413ef60>\n" ] ], [ [ "The map function returns a map object for efficiency reasons. However, since we only want print the contents, we first convert it to a list and then print it:", "_____no_output_____" ] ], [ [ "print(list(map(double,L)))", "[24, 8, -2]\n" ] ], [ [ "When one reads numeric data from a file or from the internet, the numbers are usually in string form. Before they can be used in computations, they must first be converted to ints or floats.\nA simple example will showcase this.", "_____no_output_____" ] ], [ [ "s=\"12 43 64 6\"\nL=s.split() # The split method of the string class, breaks the string at whitespaces\n # to a list of strings.\nprint(L)\nprint(sum(map(int, L))) # The int function converts a string to an integer", "['12', '43', '64', '6']\n125\n" ] ], [ [ "Sometimes it feels unnecessary to write a function is you are only going to use it in one `map` function call. For example the function", "_____no_output_____" ] ], [ [ "def add_double_and_square(x):\n return 2*x+x**2 ", "_____no_output_____" ] ], [ [ "It is not likely that you will need it elsewhere in your program. 
The solution is to use an *expression* called *lambda* to define a function with no name. Because it is an expression, we can put it, for instance, in an argument list of a function call. The lambda expression has the form `lambda param1,param2, ... : expression`, where after the lambda keyword you list the parameters of the function, and after the colon is the expression that uses the parameters to compute the return value of the function. Let's replace the above `add_double_and_square` function with a lambda function and apply it to a list using the `map` function.", "_____no_output_____" ] ], [ [ "L=[2,3,5]\nprint(list(map(lambda x : 2*x+x**2, L)))", "[8, 15, 35]\n" ] ], [ [ "#### <div class=\"alert alert-info\">Exercise 16 (transform)</div>\n\nWrite a function `transform` that gets two strings as parameters and returns a list of integers. The function should split the strings into words, and convert these words to integers. This should give two lists of integers. Then the function should return a list whose elements are multiplication of two integers in the respective positions in the lists.\nFor example\n`transform(\"1 5 3\", \"2 6 -1\")`\nshould return the list of integers\n`[2, 30, -3]`.\n\nYou **have** to use `split`, `map`, and `zip` functions/methods. You may assume that the two input strings are in correct format.\n<hr/>", "_____no_output_____" ], [ "#### Filter function\n", "_____no_output_____" ], [ "The `filter` function takes a function and a list as parameters. But unlike with the map construct, now the parameter function must take exactly one parameter and return a truth value (True or False). The `filter` function then creates a new list with only those elements from the original list for which the parameter function returns True. The elements for which the parameter function returns False are filtered out. An example will demonstrate the `filter` function:", "_____no_output_____" ] ], [ [ "def is_odd(x):\n \"\"\"Returns True if x is odd and False if x is even\"\"\"\n return x % 2 == 1 # The % operator returns the remainder of integer division\nL=[1, 4, 5, 9, 10]\nprint(list(filter(is_odd, L)))", "[1, 5, 9]\n" ] ], [ [ "The even elements of the list were filtered out.\n\nNote that the `filter` function is rarely used in modern python since list comprehensions can do the same thing while also doing whatever we want to do with the filtered values.", "_____no_output_____" ] ], [ [ "[l**2 for l in L if is_odd(l)] # squares of odd values", "_____no_output_____" ] ], [ [ "That said, `filter` is a useful function to know.", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 17 (positive list)</div>\n\nWrite a function `positive_list` that gets a list of numbers as a parameter, and returns a list with the negative numbers and zero filtered out using the `filter` function.\n\nThe function call `positive_list([2,-2,0,1,-7])` should return the list `[2,1]`. Test your function in the `main` function.\n<hr/>", "_____no_output_____" ], [ "#### The reduce function\nThe `sum` function that returns the sum of a numeric list, can be though to reduce a list to a single element. It does this reduction by repeatedly applying the `+` operator until all the list elements are consumed. For instance, the list `[1,2,3,4]` is reduced by the expression `(((0+1)+2)+3)+4` of repeated applications of the `+` operator. 
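\n\nAs a notational aside (an addition here, not part of the original course text): given a starting value $s_0$ and a binary operator $\oplus$, reducing the list $[x_1, x_2, \ldots, x_n]$ computes\n\n$(((s_0 \oplus x_1) \oplus x_2) \oplus \cdots) \oplus x_n$\n\nso the sum above is just the special case with $s_0 = 0$ and $\oplus$ being `+`.\n\n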
We could implement this with the following function:", "_____no_output_____" ] ], [ [ "def sumreduce(L):\n s=0\n for x in L:\n s = s+x\n return s", "_____no_output_____" ] ], [ [ "Because this is a common pattern, the `reduce` function is a common inclusion in functional programming languages. In Python `reduce` is included in the `functools` module. You give the operator you want to use as a parameter to reduce (addition in the above example). You may also give a starting value of the computation (starting value 0 was used above). \n\nIf no starting value is used, the first element of the iterable is used as the starting value. \n\nWe can now get rid of the separate function `sumreduce` by using the reduce function:", "_____no_output_____" ] ], [ [ "L=[1,2,3,4]\nfrom functools import reduce # import the reduce function from the functools module\nreduce(lambda x,y:x+y, L, 0)", "_____no_output_____" ] ], [ [ "If we wanted to get a product of all numbers in a sequence, we would use", "_____no_output_____" ] ], [ [ "reduce(lambda x,y:x*y, L, 1)", "_____no_output_____" ] ], [ [ "This corresponds to the sequence `(((1*1)*2)*3)*4` of application of operator `*`.", "_____no_output_____" ], [ "<div class=\"alert alert-warning\">Note that use of the starting value is necessary, because we want to be able to reduce lists of length 0 as well. If no starting value is specified when run on an empty list, <code>reduce</code> will raise an exception.</div>", "_____no_output_____" ], [ "## String handling\nWe have already seen how to index, slice, concatenate, and repeat strings. Let's now look into what methods the `str` class offers. In Python strings are immutable. This means that for instance the following assignment is not legal:", "_____no_output_____" ] ], [ [ "s=\"text\"\n# s[0] = \"a\" # This is not legal in Python", "_____no_output_____" ] ], [ [ "Because of the immutability of the strings, the string methods work by returning a value; they don't have any side-effects. In the rest of this section we briefly describe several of these methods. The methods are here divided into five groups.", "_____no_output_____" ], [ "### Classification of strings\nAll the following methods will take no parameters and return a truth value. An empty string will always result in `False`.\n\n* `s.isalnum()` True if all characters are letters or digits\n* `s.isalpha()` True if all characters are letters\n* `s.isdigit()` True if all characters are digits\n* `s.islower()` True if contains letters, and all are lowercase\n* `s.isupper()` True if contains letters, and all are uppercase\n* `s.isspace()` True if all characters are whitespace\n* `s.istitle()` True if uppercase in the beginning of word, elsewhere lowercase", "_____no_output_____" ], [ "### String transformations\nThe following methods do conversions between lower and uppercase characters in the string. 
All these methods return a new string.\n\n* `s.lower()` Change all letters to lowercase\n* `s.upper()` Change all letters to uppercase\n* `s.capitalize()` Change all letters to capitalcase\n* `s.title()` Change to titlecase\n* `s.swapcase()` Change all uppercase letters to lowercase, and vice versa\n\n\n\n\n\n", "_____no_output_____" ], [ "### Searching for substrings\nAll the following methods get the wanted substring as the\nparameter, except the replace method, which also gets the\nreplacing string as a parameter\n\n* `s.count(substr)` Counts the number of occurences of a substring\n* `s.find(substr)` Finds index of the first occurence of a substring, or -1\n* `s.rfind(substr)` Finds index of the last occurence of a substring, or -1\n* `s.index(substr)` Like find, except ValueError is raised if not found\n* `s.rindex(substr)` Like rfind, except ValueError is raised if not found\n* `s.startswith(substr)` Returns True if string starts with a given substring\n* `s.endswith(substr)` Returns True if string ends with a given substring\n* `s.replace(substr, replacement)` Returns a string where occurences of one string\nare replaced by another\n\nKeep also in mind that the expression `\"issi\" in \"mississippi\"` returns a truth value of whether the first string occurs in the second string.\n\n\n\n\n\n\n", "_____no_output_____" ], [ "### Trimming and adjusting\n* `s.strip(x)` Removes leading and trailing whitespace by default, or characters found in string x\n* `s.lstrip(x)` Same as strip but only leading characters are removed\n* `s.rstrip(x)` Same as strip but only trailing characters are removed\n* `s.ljust(n)` Left justifies string inside a field of length n\n* `s.rjust(n)` Right justifies string inside a field of length n\n* `s.center(n)` Centers string inside a field of length n", "_____no_output_____" ], [ "An example of using the `center` method and string repetition:", "_____no_output_____" ] ], [ [ "L=[1,3,5,7,9,1,1]\nprint(\"-\"*11)\nfor i in L:\n s=\"*\"*i \n print(f\"|{s.center(9)}|\")\nprint(\"-\"*11)", "-----------\n| * |\n| *** |\n| ***** |\n| ******* |\n|*********|\n| * |\n| * |\n-----------\n" ] ], [ [ "### Joining and splitting\nThe `join(seq)` method joins the strings of the sequence `seq`. The string itself is used as a delimitter. An example:", "_____no_output_____" ] ], [ [ "\"--\".join([\"abc\", \"def\", \"ghi\"])", "_____no_output_____" ], [ "L=[str(x) for x in range(100)]\ns=\"\"\nfor x in L:\n s += \" \" + x # Avoid doing this, it creates a new string at every iteration\nprint(s) # Note the redundant initial space\nprint(\" \".join(L)) # This is the correct way of building a string out of smaller strings", " 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99\n0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99\n" ] ], [ [ "<div class=\"alert alert-warning\">If you want to build a string out of smaller strings, then\nfirst put the small strings into a list, and then use the `join` method to catenate the pieces together. It is much more efficient this way. 
Use the <code>+</code> catenation operator only if you have very few short strings that you want to catenate.</div>", "_____no_output_____" ], [ "Below we can see that for our small (100 element) list, execution is an order of magnitude faster using the `join` method.", "_____no_output_____" ] ], [ [ "%%timeit\ns=\"\"\nfor x in L:\n s += \" \" + x", "11.8 µs ± 114 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)\n" ], [ "%%timeit\ns = \" \".join(L)", "1.25 µs ± 6.42 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)\n" ] ], [ [ "`%%timeit` is an IPython [cell magic](https://ipython.readthedocs.io/en/stable/interactive/magics.html) command, that is useful for timing execution in notebooks.", "_____no_output_____" ], [ "The method `split(sep=None)` divides a string into pieces that are separated by the string `sep`. The pieces are returned in a list. For instance, the call `'abc--def--ghi'.split(\"--\")` will result in", "_____no_output_____" ] ], [ [ "'abc--def--ghi'.split(\"--\")", "_____no_output_____" ] ], [ [ "If no parameters are given to the `split` method, then it splits at any sequence of white space.", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 18 (acronyms)</div>\n\nWrite function `acronyms` which takes a string as a parameter and returns a list of acronyms. A word is an acronym if it has length at least two, and all its characters are in uppercase. Before acronym detection, delete punctuation with the `strip` method.\n\nTest this function in the `main` function with the following call:\n```python\nprint(acronyms(\"\"\"For the purposes of the EU General Data Protection Regulation (GDPR), the controller of your personal information is International Business Machines Corporation (IBM Corp.), 1 New Orchard Road, Armonk, New York, United States, unless indicated otherwise. Where IBM Corp. or a subsidiary it controls (not established in the European Economic Area (EEA)) is required to appoint a legal representative in the EEA, the representative for all such cases is IBM United Kingdom Limited, PO Box 41, North Harbour, Portsmouth, Hampshire, United Kingdom PO6 3AU.\"\"\"))\n```\n\nThis should return\n```['EU', 'GDPR', 'IBM', 'IBM', 'EEA', 'EEA', 'IBM', 'PO', 'PO6', '3AU']```\n<hr/>", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 19 (sum equation)</div>\n\nWrite a function `sum_equation` which takes a list of positive integers as parameters and returns a string with an equation of the sum of the elements.\n\nExample:\n`sum_equation([1,5,7])`\nreturns\n`\"1 + 5 + 7 = 13\"`\nObserve, the spaces should be exactly as shown above. For an empty list the function should return the string \"0 = 0\".\n<hr/>", "_____no_output_____" ], [ "## Modules", "_____no_output_____" ], [ "To ease management of large programs, software is divided\ninto smaller pieces. In Python these pieces are called *modules*.\nA module should be a unit that is as independent from other\nmodules as possible.\nEach file in Python corresponds to a module.\nModules can contain classes, objects, functions, ...\nFor example, functions to handle regular expressions are in\nmodule `re`\n\nThe standard library of Python consists of hundreds of\nmodules. Some of the most common standard modules include\n\n* `re`\n* `math`\n* `random`\n* `os`\n* `sys`\n\nAny file with extension `.py` that contains Python source code\nis a module. 
So, no special notation is needed to create a module.", "_____no_output_____" ], [ "### Using modules\n\nLet’s say that we need to use the cosine function.\nThis function, and many other mathematical functions are\nlocated in the `math` module.\nTo tell Python that we want to access the features offered by\nthis module, we can give the statement `import math`.\nNow the module is loaded into memory.\nWe can now call the function like this:\n```python\nmath.cos(0)\n1.0\n```\n\nNote that we need to include the module name where the `cos`\nfunction is found.\nThis is because other modules may have a function (or other\nattribute of a module) with the same name.\nThis usage of different namespace for each module prevents\nname clashes. For example, functions `gzip.open`, `os.open` are not to be confused\nwith the builtin `open` function.", "_____no_output_____" ], [ "### Breaking the namespace\n\nIf the cosine is needed a lot, then it might be tedious to\nalways specify the namespace, especially if the name of the\nnamespace/module is long.\nFor these cases there is another way of importing modules.\nBring a name to the current scope with\n`from math import cos` statement.\nNow we can use it without the namespace specifier: `cos(1)`.\n\nSeveral names can be imported to the current scope with\n`from math import name1, name2, ...`\nOr even all names of the module with `from math import *`\nThe last form is sensible only in few cases, normally it just\nconfuses things since the user may have no idea what names\nwill be imported.", "_____no_output_____" ], [ "### Module lookup\n\nWhen we try to import a module `mod` with the import\nstatement, the lookup proceeds in the following order:\n\n* Check if it is a builtin module\n* Check if the file `mod.py` is found in any of the folders in\nthe list `sys.path`. The first item in this list is the current\nfolder\n\nWhen Python is started, the `sys.path` list is initialised with\nthe contents of the `PYTHONPATH` environment variable", "_____no_output_____" ], [ "### Module hierarchy\n\nThe standard library contains hundreds of modules.\nHence, it is hard to comprehend what the library includes.\nThe modules therefore need to be organised somehow.\nIn Python the modules can be organised into hierarchies using\n*packages*.\nA package is a module that can contain other packages and\nmodules.\nFor example, the `numpy` package contains subpackages `core`,\n`distutils`, `f2py`, `fft`, `lib`, `linalg`, `ma`, `numarray`, `oldnumeric`,\n`random`, and `testing`.\nAnd package `numpy.linalg` in turn contains modules `linalg`,\n`lapack_lite` and `info`.", "_____no_output_____" ], [ "### Importing from packages\n\nThe statement `import numpy` imports the top-level package `numpy`\nand its subpackages. \n\n* `import numpy.linalg` imports the subpackage only, and\n* `import numpy.linalg.linalg` imports the module only\n\nIf we want to skip the long namespace specification, we can\nuse the form\n\n```python\nfrom numpy.linalg import linalg\n```\n\nor\n\n```python\nfrom numpy.linalg import linalg as lin\n```\n\nif we want to use a different name for the module. 
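\n\nAs a quick illustrative sketch (an addition to the original text; it assumes NumPy is installed with the `numpy.linalg.linalg` module layout described in this section, and it uses the `det` function that is discussed next), the alias can then be used like any other module name:\n\n```python\nfrom numpy.linalg import linalg as lin  # alias the nested linalg module to a shorter name\n\nmatrix = [[1, 2], [3, 4]]\nprint(lin.det(matrix))  # determinant 1*4 - 2*3 = -2 (printed as a float, up to rounding)\n```\n\n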
The following command imports the function `det` (computes the determinant of a matrix) from the module linalg, which is contained in a subpackage linalg, which belongs to package numpy:\n```python\nfrom numpy.linalg.linalg import det\n```\n\nHad we only imported the top-level package `numpy` we would have to refer to the `det` function with the full name `numpy.linalg.linalg.det`.\n\nHere's a recap of the module hierarchy:\n\n```\nnumpy package\n .\nlinalg subpackage\n .\nlinalg module\n .\n det function\n```", "_____no_output_____" ], [ "### Correspondence between folder and module hierarchies\n\nThe packages are represented by folders in the filesystem.\nThe folder should contain a file named `__init__.py` that\nmakes up the package body. This handles the initialisation of\nthe package.\nThe folder may contain also further folders\n(subpackages) or Python files (normal modules).\n\n```\na/\n __init__.py\n b.py\n c/\n __init__.py\n d.py\n e.py\n```\n![package.svg](https://github.com/csmastersUH/data_analysis_with_python_2020/blob/master/package.svg?raw=1)", "_____no_output_____" ], [ "### Contents of a module\n\nSuppose we have a module named `mod.py`.\nAll the assignments, class definitions with the `class` statement,\nand function definitions with `def` statement will create new\nattributes to this module.\nLet’s import this module from another Python file using the\n`import mod` statement.\nAfter the import we can access the attributes of the module\nobject using the normal dot notation: `mod.f()`,\n`mod.myclass()`, `mod.a`, etc.\nNote that Python doesn’t really have global variables that are\nvisible to all modules. All variables belong to some module\nnamespace.\n\nOne can query the attributes of an object using the `dir` function. With no\nparameters, it shows the attributes of the current module. Try executing `dir()` in\nan IPython shell or in a Jupyter notebook! After that, define the following attributes, and try running `dir()`\nagain:\n\n```python\na=5\ndef f(i):\n return i + 1\n```\n\nThe above definitions created a *data attribute* called `a` and a *function attribute* called `f`.\nWe will talk more about attributes next week when we will talk about objects.\n\nJust like other objects, the module object contains its\nattributes in the dictionary `modulename.__dict__`\nUsually a module contains at least the attributes `__name__` and\n`__file__`. Other common attributes are `__version__`,\n`__author__` and `__doc__` , which contains the docstring of the\nmodule.\nIf the first statement of a file is a string, this is taken as the\ndocstring for that module. Note that the docstring of the module really must be the first non-empty non-comment line.\nThe attribute `__file__` is always the filename of the module.\n\nThe module attribute `__name__` has value `“__main__”` if we in are the main program,\notherwise some other module has imported us and name\nequals `__file__`.", "_____no_output_____" ], [ "In Python it is possible to put statements on the top-level of our module `mod` so that they don't belong to any function. For instance like this:\n\n```python\nfor _ in range(3):\n print(\"Hello\")\n```\n\nBut if somebody imports our module with `import mod`, then all the statements at the top-level will be executed. This may be surprising to the user who imported the module. The user will usually say, explicitly when he/she wants to execute some code from the imported module.\n\nIt is better style to put these statements inside some function. 
If they don't fit in any other function, then you can use, for example, the function named `main`, like this:\n\n```python\ndef main():\n for _ in range(3):\n print(\"Hello\")\n\nif __name__ == \"__main__\": # We call main only when this module is not being imported, but directly executed\n main() # for example with 'python3 mod.py'\n```\n\nYou probably have seen this mechanism used in the exercise stubs.\nNote that in Python the `main` has no special meaning, it is just our convention to use it here.\nNow if somebody imports `mod`, the `for` loop won't be automatically executed. If we want, we can call it explicitly with `mod.main()`. ", "_____no_output_____" ], [ "```python\nfor _ in range(3):\n print(\"Hello\")\n```", "_____no_output_____" ], [ "#### <div class=\"alert alert-info\">Exercise 20 (usemodule)</div>\n\nCreate your own module as file `triangle.py` in the `src` folder. The module should contain two functions:\n\n* `hypothenuse` which returns the length of the hypothenuse when given the lengths of two other sides of a right-angled triangle\n* `area` which returns the area of the right-angled triangle, when two sides, perpendicular to each other, are given as parameters.\n\nMake sure both the functions and the module have descriptive docstrings. Add also the `__version__` and `__author__` attributes to the module. Call both your functions from the main function (which is in file `usemodule.py`).", "_____no_output_____" ], [ "## Summary\n\n* We have learned that Python's code blocks are denoted by consistent indenting, with spaces or tabs, unlike in many other languages\n* Python's `for` loops goes through all the elements of a container without the need of worrying about the positions (indices) of the elements in the container\n* More generally, an iterable is an object whose elements can be gone through one by one using a `for` loop. Such as `range(1,7)`\n* Python has dynamic typing: the type of a name is known only when we run the program. The type might not be fixed, that is, if a name is created, for example, in a loop, then its type might change at each iteration.\n* Visibility of a name: a name that refers to a variable can disappear in the middle of a code block, if a `del` statement is issued!\n* Python is good at string handling, but remember that if you want to concatenate large number of strings, use the `join` method. Concatenating by the `+` operator multiple times is very inefficient\n* Several useful tools exist to process sequences: `map`, `reduce`, `filter`, `zip`, `enumerate`, and `range`. The unnamed lambda function can be helpful with these tools. Note that these tools (except the `reduce`) don't return lists, but iterables, for efficiency reasons: Most often we don't want to store the result from these tools to a container (such as a list), we may only want to iterate through the result!", "_____no_output_____" ], [ "<!--NAVIGATION-->\n\n\n<a href=\"https://colab.research.google.com/github/saskeli/x/blob/master/basics.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\"></a>\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4af81618c92d72b694d6eef6c467b20b2d6e8a78
96,264
ipynb
Jupyter Notebook
tutorials/Tutorial - Canalization - Node Schematas.ipynb
leonardogian/CANA
f070d6da5358f17f55eb80d6bc9185afaee91142
[ "MIT" ]
1
2021-01-08T22:04:05.000Z
2021-01-08T22:04:05.000Z
tutorials/Tutorial - Canalization - Node Schematas.ipynb
leonardogian/CANA
f070d6da5358f17f55eb80d6bc9185afaee91142
[ "MIT" ]
null
null
null
tutorials/Tutorial - Canalization - Node Schematas.ipynb
leonardogian/CANA
f070d6da5358f17f55eb80d6bc9185afaee91142
[ "MIT" ]
null
null
null
51.041357
178
0.462167
[ [ [ "# Tutorial of Node Schematas - PI & TwoSymbol\nVisualization of schematas for simple boolean nodes (automatas)", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "from __future__ import division\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import Image, display\nimport cana\nfrom cana.datasets.bools import *\nfrom cana.drawing.canalizing_map import draw_canalizing_map_graphviz", "_____no_output_____" ], [ "n = OR()\nprint( n)\nprint( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False)))\nprint( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False)))\nprint( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False)))\nprint()\n\nprint( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint()\n\ndfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts')\ndisplay(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-'))\ndraw_canalizing_map_graphviz(n.canalizing_map())", "<BNode(name='OR', k=2, inputs=[i1,i2], state=0, outputs='[0,1,1,1]' constant=False)>\nk_r: 0.75 - 0.75\nk_e: 1.25 - 1.25\nk_s: 2.00 - 2.00\n\nk_r: [0.5, 0.5] (upper)\nk_e: [0.5, 0.5] (upper)\n\n" ], [ "n = CONTRADICTION()\nn.name = 'Con'\nprint( n)\nprint( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False)))\nprint( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False)))\nprint( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False)))\nprint()\n\nprint( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint \n\ndfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts')\ndisplay(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-'))\ndraw_canalizing_map_graphviz(n.canalizing_map())", "<BNode(name='Con', k=2, inputs=[i1,i2], state=0, outputs='[0,0,0,0]' constant=True)>\nk_r: 2.00 - 2.00\nk_e: 0.00 - 0.00\nk_s: 2.00 - 2.00\n\nk_r: [1.0, 1.0] (upper)\nk_e: [1.0, 1.0] (upper)\n" ], [ "n = XOR()\nprint( n)\nprint( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False)))\nprint( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False)))\nprint( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False)))\nprint()\n\nprint( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint \nfor input in [0,1]:\n for ts,per,sms in n._two_symbols[input]:\n print( 'TS: %s | PermIdx: %s | SameIdx: %s' % (ts, per,sms))\ndfLUT, dfPI, dfTS = n.look_up_table(), 
n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts')\ndisplay(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-'))\ndraw_canalizing_map_graphviz(n.canalizing_map())", "<BNode(name='XOR', k=2, inputs=[i1,i2], state=0, outputs='[0,1,1,0]' constant=False)>\nk_r: 0.00 - 0.00\nk_e: 2.00 - 2.00\nk_s: 2.00 - 2.00\n\nk_r: [0.0, 0.0] (upper)\nk_e: [0.0, 0.0] (upper)\nTS: 11 | PermIdx: [] | SameIdx: [[0, 1]]\nTS: 00 | PermIdx: [] | SameIdx: [[0, 1]]\nTS: 10 | PermIdx: [[0, 1]] | SameIdx: []\n" ], [ "n = AND()\nprint( n)\nprint( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False)))\nprint( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False)))\nprint( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False)))\nprint()\n\nprint( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint \n\ndfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts')\ndisplay(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-'))\ndraw_canalizing_map_graphviz(n.canalizing_map())", "<BNode(name='AND', k=2, inputs=[i1,i2], state=0, outputs='[0,0,0,1]' constant=False)>\nk_r: 0.75 - 0.75\nk_e: 1.25 - 1.25\nk_s: 2.00 - 2.00\n\nk_r: [0.5, 0.5] (upper)\nk_e: [0.5, 0.5] (upper)\n" ], [ "n = COPYx1()\nn.name = 'CPx1'\nprint( n)\nprint( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False)))\nprint( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False)))\nprint('k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False)))\nprint\n\nprint( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint()\n\ndfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts')\ndisplay(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-'))\ndraw_canalizing_map_graphviz(n.canalizing_map())", "<BNode(name='CPx1', k=2, inputs=[i1,i2], state=0, outputs='[0,0,1,1]' constant=False)>\nk_r: 1.00 - 1.00\nk_e: 1.00 - 1.00\nk_s: 0.00 - 0.00\n\nk_r: [0.0, 1.0] (upper)\nk_e: [0.0, 1.0] (upper)\n\n" ], [ "n = RULE90()\nn.name = 'R90'\nprint( n)\nprint( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False)))\nprint( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False)))\nprint( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False)))\nprint()\n\nprint( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint()\n\ndfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), 
n.schemata_look_up_table(type='ts')\ndisplay(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-'))\ndraw_canalizing_map_graphviz(n.canalizing_map())", "<BNode(name='R90', k=3, inputs=[i1,i2,i3], state=0, outputs='[0,1,0,1,1,0,1,0]' constant=False)>\nk_r: 1.00 - 1.00\nk_e: 2.00 - 2.00\nk_s: 1.33 - 1.33\n\nk_r: [0.0, 1.0, 0.0] (upper)\nk_e: [0.0, 1.0, 0.0] (upper)\n\n" ], [ "n = RULE110()\nn.name = 'R110'\nprint( n)\nprint( 'k_r: %.2f - %.2f' % (n.input_redundancy(mode='node',bound='upper',norm=False), n.input_redundancy(mode='node',bound='lower',norm=False)))\nprint( 'k_e: %.2f - %.2f' % (n.effective_connectivity(mode='node',bound='upper',norm=False), n.effective_connectivity(mode='node',bound='lower',norm=False)))\nprint( 'k_s: %.2f - %.2f' % (n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False)))\nprint()\n\nprint( 'k_r: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint( 'k_e: %s (upper)' % n.input_redundancy(mode='input',bound='upper'))\nprint()\n\ndfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts')\ndisplay(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-'))\ndraw_canalizing_map_graphviz(n.canalizing_map())", "<BNode(name='R110', k=3, inputs=[i1,i2,i3], state=0, outputs='[0,1,1,1,0,1,1,0]' constant=False)>\nk_r: 0.88 - 0.88\nk_e: 2.12 - 2.12\nk_s: 1.96 - 1.04\n\nk_r: [0.75, 0.25, 0.25] (upper)\nk_e: [0.75, 0.25, 0.25] (upper)\n\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af81c15c2a42401f534570297828c078b8caf79
1,661
ipynb
Jupyter Notebook
pset_pandas1_wine_reviews/selecting_data/solutions/nb/p1.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
5
2019-04-08T20:05:37.000Z
2019-12-04T20:48:45.000Z
pset_pandas1_wine_reviews/selecting_data/solutions/nb/p1.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
8
2019-04-15T15:16:05.000Z
2022-02-12T10:33:32.000Z
pset_pandas1_wine_reviews/selecting_data/solutions/nb/p1.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
2
2019-04-10T00:14:42.000Z
2020-02-26T20:35:21.000Z
37.75
83
0.334136
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4af8414b863ffc6ac4aa4bf075b46f973b9218e5
6,494
ipynb
Jupyter Notebook
minilabs/simulate-and-generate-empirical-distributions-in-python/empirical_distributions_minilab.ipynb
ebaccay/inferentialthinking
9f839c76062169b9de498c1e044f668e7517ee94
[ "MIT" ]
1
2022-02-24T20:32:17.000Z
2022-02-24T20:32:17.000Z
minilabs/simulate-and-generate-empirical-distributions-in-python/empirical_distributions_minilab.ipynb
ebaccay/inferentialthinking
9f839c76062169b9de498c1e044f668e7517ee94
[ "MIT" ]
null
null
null
minilabs/simulate-and-generate-empirical-distributions-in-python/empirical_distributions_minilab.ipynb
ebaccay/inferentialthinking
9f839c76062169b9de498c1e044f668e7517ee94
[ "MIT" ]
3
2021-03-04T06:44:47.000Z
2021-05-05T06:00:33.000Z
35.102703
639
0.606252
[ [ [ "# Simulate and Generate Empirical Distributions in Python\n## Mini-Lab: Simulations, Empirical Distributions, Sampling", "_____no_output_____" ], [ "Welcome to your next mini-lab! Go ahead an run the following cell to get started. You can do that by clicking on the cell and then clickcing `Run` on the top bar. You can also just press `Shift` + `Enter` to run the cell.", "_____no_output_____" ] ], [ [ "from datascience import *\nimport numpy as np\nimport random\nimport otter\n\ngrader = otter.Notebook(\"m6_l1_tests\")", "_____no_output_____" ] ], [ [ "Let's continue our analysis of COVID-19 data with the same false negative and false positive values of 10% and 5%. For the first task, let's try and create a sample population with 10,000 people. Let's say that 20% of this population has COVID-19. Replace the `...` in function below to create this sample population. The `create_population` function takes in an input `n` and returns a table with `n` rows. These rows can either have `positive` or `negative` as their value. These values indicate whether or not an individual has COVID-19.\n\nFor random number generation, feel free to look up the [NumPy documentation](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.random.html) or the [Python random documentation](https://docs.python.org/3.8/library/random.html).", "_____no_output_____" ] ], [ [ "def create_population(n):\n test_results = ...\n for ...:\n random_num = ...\n if ...:\n disease_result = ...\n else:\n disease_result = ...\n test_results = np.append(test_results, disease_result)\n \n return Table().with_column(\"COVID-19\", test_results)\n\ncovid_population = create_population(...)\ncovid_population.show(5)", "_____no_output_____" ], [ "# There is a chance that this test may fail even with a correct solution due to randomness! \n# Run the above cell again and run the grader again if you think this is the case.\ngrader.check(\"q1\")", "_____no_output_____" ] ], [ [ "Given this population, let's go ahead and randomly test 1000 members. Complete `test_population` below by replacing the `...` with functional code. This function takes in a `population` which is a `datascience` table and a number `n`, where `n` is the number of people that we are testing. Inside the function, we add a column to this table called `Test Results` which contains the test result for each person in the sample based on the false negative and false positve rates given earlier. There is another function called `test_individual` that simplifies `test_population`. You will use `test_individual` within `test_population`.", "_____no_output_____" ] ], [ [ "def test_population(population, n):\n population = ...\n test_results = population.apply(test_individuals, \"COVID-19\")\n population = population.with_column(...)\n return population\n\n\ndef test_individuals(individual):\n random_num = ...\n if individual == \"positive\":\n if ...:\n return ...\n else:\n return ...\n else:\n if ...:\n return ...\n else:\n return ...\n \n\ncovid_sample = ...\ncovid_sample.show(5)", "_____no_output_____" ], [ "# There is a chance that this test may fail even with a correct solution due to randomness! \n# Run the above cell again and run the grader again if you think this is the case.\ngrader.check(\"q2\")", "_____no_output_____" ] ], [ [ "Now that we've simulated a population and sampled this population, let's take a look at our results. 
We'll pivot first by the `COVID-19` column and then by the `Test Results` column to look at how well our COVID-19 test does using \"real-life\" figures.", "_____no_output_____" ] ], [ [ "covid_sample.pivot(\"COVID-19\", \"Test Results\")", "_____no_output_____" ] ], [ [ "You'll see that though our test correctly identifies the disease most of the time, there are still some instances where our test gets it wrong. It is impossible for a test to have both a 0% false negative rate and a 0% false positive rate. In the case of this disease and testing, which should we prioritize? Driving down the false positive rate or driving down the false negative rate? Is there a reason why one should be prioritized over the other? There is no simple answer to these questions, and as data scientists, we'll have to grapple with these issues ourselves and navigate the complex web we call life.", "_____no_output_____" ], [ "Congratulations on finishing! Run the next cell to make sure that you passed all of the test cases.", "_____no_output_____" ] ], [ [ "grader.check_all()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4af843dac3ed857379e2986a2a480c8c1b023e32
14,872
ipynb
Jupyter Notebook
Untitled.ipynb
nassibehgolizadeh/persian-text-GAN
6e622a4a7c3d06903d001040aadb657281a23cd3
[ "MIT" ]
null
null
null
Untitled.ipynb
nassibehgolizadeh/persian-text-GAN
6e622a4a7c3d06903d001040aadb657281a23cd3
[ "MIT" ]
null
null
null
Untitled.ipynb
nassibehgolizadeh/persian-text-GAN
6e622a4a7c3d06903d001040aadb657281a23cd3
[ "MIT" ]
null
null
null
33.122494
366
0.537386
[ [ [ "import getopt\nimport sys\n\nfrom colorama import Fore\n\nfrom models.seqgan.Seqgan import Seqgan", "_____no_output_____" ], [ "def set_gan(gan_name):\n gans = dict()\n gans['seqgan'] = Seqgan\n# gans['gsgan'] = Gsgan\n# gans['textgan'] = TextganMmd\n# gans['leakgan'] = Leakgan\n# gans['rankgan'] = Rankgan\n# gans['maligan'] = Maligan\n# gans['mle'] = Mle\n try:\n Gan = gans[gan_name.lower()]\n gan = Gan()\n gan.vocab_size = 5000\n gan.generate_num = 10000\n return gan\n except KeyError:\n print(Fore.RED + 'Unsupported GAN type: ' + gan_name + Fore.RESET)\n sys.exit(-2)", "_____no_output_____" ], [ "gan=set_gan('Seqgan')", "WARNING: Logging before flag parsing goes to stderr.\nW0117 23:19:41.375349 15292 module_wrapper.py:137] From D:\\Texygen-master\\utils\\utils.py:27: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n\n" ], [ "def set_training(gan, training_method):\n try:\n if training_method == 'oracle':\n gan_func = gan.train_oracle\n elif training_method == 'cfg':\n gan_func = gan.train_cfg\n elif training_method == 'real':\n gan_func = gan.train_real\n else:\n print(Fore.RED + 'Unsupported training setting: ' + training_method + Fore.RESET)\n sys.exit(-3)\n except AttributeError:\n print(Fore.RED + 'Unsupported training setting: ' + training_method + Fore.RESET)\n sys.exit(-3)\n return gan_func\n", "_____no_output_____" ], [ "gan_func=set_training(gan,'oracle')", "_____no_output_____" ], [ "def parse_cmd(argv):\n try:\n opts, args = getopt.getopt(argv, \"hg:t:d:\")\n\n opt_arg = dict(opts)\n if '-h' in opt_arg.keys():\n print('usage: python main.py -g <gan_type>')\n print(' python main.py -g <gan_type> -t <train_type>')\n print(' python main.py -g <gan_type> -t realdata -d <your_data_location>')\n sys.exit(0)\n if not '-g' in opt_arg.keys():\n print('unspecified GAN type, use MLE training only...')\n gan = set_gan('mle')\n else:\n gan = set_gan(opt_arg['-g'])\n if not '-t' in opt_arg.keys():\n gan.train_oracle()\n else:\n gan_func = set_training(gan, opt_arg['-t'])\n if opt_arg['-t'] == 'real' and '-d' in opt_arg.keys():\n gan_func(opt_arg['-d'])\n else:\n gan_func()\n except getopt.GetoptError:\n print('invalid arguments!')\n print('`python main.py -h` for help')\n sys.exit(-1)\n pass\n", "_____no_output_____" ], [ "gan.train_oracle()", "W0117 23:20:25.812914 15292 deprecation.py:323] From D:\\Texygen-master\\utils\\oracle\\OracleLstm.py:48: multinomial (from tensorflow.python.ops.random_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.random.categorical` instead.\nW0117 23:20:25.891034 15292 deprecation.py:323] From D:\\Texygen-master\\utils\\oracle\\OracleLstm.py:94: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.cast` instead.\nW0117 23:20:26.094143 15292 module_wrapper.py:137] From D:\\Texygen-master\\models\\seqgan\\SeqganGenerator.py:212: The name tf.train.AdamOptimizer is deprecated. 
Please use tf.compat.v1.train.AdamOptimizer instead.\n\nW0117 23:20:26.125395 15292 deprecation.py:323] From C:\\Users\\MAHDI\\Anaconda3\\lib\\site-packages\\tensorflow_core\\python\\ops\\math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nW0117 23:20:27.031578 15292 module_wrapper.py:137] From D:\\Texygen-master\\models\\seqgan\\SeqganDiscriminator.py:96: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.\n\nW0117 23:20:27.094071 15292 deprecation.py:506] From D:\\Texygen-master\\models\\seqgan\\SeqganDiscriminator.py:115: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\nW0117 23:20:27.109720 15292 deprecation.py:323] From D:\\Texygen-master\\models\\seqgan\\SeqganDiscriminator.py:129: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\n\nFuture major versions of TensorFlow will allow gradients to flow\ninto the labels input on backprop by default.\n\nSee `tf.nn.softmax_cross_entropy_with_logits_v2`.\n\n" ], [ "gan.train_oracle.summary()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af84a9de337bb33cb1b14a532a0bee53b6edef4
5,646
ipynb
Jupyter Notebook
.ipynb_checkpoints/Move inorganic to inorganic-checkpoint.ipynb
CalebBell/chemical-metadata
498aff377382d0b8b26ede96d8985c8dd9fd4ca3
[ "MIT" ]
17
2017-10-15T02:30:59.000Z
2022-03-20T18:11:10.000Z
.ipynb_checkpoints/Move inorganic to inorganic-checkpoint.ipynb
CalebBell/chemical-metadata
498aff377382d0b8b26ede96d8985c8dd9fd4ca3
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Move inorganic to inorganic-checkpoint.ipynb
CalebBell/chemical-metadata
498aff377382d0b8b26ede96d8985c8dd9fd4ca3
[ "MIT" ]
2
2020-03-18T03:35:01.000Z
2021-09-14T11:03:17.000Z
44.456693
107
0.655508
[ [ [ "from thermo import *\nimport os\n\ndef parse_formula_CAS(line):\n values = line.rstrip('\\n').split('\\t')\n (pubchemid, CAS, formula, MW, smiles, InChI, InChI_key, iupac_name, common_name) = values[0:9]\n others = values[9:]\n return formula, CAS\n\ndef to_move(line):\n formula, CAS = parse_formula_CAS(line)\n if 'H' in formula and 'C' in formula:\n return False, CAS\n return True, CAS\n\norganic_dir = '/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/'\ninorganic_dir = '/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-inorganic/'\n\nf = open(organic_dir+'db_20171008.tsv')\nfor line in f:\n inorganic, CAS = to_move(line)\n if inorganic:\n mol = os.path.join(organic_dir, 'mol', CAS + '.mol')\n pdf = os.path.join(organic_dir, 'pdf', CAS + '.pdf')\n try:\n os.rename(mol, os.path.join(inorganic_dir, 'mol', CAS + '.mol'))\n except:\n pass\n try:\n os.rename(pdf, os.path.join(inorganic_dir, 'pdf', CAS + '.pdf'))\n except:\n pass\n \n", "/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/10043-01-3.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/12136-45-7.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/1313-82-2.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/1314-80-3.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7632-04-4.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7722-88-5.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7758-29-4.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7784-30-7.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7790-31-0.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/13598-36-2.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7783-07-5.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/360-89-4.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/1309-37-1.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/6303-21-5.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7803-62-5.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7784-42-1.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7782-65-2.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/14808-60-7.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7632-51-1.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7803-51-2.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/10102-44-0.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/1309-42-8.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/1317-43-7.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7783-58-6.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7789-75-5.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7550-45-0.mol\n/home/caleb/Documents/University/CHE3123/chemical-meta
data/scifinder-organic/mol/12135-76-1.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/10043-52-4.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/10377-58-9.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/7705-07-9.mol\n/home/caleb/Documents/University/CHE3123/chemical-metadata/scifinder-organic/mol/21645-51-2.mol\n" ], [ "# dir(os)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
4af85b3f36d2a1f23b2802c9ba8d06dda4e59211
6,813
ipynb
Jupyter Notebook
ch_01/checking_your_setup.ipynb
batshell/Hands-On-Data-Analysis-with-Pandas
ecddbe89c6fbefad2ff736117fd630e6a5569b84
[ "MIT" ]
null
null
null
ch_01/checking_your_setup.ipynb
batshell/Hands-On-Data-Analysis-with-Pandas
ecddbe89c6fbefad2ff736117fd630e6a5569b84
[ "MIT" ]
null
null
null
ch_01/checking_your_setup.ipynb
batshell/Hands-On-Data-Analysis-with-Pandas
ecddbe89c6fbefad2ff736117fd630e6a5569b84
[ "MIT" ]
null
null
null
54.504
1,376
0.623367
[ [ [ "# Checking your setup\nRun through this notebook to make sure your environment is properly setup. Be sure to launch Jupyter from inside the virtual environment.", "_____no_output_____" ] ], [ [ "from check_environment import run_checks\nrun_checks()", "Using Python in D:\\Sysfiles\\Users\\Shailendra\\anaconda3\\envs\\mlenv:\n\u001b[42m[ OK ]\u001b[0m Python is version 3.8.11 (default, Aug 6 2021, 09:57:55) [MSC v.1916 64 bit (AMD64)]\n\n\u001b[41m[FAIL]\u001b[0m graphviz version 0.14.1 is required, but 0.17 installed.\n\u001b[41m[FAIL]\u001b[0m imblearn version 0.7.0 is required, but 0.4.3 installed.\n\u001b[41m[FAIL]\u001b[0m ipympl version 0.6.2 is required, but 0.7.0 installed.\n\u001b[41m[FAIL]\u001b[0m jupyterlab version 3.0.4 is required, but 3.1.7 installed.\n\u001b[41m[FAIL]\u001b[0m matplotlib version 3.3.2 is required, but 3.4.2 installed.\n" ] ], [ [ "*Note: Adapted from Andreas Mueller's [`check_env.ipynb` notebook](https://github.com/amueller/ml-workshop-1-of-4/blob/master/check_env.ipynb).*\n\n<hr>\n<div style=\"overflow: hidden; margin-bottom: 10px;\">\n <div style=\"float: left;\">\n <a href=\"./introduction_to_data_analysis.ipynb\">\n <button>&#8592; Introduction to Data Analysis</button>\n </a>\n </div>\n <div style=\"float: right;\">\n <a href=\"./python_101.ipynb\">\n <button>Python 101</button>\n </a>\n <a href=\"./exercises.ipynb\">\n <button>Exercises &#8594;</button>\n </a>\n </div>\n</div>\n<hr>", "_____no_output_____" ] ], [ [ "conda install -r requirements.txt", "\nNote: you may need to restart the kernel to use updated packages.\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4af8661786f6353b40c43b21fc29a24616e0419b
40,793
ipynb
Jupyter Notebook
keras_sgd_diabetes_dataset.ipynb
cj-asimov12/ai_neural_networks
a89e200822d9136e66ee39da6d84b7c5ded0b7e2
[ "MIT" ]
null
null
null
keras_sgd_diabetes_dataset.ipynb
cj-asimov12/ai_neural_networks
a89e200822d9136e66ee39da6d84b7c5ded0b7e2
[ "MIT" ]
null
null
null
keras_sgd_diabetes_dataset.ipynb
cj-asimov12/ai_neural_networks
a89e200822d9136e66ee39da6d84b7c5ded0b7e2
[ "MIT" ]
null
null
null
38.375353
716
0.357463
[ [ [ "## Keras Assignment - Diabetes Dataset ", "_____no_output_____" ] ], [ [ "\"\"\"\nA. Build a sequential model using Keras on top of this Diabetes dataset to find out if the\npatient has diabetes or not, using ‘Pregnancies’, ‘Glucose’ & ‘BloodPressure’ as\nindependent columns.\na. This model should have 1 hidden layer with 8 nodes\nb. Use Stochastic Gradient as the optimization algorithm\nc. Fit the model, with number of epochs to be 100 and batch size to be 10\nB. Build another sequential model where ‘Outcome’ is the dependent variable and all\nother columns are predictors.\na. This model should have 3 hidden layers with 16 nodes in each layer\nb. Use ‘adam’ as the optimization algorithm\nc. Fit the model, with number of epochs to be 150 and batch size to be 10\n\"\"\"", "_____no_output_____" ], [ "import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense", "_____no_output_____" ], [ "diabetes = pd.read_csv(\"C:\\\\Users\\\\black\\\\Desktop\\\\ai_py\\\\datasets\\\\diabetes.csv\")", "_____no_output_____" ], [ "diabetes", "_____no_output_____" ] ], [ [ "### Selecting independent and dependent variables for model_1 ", "_____no_output_____" ] ], [ [ "X = diabetes.iloc[:,:3]\nX.shape", "_____no_output_____" ], [ "y = diabetes.iloc[:,8:9]\ny.shape", "_____no_output_____" ] ], [ [ "### Splitting data into train and test sets ", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.7, random_state=40)", "_____no_output_____" ] ], [ [ "### Building, optimizing and fitting model_1 ", "_____no_output_____" ] ], [ [ "model_1 = Sequential()\nmodel_1.add(Dense(8, activation=\"relu\", input_dim=3))\nmodel_1.add(Dense(1,activation=\"softmax\"))", "_____no_output_____" ], [ "model_1.compile(optimizer=\"sgd\", loss=\"binary_crossentropy\")", "_____no_output_____" ], [ "model_1.fit(X_train, y_train, epochs=100, batch_size=10)", "Epoch 1/100\n54/54 [==============================] - 0s 1ms/step - loss: 2.2166\nEpoch 2/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6300\nEpoch 3/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6409\nEpoch 4/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6353\nEpoch 5/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6219\nEpoch 6/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6299\nEpoch 7/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6225\nEpoch 8/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6252\nEpoch 9/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6119\nEpoch 10/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6291\nEpoch 11/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6218\nEpoch 12/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6122\nEpoch 13/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6145\nEpoch 14/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6144\nEpoch 15/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6070\nEpoch 16/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6134\nEpoch 17/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6119\nEpoch 18/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6148\nEpoch 19/100\n54/54 [==============================] - 0s 1ms/step - 
loss: 0.6139\nEpoch 20/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6116\nEpoch 21/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6048\nEpoch 22/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6090\nEpoch 23/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6043\nEpoch 24/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6069\nEpoch 25/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6045\nEpoch 26/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6007\nEpoch 27/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6031\nEpoch 28/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6028\nEpoch 29/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6013\nEpoch 30/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5969\nEpoch 31/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6053\nEpoch 32/100\n54/54 [==============================] - ETA: 0s - loss: 0.621 - 0s 1ms/step - loss: 0.6074\nEpoch 33/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5994\nEpoch 34/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6014\nEpoch 35/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5854\nEpoch 36/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5997\nEpoch 37/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5960\nEpoch 38/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5943\nEpoch 39/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5909\nEpoch 40/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6045\nEpoch 41/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5959\nEpoch 42/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5854\nEpoch 43/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5967\nEpoch 44/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5979\nEpoch 45/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5979\nEpoch 46/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5870\nEpoch 47/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5945\nEpoch 48/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5960\nEpoch 49/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5924\nEpoch 50/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6056\nEpoch 51/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5828\nEpoch 52/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5842\nEpoch 53/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5973\nEpoch 54/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5860\nEpoch 55/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5926\nEpoch 56/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5780\nEpoch 57/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5807\nEpoch 58/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5857\nEpoch 59/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5916\nEpoch 60/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5860\nEpoch 61/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5898\nEpoch 62/100\n54/54 
[==============================] - 0s 1ms/step - loss: 0.5853\nEpoch 63/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5886\nEpoch 64/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5992\nEpoch 65/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5927\nEpoch 66/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6041\nEpoch 67/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5807\nEpoch 68/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5775\nEpoch 69/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5838\nEpoch 70/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5796\nEpoch 71/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5878\nEpoch 72/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6045\nEpoch 73/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5947\nEpoch 74/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5862\nEpoch 75/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5983\nEpoch 76/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5763\nEpoch 77/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5976\nEpoch 78/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5776\nEpoch 79/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5776\nEpoch 80/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5825\nEpoch 81/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5887\nEpoch 82/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5846\nEpoch 83/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5978\nEpoch 84/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5768\nEpoch 85/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5805\nEpoch 86/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5863\nEpoch 87/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6041\nEpoch 88/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.6017\nEpoch 89/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5788\nEpoch 90/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5813\nEpoch 91/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5878\nEpoch 92/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5623\nEpoch 93/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5845\nEpoch 94/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5899\nEpoch 95/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5674\nEpoch 96/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5777\nEpoch 97/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5806\nEpoch 98/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5877\nEpoch 99/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5882\nEpoch 100/100\n54/54 [==============================] - 0s 1ms/step - loss: 0.5693\n" ] ], [ [ "### Selecting independent and dependent variables for model_2 ", "_____no_output_____" ] ], [ [ "X = diabetes.iloc[:,:8]\nX.shape", "_____no_output_____" ], [ "y = diabetes.iloc[:,8:9]\ny.shape", "_____no_output_____" ] ], [ [ "### Splitting data into train and test sets ", "_____no_output_____" ] ], [ [ "X_train, X_test, 
y_train, y_test = train_test_split(X, y, train_size=.7, random_state=40)", "_____no_output_____" ] ], [ [ "### Building, optimizing and fitting model_2 ", "_____no_output_____" ] ], [ [ "model_2 = Sequential()\nmodel_2.add(Dense(16, activation=\"relu\", input_dim=8))\nmodel_2.add(Dense(16, activation=\"relu\"))\nmodel_2.add(Dense(16, activation=\"relu\"))\nmodel_2.add(Dense(1,activation=\"softmax\"))", "_____no_output_____" ], [ "model_2.compile(optimizer=\"adam\", loss=\"binary_crossentropy\")", "_____no_output_____" ], [ "model_2.fit(X_train, y_train, epochs=150, batch_size=10)", "Epoch 1/150\n54/54 [==============================] - 0s 1ms/step - loss: 3.4974\nEpoch 2/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.7364\nEpoch 3/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6890\nEpoch 4/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6552\nEpoch 5/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6514\nEpoch 6/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6076\nEpoch 7/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6138\nEpoch 8/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6084\nEpoch 9/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6124\nEpoch 10/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6030\nEpoch 11/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6103\nEpoch 12/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6383\nEpoch 13/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6012\nEpoch 14/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6420\nEpoch 15/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6026\nEpoch 16/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5847\nEpoch 17/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5793\nEpoch 18/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5745\nEpoch 19/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5818\nEpoch 20/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5562\nEpoch 21/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5916\nEpoch 22/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.6183\nEpoch 23/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5504\nEpoch 24/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5441\nEpoch 25/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5619\nEpoch 26/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5622\nEpoch 27/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5514\nEpoch 28/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5385\nEpoch 29/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5447\nEpoch 30/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5858\nEpoch 31/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5379\nEpoch 32/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5532\nEpoch 33/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5269\nEpoch 34/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5373\nEpoch 35/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5323A: 0s - loss: 0.529\nEpoch 36/150\n54/54 
[==============================] - 0s 1ms/step - loss: 0.5267\nEpoch 37/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5176\nEpoch 38/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5652\nEpoch 39/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5138A: 0s - loss: 0.514\nEpoch 40/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5345\nEpoch 41/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5253\nEpoch 42/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5260\nEpoch 43/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5121\nEpoch 44/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5024\nEpoch 45/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5174\nEpoch 46/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5202\nEpoch 47/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5096\nEpoch 48/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5070\nEpoch 49/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5019\nEpoch 50/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5239\nEpoch 51/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5021\nEpoch 52/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5263\nEpoch 53/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5116\nEpoch 54/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5223\nEpoch 55/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5089\nEpoch 56/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5008\nEpoch 57/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4952\nEpoch 58/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5180\nEpoch 59/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5055\nEpoch 60/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4962\nEpoch 61/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4994\nEpoch 62/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5011\nEpoch 63/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4908\nEpoch 64/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4964\nEpoch 65/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4971\nEpoch 66/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4944\nEpoch 67/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5048\nEpoch 68/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4907\nEpoch 69/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4887\nEpoch 70/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4798\nEpoch 71/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5084\nEpoch 72/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.5053\nEpoch 73/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4796\nEpoch 74/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4745\nEpoch 75/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4938\nEpoch 76/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4865\nEpoch 77/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4880\nEpoch 78/150\n54/54 [==============================] - 0s 1ms/step - 
loss: 0.5038A: 0s - loss: 0.501\nEpoch 79/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4799\nEpoch 80/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4928\nEpoch 81/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4803\nEpoch 82/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4829\nEpoch 83/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4731\nEpoch 84/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4714\nEpoch 85/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4636\nEpoch 86/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4812\nEpoch 87/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4699\nEpoch 88/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4825\nEpoch 89/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4758\nEpoch 90/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4581\nEpoch 91/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4610\nEpoch 92/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4625\nEpoch 93/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4721\nEpoch 94/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4747\nEpoch 95/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4664\nEpoch 96/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4697\nEpoch 97/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4888\nEpoch 98/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4544\nEpoch 99/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4526\nEpoch 100/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4702\nEpoch 101/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4632\nEpoch 102/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4889\nEpoch 103/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4671\nEpoch 104/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4608\nEpoch 105/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4817\nEpoch 106/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4565\nEpoch 107/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4649\nEpoch 108/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4620\nEpoch 109/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4658\nEpoch 110/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4396\nEpoch 111/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4800\nEpoch 112/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4521\nEpoch 113/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4612\nEpoch 114/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4563\nEpoch 115/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4591\nEpoch 116/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4515\nEpoch 117/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4502\nEpoch 118/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4671\nEpoch 119/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4433\nEpoch 120/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4515\nEpoch 
121/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4336\nEpoch 122/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4433\nEpoch 123/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4482\nEpoch 124/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4677\nEpoch 125/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4521\nEpoch 126/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4501\nEpoch 127/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4509\nEpoch 128/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4463\nEpoch 129/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4420\nEpoch 130/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4492\nEpoch 131/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4434\nEpoch 132/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4425\nEpoch 133/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4712\nEpoch 134/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4312\nEpoch 135/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4438\nEpoch 136/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4428\nEpoch 137/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4645\nEpoch 138/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4771\nEpoch 139/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4427\nEpoch 140/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4309\nEpoch 141/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4330A: 0s - loss: 0.421\nEpoch 142/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4344\nEpoch 143/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4339\nEpoch 144/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4667\nEpoch 145/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4300\nEpoch 146/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4300\nEpoch 147/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4344\nEpoch 148/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4372\nEpoch 149/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4201\nEpoch 150/150\n54/54 [==============================] - 0s 1ms/step - loss: 0.4356\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4af8671678d812d4aecf795a4a96e347f1ed2259
44,983
ipynb
Jupyter Notebook
Python/tensorflow/DeepLearningZeroToAll/Lab11-0-cnn_basics.ipynb
statKim/TIL
3297d09023d97653773b35160794d3324b95c111
[ "MIT" ]
null
null
null
Python/tensorflow/DeepLearningZeroToAll/Lab11-0-cnn_basics.ipynb
statKim/TIL
3297d09023d97653773b35160794d3324b95c111
[ "MIT" ]
null
null
null
Python/tensorflow/DeepLearningZeroToAll/Lab11-0-cnn_basics.ipynb
statKim/TIL
3297d09023d97653773b35160794d3324b95c111
[ "MIT" ]
null
null
null
85.356736
5,972
0.813085
[ [ [ "# Lab 11 CNN(Convolutional Nueral Network)", "_____no_output_____" ], [ "## Lab11-0-cnn_basics", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "sess = tf.InteractiveSession()\nimage = np.array([[[[1],[2],[3]],\n [[4],[5],[6]], \n [[7],[8],[9]]]], dtype=np.float32)\nprint(image.shape)\nplt.imshow(image.reshape(3,3), cmap='Greys')", "(1, 3, 3, 1)\n" ] ], [ [ "## 1 filter (2,2,1,1) with padding: VALID\n\nweight.shape = 1 filter (2 , 2 , 1, 1)\n![image](https://cloud.githubusercontent.com/assets/901975/24833375/c0d9c262-1cf9-11e7-9efc-5dd6fe0fedb0.png)", "_____no_output_____" ] ], [ [ "print(\"imag:\\n\", image) # image의 형태 확인\nprint(\"image.shape\", image.shape)\nweight = tf.constant([[[[1.]],[[1.]]],\n [[[1.]],[[1.]]]])\nprint(\"weight.shape\", weight.shape)\nconv2d = tf.nn.conv2d(image, weight, strides=[1, 1, 1, 1], padding='VALID') # padding 안준 경우 => convolution 전에 비해 1씩 줄어듬\nconv2d_img = conv2d.eval()\nprint(\"conv2d_img.shape\", conv2d_img.shape) # shape이 2x2x1 로 변화\nprint(conv2d_img) # 1개의 filter를 거친 결과\nconv2d_img = np.swapaxes(conv2d_img, 0, 3) # 0과 3축을 transpose ??? 왜하는 거지?? 안해도 됨\n# print(\"conv2_img transpose\\n\", conv2d_img) # 바뀌었는지 확인\nfor i, one_img in enumerate(conv2d_img): # enumerate(리스트) : 리스트의 index와 원소를 같이 반환해줌\n print(one_img.reshape(2,2))\n plt.subplot(1,2,i+1), plt.imshow(one_img.reshape(2,2), cmap='gray') # plt.subplot(1,2, i+1) : R의 par(mfrow=c(1,2))와 같은 기능", "imag:\n [[[[ 1.]\n [ 2.]\n [ 3.]]\n\n [[ 4.]\n [ 5.]\n [ 6.]]\n\n [[ 7.]\n [ 8.]\n [ 9.]]]]\nimage.shape (1, 3, 3, 1)\nweight.shape (2, 2, 1, 1)\nconv2d_img.shape (1, 2, 2, 1)\n[[[[ 12.]\n [ 16.]]\n\n [[ 24.]\n [ 28.]]]]\n[[ 12. 16.]\n [ 24. 28.]]\n" ] ], [ [ "## 1 filter (2,2,1,1) with padding:SAME\n![image](https://cloud.githubusercontent.com/assets/901975/24833381/fd01869e-1cf9-11e7-9d59-df08c7c6e5c4.png)", "_____no_output_____" ] ], [ [ "# print(\"imag:\\n\", image)\nprint(\"image.shape\", image.shape)\n\nweight = tf.constant([[[[1.]],[[1.]]],\n [[[1.]],[[1.]]]])\nprint(\"weight.shape\", weight.shape)\nconv2d = tf.nn.conv2d(image, weight, strides=[1, 1, 1, 1], padding='SAME') # padding을 준 경우 (0 padding) => convolution 전과 후의 shape이 같음\nconv2d_img = conv2d.eval()\nprint(\"conv2d_img.shape\", conv2d_img.shape) # 3x3x1\nconv2d_img = np.swapaxes(conv2d_img, 0, 3)\nfor i, one_img in enumerate(conv2d_img):\n print(one_img.reshape(3,3))\n plt.subplot(1,2,i+1), plt.imshow(one_img.reshape(3,3), cmap='gray')", "image.shape (1, 3, 3, 1)\nweight.shape (2, 2, 1, 1)\nconv2d_img.shape (1, 3, 3, 1)\n[[ 12. 16. 9.]\n [ 24. 28. 15.]\n [ 15. 17. 9.]]\n" ] ], [ [ "## 3 filters (2,2,1,3)", "_____no_output_____" ] ], [ [ "# print(\"imag:\\n\", image)\nprint(\"image.shape\", image.shape)\n\nweight = tf.constant([[[[1.,10.,-1.]],[[1.,10.,-1.]]],\n [[[1.,10.,-1.]],[[1.,10.,-1.]]]])\nprint(\"weight.shape\", weight.shape) # 2x2의 weight을 3개 사용(filter가 3개)\nconv2d = tf.nn.conv2d(image, weight, strides=[1, 1, 1, 1], padding='SAME') # 1 stride, zero padding 사용\nconv2d_img = conv2d.eval()\nprint(\"conv2d_img.shape\", conv2d_img.shape) # 3x3x3\nconv2d_img = np.swapaxes(conv2d_img, 0, 3)\nfor i, one_img in enumerate(conv2d_img):\n print(one_img.reshape(3,3))\n plt.subplot(1,3,i+1), plt.imshow(one_img.reshape(3,3), cmap='gray')", "image.shape (1, 3, 3, 1)\nweight.shape (2, 2, 1, 3)\nconv2d_img.shape (1, 3, 3, 3)\n[[ 12. 16. 9.]\n [ 24. 28. 15.]\n [ 15. 17. 9.]]\n[[ 120. 160. 90.]\n [ 240. 280. 150.]\n [ 150. 170. 
90.]]\n[[-12. -16. -9.]\n [-24. -28. -15.]\n [-15. -17. -9.]]\n" ] ], [ [ "## MAX POOLING\n![image](https://cloud.githubusercontent.com/assets/901975/23337676/bd154da2-fc30-11e6-888c-d86bc2206066.png)\n\n![image](https://cloud.githubusercontent.com/assets/901975/23340355/a4bd3c08-fc6f-11e6-8a99-1e3bbbe86733.png)\n", "_____no_output_____" ] ], [ [ "image = np.array([[[[4],[3]],\n [[2],[1]]]], dtype=np.float32)\npool = tf.nn.max_pool(image, ksize=[1, 2, 2, 1],\n strides=[1, 1, 1, 1], padding='VALID')\nprint(pool.shape)\nprint(pool.eval())", "(1, 1, 1, 1)\n[[[[ 4.]]]]\n" ] ], [ [ "## SAME: Zero paddings\n\n![image](https://cloud.githubusercontent.com/assets/901975/23340337/71b27652-fc6f-11e6-96ef-760998755f77.png)", "_____no_output_____" ] ], [ [ "image = np.array([[[[4],[3]],\n [[2],[1]]]], dtype=np.float32)\npool = tf.nn.max_pool(image, ksize=[1, 2, 2, 1],\n strides=[1, 1, 1, 1], padding='SAME')\nprint(pool.shape)\nprint(pool.eval())", "(1, 2, 2, 1)\n[[[[ 4.]\n [ 3.]]\n\n [[ 2.]\n [ 1.]]]]\n" ] ], [ [ "### MNIST data로 간단한 CNN 구현", "_____no_output_____" ] ], [ [ "from tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n# Check out https://www.tensorflow.org/get_started/mnist/beginners for\n# more information about the mnist dataset", "WARNING:tensorflow:From <ipython-input-8-c19fd52a49e8>:2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\nWARNING:tensorflow:From C:\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease write your own downloading logic.\nWARNING:tensorflow:From C:\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting MNIST_data/train-images-idx3-ubyte.gz\nWARNING:tensorflow:From C:\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting MNIST_data/train-labels-idx1-ubyte.gz\nWARNING:tensorflow:From C:\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.one_hot on tensors.\nExtracting MNIST_data/t10k-images-idx3-ubyte.gz\nExtracting MNIST_data/t10k-labels-idx1-ubyte.gz\nWARNING:tensorflow:From C:\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\n" ], [ "img = 
mnist.train.images[0].reshape(28,28)\nplt.imshow(img, cmap='gray')", "_____no_output_____" ], [ "# 1st convolution\nsess = tf.InteractiveSession()\n\nimg = img.reshape(-1,28,28,1)\nW1 = tf.Variable(tf.random_normal([3, 3, 1, 5], stddev=0.01)) # use five 3x3 filters\nconv2d = tf.nn.conv2d(img, W1, strides=[1, 2, 2, 1], padding='SAME') # 2 stride, zero padding\nprint(conv2d)\nsess.run(tf.global_variables_initializer())\nconv2d_img = conv2d.eval()\nconv2d_img = np.swapaxes(conv2d_img, 0, 3)\nfor i, one_img in enumerate(conv2d_img):\n plt.subplot(1,5,i+1), plt.imshow(one_img.reshape(14,14), cmap='gray')", "C:\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1645: UserWarning: An interactive session is already active. This can cause out-of-memory errors in some cases. You must explicitly call `InteractiveSession.close()` to release resources held by the other session(s).\n warnings.warn('An interactive session is already active. This can '\n" ], [ "# pooling\npool = tf.nn.max_pool(conv2d, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # 2 stride, zero padding, apply max pooling with a 2x2 window\nprint(pool)\nsess.run(tf.global_variables_initializer())\npool_img = pool.eval()\npool_img = np.swapaxes(pool_img, 0, 3)\nfor i, one_img in enumerate(pool_img):\n plt.subplot(1,5,i+1), plt.imshow(one_img.reshape(7, 7), cmap='gray')", "Tensor(\"MaxPool_2:0\", shape=(1, 7, 7, 5), dtype=float32)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4af86f5a08a24951cad6e3d6ffe4b21081acc3f1
21,358
ipynb
Jupyter Notebook
notebooks/hypersearch.ipynb
ZhouLigao/PyTorchText
66cc1602ef61907030228b97726174462070889d
[ "MIT" ]
1,136
2017-08-16T08:49:04.000Z
2022-03-25T09:49:41.000Z
notebooks/hypersearch.ipynb
xiaolinpeter/PyTorchText
dcadcba44af0c3731b82d9db1c77f2968d4feac0
[ "MIT" ]
13
2017-08-29T10:13:03.000Z
2019-12-19T05:37:17.000Z
notebooks/hypersearch.ipynb
xiaolinpeter/PyTorchText
dcadcba44af0c3731b82d9db1c77f2968d4feac0
[ "MIT" ]
383
2017-08-16T08:45:55.000Z
2022-03-30T06:34:09.000Z
91.273504
620
0.755033
[ [ [ "import json\nimport os\nimport sys\nsys.path.append('../')\nfrom utils import get_score\nimport torch as t \nimport numpy as np\nfrom glob import glob\nimport pickle", "_____no_output_____" ], [ "from glob import glob\ndata_root=\"/data_ssd/zhihu/result/search_stack/\"\nfiles_path=glob(data_root+\"*val.pth\")\nfiles_path.sort()\nprint len(files_path)", "27\n" ], [ "files_weight1=[]\ninitial_weight=[]\nfor file in files_path:\n if 'weight5' not in file:\n files_weight1.append(file)\n if 'MultiModel' in file:\n initial_weight.append(5)\n else:\n initial_weight.append(1)\nprint len(files_weight1)", "22\n" ], [ "for f,w in zip(files_weight1,initial_weight):\n print f,w", "/data_ssd/zhihu/result/search_stack/CNNText_tmp0.4024_char_val.pth 1\n/data_ssd/zhihu/result/search_stack/CNNText_tmp0.4109_word_val.pth 1\n/data_ssd/zhihu/result/search_stack/DeepText0.4103_word_val.pth 1\n/data_ssd/zhihu/result/search_stack/DeepText_word_0.410051_aug_val.pth 1\n/data_ssd/zhihu/result/search_stack/FastText3_word_weight1_0.40853_val.pth 1\n/data_ssd/zhihu/result/search_stack/LSTMText0.4031_char_val.pth 1\n/data_ssd/zhihu/result/search_stack/LSTMText0.4119_word_val.pth 1\n/data_ssd/zhihu/result/search_stack/LSTMText0.41368_aug_word_val.pth 1\n/data_ssd/zhihu/result/search_stack/MultiCNNTextBNDeep_aug_char_0.38738_val.pth 1\n/data_ssd/zhihu/result/search_stack/MultiModel2_0.42560_val.pth 5\n/data_ssd/zhihu/result/search_stack/MultiModel2w2c_0.4213_val.pth 5\n/data_ssd/zhihu/result/search_stack/MultiModel_0.4171859_val.pth 5\n/data_ssd/zhihu/result/search_stack/MultiModel_0.41987_word_val.pth 5\n/data_ssd/zhihu/result/search_stack/MultiModel_0.42169_val.pth 5\n/data_ssd/zhihu/result/search_stack/MultiModelall_0.4235_aug_val.pth 5\n/data_ssd/zhihu/result/search_stack/MultiModelallfast_0.41652_aug_val.pth 5\n/data_ssd/zhihu/result/search_stack/MultiModelallfast_0.419088_aug_val.pth 5\n/data_ssd/zhihu/result/search_stack/RCNN0.39854_aug_char_val.pth 1\n/data_ssd/zhihu/result/search_stack/RCNN0.41344_aug_word_val.pth 1\n/data_ssd/zhihu/result/search_stack/RCNN_0.4037_char_val.pth 1\n/data_ssd/zhihu/result/search_stack/RCNN_0.4115_word_val.pth 1\n/data_ssd/zhihu/result/search_stack/inception0.41254_aug_word_val.pth 1\n" ], [ "model_num=10\nprobs=[t.load(r).float() for r in files_weight1[:model_num]]\ntest_data_path='/home/a/code/pytorch/zhihu/ddd/val.npz'\nindex2qid = np.load(test_data_path)['index2qid'].item()\nlabel_path=\"/home/a/code/pytorch/zhihu/ddd/labels.json\"\nwith open(label_path) as f: \n labels_info = json.load(f)\nqid2label = labels_info['d']\ntrue_labels = [qid2label[index2qid[2999967-200000+ii]] for ii in range(200000)]\ndel labels_info\ndel qid2label\ndel index2qid", "_____no_output_____" ], [ "def target(args):\n r=0\n for r_,k_ in enumerate(args):\n if r_<model_num:\n r +=k_*probs[r_]\n else:\n tmp=t.load(files_path[r_]).cuda().float()\n r=r+k_*tmp.cpu()\n result = r.topk(5,1)[1]\n predict_label_and_marked_label_list = [[_1,_2] for _1,_2 in zip(result,true_labels)]\n score,_,_,_ = get_score(predict_label_and_marked_label_list)\n print (args,score,_)#list_space = [hp.uniform('a',0,1),hp.uniform('b',0,1)]\n return -score", "_____no_output_____" ], [ "max_evals=100\nfrom hyperopt import hp, fmin, rand, tpe, space_eval\nlist_space = [hp.normal('a'+str(rr),1,0.2) for rr in range(len(files_path))]\nfrom hyperopt import Trials\ntrials_to_keep=Trials()\nbest = fmin(target,list_space,algo=tpe.suggest,max_evals=max_evals, trials = trials_to_keep)\noutput = open('trials_to_keep__weight1_model'+'.pkl', 
'wb')\npickle.dump(trials_to_keep, output)", "((0.5767518711990203, 1.0535449863681772, 1.387628321556626, 0.8174540261504482, 0.46505220685010107, 0.84297799521111, 0.6365171155277725, 1.1619862524737483, 1.1629124711170251, 0.794240785899519, 1.3500216705996364, 0.7161582546569272, 0.8632489643123853, 1.0658517932708935, 0.7141997070591544, 1.1636288618440052, 0.8801467587212355, 0.9337439124613484, 0.7588341161043034, 1.0673229231836008, 0.9405260442990232, 1.0030074530136603, 1.233610926900131, 1.072457858205536, 0.8331957511909095, 0.8314988827650005, 1.3867110853019984), 0.4280465816350192, [118235, 66455, 43228, 29651, 21732])\n((0.9564174440498158, 1.0489559058574427, 0.7518749608094916, 1.0320464666633937, 0.9422874525955014, 1.0165090602024747, 1.0795518519765972, 0.8555932208868637, 1.2167276710281532, 1.419475183806352, 0.8838074782881737, 0.9468634334505386, 0.9691711235479938, 1.2110952652578792, 0.7660644586038812, 1.0678274437017292, 1.2004882120913785, 0.5873016262887354, 0.9972641287871216, 1.232087128048666, 0.5932669739562024, 0.8098430856930219, 1.2051749413279973, 0.6420124200617219, 1.0545220992911364, 1.090085158125075, 1.0434103800999925), 0.42835162928900966, [118317, 66486, 43171, 29730, 21802])\n((1.0396513912630647, 0.8152262036738671, 1.0151365556914107, 1.2222465705150265, 0.9237941653792502, 0.9145421492980752, 0.6709491327992462, 1.2240849176199395, 1.0882775208707203, 0.869310447972495, 0.8248709664979141, 1.0551567188255369, 0.9795312841684296, 1.1384864120379838, 0.6615855975033067, 0.9041668420703758, 0.9409216973018301, 1.1963661170777584, 1.0462748877740031, 1.0602561544349756, 1.219934932274373, 1.1146465247368262, 1.1756708688103288, 0.8091468678973395, 0.9344045282007267, 0.965189294016293, 0.7383995097742351), 0.4284971950755775, [118304, 66546, 43253, 29710, 21796])\n((1.0436207282403736, 1.1177847631655402, 0.7334644864020412, 0.5945583990768201, 1.1372775086544988, 0.9632482066449859, 1.185226546600236, 0.8547364731457139, 1.30567736183814, 1.0110489855369813, 1.156395824385718, 1.1807329353099296, 1.082701789584947, 0.9506669823106011, 0.8869190234648182, 1.3647780699602885, 0.9039268562212461, 0.9460467404143632, 0.49440395906757384, 1.2964158337990341, 0.9153578125760934, 0.7129527994158111, 1.1121794477398834, 0.7173885303546791, 0.8230543892941533, 1.060142871834187, 0.8274794389621235), 0.4284150098949782, [118296, 66537, 43173, 29779, 21768])\n((0.5603631370737425, 0.8877480056408759, 0.6558796529976378, 0.9886597888439004, 0.7477189610940553, 0.9046442608037218, 1.1992797282334795, 0.9588351253009877, 0.8294738023991932, 0.8437792101171209, 0.726724382887004, 1.2479942869290668, 0.683801073639847, 1.2098993886177676, 0.880619627297341, 0.7743649769768184, 1.096963809518645, 1.0204426915730789, 0.6681841600547878, 0.8049403488765382, 0.7980888274225503, 0.9599736009871855, 0.9897139328258182, 1.00187041793589, 0.9055640200005174, 1.4299287953224522, 0.9083399053181029), 0.4280640672955356, [118288, 66488, 43152, 29569, 21804])\n((0.8181619176741017, 0.9603357616332574, 0.8529903017325247, 0.966414166751806, 1.1477376869155627, 1.1243840294964045, 1.0927341716877685, 0.7462533057566294, 1.070288021044587, 0.9613682629977099, 1.3379171222667483, 0.9797534573713241, 1.0736052052577671, 1.2015391766065553, 1.1999794167439248, 1.203713309438228, 0.6152656113672589, 1.1720092968894404, 0.9968549106073766, 0.7980843446738006, 1.0951280389429952, 0.8181570938154149, 1.002099384294379, 1.182470659184026, 1.2146632196270613, 0.524237921153635, 
1.024643251448422), 0.4281804320010674, [118283, 66440, 43183, 29726, 21759])\n((1.0960666582614125, 1.0579464482932797, 0.7834675994465389, 0.7903979165219251, 1.0413698326079688, 0.9456771022668258, 1.2255402916263032, 0.981313398006131, 0.852836243313798, 1.0756165440565637, 0.9403243119778307, 1.1503181341281592, 1.3105311830651543, 0.9348305757304137, 1.145807479022634, 1.1539449084733495, 0.9299528722380997, 0.6977885489944271, 1.1342172639449408, 1.389700852172688, 1.125297312045533, 0.9052773530037408, 0.9275938410331107, 1.2350001740419887, 0.8817933545128104, 0.8747652188780648, 1.2708864419865462), 0.4281017406924649, [118295, 66551, 43147, 29695, 21630])\n((1.0909774724122734, 1.1632946861879934, 0.8330937007459174, 1.2141352874659104, 1.3750239953251706, 0.8589485022199985, 1.0099166146388197, 0.9883074696777358, 1.0022839542828343, 0.776102193528407, 1.3413938899307376, 0.5692934122989379, 0.7176046834721495, 1.191582367245624, 0.7117175367466215, 0.9969798276049461, 0.672524361937618, 1.1569266954256807, 0.8912431960524776, 1.1350824964340147, 0.9726674348820902, 0.9947130191981621, 1.1323252165215676, 1.2500124809804412, 0.8858636818693135, 0.9011142886592992, 0.8718946284173767), 0.428951938455713, [118390, 66534, 43289, 29789, 21924])\n((0.9361006325341297, 0.7177671821986886, 1.1179556311823295, 0.861709780027206, 0.9429927555188609, 0.8821795069774191, 0.8410789772892293, 1.155421410686322, 0.8514129481784698, 1.192912860121629, 1.0636717336781352, 0.9522577242903932, 0.893136774708443, 1.2393469304819282, 1.3127825950348493, 0.9358325901097644, 1.2831212486148287, 1.1345971754491724, 0.8577949155879633, 0.9371835106885185, 0.6525743122175998, 0.9736165404423865, 1.4273092586434721, 0.9429513420478964, 0.7745209654177264, 1.0321206862261756, 1.2045688075773624), 0.42829769905968956, [118310, 66550, 43140, 29710, 21752])\n((1.0375872975886782, 1.1266258135132174, 1.257528811908275, 0.7126774977636814, 1.0350254091244158, 1.1450919600018103, 1.3314230648304846, 1.720986718975292, 0.5611567163165201, 1.1446875287185685, 1.046818745735875, 1.095238785121044, 1.0865878836682188, 1.090712317508523, 0.849033186686565, 0.8394915422381706, 1.1063798797082427, 1.0891893199379992, 1.0443234852412517, 1.3244008469618627, 0.9265194841669582, 0.8597205704217158, 0.7752412053380625, 0.9093594787023889, 0.7430527628256622, 1.11777911727874, 0.5899281435946844), 0.4287229616371521, [118322, 66623, 43187, 29770, 21866])\n((1.0742605955227833, 1.0682419526997646, 0.727248995316519, 0.7716229587350121, 1.1041876521277691, 0.8145663783524189, 0.9753185524861384, 1.2894101806832785, 1.1082271222350224, 1.277231681915051, 0.7337335528480802, 0.837210928876828, 1.0336346098160607, 1.1998204854833125, 1.270638179818906, 1.007861567287892, 0.9060395704204294, 1.0911707062348353, 1.008795433431396, 0.9558740498522629, 0.9587532819963107, 1.0942540727296848, 0.6038289863254356, 0.839704420058772, 1.1006514541954684, 1.14227088756331, 0.7270124183449314), 0.4286704252183209, [118320, 66563, 43291, 29682, 21876])\n((0.921355616535115, 1.2635944032433883, 1.163549280639701, 1.1677987169774517, 1.279552847964594, 0.9709836238348459, 1.1374443724855785, 1.2567197074516967, 0.7707326898951614, 0.8490424278697067, 0.576091026633405, 1.2915227921198216, 0.9194392652640613, 0.5120469491134019, 1.0505450689751525, 1.0803489437604752, 0.4217651963615686, 0.9931542979917196, 1.0521120733408396, 1.0967711532975704, 1.0311241977368202, 1.061097279261768, 0.916071883378249, 1.1078959807136524, 
1.0761781897514162, 1.1916623122851995, 1.038368500785104), 0.4281014997593507, [118232, 66555, 43140, 29584, 21827])\n((0.8363859998153391, 0.968392492715505, 1.0631838754618983, 0.918861158275894, 1.017703394393451, 1.0764602173391697, 0.7210739477309421, 0.8485620548570673, 1.1509960691464665, 0.8722613732556246, 1.0230183780428093, 0.8109473776435979, 0.5205968403323541, 1.1966119229423686, 1.1782735765520647, 0.8215029549887057, 0.9053541058426728, 0.8117809688278214, 0.9177601828703651, 0.5734335667743669, 0.796373298455189, 0.969474300743803, 1.0755821421164158, 1.0439930428906756, 1.2383220222636562, 1.099832276043002, 1.2392361385601287), 0.4281050659660867, [118273, 66447, 43169, 29620, 21830])\n((1.0296686961406414, 1.2782244853907003, 1.2501647640675655, 1.2403838469104085, 1.0780295046650372, 1.0587047252750859, 1.2458020016586324, 1.2789441028099233, 1.0804801853394084, 1.1624201004390586, 1.2904461028210137, 1.003599590510022, 1.1945289180637169, 0.8397214333142977, 1.0264040833492132, 0.7386697396726365, 1.308397507493346, 1.0017690927125833, 0.9539847918439729, 0.6697360003518279, 0.8950169462219368, 1.126775843327582, 0.9759219393743628, 0.8738735310325617, 0.8057288711494516, 1.0054094595440621, 0.44890014536963785), 0.4288343253436665, [118328, 66567, 43276, 29777, 21904])\n((0.8521361193934514, 1.1037378158469435, 0.8728092728092407, 0.8673405782797443, 1.1160843025850589, 0.973274034857726, 0.9493745273690213, 0.8934372038391502, 1.1363568636226888, 1.0686341742329142, 0.9296050024455486, 0.9410160038361773, 0.7674216786545758, 0.8418684298678523, 0.9141477350235993, 1.0280465534195466, 1.1163021413227046, 1.4259901790437957, 1.3059288757704905, 1.0041475141166456, 1.0624299808147348, 1.1976957811419418, 1.0397239597130472, 1.158171759202412, 0.9450307976884158, 1.1545976308902526, 0.7346935621252547), 0.42862579733103073, [118360, 66545, 43298, 29707, 21778])\n((1.0049306680168337, 1.125106791722849, 1.2659471196498153, 1.1244502188839252, 1.0104630661693734, 1.053844199352109, 1.0344022756855036, 1.022972521164066, 1.1048288075861075, 0.833568825038338, 1.332337527265459, 1.3312288015392817, 1.1112902678961725, 0.9936867963378746, 1.3152705126000823, 0.7967982127653749, 0.9948443521348109, 1.262346599887762, 0.809348771595009, 0.7653213088403509, 1.050741951969147, 0.6667091702743495, 1.2695060878119722, 0.8687059429446946, 0.7652879831172674, 0.6051083028637301, 1.0235813580449669), 0.4281648492277763, [118237, 66495, 43141, 29742, 21773])\n((0.9439444089496458, 1.2643502738479635, 1.0981549682783176, 1.1535473276857933, 0.8590885431465769, 0.803822120317691, 1.2364706007873727, 0.999995817026468, 0.7616972718129256, 1.0647190703593277, 1.031256982080224, 0.7003975968936051, 1.4249272027399031, 0.9661740669923161, 1.0784221422100273, 0.7222909036907985, 0.9759302968975554, 0.9623363581963587, 0.8438962956706538, 0.9799820756758097, 1.0631232064584024, 1.1109672420454957, 0.8180911091130315, 0.3740752190642169, 0.6369347813943915, 1.1972483002232597, 1.0087215906713636), 0.42866310837214805, [118302, 66575, 43170, 29762, 21926])\n((1.1448566710143657, 1.1344063780038023, 1.0438750895405038, 1.0471380245152693, 1.3959556951404437, 0.8114342366840643, 0.8227616315983749, 1.228760836327871, 0.7601976432848806, 0.9140147177692148, 1.1382896942701592, 1.1552555964647135, 0.8146841628603113, 0.934080701839752, 0.8406695574084118, 0.6392142015240838, 1.1112428207739713, 1.1454791600790009, 0.7422563699227398, 1.306223598901489, 1.0056352843819367, 1.0430851566572166, 
0.956723270825816, 1.14072237200691, 0.876572010416691, 1.0420998013009128, 1.200946583651936), 0.42822724488062136, [118258, 66547, 43165, 29643, 21811])\n((0.9682900034089849, 0.8944788565242395, 1.1424695733472774, 1.2097622579405274, 1.1838163654766434, 0.7712945895802327, 0.9987356706877797, 1.0459832860702756, 1.169801749407878, 1.3835554994368156, 0.9262485000863981, 1.0239760837079603, 1.1642004660326255, 0.7171660380110471, 1.2198435336331255, 0.9914586253312075, 1.0579685211673633, 1.165435321457424, 1.139935380380353, 1.1334999024891825, 0.8217591931057884, 0.8399501402618965, 0.9128560075182771, 0.831783823981961, 0.9368891285033222, 1.0695093106544433, 0.9907731908584041), 0.4283486770466729, [118284, 66513, 43187, 29695, 21831])\n((0.7142889794136915, 1.1857323261893113, 0.7583448946957471, 0.8586304369265904, 1.0173651383657554, 1.1475195360898676, 1.1430776079259468, 0.7865028816271515, 1.1976094401221742, 0.833036936596818, 1.1647559554604765, 0.9057551056279207, 0.8774045640984214, 1.316259711656861, 0.8826876196373359, 0.8754813876423517, 0.7550863115958579, 0.9839947093718612, 1.1694089035085011, 0.8313909263089377, 0.8040666063950112, 1.138072121288114, 0.9633824760139279, 1.1394564569892485, 0.9273606444547856, 1.1701525924936067, 0.9874737029270793), 0.4284862218442231, [118319, 66515, 43221, 29741, 21805])\n((1.133586351724914, 1.50553706140452, 1.5929445904941075, 1.6496567173923746, 1.5461892161064401, 0.579468566919412, 1.4991573358634755, 1.409549875309423, 1.0049633153704678, 0.5341050477510588, 1.4130295160266297, 0.3245984799285148, 1.2110830864417623, 1.5912282713299675, 0.25685591462716617, 0.4525979977623273, 1.450772134823792, 1.2576510831904293, 0.9241870903365496, 0.6190777133510634, 0.8729928252882753, 1.4687202014309415, 1.1446305290031749, 1.933303416480393, 0.7145319840105882, 0.9722557373354799, 0.09723813892992061), 0.4292862660939269, [118306, 66699, 43205, 29907, 22069])\n((1.2265553712293205, 1.6853060752500384, 1.7389560687469239, 1.8883074298584193, 1.8901796356028546, 0.3986334535773056, 1.7210304536348418, 1.4316303457558086, 0.9918747729369464, 0.07991582970906486, 1.915426191300876, 0.028778640687859847, 0.6338606696712282, 1.8613689024334066, 0.004502871431663447, 0.26396412744210535, 1.6652790614683415, 1.3217547537874714, 0.8911080211044674, 0.41949152360499065, 0.8588674070125663, 1.8227498540466323, 1.1368839156627115, 2.1203560639832935, 0.3512101663565764, 0.9266671560857133, -0.10212785221932139), 0.42909729518875406, [118317, 66598, 43100, 29958, 22083])\n((1.2957942777340852, 1.8956587348660159, 1.8797880806787033, 1.927135827524312, 2.4082382955373647, 0.18247199406812306, 1.9564552728098694, 1.4400604208311971, 0.9892161857392544, -0.24040183615693123, 2.3935525670702886, -0.12014519550697775, 0.501679629581739, 2.3401681585549987, -0.1538140232877496, -0.12678502715580586, 1.8750306814642714, 1.3470638798930443, 0.9252222081125101, -0.0798725030783699, 0.8584864356120623, 1.85936180701325, 1.1488598487395656, 2.4589601239251224, 0.02108568849425896, 0.9429424379970861, -0.28158514187764133), 0.4282534603788571, [118303, 66475, 42912, 29667, 22095])\n((1.2306384742422627, 1.5909741893344214, 1.6944812993290441, 1.662285856895472, 1.895188053580105, 0.4725550723728165, 1.645441796088941, 1.4472204835494829, 0.9931597514466172, 0.2755808147854392, 1.8760830386866907, 0.23087505325727006, 0.3218643496752165, 1.7900053810851841, 0.17994814506279125, 0.2760878875655698, 1.584523623473219, 1.6695897765751768, 0.8829682964258307, 
0.3011883538184936, 0.7483510176453497, 1.5476062911215882, 1.2890598820416812, 1.8538176149211465, 0.2846176072545592, 0.9480514148776051, 0.06505422195345575), 0.42937608222769774, [118332, 66628, 43147, 29937, 22213])\n((1.2033203118537514, 1.5634683208836848, 1.6549648629083618, 1.65408620526703, 1.6596764430486408, 0.5116633970668553, 1.565428546007525, 1.447746336491962, 0.967363601061701, 0.4745222906112492, 1.729370608593019, 0.2603776176235985, -0.09563969616839424, 1.623186280320116, 0.21816912492833568, 0.3279947825081814, 1.6094399538232784, 2.019239373147481, 0.8595801929413844, 0.23634444661585755, 0.7233506864788657, 1.5415140657754784, 1.3012022620536692, 1.830739831579588, 0.2966987618531856, 0.9806570602058069, 0.07402629803182295), 0.42944389111657866, [118323, 66661, 43174, 29969, 22178])\n" ], [ "best", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4af8801b549545b5e56b140390641ba0457deef3
167,884
ipynb
Jupyter Notebook
notebooks/appa-real-age-baseline-model-1.0.ipynb
DucTranVan/face-attributes
6ec6222362ab831d384a253ad1f562fdd24d7caf
[ "MIT" ]
null
null
null
notebooks/appa-real-age-baseline-model-1.0.ipynb
DucTranVan/face-attributes
6ec6222362ab831d384a253ad1f562fdd24d7caf
[ "MIT" ]
null
null
null
notebooks/appa-real-age-baseline-model-1.0.ipynb
DucTranVan/face-attributes
6ec6222362ab831d384a253ad1f562fdd24d7caf
[ "MIT" ]
null
null
null
235.792135
63,756
0.915972
[ [ [ "# Experiment set up\n1. Create dataset: sequence of preprocessed examples ready to feed to the neural net \n2. Create dataloader: define how the dataset is loaded into the neural net (batch size, order, computation optimization ...)\n3. Create model : a stack of matrix operations to transform the input tensor into the output tensor\n4. Training:\n + Forward \n + Calculate loss and metrics for the batch\n + Backward\n ", "_____no_output_____" ], [ "# Import necessary packages", "_____no_output_____" ] ], [ [ "import os\nimport glob\nimport sys\nimport random\n\nimport matplotlib.pylab as plt\nfrom PIL import Image, ImageDraw\n\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms.functional as TF\n\nimport numpy as np\n\nfrom sklearn.model_selection import ShuffleSplit\n\ntorch.manual_seed(0)\nnp.random.seed(0)\nrandom.seed(0)\n\n%matplotlib inline\n\nsys.path.insert(0, '..')\nfrom src.models.utils import FaceDataset", "_____no_output_____" ] ], [ [ "# Create a transformer", "_____no_output_____" ] ], [ [ "\ndef resize_img_label(image,label,target_size=(256,256)):\n w_orig,h_orig = image.size \n w_target,h_target = target_size\n \n # resize image and label\n image_new = TF.resize(image,target_size)\n return image_new,label\n\ndef transformer(image, label, params):\n image,label = resize_img_label(image,label,params[\"target_size\"]) \n image = TF.to_tensor(image)\n return image, label\n\n", "_____no_output_____" ] ], [ [ "# Create Data loader", "_____no_output_____" ] ], [ [ "trans_params_train = {\n \"target_size\" : (112, 112),\n}\n\n\ntrans_params_val={\n \"target_size\" : (112, 112), \n}\n\npath2data = \"/home/Data/appa-real/processed/\"\n\n", "_____no_output_____" ], [ "# create data set\ntrain_ds = FaceDataset(path2data + \"train.csv\", transformer, trans_params_train)\nval_ds = FaceDataset(path2data + \"valid.csv\", transformer, trans_params_val)\n\nprint(len(train_ds))\nprint(len(val_ds))", "3995\n1500\n" ], [ "import matplotlib.pyplot as plt\ndef show(img,label=None):\n npimg = img.numpy().transpose((1,2,0))\n plt.imshow(npimg)\n if label is not None:\n label = label.view(-1,2)\n for point in label:\n x,y= point\n plt.plot(x,y,'b+',markersize=10)", "_____no_output_____" ], [ "\nplt.figure(figsize=(10,10))\nfor img,label in train_ds:\n show(img,label)\n break", "_____no_output_____" ], [ "plt.figure(figsize=(10,10))\nfor img,label in val_ds:\n show(img,label)\n break", "_____no_output_____" ], [ "from torch.utils.data import DataLoader\ntrain_dl = DataLoader(train_ds, batch_size = 32, shuffle=True)\nval_dl = DataLoader(val_ds, batch_size = 256, shuffle=False)", "_____no_output_____" ], [ "for img_b, label_b in train_dl:\n print(img_b.shape,img_b.dtype)\n print(label_b.shape)\n break", "torch.Size([32, 3, 112, 112]) torch.float32\ntorch.Size([32, 2])\n" ], [ "for img, label in val_dl:\n print(label.shape)\n break", "torch.Size([256, 2])\n" ] ], [ [ "\n# Create Model", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n", "_____no_output_____" ], [ "class Net(nn.Module):\n def __init__(self, params):\n super(Net, self).__init__()\n \n def forward(self, x):\n return x", "_____no_output_____" ], [ "def __init__(self, params):\n super(Net, self).__init__()\n\n C_in,H_in,W_in=params[\"input_shape\"]\n init_f=params[\"initial_filters\"] \n num_outputs=params[\"num_outputs\"] \n\n self.conv1 = nn.Conv2d(C_in, init_f, kernel_size=3, stride=2, padding=1)\n self.conv2 = nn.Conv2d(init_f+C_in, 2*init_f, kernel_size=3, stride=1, padding=1)\n self.conv3 = 
nn.Conv2d(3*init_f+C_in, 4*init_f, kernel_size=3, padding=1)\n self.conv4 = nn.Conv2d(7*init_f+C_in, 8*init_f, kernel_size=3, padding=1)\n self.conv5 = nn.Conv2d(15*init_f+C_in, 16*init_f, kernel_size=3, padding=1)\n self.fc1 = nn.Linear(16*init_f, num_outputs)", "_____no_output_____" ], [ "def forward(self, x):\n identity=F.avg_pool2d(x,4,4)\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = torch.cat((x, identity), dim=1)\n\n identity=F.avg_pool2d(x,2,2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = torch.cat((x, identity), dim=1)\n\n identity=F.avg_pool2d(x,2,2)\n x = F.relu(self.conv3(x))\n x = F.max_pool2d(x, 2, 2)\n x = torch.cat((x, identity), dim=1)\n \n identity=F.avg_pool2d(x,2,2)\n x = F.relu(self.conv4(x))\n x = F.max_pool2d(x, 2, 2)\n x = torch.cat((x, identity), dim=1)\n\n x = F.relu(self.conv5(x))\n\n x=F.adaptive_avg_pool2d(x,1)\n x = x.reshape(x.size(0), -1)\n\n x = self.fc1(x)\n return x\n", "_____no_output_____" ], [ "Net.__init__= __init__\nNet.forward = forward\n\nparams_model={\n \"input_shape\": (3,112,112),\n \"initial_filters\": 64, \n \"num_outputs\": 1,\n }\n\nmodel = Net(params_model)\ndevice = torch.device(\"cuda\")\nmodel = model.to(device)", "_____no_output_____" ] ], [ [ "# Create optimizer", "_____no_output_____" ] ], [ [ "from torch import optim\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nopt = optim.Adam(model.parameters(), lr=1e-3)\nlr_scheduler = ReduceLROnPlateau(opt, mode='min',factor=0.5, patience=10,verbose=1)", "_____no_output_____" ] ], [ [ "# Training", "_____no_output_____" ] ], [ [ "\nfrom src.models import experiment\n\nperformance = experiment.Performance()\n\npath2models= \"../models/weights.pt\"\nparams = experiment.Prams(num_epochs=30, path2weights=path2models, device=device,optimizer=opt, lr_scheduler=lr_scheduler, sanity_check=False)\n\npipeline = experiment.Pipeline(model, train_dl, val_dl, performance, params)\nmodel, performance = pipeline.train_val()\n\n", "Epoch 0/29, current lr=0.001\nCopied best model weights!\ntrain loss: 355.666443\nval loss: 222.023209\n----------\nEpoch 1/29, current lr=0.001\ntrain loss: 224.171616\nval loss: 226.707413\n----------\nEpoch 2/29, current lr=0.001\nCopied best model weights!\ntrain loss: 218.197189\nval loss: 210.922318\n----------\nEpoch 3/29, current lr=0.001\ntrain loss: 217.412689\nval loss: 216.407242\n----------\nEpoch 4/29, current lr=0.001\ntrain loss: 210.356964\nval loss: 231.861145\n----------\nEpoch 5/29, current lr=0.001\nCopied best model weights!\ntrain loss: 206.775681\nval loss: 210.176773\n----------\nEpoch 6/29, current lr=0.001\nCopied best model weights!\ntrain loss: 202.496002\nval loss: 201.345398\n----------\nEpoch 7/29, current lr=0.001\ntrain loss: 196.161591\nval loss: 206.422577\n----------\nEpoch 8/29, current lr=0.001\ntrain loss: 189.900513\nval loss: 205.470001\n----------\nEpoch 9/29, current lr=0.001\nCopied best model weights!\ntrain loss: 190.349564\nval loss: 184.869812\n----------\nEpoch 10/29, current lr=0.001\ntrain loss: 183.799393\nval loss: 186.311417\n----------\nEpoch 11/29, current lr=0.001\ntrain loss: 176.098846\nval loss: 207.550705\n----------\nEpoch 12/29, current lr=0.001\ntrain loss: 166.744217\nval loss: 189.447708\n----------\nEpoch 13/29, current lr=0.001\nCopied best model weights!\ntrain loss: 166.770935\nval loss: 184.279037\n----------\nEpoch 14/29, current lr=0.001\nCopied best model weights!\ntrain loss: 155.724228\nval loss: 171.834335\n----------\nEpoch 15/29, current lr=0.001\ntrain loss: 
145.751556\nval loss: 172.904785\n----------\nEpoch 16/29, current lr=0.001\nCopied best model weights!\ntrain loss: 140.227264\nval loss: 162.894440\n----------\nEpoch 17/29, current lr=0.001\ntrain loss: 128.161179\nval loss: 172.378296\n----------\nEpoch 18/29, current lr=0.001\nCopied best model weights!\ntrain loss: 123.962883\nval loss: 157.747253\n----------\nEpoch 19/29, current lr=0.001\ntrain loss: 116.687950\nval loss: 163.850418\n----------\nEpoch 20/29, current lr=0.001\ntrain loss: 99.209335\nval loss: 166.619675\n----------\nEpoch 21/29, current lr=0.001\nCopied best model weights!\ntrain loss: 89.445580\nval loss: 156.399963\n----------\nEpoch 22/29, current lr=0.001\ntrain loss: 79.365463\nval loss: 178.156311\n----------\nEpoch 23/29, current lr=0.001\ntrain loss: 71.973213\nval loss: 170.491669\n----------\nEpoch 24/29, current lr=0.001\ntrain loss: 59.475723\nval loss: 164.639191\n----------\nEpoch 25/29, current lr=0.001\ntrain loss: 48.449894\nval loss: 159.491684\n----------\nEpoch 26/29, current lr=0.001\ntrain loss: 40.194500\nval loss: 159.965485\n----------\nEpoch 27/29, current lr=0.001\ntrain loss: 31.065962\nval loss: 163.129669\n----------\nEpoch 28/29, current lr=0.001\ntrain loss: 28.227829\nval loss: 162.355606\n----------\nEpoch 29/29, current lr=0.001\ntrain loss: 19.984631\nval loss: 161.138596\n----------\n" ], [ "loss_hist, metric_history = performance.loss_history, performance.metrics_history", "_____no_output_____" ], [ "# Train-Validation Progress\nnum_epochs= 30\n\n# plot loss progress\nplt.title(\"Train-Val Loss\")\nplt.plot(range(1,num_epochs+1),loss_hist[\"train\"],label=\"train\")\nplt.plot(range(1,num_epochs+1),loss_hist[\"val\"],label=\"val\")\nplt.ylabel(\"Loss\")\nplt.xlabel(\"Training Epochs\")\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "# plot accuracy progress\nplt.title(\"Val mae\")\nplt.plot(range(1,num_epochs+1),metric_history[\"val\"],label=\"val\")\nplt.plot(range(1,num_epochs+1),metric_history[\"train\"],label=\"train\")\nplt.ylabel(\"MAE\")\nplt.xlabel(\"Training Epochs\")\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "min(metric_history[\"val\"])", "_____no_output_____" ], [ "min(loss_hist[\"val\"])", "_____no_output_____" ], [ "min(loss_hist[\"train\"])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]